xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 51e235148a4becba94e824a44bd69687644a7f56)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
36  */
37 #define AAC_DRIVERNAME			"aacraid"
38 
39 #include "opt_aacraid.h"
40 
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/proc.h>
48 #include <sys/sysctl.h>
49 #include <sys/sysent.h>
50 #include <sys/poll.h>
51 #include <sys/ioccom.h>
52 
53 #include <sys/bus.h>
54 #include <sys/conf.h>
55 #include <sys/signalvar.h>
56 #include <sys/time.h>
57 #include <sys/eventhandler.h>
58 #include <sys/rman.h>
59 
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 
66 #include <dev/aacraid/aacraid_reg.h>
67 #include <sys/aac_ioctl.h>
68 #include <dev/aacraid/aacraid_debug.h>
69 #include <dev/aacraid/aacraid_var.h>
70 #include <dev/aacraid/aacraid_endian.h>
71 
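/*
 * Compatibility fallback for build environments where <sys/bus.h> does not
 * define FILTER_HANDLED; the value matches the standard definition.
 */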
72 #ifndef FILTER_HANDLED
73 #define FILTER_HANDLED	0x02
74 #endif
75 
76 static void	aac_add_container(struct aac_softc *sc,
77 				  struct aac_mntinforesp *mir, int f,
78 				  u_int32_t uid);
79 static void	aac_get_bus_info(struct aac_softc *sc);
80 static void	aac_container_bus(struct aac_softc *sc);
81 static void	aac_daemon(void *arg);
82 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
83 							  int pages, int nseg, int nseg_new);
84 
85 /* Command Processing */
86 static void	aac_timeout(struct aac_softc *sc);
87 static void	aac_command_thread(struct aac_softc *sc);
88 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
89 				     u_int32_t xferstate, struct aac_fib *fib,
90 				     u_int16_t datasize);
91 /* Command Buffer Management */
92 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
93 				       int nseg, int error);
94 static int	aac_alloc_commands(struct aac_softc *sc);
95 static void	aac_free_commands(struct aac_softc *sc);
96 static void	aac_unmap_command(struct aac_command *cm);
97 
98 /* Hardware Interface */
99 static int	aac_alloc(struct aac_softc *sc);
100 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
101 			       int error);
102 static int	aac_check_firmware(struct aac_softc *sc);
103 static void	aac_define_int_mode(struct aac_softc *sc);
104 static int	aac_init(struct aac_softc *sc);
105 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
106 static int	aac_setup_intr(struct aac_softc *sc);
107 static int	aac_check_config(struct aac_softc *sc);
108 
109 /* PMC SRC interface */
110 static int	aac_src_get_fwstatus(struct aac_softc *sc);
111 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
112 static int	aac_src_get_istatus(struct aac_softc *sc);
113 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
114 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
115 				    u_int32_t arg0, u_int32_t arg1,
116 				    u_int32_t arg2, u_int32_t arg3);
117 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
118 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
119 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
120 static int aac_src_get_outb_queue(struct aac_softc *sc);
121 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
122 
123 struct aac_interface aacraid_src_interface = {
124 	aac_src_get_fwstatus,
125 	aac_src_qnotify,
126 	aac_src_get_istatus,
127 	aac_src_clear_istatus,
128 	aac_src_set_mailbox,
129 	aac_src_get_mailbox,
130 	aac_src_access_devreg,
131 	aac_src_send_command,
132 	aac_src_get_outb_queue,
133 	aac_src_set_outb_queue
134 };
135 
136 /* PMC SRCv interface */
137 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
138 				    u_int32_t arg0, u_int32_t arg1,
139 				    u_int32_t arg2, u_int32_t arg3);
140 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
141 
142 struct aac_interface aacraid_srcv_interface = {
143 	aac_src_get_fwstatus,
144 	aac_src_qnotify,
145 	aac_src_get_istatus,
146 	aac_src_clear_istatus,
147 	aac_srcv_set_mailbox,
148 	aac_srcv_get_mailbox,
149 	aac_src_access_devreg,
150 	aac_src_send_command,
151 	aac_src_get_outb_queue,
152 	aac_src_set_outb_queue
153 };
154 
155 /* Debugging and Diagnostics */
156 static struct aac_code_lookup aac_cpu_variant[] = {
157 	{"i960JX",		CPUI960_JX},
158 	{"i960CX",		CPUI960_CX},
159 	{"i960HX",		CPUI960_HX},
160 	{"i960RX",		CPUI960_RX},
161 	{"i960 80303",		CPUI960_80303},
162 	{"StrongARM SA110",	CPUARM_SA110},
163 	{"PPC603e",		CPUPPC_603e},
164 	{"XScale 80321",	CPU_XSCALE_80321},
165 	{"MIPS 4KC",		CPU_MIPS_4KC},
166 	{"MIPS 5KC",		CPU_MIPS_5KC},
167 	{"Unknown StrongARM",	CPUARM_xxx},
168 	{"Unknown PowerPC",	CPUPPC_xxx},
169 	{NULL, 0},
170 	{"Unknown processor",	0}
171 };
172 
173 static struct aac_code_lookup aac_battery_platform[] = {
174 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
175 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
176 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
177 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
178 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
179 	{NULL, 0},
180 	{"unknown battery platform",		0}
181 };
182 static void	aac_describe_controller(struct aac_softc *sc);
183 static char	*aac_describe_code(struct aac_code_lookup *table,
184 				   u_int32_t code);
185 
186 /* Management Interface */
187 static d_open_t		aac_open;
188 static d_ioctl_t	aac_ioctl;
189 static d_poll_t		aac_poll;
190 static void		aac_cdevpriv_dtor(void *arg);
191 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
192 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
193 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
194 static void	aac_request_aif(struct aac_softc *sc);
195 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
196 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
197 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_return_aif(struct aac_softc *sc,
200 			       struct aac_fib_context *ctx, caddr_t uptr);
201 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
202 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
204 static void	aac_ioctl_event(struct aac_softc *sc,
205 				struct aac_event *event, void *arg);
206 static int	aac_reset_adapter(struct aac_softc *sc);
207 static int	aac_get_container_info(struct aac_softc *sc,
208 				       struct aac_fib *fib, int cid,
209 				       struct aac_mntinforesp *mir,
210 				       u_int32_t *uid);
211 static u_int32_t
212 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
213 
214 static struct cdevsw aacraid_cdevsw = {
215 	.d_version =	D_VERSION,
216 	.d_flags =	0,
217 	.d_open =	aac_open,
218 	.d_ioctl =	aac_ioctl,
219 	.d_poll =	aac_poll,
220 	.d_name =	"aacraid",
221 };
222 
223 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
224 
225 /* sysctl node */
226 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
227     "AACRAID driver parameters");
228 
229 /*
230  * Device Interface
231  */
232 
233 /*
234  * Initialize the controller and softc
235  */
236 int
237 aacraid_attach(struct aac_softc *sc)
238 {
239 	int error, unit;
240 	struct aac_fib *fib;
241 	struct aac_mntinforesp mir;
242 	int count = 0, i = 0;
243 	u_int32_t uid;
244 
245 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
246 	sc->hint_flags = device_get_flags(sc->aac_dev);
247 	/*
248 	 * Initialize per-controller queues.
249 	 */
250 	aac_initq_free(sc);
251 	aac_initq_ready(sc);
252 	aac_initq_busy(sc);
253 
254 	/* mark controller as suspended until we get ourselves organised */
255 	sc->aac_state |= AAC_STATE_SUSPEND;
256 
257 	/*
258 	 * Check that the firmware on the card is supported.
259 	 */
260 	sc->msi_enabled = sc->msi_tupelo = FALSE;
261 	if ((error = aac_check_firmware(sc)) != 0)
262 		return(error);
263 
264 	/*
265 	 * Initialize locks
266 	 */
267 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
268 	TAILQ_INIT(&sc->aac_container_tqh);
269 	TAILQ_INIT(&sc->aac_ev_cmfree);
270 
271 	/* Initialize the clock daemon callout. */
272 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
273 
274 	/*
275 	 * Initialize the adapter.
276 	 */
277 	if ((error = aac_alloc(sc)) != 0)
278 		return(error);
279 	aac_define_int_mode(sc);
280 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
281 		if ((error = aac_init(sc)) != 0)
282 			return(error);
283 	}
284 
285 	/*
286 	 * Allocate and connect our interrupt.
287 	 */
288 	if ((error = aac_setup_intr(sc)) != 0)
289 		return(error);
290 
291 	/*
292 	 * Print a little information about the controller.
293 	 */
294 	aac_describe_controller(sc);
295 
296 	/*
297 	 * Make the control device.
298 	 */
299 	unit = device_get_unit(sc->aac_dev);
300 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
301 				 0640, "aacraid%d", unit);
302 	sc->aac_dev_t->si_drv1 = sc;
303 
304 	/* Create the AIF thread */
305 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
306 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
307 		panic("Could not create AIF thread");
308 
309 	/* Register the shutdown method to only be called post-dump */
310 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
311 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
312 		device_printf(sc->aac_dev,
313 			      "shutdown event registration failed\n");
314 
315 	/* Find containers */
316 	mtx_lock(&sc->aac_io_lock);
317 	aac_alloc_sync_fib(sc, &fib);
318 	/* loop over possible containers */
319 	do {
320 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
321 			continue;
322 		if (i == 0)
323 			count = mir.MntRespCount;
324 		aac_add_container(sc, &mir, 0, uid);
325 		i++;
326 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
327 	aac_release_sync_fib(sc);
328 	mtx_unlock(&sc->aac_io_lock);
329 
330 	/* Register with CAM for the containers */
331 	TAILQ_INIT(&sc->aac_sim_tqh);
332 	aac_container_bus(sc);
333 	/* Register with CAM for the non-DASD devices */
334 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
335 		aac_get_bus_info(sc);
336 
337 	/* poke the bus to actually attach the child devices */
338 	bus_generic_attach(sc->aac_dev);
339 
340 	/* mark the controller up */
341 	sc->aac_state &= ~AAC_STATE_SUSPEND;
342 
343 	/* enable interrupts now */
344 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
345 
346 	mtx_lock(&sc->aac_io_lock);
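	/*
	 * Schedule the first run of the housekeeping daemon for 60 seconds
	 * from now; aac_daemon() then re-arms itself every 30 minutes.
	 */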
347 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
348 	mtx_unlock(&sc->aac_io_lock);
349 
350 	return(0);
351 }
352 
353 static void
354 aac_daemon(void *arg)
355 {
356 	struct aac_softc *sc;
357 	struct timeval tv;
358 	struct aac_command *cm;
359 	struct aac_fib *fib;
360 
361 	sc = arg;
362 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
363 
364 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
365 	if (callout_pending(&sc->aac_daemontime) ||
366 	    callout_active(&sc->aac_daemontime) == 0)
367 		return;
368 	getmicrotime(&tv);
369 
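	/*
	 * Push the current host time to the controller with a SendHostTime
	 * FIB; if no command structure is free right now, simply skip this
	 * cycle and try again on the next run.
	 */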
370 	if (!aacraid_alloc_command(sc, &cm)) {
371 		fib = cm->cm_fib;
372 		cm->cm_timestamp = time_uptime;
373 		cm->cm_datalen = 0;
374 		cm->cm_flags |= AAC_CMD_WAIT;
375 
376 		fib->Header.Size =
377 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
378 		fib->Header.XferState =
379 			AAC_FIBSTATE_HOSTOWNED   |
380 			AAC_FIBSTATE_INITIALISED |
381 			AAC_FIBSTATE_EMPTY	 |
382 			AAC_FIBSTATE_FROMHOST	 |
383 			AAC_FIBSTATE_REXPECTED   |
384 			AAC_FIBSTATE_NORM	 |
385 			AAC_FIBSTATE_ASYNC	 |
386 			AAC_FIBSTATE_FAST_RESPONSE;
387 		fib->Header.Command = SendHostTime;
388 		*(uint32_t *)fib->data = htole32(tv.tv_sec);
389 
390 		aacraid_map_command_sg(cm, NULL, 0, 0);
391 		aacraid_release_command(cm);
392 	}
393 
394 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
395 }
396 
397 void
398 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
399 {
400 
401 	switch (event->ev_type & AAC_EVENT_MASK) {
402 	case AAC_EVENT_CMFREE:
403 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
404 		break;
405 	default:
406 		device_printf(sc->aac_dev, "aacraid_add_event: unknown event %d\n",
407 		    event->ev_type);
408 		break;
409 	}
410 
411 	return;
412 }
413 
414 /*
415  * Request information of container #cid
416  */
417 static int
418 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
419 		       struct aac_mntinforesp *mir, u_int32_t *uid)
420 {
421 	struct aac_command *cm;
422 	struct aac_fib *fib;
423 	struct aac_mntinfo *mi;
424 	struct aac_cnt_config *ccfg;
425 	int rval;
426 
427 	if (sync_fib == NULL) {
428 		if (aacraid_alloc_command(sc, &cm)) {
429 			device_printf(sc->aac_dev,
430 				"Warning, no free command available\n");
431 			return (-1);
432 		}
433 		fib = cm->cm_fib;
434 	} else {
435 		fib = sync_fib;
436 	}
437 
438 	mi = (struct aac_mntinfo *)&fib->data[0];
439 	/* 4KB sector support? 64-bit LBA? */
440 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
441 		mi->Command = VM_NameServeAllBlk;
442 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
443 		mi->Command = VM_NameServe64;
444 	else
445 		mi->Command = VM_NameServe;
446 	mi->MntType = FT_FILESYS;
447 	mi->MntCount = cid;
448 	aac_mntinfo_tole(mi);
449 
450 	if (sync_fib) {
451 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
452 			 sizeof(struct aac_mntinfo))) {
453 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
454 			return (-1);
455 		}
456 	} else {
457 		cm->cm_timestamp = time_uptime;
458 		cm->cm_datalen = 0;
459 
460 		fib->Header.Size =
461 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
462 		fib->Header.XferState =
463 			AAC_FIBSTATE_HOSTOWNED   |
464 			AAC_FIBSTATE_INITIALISED |
465 			AAC_FIBSTATE_EMPTY	 |
466 			AAC_FIBSTATE_FROMHOST	 |
467 			AAC_FIBSTATE_REXPECTED   |
468 			AAC_FIBSTATE_NORM	 |
469 			AAC_FIBSTATE_ASYNC	 |
470 			AAC_FIBSTATE_FAST_RESPONSE;
471 		fib->Header.Command = ContainerCommand;
472 		if (aacraid_wait_command(cm) != 0) {
473 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
474 			aacraid_release_command(cm);
475 			return (-1);
476 		}
477 	}
478 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
479 	aac_mntinforesp_toh(mir);
480 
481 	/* UID */
482 	*uid = cid;
483 	if (mir->MntTable[0].VolType != CT_NONE &&
484 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
485 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
486 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
487 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
488 		}
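		/*
		 * Ask the firmware for a 32-bit UID for this container
		 * (CT_CID_TO_32BITS_UID); if the request fails or the volume
		 * is a pass-through device, keep the plain CID assigned above.
		 */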
489 		ccfg = (struct aac_cnt_config *)&fib->data[0];
490 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
491 		ccfg->Command = VM_ContainerConfig;
492 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
493 		ccfg->CTCommand.param[0] = cid;
494 		aac_cnt_config_tole(ccfg);
495 
496 		if (sync_fib) {
497 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
498 				sizeof(struct aac_cnt_config));
499 			aac_cnt_config_toh(ccfg);
500 			if (rval == 0 && ccfg->Command == ST_OK &&
501 				ccfg->CTCommand.param[0] == CT_OK &&
502 				mir->MntTable[0].VolType != CT_PASSTHRU)
503 				*uid = ccfg->CTCommand.param[1];
504 		} else {
505 			fib->Header.Size =
506 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
507 			fib->Header.XferState =
508 				AAC_FIBSTATE_HOSTOWNED   |
509 				AAC_FIBSTATE_INITIALISED |
510 				AAC_FIBSTATE_EMPTY	 |
511 				AAC_FIBSTATE_FROMHOST	 |
512 				AAC_FIBSTATE_REXPECTED   |
513 				AAC_FIBSTATE_NORM	 |
514 				AAC_FIBSTATE_ASYNC	 |
515 				AAC_FIBSTATE_FAST_RESPONSE;
516 			fib->Header.Command = ContainerCommand;
517 			rval = aacraid_wait_command(cm);
518 			aac_cnt_config_toh(ccfg);
519 			if (rval == 0 && ccfg->Command == ST_OK &&
520 				ccfg->CTCommand.param[0] == CT_OK &&
521 				mir->MntTable[0].VolType != CT_PASSTHRU)
522 				*uid = ccfg->CTCommand.param[1];
523 			aacraid_release_command(cm);
524 		}
525 	}
526 
527 	return (0);
528 }
529 
530 /*
531  * Create a device to represent a new container
532  */
533 static void
534 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
535 		  u_int32_t uid)
536 {
537 	struct aac_container *co;
538 
539 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
540 
541 	/*
542 	 * Check container volume type for validity.  Note that many of
543 	 * the possible types may never show up.
544 	 */
545 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
546 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
547 		       M_NOWAIT | M_ZERO);
548 		if (co == NULL) {
549 			panic("Out of memory?!");
550 		}
551 
552 		co->co_found = f;
553 		bcopy(&mir->MntTable[0], &co->co_mntobj,
554 		      sizeof(struct aac_mntobj));
555 		co->co_uid = uid;
556 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
557 	}
558 }
559 
560 /*
561  * Allocate resources associated with (sc)
562  */
563 static int
564 aac_alloc(struct aac_softc *sc)
565 {
566 	bus_size_t maxsize;
567 
568 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
569 
570 	/*
571 	 * Create DMA tag for mapping buffers into controller-addressable space.
572 	 */
573 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
574 			       1, 0, 			/* algnmnt, boundary */
575 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
576 			       BUS_SPACE_MAXADDR :
577 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
578 			       BUS_SPACE_MAXADDR, 	/* highaddr */
579 			       NULL, NULL, 		/* filter, filterarg */
580 			       AAC_MAXIO_SIZE(sc),	/* maxsize */
581 			       sc->aac_sg_tablesize,	/* nsegments */
582 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
583 			       BUS_DMA_ALLOCNOW,	/* flags */
584 			       busdma_lock_mutex,	/* lockfunc */
585 			       &sc->aac_io_lock,	/* lockfuncarg */
586 			       &sc->aac_buffer_dmat)) {
587 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
588 		return (ENOMEM);
589 	}
590 
591 	/*
592  * Create DMA tag for mapping FIBs into controller-addressable space.
593 	 */
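	/*
	 * Size the allocation for a full block of FIBs: 31 spare bytes per
	 * FIB allow each one to be aligned to a 32-byte boundary later, and
	 * type-1 comm. interfaces additionally need room for a transport
	 * header in front of every FIB.
	 */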
594 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
595 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
596 			sizeof(struct aac_fib_xporthdr) + 31);
597 	else
598 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
599 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
600 			       1, 0, 			/* algnmnt, boundary */
601 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
602 			       BUS_SPACE_MAXADDR_32BIT :
603 			       0x7fffffff,		/* lowaddr */
604 			       BUS_SPACE_MAXADDR, 	/* highaddr */
605 			       NULL, NULL, 		/* filter, filterarg */
606 			       maxsize,  		/* maxsize */
607 			       1,			/* nsegments */
608 			       maxsize,			/* maxsegsize */
609 			       0,			/* flags */
610 			       NULL, NULL,		/* No locking needed */
611 			       &sc->aac_fib_dmat)) {
612 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
613 		return (ENOMEM);
614 	}
615 
616 	/*
617 	 * Create DMA tag for the common structure and allocate it.
618 	 */
619 	maxsize = sizeof(struct aac_common);
620 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
621 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
622 			       1, 0,			/* algnmnt, boundary */
623 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
624 			       BUS_SPACE_MAXADDR_32BIT :
625 			       0x7fffffff,		/* lowaddr */
626 			       BUS_SPACE_MAXADDR, 	/* highaddr */
627 			       NULL, NULL, 		/* filter, filterarg */
628 			       maxsize, 		/* maxsize */
629 			       1,			/* nsegments */
630 			       maxsize,			/* maxsegsize */
631 			       0,			/* flags */
632 			       NULL, NULL,		/* No locking needed */
633 			       &sc->aac_common_dmat)) {
634 		device_printf(sc->aac_dev,
635 			      "can't allocate common structure DMA tag\n");
636 		return (ENOMEM);
637 	}
638 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
639 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
640 		device_printf(sc->aac_dev, "can't allocate common structure\n");
641 		return (ENOMEM);
642 	}
643 
644 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
645 			sc->aac_common, maxsize,
646 			aac_common_map, sc, 0);
647 	bzero(sc->aac_common, maxsize);
648 
649 	/* Allocate some FIBs and associated command structs */
650 	TAILQ_INIT(&sc->aac_fibmap_tqh);
651 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
652 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
653 	mtx_lock(&sc->aac_io_lock);
654 	while (sc->total_fibs < sc->aac_max_fibs) {
655 		if (aac_alloc_commands(sc) != 0)
656 			break;
657 	}
658 	mtx_unlock(&sc->aac_io_lock);
659 	if (sc->total_fibs == 0)
660 		return (ENOMEM);
661 
662 	return (0);
663 }
664 
665 /*
666  * Free all of the resources associated with (sc)
667  *
668  * Should not be called if the controller is active.
669  */
670 void
671 aacraid_free(struct aac_softc *sc)
672 {
673 	int i;
674 
675 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
676 
677 	/* remove the control device */
678 	if (sc->aac_dev_t != NULL)
679 		destroy_dev(sc->aac_dev_t);
680 
681 	/* throw away any FIB buffers, discard the FIB DMA tag */
682 	aac_free_commands(sc);
683 	if (sc->aac_fib_dmat)
684 		bus_dma_tag_destroy(sc->aac_fib_dmat);
685 
686 	free(sc->aac_commands, M_AACRAIDBUF);
687 
688 	/* destroy the common area */
689 	if (sc->aac_common) {
690 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
691 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
692 				sc->aac_common_dmamap);
693 	}
694 	if (sc->aac_common_dmat)
695 		bus_dma_tag_destroy(sc->aac_common_dmat);
696 
697 	/* disconnect the interrupt handler */
698 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
699 		if (sc->aac_intr[i])
700 			bus_teardown_intr(sc->aac_dev,
701 				sc->aac_irq[i], sc->aac_intr[i]);
702 		if (sc->aac_irq[i])
703 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
704 				sc->aac_irq_rid[i], sc->aac_irq[i]);
705 		else
706 			break;
707 	}
708 	if (sc->msi_enabled || sc->msi_tupelo)
709 		pci_release_msi(sc->aac_dev);
710 
711 	/* destroy data-transfer DMA tag */
712 	if (sc->aac_buffer_dmat)
713 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
714 
715 	/* destroy the parent DMA tag */
716 	if (sc->aac_parent_dmat)
717 		bus_dma_tag_destroy(sc->aac_parent_dmat);
718 
719 	/* release the register window mapping */
720 	if (sc->aac_regs_res0 != NULL)
721 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
722 				     sc->aac_regs_rid0, sc->aac_regs_res0);
723 	if (sc->aac_regs_res1 != NULL)
724 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
725 				     sc->aac_regs_rid1, sc->aac_regs_res1);
726 }
727 
728 /*
729  * Disconnect from the controller completely, in preparation for unload.
730  */
731 int
732 aacraid_detach(device_t dev)
733 {
734 	struct aac_softc *sc;
735 	struct aac_container *co;
736 	struct aac_sim	*sim;
737 	int error;
738 
739 	sc = device_get_softc(dev);
740 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
741 
742 	callout_drain(&sc->aac_daemontime);
743 	/* Remove the child containers */
744 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
745 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
746 		free(co, M_AACRAIDBUF);
747 	}
748 
749 	/* Remove the CAM SIMs */
750 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
751 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
752 		error = device_delete_child(dev, sim->sim_dev);
753 		if (error)
754 			return (error);
755 		free(sim, M_AACRAIDBUF);
756 	}
757 
758 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
759 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
760 		wakeup(sc->aifthread);
761 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
762 	}
763 
764 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
765 		panic("Cannot shutdown AIF thread");
766 
767 	if ((error = aacraid_shutdown(dev)))
768 		return(error);
769 
770 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
771 
772 	aacraid_free(sc);
773 
774 	mtx_destroy(&sc->aac_io_lock);
775 
776 	return(0);
777 }
778 
779 /*
780  * Bring the controller down to a dormant state and detach all child devices.
781  *
782  * This function is called before detach or system shutdown.
783  *
784  * Note that we can assume that the bioq on the controller is empty, as we won't
785  * allow shutdown if any device is open.
786  */
787 int
788 aacraid_shutdown(device_t dev)
789 {
790 	struct aac_softc *sc;
791 	struct aac_fib *fib;
792 	struct aac_close_command *cc;
793 
794 	sc = device_get_softc(dev);
795 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
796 
797 	sc->aac_state |= AAC_STATE_SUSPEND;
798 
799 	/*
800 	 * Send a Container shutdown followed by a HostShutdown FIB to the
801 	 * controller to convince it that we don't want to talk to it anymore.
802 	 * We've been closed and all I/O completed already
803 	 */
804 	device_printf(sc->aac_dev, "shutting down controller...");
805 
806 	mtx_lock(&sc->aac_io_lock);
807 	aac_alloc_sync_fib(sc, &fib);
808 	cc = (struct aac_close_command *)&fib->data[0];
809 
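	/*
	 * Build a VM_CloseAll request; container id 0xfffffffe appears to act
	 * as a wildcard covering all containers.
	 */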
810 	bzero(cc, sizeof(struct aac_close_command));
811 	cc->Command = htole32(VM_CloseAll);
812 	cc->ContainerId = htole32(0xfffffffe);
813 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
814 	    sizeof(struct aac_close_command)))
815 		printf("FAILED.\n");
816 	else
817 		printf("done\n");
818 
819 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
820 	aac_release_sync_fib(sc);
821 	mtx_unlock(&sc->aac_io_lock);
822 
823 	return(0);
824 }
825 
826 /*
827  * Bring the controller to a quiescent state, ready for system suspend.
828  */
829 int
830 aacraid_suspend(device_t dev)
831 {
832 	struct aac_softc *sc;
833 
834 	sc = device_get_softc(dev);
835 
836 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
837 	sc->aac_state |= AAC_STATE_SUSPEND;
838 
839 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
840 	return(0);
841 }
842 
843 /*
844  * Bring the controller back to a state ready for operation.
845  */
846 int
847 aacraid_resume(device_t dev)
848 {
849 	struct aac_softc *sc;
850 
851 	sc = device_get_softc(dev);
852 
853 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
854 	sc->aac_state &= ~AAC_STATE_SUSPEND;
855 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
856 	return(0);
857 }
858 
859 /*
860  * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
861  */
862 void
863 aacraid_new_intr_type1(void *arg)
864 {
865 	struct aac_msix_ctx *ctx;
866 	struct aac_softc *sc;
867 	int vector_no;
868 	struct aac_command *cm;
869 	struct aac_fib *fib;
870 	u_int32_t bellbits, bellbits_shifted, index, handle;
871 	int isFastResponse, isAif, noMoreAif, mode;
872 
873 	ctx = (struct aac_msix_ctx *)arg;
874 	sc = ctx->sc;
875 	vector_no = ctx->vector_no;
876 
877 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
878 	mtx_lock(&sc->aac_io_lock);
879 
880 	if (sc->msi_enabled) {
881 		mode = AAC_INT_MODE_MSI;
882 		if (vector_no == 0) {
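			/*
			 * In MSI mode only vector 0 carries doorbell events:
			 * bit 18 (0x40000) signals a pending AIF, bit 12
			 * (0x1000) a synchronous command completion.
			 */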
883 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
884 			if (bellbits & 0x40000)
885 				mode |= AAC_INT_MODE_AIF;
886 			else if (bellbits & 0x1000)
887 				mode |= AAC_INT_MODE_SYNC;
888 		}
889 	} else {
890 		mode = AAC_INT_MODE_INTX;
891 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
892 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
893 			bellbits = AAC_DB_RESPONSE_SENT_NS;
894 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
895 		} else {
896 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
897 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
898 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
899 				mode |= AAC_INT_MODE_AIF;
900 			if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
901 				mode |= AAC_INT_MODE_SYNC;
902 		}
903 		/* ODR readback, Prep #238630 */
904 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
905 	}
906 
907 	if (mode & AAC_INT_MODE_SYNC) {
908 		if (sc->aac_sync_cm) {
909 			cm = sc->aac_sync_cm;
910 			aac_unmap_command(cm);
911 			cm->cm_flags |= AAC_CMD_COMPLETED;
912 			aac_fib_header_toh(&cm->cm_fib->Header);
913 
914 			/* is there a completion handler? */
915 			if (cm->cm_complete != NULL) {
916 				cm->cm_complete(cm);
917 			} else {
918 				/* assume that someone is sleeping on this command */
919 				wakeup(cm);
920 			}
921 			sc->flags &= ~AAC_QUEUE_FRZN;
922 			sc->aac_sync_cm = NULL;
923 		}
924 		if (mode & AAC_INT_MODE_INTX)
925 			mode &= ~AAC_INT_MODE_SYNC;
926 		else
927 			mode = 0;
928 	}
929 
930 	if (mode & AAC_INT_MODE_AIF) {
931 		if (mode & AAC_INT_MODE_INTX) {
932 			aac_request_aif(sc);
933 			mode = 0;
934 		}
935 	}
936 
937 	if (sc->flags & AAC_FLAGS_SYNC_MODE)
938 		mode = 0;
939 
940 	if (mode) {
941 		/* handle async. status */
942 		index = sc->aac_host_rrq_idx[vector_no];
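		/*
		 * Drain this vector's slice of the host response ring (RRQ).
		 * Each non-zero entry encodes a 1-based command handle plus
		 * toggle, fast-response and AIF flag bits; a zero entry means
		 * the ring is empty.
		 */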
943 		for (;;) {
944 			isFastResponse = isAif = noMoreAif = 0;
945 			/* remove toggle bit (31) */
946 			handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
947 			    0x7fffffff);
948 			/* check fast response bit (30) */
949 			if (handle & 0x40000000)
950 				isFastResponse = 1;
951 			/* check AIF bit (23) */
952 			else if (handle & 0x00800000)
953 				isAif = TRUE;
954 			handle &= 0x0000ffff;
955 			if (handle == 0)
956 				break;
957 
958 			cm = sc->aac_commands + (handle - 1);
959 			fib = cm->cm_fib;
960 			aac_fib_header_toh(&fib->Header);
961 			sc->aac_rrq_outstanding[vector_no]--;
962 			if (isAif) {
963 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
964 				if (!noMoreAif)
965 					aac_handle_aif(sc, fib);
966 				aac_remove_busy(cm);
967 				aacraid_release_command(cm);
968 			} else {
969 				if (isFastResponse) {
970 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
971 					*((u_int32_t *)(fib->data)) = htole32(ST_OK);
972 					cm->cm_flags |= AAC_CMD_FASTRESP;
973 				}
974 				aac_remove_busy(cm);
975 				aac_unmap_command(cm);
976 				cm->cm_flags |= AAC_CMD_COMPLETED;
977 
978 				/* is there a completion handler? */
979 				if (cm->cm_complete != NULL) {
980 					cm->cm_complete(cm);
981 				} else {
982 					/* assume that someone is sleeping on this command */
983 					wakeup(cm);
984 				}
985 				sc->flags &= ~AAC_QUEUE_FRZN;
986 			}
987 
988 			sc->aac_common->ac_host_rrq[index++] = 0;
989 			if (index == (vector_no + 1) * sc->aac_vector_cap)
990 				index = vector_no * sc->aac_vector_cap;
991 			sc->aac_host_rrq_idx[vector_no] = index;
992 
993 			if ((isAif && !noMoreAif) || sc->aif_pending)
994 				aac_request_aif(sc);
995 		}
996 	}
997 
998 	if (mode & AAC_INT_MODE_AIF) {
999 		aac_request_aif(sc);
1000 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1001 		mode = 0;
1002 	}
1003 
1004 	/* see if we can start some more I/O */
1005 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1006 		aacraid_startio(sc);
1007 	mtx_unlock(&sc->aac_io_lock);
1008 }
1009 
1010 /*
1011  * Handle notification of one or more FIBs coming from the controller.
1012  */
1013 static void
1014 aac_command_thread(struct aac_softc *sc)
1015 {
1016 	int retval;
1017 
1018 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1019 
1020 	mtx_lock(&sc->aac_io_lock);
1021 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1022 
1023 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1024 		retval = 0;
1025 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1026 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1027 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1028 
1029 		/*
1030 		 * First see if any FIBs need to be allocated.
1031 		 */
1032 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1033 			aac_alloc_commands(sc);
1034 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1035 			aacraid_startio(sc);
1036 		}
1037 
1038 		/*
1039 		 * While we're here, check to see if any commands are stuck.
1040 		 * This is pretty low-priority, so it's ok if it doesn't
1041 		 * always fire.
1042 		 */
1043 		if (retval == EWOULDBLOCK)
1044 			aac_timeout(sc);
1045 
1046 		/* Check the hardware printf message buffer */
1047 		if (sc->aac_common->ac_printf[0] != 0)
1048 			aac_print_printf(sc);
1049 	}
1050 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1051 	mtx_unlock(&sc->aac_io_lock);
1052 	wakeup(sc->aac_dev);
1053 
1054 	aac_kthread_exit(0);
1055 }
1056 
1057 /*
1058  * Submit a command to the controller, return when it completes.
1059  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1060  *     be stuck here forever.  At the same time, signals are not caught
1061  *     because there is a risk that a signal could wakeup the sleep before
1062  *     the card has a chance to complete the command.  Since there is no way
1063  *     to cancel a command that is in progress, we can't protect against the
1064  *     card completing a command late and spamming the command and data
1065  *     memory.  So, we are held hostage until the command completes.
1066  */
1067 int
1068 aacraid_wait_command(struct aac_command *cm)
1069 {
1070 	struct aac_softc *sc;
1071 	int error;
1072 
1073 	sc = cm->cm_sc;
1074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1075 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1076 
1077 	/* Put the command on the ready queue and get things going */
1078 	aac_enqueue_ready(cm);
1079 	aacraid_startio(sc);
1080 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1081 	return(error);
1082 }
1083 
1084 /*
1085  * Command Buffer Management
1086  */
1087 
1088 /*
1089  * Allocate a command.
1090  */
1091 int
1092 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1093 {
1094 	struct aac_command *cm;
1095 
1096 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1097 
1098 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1099 		if (sc->total_fibs < sc->aac_max_fibs) {
1100 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1101 			wakeup(sc->aifthread);
1102 		}
1103 		return (EBUSY);
1104 	}
1105 
1106 	*cmp = cm;
1107 	return(0);
1108 }
1109 
1110 /*
1111  * Release a command back to the freelist.
1112  */
1113 void
1114 aacraid_release_command(struct aac_command *cm)
1115 {
1116 	struct aac_event *event;
1117 	struct aac_softc *sc;
1118 
1119 	sc = cm->cm_sc;
1120 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1121 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1122 
1123 	/* (re)initialize the command/FIB */
1124 	cm->cm_sgtable = NULL;
1125 	cm->cm_flags = 0;
1126 	cm->cm_complete = NULL;
1127 	cm->cm_ccb = NULL;
1128 	cm->cm_passthr_dmat = 0;
1129 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1130 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1131 	cm->cm_fib->Header.Unused = 0;
1132 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1133 
1134 	/*
1135 	 * These are duplicated in aac_start to cover the case where an
1136 	 * intermediate stage may have destroyed them.  They're left
1137 	 * initialized here for debugging purposes only.
1138 	 */
1139 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1140 	cm->cm_fib->Header.Handle = 0;
1141 
1142 	aac_enqueue_free(cm);
1143 
1144 	/*
1145 	 * Dequeue all events so that there's no risk of events getting
1146 	 * stranded.
1147 	 */
1148 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1149 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1150 		event->ev_callback(sc, event, event->ev_arg);
1151 	}
1152 }
1153 
1154 /*
1155  * Map helper for command/FIB allocation.
1156  */
1157 static void
1158 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1159 {
1160 	uint64_t	*fibphys;
1161 
1162 	fibphys = (uint64_t *)arg;
1163 
1164 	*fibphys = segs[0].ds_addr;
1165 }
1166 
1167 /*
1168  * Allocate and initialize commands/FIBs for this adapter.
1169  */
1170 static int
1171 aac_alloc_commands(struct aac_softc *sc)
1172 {
1173 	struct aac_command *cm;
1174 	struct aac_fibmap *fm;
1175 	uint64_t fibphys;
1176 	int i, error;
1177 	u_int32_t maxsize;
1178 
1179 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1180 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1181 
1182 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1183 		return (ENOMEM);
1184 
1185 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1186 	if (fm == NULL)
1187 		return (ENOMEM);
1188 
1189 	mtx_unlock(&sc->aac_io_lock);
1190 	/* allocate the FIBs in DMAable memory and load them */
1191 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1192 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1193 		device_printf(sc->aac_dev,
1194 			      "Not enough contiguous memory available.\n");
1195 		free(fm, M_AACRAIDBUF);
1196 		mtx_lock(&sc->aac_io_lock);
1197 		return (ENOMEM);
1198 	}
1199 
1200 	maxsize = sc->aac_max_fib_size + 31;
1201 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1202 		maxsize += sizeof(struct aac_fib_xporthdr);
1203 	/* Ignore errors since this doesn't bounce */
1204 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1205 			      sc->aac_max_fibs_alloc * maxsize,
1206 			      aac_map_command_helper, &fibphys, 0);
1207 	mtx_lock(&sc->aac_io_lock);
1208 
1209 	/* initialize constant fields in the command structure */
1210 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1211 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1212 		cm = sc->aac_commands + sc->total_fibs;
1213 		fm->aac_commands = cm;
1214 		cm->cm_sc = sc;
1215 		cm->cm_fib = (struct aac_fib *)
1216 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1217 		cm->cm_fibphys = fibphys + i * maxsize;
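		/*
		 * Align the FIB's physical (and matching virtual) address to
		 * a 32-byte boundary; on type-1 comm. interfaces, room is
		 * reserved in front of the aligned FIB for the transport
		 * header.
		 */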
1218 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1219 			u_int64_t fibphys_aligned;
1220 			fibphys_aligned =
1221 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1222 			cm->cm_fib = (struct aac_fib *)
1223 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1224 			cm->cm_fibphys = fibphys_aligned;
1225 		} else {
1226 			u_int64_t fibphys_aligned;
1227 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1228 			cm->cm_fib = (struct aac_fib *)
1229 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1230 			cm->cm_fibphys = fibphys_aligned;
1231 		}
1232 		cm->cm_index = sc->total_fibs;
1233 
1234 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1235 					       &cm->cm_datamap)) != 0)
1236 			break;
1237 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1238 			aacraid_release_command(cm);
1239 		sc->total_fibs++;
1240 	}
1241 
1242 	if (i > 0) {
1243 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1244 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1245 		return (0);
1246 	}
1247 
1248 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1249 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1250 	free(fm, M_AACRAIDBUF);
1251 	return (ENOMEM);
1252 }
1253 
1254 /*
1255  * Free FIBs owned by this adapter.
1256  */
1257 static void
1258 aac_free_commands(struct aac_softc *sc)
1259 {
1260 	struct aac_fibmap *fm;
1261 	struct aac_command *cm;
1262 	int i;
1263 
1264 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1265 
1266 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1267 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1268 		/*
1269 		 * We check against total_fibs to handle partially
1270 		 * allocated blocks.
1271 		 */
1272 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1273 			cm = fm->aac_commands + i;
1274 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1275 		}
1276 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1277 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1278 		free(fm, M_AACRAIDBUF);
1279 	}
1280 }
1281 
1282 /*
1283  * Command-mapping helper function - populate this command's s/g table.
1284  */
1285 void
1286 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1287 {
1288 	struct aac_softc *sc;
1289 	struct aac_command *cm;
1290 	struct aac_fib *fib;
1291 	int i;
1292 
1293 	cm = (struct aac_command *)arg;
1294 	sc = cm->cm_sc;
1295 	fib = cm->cm_fib;
1296 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1297 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1298 
1299 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1300 		return;
1301 
1302 	/* copy into the FIB */
1303 	if (cm->cm_sgtable != NULL) {
1304 		if (fib->Header.Command == RawIo2) {
1305 			struct aac_raw_io2 *raw;
1306 			struct aac_sge_ieee1212 *sg;
1307 			u_int32_t min_size = PAGE_SIZE, cur_size;
1308 			int conformable = TRUE;
1309 
1310 			raw = (struct aac_raw_io2 *)&fib->data[0];
1311 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1312 			raw->sgeCnt = nseg;
1313 
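			/*
			 * Fill in the IEEE-1212 SGL and note whether it is
			 * "conformant": all elements except the first and the
			 * last must share one nominal size.  Non-conformant
			 * lists are converted below when possible so the
			 * RIO2_SGL_CONFORMANT flag can be set.
			 */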
1314 			for (i = 0; i < nseg; i++) {
1315 				cur_size = segs[i].ds_len;
1316 				sg[i].addrHigh = 0;
1317 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1318 				sg[i].length = cur_size;
1319 				sg[i].flags = 0;
1320 				if (i == 0) {
1321 					raw->sgeFirstSize = cur_size;
1322 				} else if (i == 1) {
1323 					raw->sgeNominalSize = cur_size;
1324 					min_size = cur_size;
1325 				} else if ((i+1) < nseg &&
1326 					cur_size != raw->sgeNominalSize) {
1327 					conformable = FALSE;
1328 					if (cur_size < min_size)
1329 						min_size = cur_size;
1330 				}
1331 			}
1332 
1333 			/* not conformable: evaluate required sg elements */
1334 			if (!conformable) {
1335 				int j, err_found, nseg_new = nseg;
1336 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1337 					err_found = FALSE;
1338 					nseg_new = 2;
1339 					for (j = 1; j < nseg - 1; ++j) {
1340 						if (sg[j].length % (i*PAGE_SIZE)) {
1341 							err_found = TRUE;
1342 							break;
1343 						}
1344 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1345 					}
1346 					if (!err_found)
1347 						break;
1348 				}
1349 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1350 					!(sc->hint_flags & 4))
1351 					nseg = aac_convert_sgraw2(sc,
1352 						raw, i, nseg, nseg_new);
1353 			} else {
1354 				raw->flags |= RIO2_SGL_CONFORMANT;
1355 			}
1356 
1357 			for (i = 0; i < nseg; i++)
1358 				aac_sge_ieee1212_tole(sg + i);
1359 			aac_raw_io2_tole(raw);
1360 
1361 			/* update the FIB size for the s/g count */
1362 			fib->Header.Size += nseg *
1363 				sizeof(struct aac_sge_ieee1212);
1364 
1365 		} else if (fib->Header.Command == RawIo) {
1366 			struct aac_sg_tableraw *sg;
1367 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1368 			sg->SgCount = htole32(nseg);
1369 			for (i = 0; i < nseg; i++) {
1370 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1371 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1372 				sg->SgEntryRaw[i].Next = 0;
1373 				sg->SgEntryRaw[i].Prev = 0;
1374 				sg->SgEntryRaw[i].Flags = 0;
1375 				aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1376 			}
1377 			aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1378 			/* update the FIB size for the s/g count */
1379 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1380 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1381 			struct aac_sg_table *sg;
1382 			sg = cm->cm_sgtable;
1383 			sg->SgCount = htole32(nseg);
1384 			for (i = 0; i < nseg; i++) {
1385 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1386 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1387 				aac_sg_entry_tole(&sg->SgEntry[i]);
1388 			}
1389 			/* update the FIB size for the s/g count */
1390 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1391 		} else {
1392 			struct aac_sg_table64 *sg;
1393 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1394 			sg->SgCount = htole32(nseg);
1395 			for (i = 0; i < nseg; i++) {
1396 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1397 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1398 				aac_sg_entry64_tole(&sg->SgEntry64[i]);
1399 			}
1400 			/* update the FIB size for the s/g count */
1401 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1402 		}
1403 	}
1404 
1405 	/* Fix up the address values in the FIB.  Use the command array index
1406 	 * instead of a pointer since these fields are only 32 bits.  Shift
1407 	 * the SenderFibAddress over to make room for the fast response bit
1408 	 * and for the AIF bit.
1409 	 */
1410 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1411 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1412 
1413 	/* save a pointer to the command for speedy reverse-lookup */
1414 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1415 
1416 	if (cm->cm_passthr_dmat == 0) {
1417 		if (cm->cm_flags & AAC_CMD_DATAIN)
1418 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1419 							BUS_DMASYNC_PREREAD);
1420 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1421 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1422 							BUS_DMASYNC_PREWRITE);
1423 	}
1424 
1425 	cm->cm_flags |= AAC_CMD_MAPPED;
1426 
1427 	if (cm->cm_flags & AAC_CMD_WAIT) {
1428 		aac_fib_header_tole(&fib->Header);
1429 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1430 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1431 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1432 		u_int32_t wait = 0;
1433 		sc->aac_sync_cm = cm;
1434 		aac_fib_header_tole(&fib->Header);
1435 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1436 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1437 	} else {
1438 		int count = 10000000L;
1439 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1440 			if (--count == 0) {
1441 				aac_unmap_command(cm);
1442 				sc->flags |= AAC_QUEUE_FRZN;
1443 				aac_requeue_ready(cm);
1444 			}
1445 			DELAY(5);			/* wait 5 usec. */
1446 		}
1447 	}
1448 }
1449 
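/*
 * Convert a non-conformant RIO2 S/G list: split every middle element into
 * chunks of 'pages' pages so that all elements except the first and last
 * share one nominal size.  Returns the new element count, or the original
 * count if the temporary buffer cannot be allocated.
 */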
1450 static int
1451 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1452 				   int pages, int nseg, int nseg_new)
1453 {
1454 	struct aac_sge_ieee1212 *sge;
1455 	int i, j, pos;
1456 	u_int32_t addr_low;
1457 
1458 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1459 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1460 	if (sge == NULL)
1461 		return nseg;
1462 
1463 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1464 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1465 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1466 			sge[pos].addrLow = addr_low;
1467 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1468 			if (addr_low < raw->sge[i].addrLow)
1469 				sge[pos].addrHigh++;
1470 			sge[pos].length = pages * PAGE_SIZE;
1471 			sge[pos].flags = 0;
1472 			pos++;
1473 		}
1474 	}
1475 	sge[pos] = raw->sge[nseg-1];
1476 	for (i = 1; i < nseg_new; ++i)
1477 		raw->sge[i] = sge[i];
1478 
1479 	free(sge, M_AACRAIDBUF);
1480 	raw->sgeCnt = nseg_new;
1481 	raw->flags |= RIO2_SGL_CONFORMANT;
1482 	raw->sgeNominalSize = pages * PAGE_SIZE;
1483 	return nseg_new;
1484 }
1485 
1486 /*
1487  * Unmap a command from controller-visible space.
1488  */
1489 static void
1490 aac_unmap_command(struct aac_command *cm)
1491 {
1492 	struct aac_softc *sc;
1493 
1494 	sc = cm->cm_sc;
1495 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1496 
1497 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1498 		return;
1499 
1500 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1501 		if (cm->cm_flags & AAC_CMD_DATAIN)
1502 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1503 					BUS_DMASYNC_POSTREAD);
1504 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1505 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1506 					BUS_DMASYNC_POSTWRITE);
1507 
1508 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1509 	}
1510 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1511 }
1512 
1513 /*
1514  * Hardware Interface
1515  */
1516 
1517 /*
1518  * Initialize the adapter.
1519  */
1520 static void
1521 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1522 {
1523 	struct aac_softc *sc;
1524 
1525 	sc = (struct aac_softc *)arg;
1526 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1527 
1528 	sc->aac_common_busaddr = segs[0].ds_addr;
1529 }
1530 
1531 static int
1532 aac_check_firmware(struct aac_softc *sc)
1533 {
1534 	u_int32_t code, major, minor, maxsize;
1535 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1536 	time_t then;
1537 
1538 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1539 
1540 	/* check if flash update is running */
1541 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1542 		then = time_uptime;
1543 		do {
1544 			code = AAC_GET_FWSTATUS(sc);
1545 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1546 				device_printf(sc->aac_dev,
1547 						  "FATAL: controller not coming ready, "
1548 						   "status %x\n", code);
1549 				return(ENXIO);
1550 			}
1551 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1552 		/*
1553 		 * Delay 10 seconds.  The firmware is performing a soft reset right
1554 		 * now, so do not read the scratch pad register during this time.
1555 		 */
1556 		waitCount = 10 * 10000;
1557 		while (waitCount) {
1558 			DELAY(100);		/* delay 100 microseconds */
1559 			waitCount--;
1560 		}
1561 	}
1562 
1563 	/*
1564 	 * Wait for the adapter to come ready.
1565 	 */
1566 	then = time_uptime;
1567 	do {
1568 		code = AAC_GET_FWSTATUS(sc);
1569 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1570 			device_printf(sc->aac_dev,
1571 				      "FATAL: controller not coming ready, "
1572 					   "status %x\n", code);
1573 			return(ENXIO);
1574 		}
1575 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1576 
1577 	/*
1578 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1579 	 * firmware version 1.x are not compatible with this driver.
1580 	 */
1581 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1582 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1583 				     NULL, NULL)) {
1584 			device_printf(sc->aac_dev,
1585 				      "Error reading firmware version\n");
1586 			return (EIO);
1587 		}
1588 
1589 		/* These numbers are stored as ASCII! */
1590 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1591 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1592 		if (major == 1) {
1593 			device_printf(sc->aac_dev,
1594 			    "Firmware version %d.%d is not supported.\n",
1595 			    major, minor);
1596 			return (EINVAL);
1597 		}
1598 	}
1599 	/*
1600 	 * Retrieve the capabilities/supported options word so we know what
1601 	 * work-arounds to enable.  Some firmware revs don't support this
1602 	 * command.
1603 	 */
1604 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1605 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1606 			device_printf(sc->aac_dev,
1607 			     "RequestAdapterInfo failed\n");
1608 			return (EIO);
1609 		}
1610 	} else {
1611 		options = AAC_GET_MAILBOX(sc, 1);
1612 		atu_size = AAC_GET_MAILBOX(sc, 2);
1613 		sc->supported_options = options;
1614 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1615 
1616 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1617 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1618 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1619 		if (options & AAC_SUPPORTED_NONDASD)
1620 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1621 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1622 			&& (sizeof(bus_addr_t) > 4)
1623 			&& (sc->hint_flags & 0x1)) {
1624 			device_printf(sc->aac_dev,
1625 			    "Enabling 64-bit address support\n");
1626 			sc->flags |= AAC_FLAGS_SG_64BIT;
1627 		}
1628 		if (sc->aac_if.aif_send_command) {
1629 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1630 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1631 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1632 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1633 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1634 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1635 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1636 		}
1637 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1638 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1639 	}
1640 
1641 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1642 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1643 		return (ENXIO);
1644 	}
1645 
1646 	if (sc->hint_flags & 2) {
1647 		device_printf(sc->aac_dev,
1648 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1649 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1650 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1651 		device_printf(sc->aac_dev,
1652 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1653 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1654 	}
1655 
1656 	/* Check for broken hardware that does a lower number of commands */
1657 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1658 
1659 	/* Remap the memory resource if the reported ATU size exceeds the current window */
1660 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1661 		bus_release_resource(
1662 			sc->aac_dev, SYS_RES_MEMORY,
1663 			sc->aac_regs_rid0, sc->aac_regs_res0);
1664 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1665 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1666 			atu_size, RF_ACTIVE);
1667 		if (sc->aac_regs_res0 == NULL) {
1668 			sc->aac_regs_res0 = bus_alloc_resource_any(
1669 				sc->aac_dev, SYS_RES_MEMORY,
1670 				&sc->aac_regs_rid0, RF_ACTIVE);
1671 			if (sc->aac_regs_res0 == NULL) {
1672 				device_printf(sc->aac_dev,
1673 					"couldn't allocate register window\n");
1674 				return (ENXIO);
1675 			}
1676 		}
1677 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1678 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1679 	}
1680 
1681 	/* Read preferred settings */
1682 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1683 	sc->aac_max_sectors = 128;				/* 64KB */
1684 	sc->aac_max_aif = 1;
1685 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1686 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1687 		 - sizeof(struct aac_blockwrite64))
1688 		 / sizeof(struct aac_sg_entry64);
1689 	else
1690 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1691 		 - sizeof(struct aac_blockwrite))
1692 		 / sizeof(struct aac_sg_entry);
1693 
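	/*
	 * Ask the firmware for its preferred settings (GETCOMMPREF): maximum
	 * FIB size, transfer size, S/G table size, FIB count, AIF count and,
	 * on type-2 interfaces, the supported MSI-X vector count.  The
	 * defaults above remain in effect if the request is not supported.
	 */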
1694 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1695 		options = AAC_GET_MAILBOX(sc, 1);
1696 		sc->aac_max_fib_size = (options & 0xFFFF);
1697 		sc->aac_max_sectors = (options >> 16) << 1;
1698 		options = AAC_GET_MAILBOX(sc, 2);
1699 		sc->aac_sg_tablesize = (options >> 16);
1700 		options = AAC_GET_MAILBOX(sc, 3);
1701 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1702 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1703 			sc->aac_max_fibs = (options & 0xFFFF);
1704 		options = AAC_GET_MAILBOX(sc, 4);
1705 		sc->aac_max_aif = (options & 0xFFFF);
1706 		options = AAC_GET_MAILBOX(sc, 5);
1707 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1708 	}
1709 
1710 	maxsize = sc->aac_max_fib_size + 31;
1711 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1712 		maxsize += sizeof(struct aac_fib_xporthdr);
1713 	if (maxsize > PAGE_SIZE) {
1714 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1715 		maxsize = PAGE_SIZE;
1716 	}
1717 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1718 
1719 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1720 		sc->flags |= AAC_FLAGS_RAW_IO;
1721 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1722 	}
1723 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1724 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1725 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1726 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1727 	}
1728 
1729 #ifdef AACRAID_DEBUG
1730 	aacraid_get_fw_debug_buffer(sc);
1731 #endif
1732 	return (0);
1733 }
1734 
1735 static int
1736 aac_init(struct aac_softc *sc)
1737 {
1738 	struct aac_adapter_init	*ip;
1739 	int i, error;
1740 
1741 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1742 
1743 	/* reset rrq index */
1744 	sc->aac_fibs_pushed_no = 0;
1745 	for (i = 0; i < sc->aac_max_msix; i++)
1746 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
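	/*
	 * aac_vector_cap is aac_max_fibs / aac_max_msix (see
	 * aac_define_int_mode()), so each MSI-X vector appears to own a
	 * contiguous slice of the host response ring (RRQ).
	 */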
1747 
1748 	/*
1749 	 * Fill in the init structure.  This tells the adapter about the
1750 	 * physical location of various important shared data structures.
1751 	 */
1752 	ip = &sc->aac_common->ac_init;
1753 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1754 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1755 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1756 		sc->flags |= AAC_FLAGS_RAW_IO;
1757 	}
1758 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1759 
1760 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1761 					 offsetof(struct aac_common, ac_fibs);
1762 	ip->AdapterFibsVirtualAddress = 0;
1763 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1764 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1765 
1766 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1767 				  offsetof(struct aac_common, ac_printf);
1768 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1769 
1770 	/*
1771 	 * The adapter assumes that pages are 4K in size, except on some
1772 	 * broken firmware versions that do the page->byte conversion twice,
1773 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1774 	 * Round up since the granularity is so high.
1775 	 */
1776 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1777 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1778 		ip->HostPhysMemPages =
1779 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1780 	}
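	/*
	 * With the workaround above, the value handed to broken firmware ends
	 * up in units of 4096 pages, i.e. roughly the 16MB (2^24) granularity
	 * mentioned in the comment; the added AAC_PAGE_SIZE rounds upward.
	 */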
1781 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1782 
1783 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1784 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1785 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1786 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1787 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1788 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1789 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1790 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1791 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1792 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1793 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1794 	}
1795 	ip->MaxNumAif = sc->aac_max_aif;
1796 	ip->HostRRQ_AddrLow =
1797 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1798 	/* always 32-bit address */
1799 	ip->HostRRQ_AddrHigh = 0;
1800 
1801 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1802 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1803 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1804 		device_printf(sc->aac_dev, "Power Management enabled\n");
1805 	}
1806 
1807 	ip->MaxIoCommands = sc->aac_max_fibs;
1808 	ip->MaxIoSize = AAC_MAXIO_SIZE(sc);
1809 	ip->MaxFibSize = sc->aac_max_fib_size;
1810 
1811 	aac_adapter_init_tole(ip);
1812 
1813 	/*
1814 	 * Do controller-type-specific initialisation
1815 	 */
1816 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1817 
1818 	/*
1819 	 * Give the init structure to the controller.
1820 	 */
1821 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1822 			     sc->aac_common_busaddr +
1823 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1824 			     NULL, NULL)) {
1825 		device_printf(sc->aac_dev,
1826 			      "error establishing init structure\n");
1827 		error = EIO;
1828 		goto out;
1829 	}
1830 
1831 	/*
1832 	 * Check configuration issues
1833 	 */
1834 	if ((error = aac_check_config(sc)) != 0)
1835 		goto out;
1836 
1837 	error = 0;
1838 out:
1839 	return(error);
1840 }
1841 
1842 static void
1843 aac_define_int_mode(struct aac_softc *sc)
1844 {
1845 	device_t dev;
1846 	int cap, msi_count, error = 0;
1847 	uint32_t val;
1848 
1849 	dev = sc->aac_dev;
1850 
1851 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1852 		device_printf(dev, "using line interrupts\n");
1853 		sc->aac_max_msix = 1;
1854 		sc->aac_vector_cap = sc->aac_max_fibs;
1855 		return;
1856 	}
1857 
1858 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1859 	if (sc->aac_max_msix == 0) {
1860 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1861 			msi_count = 1;
1862 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1863 				device_printf(dev, "alloc msi failed - err=%d; "
1864 				    "will use INTx\n", error);
1865 				pci_release_msi(dev);
1866 			} else {
1867 				sc->msi_tupelo = TRUE;
1868 			}
1869 		}
1870 		if (sc->msi_tupelo)
1871 			device_printf(dev, "using MSI interrupts\n");
1872 		else
1873 			device_printf(dev, "using line interrupts\n");
1874 
1875 		sc->aac_max_msix = 1;
1876 		sc->aac_vector_cap = sc->aac_max_fibs;
1877 		return;
1878 	}
1879 
1880 	/* OS capability */
1881 	msi_count = pci_msix_count(dev);
1882 	if (msi_count > AAC_MAX_MSIX)
1883 		msi_count = AAC_MAX_MSIX;
1884 	if (msi_count > sc->aac_max_msix)
1885 		msi_count = sc->aac_max_msix;
1886 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1887 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1888 				   "will try MSI\n", msi_count, error);
1889 		pci_release_msi(dev);
1890 	} else {
1891 		sc->msi_enabled = TRUE;
1892 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1893 			msi_count);
1894 	}
1895 
1896 	if (!sc->msi_enabled) {
1897 		msi_count = 1;
1898 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1899 			device_printf(dev, "alloc msi failed - err=%d; "
1900 				           "will use INTx\n", error);
1901 			pci_release_msi(dev);
1902 		} else {
1903 			sc->msi_enabled = TRUE;
1904 			device_printf(dev, "using MSI interrupts\n");
1905 		}
1906 	}
1907 
1908 	if (sc->msi_enabled) {
1909 		/* now read controller capability from PCI config. space */
1910 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1911 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1912 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1913 			pci_release_msi(dev);
1914 			sc->msi_enabled = FALSE;
1915 		}
1916 	}
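	/*
	 * The readback above is a sanity check: AAC_PCI_MSI_ENABLE is
	 * presumably the MSI-X Enable bit in the Message Control word
	 * (capability offset 2); if the device did not actually latch it,
	 * the vectors are released and the driver falls back to INTx below.
	 */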
1917 
1918 	if (!sc->msi_enabled) {
1919 		device_printf(dev, "using legacy interrupts\n");
1920 		sc->aac_max_msix = 1;
1921 	} else {
1922 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1923 		if (sc->aac_max_msix > msi_count)
1924 			sc->aac_max_msix = msi_count;
1925 	}
1926 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1927 
1928 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1929 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1930 }
1931 
1932 static int
1933 aac_find_pci_capability(struct aac_softc *sc, int cap)
1934 {
1935 	device_t dev;
1936 	uint32_t status;
1937 	uint8_t ptr;
1938 
1939 	dev = sc->aac_dev;
1940 
1941 	status = pci_read_config(dev, PCIR_STATUS, 2);
1942 	if (!(status & PCIM_STATUS_CAPPRESENT))
1943 		return (0);
1944 
1945 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1946 	switch (status & PCIM_HDRTYPE) {
1947 	case 0:
1948 	case 1:
1949 		ptr = PCIR_CAP_PTR;
1950 		break;
1951 	case 2:
1952 		ptr = PCIR_CAP_PTR_2;
1953 		break;
1954 	default:
1955 		return (0);
1956 		break;
1957 	}
1958 	ptr = pci_read_config(dev, ptr, 1);
1959 
1960 	while (ptr != 0) {
1961 		int next, val;
1962 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1963 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1964 		if (val == cap)
1965 			return (ptr);
1966 		ptr = next;
1967 	}
1968 
1969 	return (0);
1970 }
1971 
1972 static int
1973 aac_setup_intr(struct aac_softc *sc)
1974 {
1975 	int i, msi_count, rid;
1976 	struct resource *res;
1977 	void *tag;
1978 
1979 	msi_count = sc->aac_max_msix;
1980 	rid = ((sc->msi_enabled || sc->msi_tupelo) ? 1 : 0);
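	/* With MSI or MSI-X the IRQ resources start at rid 1; legacy INTx is rid 0. */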
1981 
1982 	for (i = 0; i < msi_count; i++, rid++) {
1983 		if ((res = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &rid,
1984 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1985 			device_printf(sc->aac_dev, "can't allocate interrupt\n");
1986 			return (EINVAL);
1987 		}
1988 		sc->aac_irq_rid[i] = rid;
1989 		sc->aac_irq[i] = res;
1990 		if (aac_bus_setup_intr(sc->aac_dev, res,
1991 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1992 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1993 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1994 			return (EINVAL);
1995 		}
1996 		sc->aac_msix[i].vector_no = i;
1997 		sc->aac_msix[i].sc = sc;
1998 		sc->aac_intr[i] = tag;
1999 	}
2000 
2001 	return (0);
2002 }
2003 
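/*
 * Query the controller's configuration status and, if the firmware reports
 * that the pending configuration is safe to apply (action <= CFACT_PAUSE),
 * commit it.  Returns 0 on success; the negative values below distinguish
 * the query, commit and "too dangerous to auto-commit" failure cases.
 */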
2004 static int
2005 aac_check_config(struct aac_softc *sc)
2006 {
2007 	struct aac_fib *fib;
2008 	struct aac_cnt_config *ccfg;
2009 	struct aac_cf_status_hdr *cf_shdr;
2010 	int rval;
2011 
2012 	mtx_lock(&sc->aac_io_lock);
2013 	aac_alloc_sync_fib(sc, &fib);
2014 
2015 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2016 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2017 	ccfg->Command = VM_ContainerConfig;
2018 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2019 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2020 
2021 	aac_cnt_config_tole(ccfg);
2022 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2023 		sizeof (struct aac_cnt_config));
2024 	aac_cnt_config_toh(ccfg);
2025 
2026 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2027 	if (rval == 0 && ccfg->Command == ST_OK &&
2028 		ccfg->CTCommand.param[0] == CT_OK) {
2029 		if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2030 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2031 			ccfg->Command = VM_ContainerConfig;
2032 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2033 
2034 			aac_cnt_config_tole(ccfg);
2035 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2036 				sizeof (struct aac_cnt_config));
2037 			aac_cnt_config_toh(ccfg);
2038 
2039 			if (rval == 0 && ccfg->Command == ST_OK &&
2040 				ccfg->CTCommand.param[0] == CT_OK) {
2041 				/* successful completion */
2042 				rval = 0;
2043 			} else {
2044 				/* auto commit aborted due to error(s) */
2045 				rval = -2;
2046 			}
2047 		} else {
2048 			/* auto commit aborted due to adapter indicating
2049 			   config. issues too dangerous to auto commit  */
2050 			rval = -3;
2051 		}
2052 	} else {
2053 		/* error */
2054 		rval = -1;
2055 	}
2056 
2057 	aac_release_sync_fib(sc);
2058 	mtx_unlock(&sc->aac_io_lock);
2059 	return(rval);
2060 }
2061 
2062 /*
2063  * Send a synchronous command to the controller and wait for a result.
2064  * Indicate if the controller completed the command with an error status.
2065  */
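/*
 * Hypothetical usage sketch: a caller that only needs the status word could
 * invoke this as, e.g.,
 *
 *	u_int32_t status;
 *	if (aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF,
 *	    0, 0, 0, 0, &status, NULL) != 0)
 *		return (EIO);
 *
 * The command and its four arguments travel through the mailbox registers;
 * the completion status and first return word come back via *sp and *r1.
 */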
2066 int
2067 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2068 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2069 		 u_int32_t *sp, u_int32_t *r1)
2070 {
2071 	time_t then;
2072 	u_int32_t status;
2073 
2074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2075 
2076 	/* populate the mailbox */
2077 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2078 
2079 	/* ensure the sync command doorbell flag is cleared */
2080 	if (!sc->msi_enabled)
2081 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2082 
2083 	/* then set it to signal the adapter */
2084 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2085 
2086 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2087 		/* spin waiting for the command to complete */
2088 		then = time_uptime;
2089 		do {
2090 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2091 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2092 				return(EIO);
2093 			}
2094 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2095 
2096 		/* clear the completion flag */
2097 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2098 
2099 		/* get the command status */
2100 		status = AAC_GET_MAILBOX(sc, 0);
2101 		if (sp != NULL)
2102 			*sp = status;
2103 
2104 		/* return parameter */
2105 		if (r1 != NULL)
2106 			*r1 = AAC_GET_MAILBOX(sc, 1);
2107 
2108 		if (status != AAC_SRB_STS_SUCCESS)
2109 			return (-1);
2110 	}
2111 	return(0);
2112 }
2113 
2114 static int
2115 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2116 		 struct aac_fib *fib, u_int16_t datasize)
2117 {
2118 	uint32_t ReceiverFibAddress;
2119 
2120 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2121 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2122 
2123 	if (datasize > AAC_FIB_DATASIZE)
2124 		return(EINVAL);
2125 
2126 	/*
2127 	 * Set up the sync FIB
2128 	 */
2129 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2130 				AAC_FIBSTATE_INITIALISED |
2131 				AAC_FIBSTATE_EMPTY;
2132 	fib->Header.XferState |= xferstate;
2133 	fib->Header.Command = command;
2134 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2135 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2136 	fib->Header.SenderSize = sizeof(struct aac_fib);
2137 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2138 	ReceiverFibAddress = sc->aac_common_busaddr +
2139 		offsetof(struct aac_common, ac_sync_fib);
2140 	fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2141 	aac_fib_header_tole(&fib->Header);
2142 
2143 	/*
2144 	 * Give the FIB to the controller, wait for a response.
2145 	 */
2146 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2147 		ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2148 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2149 		aac_fib_header_toh(&fib->Header);
2150 		return(EIO);
2151 	}
2152 
2153 	aac_fib_header_toh(&fib->Header);
2154 	return (0);
2155 }
2156 
2157 /*
2158  * Check for commands that have been outstanding for a suspiciously long time,
2159  * and complain about them.
2160  */
2161 static void
2162 aac_timeout(struct aac_softc *sc)
2163 {
2164 	struct aac_command *cm;
2165 	time_t deadline;
2166 	int timedout;
2167 
2168 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2169 	/*
2170 	 * Traverse the busy command list, complaining about late commands
2171 	 * only once.
2172 	 */
2173 	timedout = 0;
2174 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2175 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2176 		if (cm->cm_timestamp < deadline) {
2177 			device_printf(sc->aac_dev,
2178 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2179 				      cm, (int)(time_uptime-cm->cm_timestamp));
2180 			AAC_PRINT_FIB(sc, cm->cm_fib);
2181 			timedout++;
2182 		}
2183 	}
2184 
2185 	if (timedout)
2186 		aac_reset_adapter(sc);
2187 	aacraid_print_queues(sc);
2188 }
2189 
2190 /*
2191  * Interface Function Vectors
2192  */
2193 
2194 /*
2195  * Read the current firmware status word.
2196  */
2197 static int
2198 aac_src_get_fwstatus(struct aac_softc *sc)
2199 {
2200 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2201 
2202 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2203 }
2204 
2205 /*
2206  * Notify the controller of a change in a given queue
2207  */
2208 static void
2209 aac_src_qnotify(struct aac_softc *sc, int qbit)
2210 {
2211 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2212 
2213 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2214 }
2215 
2216 /*
2217  * Get the interrupt reason bits
2218  */
2219 static int
2220 aac_src_get_istatus(struct aac_softc *sc)
2221 {
2222 	int val;
2223 
2224 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2225 
2226 	if (sc->msi_enabled) {
2227 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2228 		if (val & AAC_MSI_SYNC_STATUS)
2229 			val = AAC_DB_SYNC_COMMAND;
2230 		else
2231 			val = 0;
2232 	} else {
2233 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2234 	}
2235 	return(val);
2236 }
2237 
2238 /*
2239  * Clear some interrupt reason bits
2240  */
2241 static void
2242 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2243 {
2244 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2245 
2246 	if (sc->msi_enabled) {
2247 		if (mask == AAC_DB_SYNC_COMMAND)
2248 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2249 	} else {
2250 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2251 	}
2252 }
2253 
2254 /*
2255  * Populate the mailbox and set the command word
2256  */
2257 static void
2258 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2259 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2260 {
2261 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2262 
2263 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2264 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2265 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2266 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2267 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2268 }
2269 
2270 static void
2271 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2272 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2273 {
2274 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2275 
2276 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2277 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2278 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2279 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2280 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2281 }
2282 
2283 /*
2284  * Fetch the immediate command status word
2285  */
2286 static int
2287 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2288 {
2289 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2290 
2291 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2292 }
2293 
2294 static int
2295 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2296 {
2297 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2298 
2299 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2300 }
2301 
2302 /*
2303  * Set/clear interrupt masks
2304  */
2305 static void
2306 aac_src_access_devreg(struct aac_softc *sc, int mode)
2307 {
2308 	u_int32_t val;
2309 
2310 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2311 
2312 	switch (mode) {
2313 	case AAC_ENABLE_INTERRUPT:
2314 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2315 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2316 				           AAC_INT_ENABLE_TYPE1_INTX));
2317 		break;
2318 
2319 	case AAC_DISABLE_INTERRUPT:
2320 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2321 		break;
2322 
2323 	case AAC_ENABLE_MSIX:
2324 		/* set bit 6 */
2325 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2326 		val |= 0x40;
2327 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2328 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2329 		/* unmask int. */
2330 		val = PMC_ALL_INTERRUPT_BITS;
2331 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2332 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2334 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2335 		break;
2336 
2337 	case AAC_DISABLE_MSIX:
2338 		/* reset bit 6 */
2339 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2340 		val &= ~0x40;
2341 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2342 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2343 		break;
2344 
2345 	case AAC_CLEAR_AIF_BIT:
2346 		/* set bit 5 */
2347 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2348 		val |= 0x20;
2349 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2350 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2351 		break;
2352 
2353 	case AAC_CLEAR_SYNC_BIT:
2354 		/* set bit 4 */
2355 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2356 		val |= 0x10;
2357 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2358 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2359 		break;
2360 
2361 	case AAC_ENABLE_INTX:
2362 		/* set bit 7 */
2363 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2364 		val |= 0x80;
2365 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2366 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2367 		/* unmask int. */
2368 		val = PMC_ALL_INTERRUPT_BITS;
2369 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2370 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2371 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2372 			val & (~(PMC_GLOBAL_INT_BIT2)));
2373 		break;
2374 
2375 	default:
2376 		break;
2377 	}
2378 }
2379 
2380 /*
2381  * New comm. interface: Send command functions
2382  */
2383 static int
2384 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2385 {
2386 	struct aac_fib_xporthdr *pFibX;
2387 	u_int32_t fibsize, high_addr;
2388 	u_int64_t address;
2389 
2390 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2391 
2392 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2393 		sc->aac_max_msix > 1) {
2394 		u_int16_t vector_no, first_choice = 0xffff;
2395 
2396 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2397 		do {
2398 			vector_no += 1;
2399 			if (vector_no == sc->aac_max_msix)
2400 				vector_no = 1;
2401 			if (sc->aac_rrq_outstanding[vector_no] <
2402 				sc->aac_vector_cap)
2403 				break;
2404 			if (0xffff == first_choice)
2405 				first_choice = vector_no;
2406 			else if (vector_no == first_choice)
2407 				break;
2408 		} while (1);
2409 		if (vector_no == first_choice)
2410 			vector_no = 0;
2411 		sc->aac_rrq_outstanding[vector_no]++;
2412 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2413 			sc->aac_fibs_pushed_no = 0;
2414 		else
2415 			sc->aac_fibs_pushed_no++;
2416 
2417 		cm->cm_fib->Header.Handle += (vector_no << 16);
2418 	}
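	/*
	 * The selected vector is folded into the upper 16 bits of the FIB
	 * handle, presumably so the completion path can identify which
	 * per-vector aac_rrq_outstanding counter to decrement.
	 */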
2419 
2420 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2421 		/* Calculate the value to encode in the fibsize bits */
2422 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2423 		/* Fill new FIB header */
2424 		address = cm->cm_fibphys;
2425 		high_addr = (u_int32_t)(address >> 32);
2426 		if (high_addr == 0L) {
2427 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2428 			cm->cm_fib->Header.u.TimeStamp = 0L;
2429 		} else {
2430 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2431 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2432 		}
2433 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2434 	} else {
2435 		/* Calculate the value to encode in the fibsize bits */
2436 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2437 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2438 		/* Fill XPORT header */
2439 		pFibX = (struct aac_fib_xporthdr *)
2440 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2441 		pFibX->Handle = cm->cm_fib->Header.Handle;
2442 		pFibX->HostAddress = cm->cm_fibphys;
2443 		pFibX->Size = cm->cm_fib->Header.Size;
2444 		aac_fib_xporthdr_tole(pFibX);
2445 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2446 		high_addr = (u_int32_t)(address >> 32);
2447 	}
2448 
2449 	aac_fib_header_tole(&cm->cm_fib->Header);
2450 
2451 	if (fibsize > 31)
2452 		fibsize = 31;
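	/*
	 * fibsize is the FIB length in 128-byte units minus one, capped at
	 * 31; it is added into the low bits of the inbound queue address
	 * written below, which the hardware presumably strips back out.
	 */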
2453 	aac_enqueue_busy(cm);
2454 	if (high_addr) {
2455 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2456 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2457 	} else {
2458 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2459 	}
2460 	return 0;
2461 }
2462 
2463 /*
2464  * New comm. interface: get, set outbound queue index
2465  */
2466 static int
2467 aac_src_get_outb_queue(struct aac_softc *sc)
2468 {
2469 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2470 
2471 	return(-1);
2472 }
2473 
2474 static void
2475 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2476 {
2477 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2478 }
2479 
2480 /*
2481  * Debugging and Diagnostics
2482  */
2483 
2484 /*
2485  * Print some information about the controller.
2486  */
2487 static void
2488 aac_describe_controller(struct aac_softc *sc)
2489 {
2490 	struct aac_fib *fib;
2491 	struct aac_adapter_info	*info;
2492 	char *adapter_type = "Adaptec RAID controller";
2493 
2494 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2495 
2496 	mtx_lock(&sc->aac_io_lock);
2497 	aac_alloc_sync_fib(sc, &fib);
2498 
2499 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2500 		fib->data[0] = 0;
2501 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2502 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2503 		else {
2504 			struct aac_supplement_adapter_info *supp_info;
2505 
2506 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2507 			adapter_type = (char *)supp_info->AdapterTypeText;
2508 			sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2509 			sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2510 		}
2511 	}
2512 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2513 		adapter_type,
2514 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2515 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2516 
2517 	fib->data[0] = 0;
2518 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2519 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2520 		aac_release_sync_fib(sc);
2521 		mtx_unlock(&sc->aac_io_lock);
2522 		return;
2523 	}
2524 
2525 	/* save the kernel revision structure for later use */
2526 	info = (struct aac_adapter_info *)&fib->data[0];
2527 	aac_adapter_info_toh(info);
2528 	sc->aac_revision = info->KernelRevision;
2529 
2530 	if (bootverbose) {
2531 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2532 		    "(%dMB cache, %dMB execution), %s\n",
2533 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2534 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2535 		    info->BufferMem / (1024 * 1024),
2536 		    info->ExecutionMem / (1024 * 1024),
2537 		    aac_describe_code(aac_battery_platform,
2538 		    info->batteryPlatform));
2539 
2540 		device_printf(sc->aac_dev,
2541 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2542 		    info->KernelRevision.external.comp.major,
2543 		    info->KernelRevision.external.comp.minor,
2544 		    info->KernelRevision.external.comp.dash,
2545 		    info->KernelRevision.buildNumber,
2546 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2547 
2548 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2549 			      sc->supported_options,
2550 			      "\20"
2551 			      "\1SNAPSHOT"
2552 			      "\2CLUSTERS"
2553 			      "\3WCACHE"
2554 			      "\4DATA64"
2555 			      "\5HOSTTIME"
2556 			      "\6RAID50"
2557 			      "\7WINDOW4GB"
2558 			      "\10SCSIUPGD"
2559 			      "\11SOFTERR"
2560 			      "\12NORECOND"
2561 			      "\13SGMAP64"
2562 			      "\14ALARM"
2563 			      "\15NONDASD"
2564 			      "\16SCSIMGT"
2565 			      "\17RAIDSCSI"
2566 			      "\21ADPTINFO"
2567 			      "\22NEWCOMM"
2568 			      "\23ARRAY64BIT"
2569 			      "\24HEATSENSOR");
2570 	}
2571 
2572 	aac_release_sync_fib(sc);
2573 	mtx_unlock(&sc->aac_io_lock);
2574 }
2575 
2576 /*
2577  * Look up a text description of a numeric error code and return a pointer to
2578  * same.
2579  */
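/*
 * Note that when the code is not found, the entry one past the NULL
 * sentinel is returned; the lookup tables presumably keep a catch-all
 * description (e.g. "unknown") in that final slot.
 */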
2580 static char *
2581 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2582 {
2583 	int i;
2584 
2585 	for (i = 0; table[i].string != NULL; i++)
2586 		if (table[i].code == code)
2587 			return(table[i].string);
2588 	return(table[i + 1].string);
2589 }
2590 
2591 /*
2592  * Management Interface
2593  */
2594 
2595 static int
2596 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2597 {
2598 	struct aac_softc *sc;
2599 
2600 	sc = dev->si_drv1;
2601 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2602 	device_busy(sc->aac_dev);
2603 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2604 	return 0;
2605 }
2606 
2607 static int
2608 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2609 {
2610 	union aac_statrequest *as;
2611 	struct aac_softc *sc;
2612 	int error = 0;
2613 
2614 	as = (union aac_statrequest *)arg;
2615 	sc = dev->si_drv1;
2616 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2617 
2618 	switch (cmd) {
2619 	case AACIO_STATS:
2620 		switch (as->as_item) {
2621 		case AACQ_FREE:
2622 		case AACQ_READY:
2623 		case AACQ_BUSY:
2624 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2625 			      sizeof(struct aac_qstat));
2626 			break;
2627 		default:
2628 			error = ENOENT;
2629 			break;
2630 		}
2631 	break;
2632 		break;
2633 	case FSACTL_SENDFIB:
2634 	case FSACTL_SEND_LARGE_FIB:
2635 		arg = *(caddr_t*)arg;
2636 	case FSACTL_LNX_SENDFIB:
2637 	case FSACTL_LNX_SEND_LARGE_FIB:
2638 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2639 		error = aac_ioctl_sendfib(sc, arg);
2640 		break;
2641 	case FSACTL_SEND_RAW_SRB:
2642 		arg = *(caddr_t*)arg;
2643 	case FSACTL_LNX_SEND_RAW_SRB:
2644 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2645 		error = aac_ioctl_send_raw_srb(sc, arg);
2646 		break;
2647 	case FSACTL_AIF_THREAD:
2648 	case FSACTL_LNX_AIF_THREAD:
2649 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2650 		error = EINVAL;
2651 		break;
2652 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2653 		arg = *(caddr_t*)arg;
2654 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2655 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2656 		error = aac_open_aif(sc, arg);
2657 		break;
2658 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2662 		error = aac_getnext_aif(sc, arg);
2663 		break;
2664 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2665 		arg = *(caddr_t*)arg;
2666 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2668 		error = aac_close_aif(sc, arg);
2669 		break;
2670 	case FSACTL_MINIPORT_REV_CHECK:
2671 		arg = *(caddr_t*)arg;
2672 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2673 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2674 		error = aac_rev_check(sc, arg);
2675 		break;
2676 	case FSACTL_QUERY_DISK:
2677 		arg = *(caddr_t*)arg;
2678 	case FSACTL_LNX_QUERY_DISK:
2679 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2680 		error = aac_query_disk(sc, arg);
2681 		break;
2682 	case FSACTL_DELETE_DISK:
2683 	case FSACTL_LNX_DELETE_DISK:
2684 		/*
2685 		 * We don't trust userland to tell us when to delete a
2686 		 * container; rather, we rely on an AIF coming from the
2687 		 * controller.
2688 		 */
2689 		error = 0;
2690 		break;
2691 	case FSACTL_GET_PCI_INFO:
2692 		arg = *(caddr_t*)arg;
2693 	case FSACTL_LNX_GET_PCI_INFO:
2694 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2695 		error = aac_get_pci_info(sc, arg);
2696 		break;
2697 	case FSACTL_GET_FEATURES:
2698 		arg = *(caddr_t*)arg;
2699 	case FSACTL_LNX_GET_FEATURES:
2700 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2701 		error = aac_supported_features(sc, arg);
2702 		break;
2703 	default:
2704 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2705 		error = EINVAL;
2706 		break;
2707 	}
2708 	return(error);
2709 }
2710 
2711 static int
2712 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2713 {
2714 	struct aac_softc *sc;
2715 	struct aac_fib_context *ctx;
2716 	int revents;
2717 
2718 	sc = dev->si_drv1;
2719 	revents = 0;
2720 
2721 	mtx_lock(&sc->aac_io_lock);
2722 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2723 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2724 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2725 				revents |= poll_events & (POLLIN | POLLRDNORM);
2726 				break;
2727 			}
2728 		}
2729 	}
2730 	mtx_unlock(&sc->aac_io_lock);
2731 
2732 	if (revents == 0) {
2733 		if (poll_events & (POLLIN | POLLRDNORM))
2734 			selrecord(td, &sc->rcv_select);
2735 	}
2736 
2737 	return (revents);
2738 }
2739 
2740 static void
2741 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2742 {
2743 
2744 	switch (event->ev_type) {
2745 	case AAC_EVENT_CMFREE:
2746 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2747 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2748 			aacraid_add_event(sc, event);
2749 			return;
2750 		}
2751 		free(event, M_AACRAIDBUF);
2752 		wakeup(arg);
2753 		break;
2754 	default:
2755 		break;
2756 	}
2757 }
2758 
2759 /*
2760  * Send a FIB supplied from userspace
2761  *
2762  * Currently, sending a FIB from userspace in BE hosts is not supported.
2763  * There are several things that need to be considered in order to
2764  * support this, such as:
2765  * - At least the FIB data part from userspace should already be in LE,
2766  *   or else the kernel would need to know all FIB types to be able to
2767  *   correctly convert it to BE.
2768  * - SG tables are converted to BE by aacraid_map_command_sg(). This
2769  *   conversion should be suppressed if the FIB comes from userspace.
2770  * - aacraid_wait_command() calls functions that convert the FIB header
2771  *   to LE. But if the header is already in LE, the conversion should not
2772  *   be performed.
2773  */
2774 static int
2775 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2776 {
2777 	struct aac_command *cm;
2778 	int size, error;
2779 
2780 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2781 
2782 	cm = NULL;
2783 
2784 	/*
2785 	 * Get a command
2786 	 */
2787 	mtx_lock(&sc->aac_io_lock);
2788 	if (aacraid_alloc_command(sc, &cm)) {
2789 		struct aac_event *event;
2790 
2791 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2792 		    M_NOWAIT | M_ZERO);
2793 		if (event == NULL) {
2794 			error = EBUSY;
2795 			mtx_unlock(&sc->aac_io_lock);
2796 			goto out;
2797 		}
2798 		event->ev_type = AAC_EVENT_CMFREE;
2799 		event->ev_callback = aac_ioctl_event;
2800 		event->ev_arg = &cm;
2801 		aacraid_add_event(sc, event);
2802 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2803 	}
2804 	mtx_unlock(&sc->aac_io_lock);
2805 
2806 	/*
2807 	 * Fetch the FIB header, then re-copy to get data as well.
2808 	 */
2809 	if ((error = copyin(ufib, cm->cm_fib,
2810 			    sizeof(struct aac_fib_header))) != 0)
2811 		goto out;
2812 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2813 	if (size > sc->aac_max_fib_size) {
2814 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2815 			      size, sc->aac_max_fib_size);
2816 		size = sc->aac_max_fib_size;
2817 	}
2818 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2819 		goto out;
2820 	cm->cm_fib->Header.Size = size;
2821 	cm->cm_timestamp = time_uptime;
2822 	cm->cm_datalen = 0;
2823 
2824 	/*
2825 	 * Pass the FIB to the controller, wait for it to complete.
2826 	 */
2827 	mtx_lock(&sc->aac_io_lock);
2828 	error = aacraid_wait_command(cm);
2829 	mtx_unlock(&sc->aac_io_lock);
2830 	if (error != 0) {
2831 		device_printf(sc->aac_dev,
2832 			      "aacraid_wait_command return %d\n", error);
2833 		goto out;
2834 	}
2835 
2836 	/*
2837 	 * Copy the FIB and data back out to the caller.
2838 	 */
2839 	size = cm->cm_fib->Header.Size;
2840 	if (size > sc->aac_max_fib_size) {
2841 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2842 			      size, sc->aac_max_fib_size);
2843 		size = sc->aac_max_fib_size;
2844 	}
2845 	error = copyout(cm->cm_fib, ufib, size);
2846 
2847 out:
2848 	if (cm != NULL) {
2849 		mtx_lock(&sc->aac_io_lock);
2850 		aacraid_release_command(cm);
2851 		mtx_unlock(&sc->aac_io_lock);
2852 	}
2853 	return(error);
2854 }
2855 
2856 /*
2857  * Send a passthrough FIB supplied from userspace
2858  */
2859 static int
2860 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2861 {
2862 	struct aac_command *cm;
2863 	struct aac_fib *fib;
2864 	struct aac_srb *srbcmd;
2865 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2866 	void *user_reply;
2867 	int error, transfer_data = 0;
2868 	bus_dmamap_t orig_map = 0;
2869 	u_int32_t fibsize = 0;
2870 	u_int64_t srb_sg_address;
2871 	u_int32_t srb_sg_bytecount;
2872 
2873 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2874 
2875 	cm = NULL;
2876 
2877 	mtx_lock(&sc->aac_io_lock);
2878 	if (aacraid_alloc_command(sc, &cm)) {
2879 		struct aac_event *event;
2880 
2881 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2882 		    M_NOWAIT | M_ZERO);
2883 		if (event == NULL) {
2884 			error = EBUSY;
2885 			mtx_unlock(&sc->aac_io_lock);
2886 			goto out;
2887 		}
2888 		event->ev_type = AAC_EVENT_CMFREE;
2889 		event->ev_callback = aac_ioctl_event;
2890 		event->ev_arg = &cm;
2891 		aacraid_add_event(sc, event);
2892 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2893 	}
2894 	mtx_unlock(&sc->aac_io_lock);
2895 
2896 	cm->cm_data = NULL;
2897 	/* save original dma map */
2898 	orig_map = cm->cm_datamap;
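	/*
	 * cm_datamap is replaced below by a map created against the
	 * passthrough DMA tag; the original is restored in the out: path so
	 * the command can be recycled normally.
	 */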
2899 
2900 	fib = cm->cm_fib;
2901 	srbcmd = (struct aac_srb *)fib->data;
2902 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2903 	    sizeof (u_int32_t))) != 0)
2904 		goto out;
2905 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2906 		error = EINVAL;
2907 		goto out;
2908 	}
2909 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2910 		goto out;
2911 
2912 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2913 	srbcmd->retry_limit = 0;	/* obsolete */
2914 
2915 	/* only one sg element from userspace supported */
2916 	if (srbcmd->sg_map.SgCount > 1) {
2917 		error = EINVAL;
2918 		goto out;
2919 	}
2920 	/* check fibsize */
2921 	if (fibsize == (sizeof(struct aac_srb) +
2922 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2923 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2924 		struct aac_sg_entry sg;
2925 
2926 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2927 			goto out;
2928 
2929 		srb_sg_bytecount = sg.SgByteCount;
2930 		srb_sg_address = (u_int64_t)sg.SgAddress;
2931 	} else if (fibsize == (sizeof(struct aac_srb) +
2932 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2933 #ifdef __LP64__
2934 		struct aac_sg_entry64 *sgp =
2935 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2936 		struct aac_sg_entry64 sg;
2937 
2938 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2939 			goto out;
2940 
2941 		srb_sg_bytecount = sg.SgByteCount;
2942 		srb_sg_address = sg.SgAddress;
2943 #else
2944 		error = EINVAL;
2945 		goto out;
2946 #endif
2947 	} else {
2948 		error = EINVAL;
2949 		goto out;
2950 	}
2951 	user_reply = (char *)arg + fibsize;
2952 	srbcmd->data_len = srb_sg_bytecount;
2953 	if (srbcmd->sg_map.SgCount == 1)
2954 		transfer_data = 1;
2955 
2956 	if (transfer_data) {
2957 		/*
2958 		 * Create DMA tag for the passthr. data buffer and allocate it.
2959 		 */
2960 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2961 			1, 0,			/* algnmnt, boundary */
2962 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2963 			BUS_SPACE_MAXADDR_32BIT :
2964 			0x7fffffff,		/* lowaddr */
2965 			BUS_SPACE_MAXADDR, 	/* highaddr */
2966 			NULL, NULL, 		/* filter, filterarg */
2967 			srb_sg_bytecount, 	/* size */
2968 			sc->aac_sg_tablesize,	/* nsegments */
2969 			srb_sg_bytecount, 	/* maxsegsize */
2970 			0,			/* flags */
2971 			NULL, NULL,		/* No locking needed */
2972 			&cm->cm_passthr_dmat)) {
2973 			error = ENOMEM;
2974 			goto out;
2975 		}
2976 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2977 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2978 			error = ENOMEM;
2979 			goto out;
2980 		}
2981 		/* fill some cm variables */
2982 		cm->cm_datalen = srb_sg_bytecount;
2983 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2984 			cm->cm_flags |= AAC_CMD_DATAIN;
2985 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2986 			cm->cm_flags |= AAC_CMD_DATAOUT;
2987 
2988 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2989 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2990 				cm->cm_data, cm->cm_datalen)) != 0)
2991 				goto out;
2992 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2993 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2994 				BUS_DMASYNC_PREWRITE);
2995 		}
2996 	}
2997 
2998 	/* build the FIB */
2999 	fib->Header.Size = sizeof(struct aac_fib_header) +
3000 		sizeof(struct aac_srb);
3001 	fib->Header.XferState =
3002 		AAC_FIBSTATE_HOSTOWNED   |
3003 		AAC_FIBSTATE_INITIALISED |
3004 		AAC_FIBSTATE_EMPTY	 |
3005 		AAC_FIBSTATE_FROMHOST	 |
3006 		AAC_FIBSTATE_REXPECTED   |
3007 		AAC_FIBSTATE_NORM	 |
3008 		AAC_FIBSTATE_ASYNC;
3009 
3010 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3011 		ScsiPortCommandU64 : ScsiPortCommand;
3012 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3013 
3014 	aac_srb_tole(srbcmd);
3015 
3016 	/* send command */
3017 	if (transfer_data) {
3018 		bus_dmamap_load(cm->cm_passthr_dmat,
3019 			cm->cm_datamap, cm->cm_data,
3020 			cm->cm_datalen,
3021 			aacraid_map_command_sg, cm, 0);
3022 	} else {
3023 		aacraid_map_command_sg(cm, NULL, 0, 0);
3024 	}
3025 
3026 	/* wait for completion */
3027 	mtx_lock(&sc->aac_io_lock);
3028 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3029 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3030 	mtx_unlock(&sc->aac_io_lock);
3031 
3032 	/* copy data */
3033 	if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3034 		if ((error = copyout(cm->cm_data,
3035 			(void *)(uintptr_t)srb_sg_address,
3036 			cm->cm_datalen)) != 0)
3037 			goto out;
3038 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3039 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3040 				BUS_DMASYNC_POSTREAD);
3041 	}
3042 
3043 	/* status */
3044 	aac_srb_response_toh((struct aac_srb_response *)fib->data);
3045 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3046 
3047 out:
3048 	if (cm && cm->cm_data) {
3049 		if (transfer_data)
3050 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3051 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3052 		cm->cm_datamap = orig_map;
3053 	}
3054 	if (cm && cm->cm_passthr_dmat)
3055 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3056 	if (cm) {
3057 		mtx_lock(&sc->aac_io_lock);
3058 		aacraid_release_command(cm);
3059 		mtx_unlock(&sc->aac_io_lock);
3060 	}
3061 	return(error);
3062 }
3063 
3064 /*
3065  * Request an AIF from the controller (new comm. type1)
3066  */
3067 static void
3068 aac_request_aif(struct aac_softc *sc)
3069 {
3070 	struct aac_command *cm;
3071 	struct aac_fib *fib;
3072 
3073 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3074 
3075 	if (aacraid_alloc_command(sc, &cm)) {
3076 		sc->aif_pending = 1;
3077 		return;
3078 	}
3079 	sc->aif_pending = 0;
3080 
3081 	/* build the FIB */
3082 	fib = cm->cm_fib;
3083 	fib->Header.Size = sizeof(struct aac_fib);
3084 	fib->Header.XferState =
3085 		AAC_FIBSTATE_HOSTOWNED   |
3086 		AAC_FIBSTATE_INITIALISED |
3087 		AAC_FIBSTATE_EMPTY	 |
3088 		AAC_FIBSTATE_FROMHOST	 |
3089 		AAC_FIBSTATE_REXPECTED   |
3090 		AAC_FIBSTATE_NORM	 |
3091 		AAC_FIBSTATE_ASYNC;
3092 	/* set AIF marker */
3093 	fib->Header.Handle = 0x00800000;
3094 	fib->Header.Command = AifRequest;
3095 	((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
3096 
3097 	aacraid_map_command_sg(cm, NULL, 0, 0);
3098 }
3099 
3100 /*
3101  * cdevpriv interface private destructor.
3102  */
3103 static void
3104 aac_cdevpriv_dtor(void *arg)
3105 {
3106 	struct aac_softc *sc;
3107 
3108 	sc = arg;
3109 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3110 	device_unbusy(sc->aac_dev);
3111 }
3112 
3113 /*
3114  * Handle an AIF sent to us by the controller; queue it for later reference.
3115  * If the queue fills up, then drop the older entries.
3116  */
3117 static void
3118 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3119 {
3120 	struct aac_aif_command *aif;
3121 	struct aac_container *co, *co_next;
3122 	struct aac_fib_context *ctx;
3123 	struct aac_fib *sync_fib;
3124 	struct aac_mntinforesp mir;
3125 	int next, current, found;
3126 	int count = 0, changed = 0, i = 0;
3127 	u_int32_t channel, uid;
3128 
3129 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3130 
3131 	aif = (struct aac_aif_command*)&fib->data[0];
3132 	aacraid_print_aif(sc, aif);
3133 
3134 	/* Is it an event that we should care about? */
3135 	switch (le32toh(aif->command)) {
3136 	case AifCmdEventNotify:
3137 		switch (le32toh(aif->data.EN.type)) {
3138 		case AifEnAddContainer:
3139 		case AifEnDeleteContainer:
3140 			/*
3141 			 * A container was added or deleted, but the message
3142 			 * doesn't tell us anything else!  Re-enumerate the
3143 			 * containers and sort things out.
3144 			 */
3145 			aac_alloc_sync_fib(sc, &sync_fib);
3146 			do {
3147 				/*
3148 				 * Ask the controller for its containers one at
3149 				 * a time.
3150 				 * XXX What if the controller's list changes
3151 				 * midway through this enumeration?
3152 				 * XXX This should be done async.
3153 				 */
3154 				if (aac_get_container_info(sc, sync_fib, i,
3155 					&mir, &uid) != 0)
3156 					continue;
3157 				if (i == 0)
3158 					count = mir.MntRespCount;
3159 				/*
3160 				 * Check the container against our list.
3161 				 * co->co_found was already set to 0 in a
3162 				 * previous run.
3163 				 */
3164 				if ((mir.Status == ST_OK) &&
3165 				    (mir.MntTable[0].VolType != CT_NONE)) {
3166 					found = 0;
3167 					TAILQ_FOREACH(co,
3168 						      &sc->aac_container_tqh,
3169 						      co_link) {
3170 						if (co->co_mntobj.ObjectId ==
3171 						    mir.MntTable[0].ObjectId) {
3172 							co->co_found = 1;
3173 							found = 1;
3174 							break;
3175 						}
3176 					}
3177 					/*
3178 					 * If the container matched, continue
3179 					 * in the list.
3180 					 */
3181 					if (found) {
3182 						i++;
3183 						continue;
3184 					}
3185 
3186 					/*
3187 					 * This is a new container.  Do all the
3188 					 * appropriate things to set it up.
3189 					 */
3190 					aac_add_container(sc, &mir, 1, uid);
3191 					changed = 1;
3192 				}
3193 				i++;
3194 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3195 			aac_release_sync_fib(sc);
3196 
3197 			/*
3198 			 * Go through our list of containers and see which ones
3199 			 * were not marked 'found'.  Since the controller didn't
3200 			 * list them they must have been deleted.  Do the
3201 			 * appropriate steps to destroy the device.  Also reset
3202 			 * the co->co_found field.
3203 			 */
3204 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3205 			while (co != NULL) {
3206 				if (co->co_found == 0) {
3207 					co_next = TAILQ_NEXT(co, co_link);
3208 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3209 						     co_link);
3210 					free(co, M_AACRAIDBUF);
3211 					changed = 1;
3212 					co = co_next;
3213 				} else {
3214 					co->co_found = 0;
3215 					co = TAILQ_NEXT(co, co_link);
3216 				}
3217 			}
3218 
3219 			/* Attach the newly created containers */
3220 			if (changed) {
3221 				if (sc->cam_rescan_cb != NULL)
3222 					sc->cam_rescan_cb(sc, 0,
3223 				    	AAC_CAM_TARGET_WILDCARD);
3224 			}
3225 
3226 			break;
3227 
3228 		case AifEnEnclosureManagement:
3229 			switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3230 			case AIF_EM_DRIVE_INSERTION:
3231 			case AIF_EM_DRIVE_REMOVAL:
3232 				channel = le32toh(aif->data.EN.data.EEE.unitID);
3233 				if (sc->cam_rescan_cb != NULL)
3234 					sc->cam_rescan_cb(sc,
3235 					    ((channel>>24) & 0xF) + 1,
3236 					    (channel & 0xFFFF));
3237 				break;
3238 			}
3239 			break;
3240 
3241 		case AifEnAddJBOD:
3242 		case AifEnDeleteJBOD:
3243 		case AifRawDeviceRemove:
3244 			channel = le32toh(aif->data.EN.data.ECE.container);
3245 			if (sc->cam_rescan_cb != NULL)
3246 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3247 				    AAC_CAM_TARGET_WILDCARD);
3248 			break;
3249 
3250 		default:
3251 			break;
3252 		}
3253 
3254 	default:
3255 		break;
3256 	}
3257 
3258 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3259 	current = sc->aifq_idx;
3260 	next = (current + 1) % AAC_AIFQ_LENGTH;
3261 	if (next == 0)
3262 		sc->aifq_filled = 1;
3263 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3264 	/* Make aifq's FIB header and data LE */
3265 	aac_fib_header_tole(&sc->aac_aifq[current].Header);
3266 	/* modify AIF contexts */
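	/*
	 * Wrap handling: if the writer is about to lap a reader, that
	 * reader's ctx_wrap flag is raised; a reader that has already been
	 * lapped has its index pushed forward so it keeps pointing at the
	 * oldest surviving AIF.
	 */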
3267 	if (sc->aifq_filled) {
3268 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3269 			if (next == ctx->ctx_idx)
3270 				ctx->ctx_wrap = 1;
3271 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3272 				ctx->ctx_idx = next;
3273 		}
3274 	}
3275 	sc->aifq_idx = next;
3276 	/* On the off chance that someone is sleeping for an aif... */
3277 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3278 		wakeup(sc->aac_aifq);
3279 	/* Wakeup any poll()ers */
3280 	selwakeuppri(&sc->rcv_select, PRIBIO);
3281 
3282 	return;
3283 }
3284 
3285 /*
3286  * Return the Revision of the driver to userspace and check to see if the
3287  * userspace app is possibly compatible.  This is extremely bogus since
3288  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3289  * returning what the card reported.
3290  */
3291 static int
3292 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3293 {
3294 	struct aac_rev_check rev_check;
3295 	struct aac_rev_check_resp rev_check_resp;
3296 	int error = 0;
3297 
3298 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3299 
3300 	/*
3301 	 * Copyin the revision struct from userspace
3302 	 */
3303 	if ((error = copyin(udata, (caddr_t)&rev_check,
3304 			sizeof(struct aac_rev_check))) != 0) {
3305 		return error;
3306 	}
3307 
3308 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3309 	      rev_check.callingRevision.buildNumber);
3310 
3311 	/*
3312 	 * Doctor up the response struct.
3313 	 */
3314 	rev_check_resp.possiblyCompatible = 1;
3315 	rev_check_resp.adapterSWRevision.external.comp.major =
3316 	    AAC_DRIVER_MAJOR_VERSION;
3317 	rev_check_resp.adapterSWRevision.external.comp.minor =
3318 	    AAC_DRIVER_MINOR_VERSION;
3319 	rev_check_resp.adapterSWRevision.external.comp.type =
3320 	    AAC_DRIVER_TYPE;
3321 	rev_check_resp.adapterSWRevision.external.comp.dash =
3322 	    AAC_DRIVER_BUGFIX_LEVEL;
3323 	rev_check_resp.adapterSWRevision.buildNumber =
3324 	    AAC_DRIVER_BUILD;
3325 
3326 	return(copyout((caddr_t)&rev_check_resp, udata,
3327 			sizeof(struct aac_rev_check_resp)));
3328 }
3329 
3330 /*
3331  * Pass the fib context to the caller
3332  */
3333 static int
3334 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3335 {
3336 	struct aac_fib_context *fibctx, *ctx;
3337 	int error = 0;
3338 
3339 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3340 
3341 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3342 	if (fibctx == NULL)
3343 		return (ENOMEM);
3344 
3345 	mtx_lock(&sc->aac_io_lock);
3346 	/* all elements are already 0, add to queue */
3347 	if (sc->fibctx == NULL)
3348 		sc->fibctx = fibctx;
3349 	else {
3350 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3351 			;
3352 		ctx->next = fibctx;
3353 		fibctx->prev = ctx;
3354 	}
3355 
3356 	/* evaluate unique value */
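	/*
	 * The context handle handed back to userspace is derived from the
	 * kernel address of the new context (its low 32 bits on little-endian
	 * hosts); the loop below simply bumps it until it no longer collides
	 * with an existing context.
	 */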
3357 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3358 	ctx = sc->fibctx;
3359 	while (ctx != fibctx) {
3360 		if (ctx->unique == fibctx->unique) {
3361 			fibctx->unique++;
3362 			ctx = sc->fibctx;
3363 		} else {
3364 			ctx = ctx->next;
3365 		}
3366 	}
3367 
3368 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3369 	mtx_unlock(&sc->aac_io_lock);
3370 	if (error)
3371 		aac_close_aif(sc, (caddr_t)ctx);
3372 	return error;
3373 }
3374 
3375 /*
3376  * Close the caller's fib context
3377  */
3378 static int
3379 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3380 {
3381 	struct aac_fib_context *ctx;
3382 
3383 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3384 
3385 	mtx_lock(&sc->aac_io_lock);
3386 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3387 		if (ctx->unique == *(uint32_t *)&arg) {
3388 			if (ctx == sc->fibctx)
3389 				sc->fibctx = NULL;
3390 			else {
3391 				ctx->prev->next = ctx->next;
3392 				if (ctx->next)
3393 					ctx->next->prev = ctx->prev;
3394 			}
3395 			break;
3396 		}
3397 	}
3398 	if (ctx)
3399 		free(ctx, M_AACRAIDBUF);
3400 
3401 	mtx_unlock(&sc->aac_io_lock);
3402 	return 0;
3403 }
3404 
3405 /*
3406  * Pass the caller the next AIF in their queue
3407  */
3408 static int
3409 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3410 {
3411 	struct get_adapter_fib_ioctl agf;
3412 	struct aac_fib_context *ctx;
3413 	int error;
3414 
3415 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3416 
3417 	mtx_lock(&sc->aac_io_lock);
3418 #ifdef COMPAT_FREEBSD32
3419 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3420 		struct get_adapter_fib_ioctl32 agf32;
3421 		error = copyin(arg, &agf32, sizeof(agf32));
3422 		if (error == 0) {
3423 			agf.AdapterFibContext = agf32.AdapterFibContext;
3424 			agf.Wait = agf32.Wait;
3425 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3426 		}
3427 	} else
3428 #endif
3429 		error = copyin(arg, &agf, sizeof(agf));
3430 	if (error == 0) {
3431 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3432 			if (agf.AdapterFibContext == ctx->unique)
3433 				break;
3434 		}
3435 		if (!ctx) {
3436 			mtx_unlock(&sc->aac_io_lock);
3437 			return (EFAULT);
3438 		}
3439 
3440 		error = aac_return_aif(sc, ctx, agf.AifFib);
3441 		if (error == EAGAIN && agf.Wait) {
3442 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3443 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3444 			while (error == EAGAIN) {
3445 				mtx_unlock(&sc->aac_io_lock);
3446 				error = tsleep(sc->aac_aifq, PRIBIO |
3447 					       PCATCH, "aacaif", 0);
3448 				mtx_lock(&sc->aac_io_lock);
3449 				if (error == 0)
3450 					error = aac_return_aif(sc, ctx, agf.AifFib);
3451 			}
3452 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3453 		}
3454 	}
3455 	mtx_unlock(&sc->aac_io_lock);
3456 	return(error);
3457 }
3458 
3459 /*
3460  * Hand the next AIF off the top of the queue out to userspace.
3461  */
3462 static int
3463 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3464 {
3465 	int current, error;
3466 
3467 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3468 
3469 	current = ctx->ctx_idx;
3470 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3471 		/* empty */
3472 		return (EAGAIN);
3473 	}
3474 	error =
3475 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3476 	if (error)
3477 		device_printf(sc->aac_dev,
3478 		    "aac_return_aif: copyout returned %d\n", error);
3479 	else {
3480 		ctx->ctx_wrap = 0;
3481 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3482 	}
3483 	return(error);
3484 }
3485 
3486 static int
3487 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3488 {
3489 	struct aac_pci_info {
3490 		u_int32_t bus;
3491 		u_int32_t slot;
3492 	} pciinf;
3493 	int error;
3494 
3495 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3496 
3497 	pciinf.bus = pci_get_bus(sc->aac_dev);
3498 	pciinf.slot = pci_get_slot(sc->aac_dev);
3499 
3500 	error = copyout((caddr_t)&pciinf, uptr,
3501 			sizeof(struct aac_pci_info));
3502 
3503 	return (error);
3504 }
3505 
3506 static int
3507 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3508 {
3509 	struct aac_features f;
3510 	int error;
3511 
3512 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3513 
3514 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3515 		return (error);
3516 
3517 	/*
3518 	 * When the management driver receives the FSACTL_GET_FEATURES ioctl
3519 	 * with all zeroes in featuresState, the driver returns the current
3520 	 * state of all the supported features; the data field is not
3521 	 * valid.
3522 	 * When the management driver receives the FSACTL_GET_FEATURES ioctl
3523 	 * with a specific bit set in featuresState, the driver returns the
3524 	 * current state of that specific feature and whatever data is
3525 	 * associated with the feature in the data field, or performs
3526 	 * whatever action the data field indicates.
3527 	 */
3528 	if (f.feat.fValue == 0) {
3529 		f.feat.fBits.largeLBA =
3530 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3531 		f.feat.fBits.JBODSupport = 1;
3532 		/* TODO: In the future, add other features state here as well */
3533 	} else {
3534 		if (f.feat.fBits.largeLBA)
3535 			f.feat.fBits.largeLBA =
3536 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3537 		/* TODO: Add other features state and data in the future */
3538 	}
3539 
3540 	error = copyout(&f, uptr, sizeof (f));
3541 	return (error);
3542 }
3543 
3544 /*
3545  * Give the userland some information about the container.  The AAC arch
3546  * expects the driver to be a SCSI passthrough type driver, so it expects
3547  * the containers to have b:t:l numbers.  Fake it.
3548  */
3549 static int
3550 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3551 {
3552 	struct aac_query_disk query_disk;
3553 	struct aac_container *co;
3554 	int error, id;
3555 
3556 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3557 
3558 	mtx_lock(&sc->aac_io_lock);
3559 	error = copyin(uptr, (caddr_t)&query_disk,
3560 		       sizeof(struct aac_query_disk));
3561 	if (error) {
3562 		mtx_unlock(&sc->aac_io_lock);
3563 		return (error);
3564 	}
3565 
3566 	id = query_disk.ContainerNumber;
3567 	if (id == -1) {
3568 		mtx_unlock(&sc->aac_io_lock);
3569 		return (EINVAL);
3570 	}
3571 
3572 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3573 		if (co->co_mntobj.ObjectId == id)
3574 			break;
3575 	}
3576 
3577 	if (co == NULL) {
3578 		query_disk.Valid = 0;
3579 		query_disk.Locked = 0;
3580 		query_disk.Deleted = 1;		/* XXX is this right? */
3581 	} else {
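		/*
		 * Fake the b:t:l: use the controller unit number as the bus,
		 * target 0, LUN 0.
		 */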
3582 		query_disk.Valid = 1;
3583 		query_disk.Locked = 1;
3584 		query_disk.Deleted = 0;
3585 		query_disk.Bus = device_get_unit(sc->aac_dev);
3586 		query_disk.Target = 0;
3587 		query_disk.Lun = 0;
3588 		query_disk.UnMapped = 0;
3589 	}
3590 
3591 	error = copyout((caddr_t)&query_disk, uptr,
3592 			sizeof(struct aac_query_disk));
3593 
3594 	mtx_unlock(&sc->aac_io_lock);
3595 	return (error);
3596 }
3597 
3598 static void
3599 aac_container_bus(struct aac_softc *sc)
3600 {
3601 	struct aac_sim *sim;
3602 	device_t child;
3603 
3604 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3605 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3606 	if (sim == NULL) {
3607 		device_printf(sc->aac_dev,
3608 	    "No memory to add container bus\n");
3609 		panic("Out of memory?!");
3610 	}
3611 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3612 	if (child == NULL) {
3613 		device_printf(sc->aac_dev,
3614 	    "device_add_child failed for container bus\n");
3615 		free(sim, M_AACRAIDBUF);
3616 		panic("Out of memory?!");
3617 	}
3618 
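	/* Containers are exposed on a single pseudo-bus (bus 0). */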
3619 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3620 	sim->BusNumber = 0;
3621 	sim->BusType = CONTAINER_BUS;
3622 	sim->InitiatorBusId = -1;
3623 	sim->aac_sc = sc;
3624 	sim->sim_dev = child;
3625 	sim->aac_cam = NULL;
3626 
3627 	device_set_ivars(child, sim);
3628 	device_set_desc(child, "Container Bus");
3629 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3630 	/*
3631 	device_set_desc(child, aac_describe_code(aac_container_types,
3632 			mir->MntTable[0].VolType));
3633 	*/
3634 	bus_generic_attach(sc->aac_dev);
3635 }
3636 
3637 static void
3638 aac_get_bus_info(struct aac_softc *sc)
3639 {
3640 	struct aac_fib *fib;
3641 	struct aac_ctcfg *c_cmd;
3642 	struct aac_ctcfg_resp *c_resp;
3643 	struct aac_vmioctl *vmi;
3644 	struct aac_vmi_businf_resp *vmi_resp;
3645 	struct aac_getbusinf businfo;
3646 	struct aac_sim *caminf;
3647 	device_t child;
3648 	int i, error;
3649 
3650 	mtx_lock(&sc->aac_io_lock);
3651 	aac_alloc_sync_fib(sc, &fib);
3652 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3653 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3654 
3655 	c_cmd->Command = VM_ContainerConfig;
3656 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3657 	c_cmd->param = 0;
3658 
3659 	aac_ctcfg_tole(c_cmd);
3660 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3661 	    sizeof(struct aac_ctcfg));
3662 	if (error) {
3663 		device_printf(sc->aac_dev, "Error %d sending "
3664 		    "VM_ContainerConfig command\n", error);
3665 		aac_release_sync_fib(sc);
3666 		mtx_unlock(&sc->aac_io_lock);
3667 		return;
3668 	}
3669 
3670 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3671 	aac_ctcfg_resp_toh(c_resp);
3672 	if (c_resp->Status != ST_OK) {
3673 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3674 		    c_resp->Status);
3675 		aac_release_sync_fib(sc);
3676 		mtx_unlock(&sc->aac_io_lock);
3677 		return;
3678 	}
3679 
3680 	sc->scsi_method_id = c_resp->param;
3681 
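	/* Query the physical bus layout via VM_Ioctl/GetBusInfo. */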
3682 	vmi = (struct aac_vmioctl *)&fib->data[0];
3683 	bzero(vmi, sizeof(struct aac_vmioctl));
3684 
3685 	vmi->Command = VM_Ioctl;
3686 	vmi->ObjType = FT_DRIVE;
3687 	vmi->MethId = sc->scsi_method_id;
3688 	vmi->ObjId = 0;
3689 	vmi->IoctlCmd = GetBusInfo;
3690 
3691 	aac_vmioctl_tole(vmi);
3692 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3693 	    sizeof(struct aac_vmi_businf_resp));
3694 	if (error) {
3695 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3696 		    error);
3697 		aac_release_sync_fib(sc);
3698 		mtx_unlock(&sc->aac_io_lock);
3699 		return;
3700 	}
3701 
3702 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3703 	aac_vmi_businf_resp_toh(vmi_resp);
3704 	if (vmi_resp->Status != ST_OK) {
3705 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3706 		    vmi_resp->Status);
3707 		aac_release_sync_fib(sc);
3708 		mtx_unlock(&sc->aac_io_lock);
3709 		return;
3710 	}
3711 
3712 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3713 	aac_release_sync_fib(sc);
3714 	mtx_unlock(&sc->aac_io_lock);
3715 
3716 	for (i = 0; i < businfo.BusCount; i++) {
3717 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3718 			continue;
3719 
3720 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3721 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3722 		if (caminf == NULL) {
3723 			device_printf(sc->aac_dev,
3724 			    "No memory to add passthrough bus %d\n", i);
3725 			break;
3726 		}
3727 
3728 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3729 		if (child == NULL) {
3730 			device_printf(sc->aac_dev,
3731 			    "device_add_child failed for passthrough bus %d\n",
3732 			    i);
3733 			free(caminf, M_AACRAIDBUF);
3734 			break;
3735 		}
3736 
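		/*
		 * Bus 0 is the container bus, so physical passthrough buses
		 * are numbered starting at 1.
		 */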
3737 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3738 		caminf->BusNumber = i + 1;
3739 		caminf->BusType = PASSTHROUGH_BUS;
3740 		caminf->InitiatorBusId = -1;
3741 		caminf->aac_sc = sc;
3742 		caminf->sim_dev = child;
3743 		caminf->aac_cam = NULL;
3744 
3745 		device_set_ivars(child, caminf);
3746 		device_set_desc(child, "SCSI Passthrough Bus");
3747 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3748 	}
3749 }
3750 
3751 /*
3752  * Check to see if the kernel is up and running. If we are in a
3753  * BlinkLED state, return the BlinkLED code.
3754  */
3755 static u_int32_t
3756 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3757 {
3758 	u_int32_t ret;
3759 
3760 	ret = AAC_GET_FWSTATUS(sc);
3761 
3762 	if (ret & AAC_UP_AND_RUNNING)
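	/*
	 * If the adapter is not up and running, the BlinkLED code is
	 * carried in bits 23:16 of the firmware status.
	 */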
3763 		ret = 0;
3764 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3765 		*bled = (ret >> 16) & 0xff;
3766 
3767 	return (ret);
3768 }
3769 
3770 /*
3771  * After an IOP reset the card basically has to be re-initialized as if
3772  * it were coming up from a cold boot, and the driver is responsible
3773  * for any I/O that was outstanding to the adapter at the time of the
3774  * reset.  The init code is kept modular so that it can be called from
3775  * multiple places in preparation for an IOP reset.
3776  */
3777 static int
3778 aac_reset_adapter(struct aac_softc *sc)
3779 {
3780 	struct aac_command *cm;
3781 	struct aac_fib *fib;
3782 	struct aac_pause_command *pc;
3783 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3784 	int ret, msi_enabled_orig;
3785 
3786 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3787 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3788 
3789 	if (sc->aac_state & AAC_STATE_RESET) {
3790 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3791 		return (EINVAL);
3792 	}
3793 	sc->aac_state |= AAC_STATE_RESET;
3794 
3795 	/* disable interrupt */
3796 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3797 
3798 	/*
3799 	 * Abort all pending commands:
3800 	 * a) on the controller
3801 	 */
3802 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3803 		cm->cm_flags |= AAC_CMD_RESET;
3804 
3805 		/* is there a completion handler? */
3806 		if (cm->cm_complete != NULL) {
3807 			cm->cm_complete(cm);
3808 		} else {
3809 			/* assume that someone is sleeping on this
3810 			 * command
3811 			 */
3812 			wakeup(cm);
3813 		}
3814 	}
3815 
3816 	/* b) in the waiting queues */
3817 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3818 		cm->cm_flags |= AAC_CMD_RESET;
3819 
3820 		/* is there a completion handler? */
3821 		if (cm->cm_complete != NULL) {
3822 			cm->cm_complete(cm);
3823 		} else {
3824 			/* assume that someone is sleeping on this
3825 			 * command
3826 			 */
3827 			wakeup(cm);
3828 		}
3829 	}
3830 
3831 	/* flush drives */
3832 	if (aac_check_adapter_health(sc, NULL) == 0) {
3833 		mtx_unlock(&sc->aac_io_lock);
3834 		(void) aacraid_shutdown(sc->aac_dev);
3835 		mtx_lock(&sc->aac_io_lock);
3836 	}
3837 
3838 	/* execute IOP reset */
3839 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3840 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3841 
3842 		/* We need to wait for 5 seconds before accessing the MU again
3843 		 * 5 * 10000 iterations of DELAY(100) = 5,000,000us = 5s
3844 		 */
3845 		waitCount = 5 * 10000;
3846 		while (waitCount) {
3847 			DELAY(100);			/* delay 100 microseconds */
3848 			waitCount--;
3849 		}
3850 	} else {
3851 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3852 			0, 0, 0, 0, &status, &reset_mask);
3853 		if (ret && !sc->doorbell_mask) {
3854 			/* call IOP_RESET for older firmware */
3855 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3856 			    &status, NULL)) != 0) {
3857 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3858 					device_printf(sc->aac_dev,
3859 					    "IOP_RESET not supported\n");
3860 				} else {
3861 					/* probably timeout */
3862 					device_printf(sc->aac_dev,
3863 					    "IOP_RESET failed\n");
3864 				}
3865 
3866 				/* unwind aac_shutdown() */
3867 				aac_alloc_sync_fib(sc, &fib);
3868 				pc = (struct aac_pause_command *)&fib->data[0];
3869 				pc->Command = VM_ContainerConfig;
3870 				pc->Type = CT_PAUSE_IO;
3871 				pc->Timeout = 1;
3872 				pc->Min = 1;
3873 				pc->NoRescan = 1;
3874 
3875 				aac_pause_command_tole(pc);
3876 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3877 				    fib, sizeof (struct aac_pause_command));
3878 				aac_release_sync_fib(sc);
3879 
3880 				goto finish;
3881 			}
3882 		} else if (sc->doorbell_mask) {
3883 			ret = 0;
3884 			reset_mask = sc->doorbell_mask;
3885 		}
3886 		if (!ret &&
3887 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3888 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3889 			/*
3890 			 * We need to wait for 5 seconds before accessing the
3891 			 * doorbell again;
3892 			 * 5 * 10000 iterations of DELAY(100) = 5,000,000us = 5s
3893 			 */
3894 			waitCount = 5 * 10000;
3895 			while (waitCount) {
3896 				DELAY(100);	/* delay 100 microseconds */
3897 				waitCount--;
3898 			}
3899 		}
3900 	}
3901 
3902 	/*
3903 	 * Initialize the adapter.
3904 	 */
3905 	max_msix_orig = sc->aac_max_msix;
3906 	msi_enabled_orig = sc->msi_enabled;
3907 	sc->msi_enabled = FALSE;
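	/*
	 * Re-read the firmware state with MSI disabled; the saved MSI-X
	 * settings are restored below before re-initializing the adapter.
	 */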
3908 	if (aac_check_firmware(sc) != 0)
3909 		goto finish;
3910 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3911 		sc->aac_max_msix = max_msix_orig;
3912 		if (msi_enabled_orig) {
3913 			sc->msi_enabled = msi_enabled_orig;
3914 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3915 		}
3916 		mtx_unlock(&sc->aac_io_lock);
3917 		aac_init(sc);
3918 		mtx_lock(&sc->aac_io_lock);
3919 	}
3920 
3921 finish:
3922 	sc->aac_state &= ~AAC_STATE_RESET;
3923 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3924 	aacraid_startio(sc);
3925 	return (0);
3926 }
3927