xref: /freebsd/sys/dev/aacraid/aacraid.c (revision da759cfa320d5076b075d15ff3f00ab3ba5634fd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 #include <dev/aacraid/aacraid_endian.h>
73 
74 #ifndef FILTER_HANDLED
75 #define FILTER_HANDLED	0x02
76 #endif
77 
78 static void	aac_add_container(struct aac_softc *sc,
79 				  struct aac_mntinforesp *mir, int f,
80 				  u_int32_t uid);
81 static void	aac_get_bus_info(struct aac_softc *sc);
82 static void	aac_container_bus(struct aac_softc *sc);
83 static void	aac_daemon(void *arg);
84 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
85 							  int pages, int nseg, int nseg_new);
86 
87 /* Command Processing */
88 static void	aac_timeout(struct aac_softc *sc);
89 static void	aac_command_thread(struct aac_softc *sc);
90 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
91 				     u_int32_t xferstate, struct aac_fib *fib,
92 				     u_int16_t datasize);
93 /* Command Buffer Management */
94 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
95 				       int nseg, int error);
96 static int	aac_alloc_commands(struct aac_softc *sc);
97 static void	aac_free_commands(struct aac_softc *sc);
98 static void	aac_unmap_command(struct aac_command *cm);
99 
100 /* Hardware Interface */
101 static int	aac_alloc(struct aac_softc *sc);
102 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
103 			       int error);
104 static int	aac_check_firmware(struct aac_softc *sc);
105 static void	aac_define_int_mode(struct aac_softc *sc);
106 static int	aac_init(struct aac_softc *sc);
107 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
108 static int	aac_setup_intr(struct aac_softc *sc);
109 static int	aac_check_config(struct aac_softc *sc);
110 
111 /* PMC SRC interface */
112 static int	aac_src_get_fwstatus(struct aac_softc *sc);
113 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
114 static int	aac_src_get_istatus(struct aac_softc *sc);
115 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
116 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
117 				    u_int32_t arg0, u_int32_t arg1,
118 				    u_int32_t arg2, u_int32_t arg3);
119 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
120 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
121 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
122 static int aac_src_get_outb_queue(struct aac_softc *sc);
123 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
124 
125 struct aac_interface aacraid_src_interface = {
126 	aac_src_get_fwstatus,
127 	aac_src_qnotify,
128 	aac_src_get_istatus,
129 	aac_src_clear_istatus,
130 	aac_src_set_mailbox,
131 	aac_src_get_mailbox,
132 	aac_src_access_devreg,
133 	aac_src_send_command,
134 	aac_src_get_outb_queue,
135 	aac_src_set_outb_queue
136 };
137 
138 /* PMC SRCv interface */
139 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 				    u_int32_t arg0, u_int32_t arg1,
141 				    u_int32_t arg2, u_int32_t arg3);
142 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
143 
144 struct aac_interface aacraid_srcv_interface = {
145 	aac_src_get_fwstatus,
146 	aac_src_qnotify,
147 	aac_src_get_istatus,
148 	aac_src_clear_istatus,
149 	aac_srcv_set_mailbox,
150 	aac_srcv_get_mailbox,
151 	aac_src_access_devreg,
152 	aac_src_send_command,
153 	aac_src_get_outb_queue,
154 	aac_src_set_outb_queue
155 };
156 
157 /* Debugging and Diagnostics */
158 static struct aac_code_lookup aac_cpu_variant[] = {
159 	{"i960JX",		CPUI960_JX},
160 	{"i960CX",		CPUI960_CX},
161 	{"i960HX",		CPUI960_HX},
162 	{"i960RX",		CPUI960_RX},
163 	{"i960 80303",		CPUI960_80303},
164 	{"StrongARM SA110",	CPUARM_SA110},
165 	{"PPC603e",		CPUPPC_603e},
166 	{"XScale 80321",	CPU_XSCALE_80321},
167 	{"MIPS 4KC",		CPU_MIPS_4KC},
168 	{"MIPS 5KC",		CPU_MIPS_5KC},
169 	{"Unknown StrongARM",	CPUARM_xxx},
170 	{"Unknown PowerPC",	CPUPPC_xxx},
171 	{NULL, 0},
172 	{"Unknown processor",	0}
173 };
174 
175 static struct aac_code_lookup aac_battery_platform[] = {
176 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
177 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
178 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
179 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
180 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
181 	{NULL, 0},
182 	{"unknown battery platform",		0}
183 };
184 static void	aac_describe_controller(struct aac_softc *sc);
185 static char	*aac_describe_code(struct aac_code_lookup *table,
186 				   u_int32_t code);
187 
188 /* Management Interface */
189 static d_open_t		aac_open;
190 static d_ioctl_t	aac_ioctl;
191 static d_poll_t		aac_poll;
192 static void		aac_cdevpriv_dtor(void *arg);
193 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void	aac_request_aif(struct aac_softc *sc);
197 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int	aac_return_aif(struct aac_softc *sc,
202 			       struct aac_fib_context *ctx, caddr_t uptr);
203 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void	aac_ioctl_event(struct aac_softc *sc,
207 				struct aac_event *event, void *arg);
208 static int	aac_reset_adapter(struct aac_softc *sc);
209 static int	aac_get_container_info(struct aac_softc *sc,
210 				       struct aac_fib *fib, int cid,
211 				       struct aac_mntinforesp *mir,
212 				       u_int32_t *uid);
213 static u_int32_t
214 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
215 
216 static struct cdevsw aacraid_cdevsw = {
217 	.d_version =	D_VERSION,
218 	.d_flags =	0,
219 	.d_open =	aac_open,
220 	.d_ioctl =	aac_ioctl,
221 	.d_poll =	aac_poll,
222 	.d_name =	"aacraid",
223 };
224 
225 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
226 
227 /* sysctl node */
228 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
229     "AACRAID driver parameters");
230 
231 /*
232  * Device Interface
233  */
234 
235 /*
236  * Initialize the controller and softc
237  */
238 int
239 aacraid_attach(struct aac_softc *sc)
240 {
241 	int error, unit;
242 	struct aac_fib *fib;
243 	struct aac_mntinforesp mir;
244 	int count = 0, i = 0;
245 	u_int32_t uid;
246 
247 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
248 	sc->hint_flags = device_get_flags(sc->aac_dev);
249 	/*
250 	 * Initialize per-controller queues.
251 	 */
252 	aac_initq_free(sc);
253 	aac_initq_ready(sc);
254 	aac_initq_busy(sc);
255 
256 	/* mark controller as suspended until we get ourselves organised */
257 	sc->aac_state |= AAC_STATE_SUSPEND;
258 
259 	/*
260 	 * Check that the firmware on the card is supported.
261 	 */
262 	sc->msi_enabled = sc->msi_tupelo = FALSE;
263 	if ((error = aac_check_firmware(sc)) != 0)
264 		return(error);
265 
266 	/*
267 	 * Initialize locks
268 	 */
269 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
270 	TAILQ_INIT(&sc->aac_container_tqh);
271 	TAILQ_INIT(&sc->aac_ev_cmfree);
272 
273 	/* Initialize the clock daemon callout. */
274 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
275 
276 	/*
277 	 * Initialize the adapter.
278 	 */
279 	if ((error = aac_alloc(sc)) != 0)
280 		return(error);
281 	aac_define_int_mode(sc);
282 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
283 		if ((error = aac_init(sc)) != 0)
284 			return(error);
285 	}
286 
287 	/*
288 	 * Allocate and connect our interrupt.
289 	 */
290 	if ((error = aac_setup_intr(sc)) != 0)
291 		return(error);
292 
293 	/*
294 	 * Print a little information about the controller.
295 	 */
296 	aac_describe_controller(sc);
297 
298 	/*
299 	 * Make the control device.
300 	 */
301 	unit = device_get_unit(sc->aac_dev);
302 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
303 				 0640, "aacraid%d", unit);
304 	sc->aac_dev_t->si_drv1 = sc;
305 
306 	/* Create the AIF thread */
307 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
308 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
309 		panic("Could not create AIF thread");
310 
311 	/* Register the shutdown method to only be called post-dump */
312 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
313 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
314 		device_printf(sc->aac_dev,
315 			      "shutdown event registration failed\n");
316 
317 	/* Find containers */
318 	mtx_lock(&sc->aac_io_lock);
319 	aac_alloc_sync_fib(sc, &fib);
320 	/* loop over possible containers */
321 	do {
322 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
323 			continue;
324 		if (i == 0)
325 			count = mir.MntRespCount;
326 		aac_add_container(sc, &mir, 0, uid);
327 		i++;
328 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
329 	aac_release_sync_fib(sc);
330 	mtx_unlock(&sc->aac_io_lock);
331 
332 	/* Register with CAM for the containers */
333 	TAILQ_INIT(&sc->aac_sim_tqh);
334 	aac_container_bus(sc);
335 	/* Register with CAM for the non-DASD devices */
336 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
337 		aac_get_bus_info(sc);
338 
339 	/* poke the bus to actually attach the child devices */
340 	bus_generic_attach(sc->aac_dev);
341 
342 	/* mark the controller up */
343 	sc->aac_state &= ~AAC_STATE_SUSPEND;
344 
345 	/* enable interrupts now */
346 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
347 
348 	mtx_lock(&sc->aac_io_lock);
349 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
350 	mtx_unlock(&sc->aac_io_lock);
351 
352 	return(0);
353 }
354 
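/*
 * Periodic timer: push the current host time to the controller in a
 * SendHostTime FIB, then reschedule ourselves for 30 minutes later.
 */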
355 static void
356 aac_daemon(void *arg)
357 {
358 	struct aac_softc *sc;
359 	struct timeval tv;
360 	struct aac_command *cm;
361 	struct aac_fib *fib;
362 
363 	sc = arg;
364 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
365 
366 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
367 	if (callout_pending(&sc->aac_daemontime) ||
368 	    callout_active(&sc->aac_daemontime) == 0)
369 		return;
370 	getmicrotime(&tv);
371 
372 	if (!aacraid_alloc_command(sc, &cm)) {
373 		fib = cm->cm_fib;
374 		cm->cm_timestamp = time_uptime;
375 		cm->cm_datalen = 0;
376 		cm->cm_flags |= AAC_CMD_WAIT;
377 
378 		fib->Header.Size =
379 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
380 		fib->Header.XferState =
381 			AAC_FIBSTATE_HOSTOWNED   |
382 			AAC_FIBSTATE_INITIALISED |
383 			AAC_FIBSTATE_EMPTY	 |
384 			AAC_FIBSTATE_FROMHOST	 |
385 			AAC_FIBSTATE_REXPECTED   |
386 			AAC_FIBSTATE_NORM	 |
387 			AAC_FIBSTATE_ASYNC	 |
388 			AAC_FIBSTATE_FAST_RESPONSE;
389 		fib->Header.Command = SendHostTime;
390 		*(uint32_t *)fib->data = htole32(tv.tv_sec);
391 
392 		aacraid_map_command_sg(cm, NULL, 0, 0);
393 		aacraid_release_command(cm);
394 	}
395 
396 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
397 }
398 
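/*
 * Queue an event to be delivered later; only command-free
 * (AAC_EVENT_CMFREE) events are currently handled, anything else is
 * reported as unknown.
 */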
399 void
400 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
401 {
402 
403 	switch (event->ev_type & AAC_EVENT_MASK) {
404 	case AAC_EVENT_CMFREE:
405 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
406 		break;
407 	default:
408 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
409 		    event->ev_type);
410 		break;
411 	}
412 
413 	return;
414 }
415 
416 /*
417  * Request information about container #cid
418  */
419 static int
420 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
421 		       struct aac_mntinforesp *mir, u_int32_t *uid)
422 {
423 	struct aac_command *cm;
424 	struct aac_fib *fib;
425 	struct aac_mntinfo *mi;
426 	struct aac_cnt_config *ccfg;
427 	int rval;
428 
429 	if (sync_fib == NULL) {
430 		if (aacraid_alloc_command(sc, &cm)) {
431 			device_printf(sc->aac_dev,
432 				"Warning, no free command available\n");
433 			return (-1);
434 		}
435 		fib = cm->cm_fib;
436 	} else {
437 		fib = sync_fib;
438 	}
439 
440 	mi = (struct aac_mntinfo *)&fib->data[0];
441 	/* 4KB sector support? 64-bit LBA? */
442 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
443 		mi->Command = VM_NameServeAllBlk;
444 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
445 		mi->Command = VM_NameServe64;
446 	else
447 		mi->Command = VM_NameServe;
448 	mi->MntType = FT_FILESYS;
449 	mi->MntCount = cid;
450 	aac_mntinfo_tole(mi);
451 
452 	if (sync_fib) {
453 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
454 			 sizeof(struct aac_mntinfo))) {
455 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
456 			return (-1);
457 		}
458 	} else {
459 		cm->cm_timestamp = time_uptime;
460 		cm->cm_datalen = 0;
461 
462 		fib->Header.Size =
463 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
464 		fib->Header.XferState =
465 			AAC_FIBSTATE_HOSTOWNED   |
466 			AAC_FIBSTATE_INITIALISED |
467 			AAC_FIBSTATE_EMPTY	 |
468 			AAC_FIBSTATE_FROMHOST	 |
469 			AAC_FIBSTATE_REXPECTED   |
470 			AAC_FIBSTATE_NORM	 |
471 			AAC_FIBSTATE_ASYNC	 |
472 			AAC_FIBSTATE_FAST_RESPONSE;
473 		fib->Header.Command = ContainerCommand;
474 		if (aacraid_wait_command(cm) != 0) {
475 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
476 			aacraid_release_command(cm);
477 			return (-1);
478 		}
479 	}
480 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
481 	aac_mntinforesp_toh(mir);
482 
483 	/* UID */
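	/*
	 * Default the UID to the container id; if the container is visible
	 * and not hidden, ask the firmware to translate the cid into a
	 * 32-bit UID (CT_CID_TO_32BITS_UID).
	 */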
484 	*uid = cid;
485 	if (mir->MntTable[0].VolType != CT_NONE &&
486 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
487 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
488 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
489 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
490 		}
491 		ccfg = (struct aac_cnt_config *)&fib->data[0];
492 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
493 		ccfg->Command = VM_ContainerConfig;
494 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
495 		ccfg->CTCommand.param[0] = cid;
496 		aac_cnt_config_tole(ccfg);
497 
498 		if (sync_fib) {
499 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
500 				sizeof(struct aac_cnt_config));
501 			aac_cnt_config_toh(ccfg);
502 			if (rval == 0 && ccfg->Command == ST_OK &&
503 				ccfg->CTCommand.param[0] == CT_OK &&
504 				mir->MntTable[0].VolType != CT_PASSTHRU)
505 				*uid = ccfg->CTCommand.param[1];
506 		} else {
507 			fib->Header.Size =
508 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
509 			fib->Header.XferState =
510 				AAC_FIBSTATE_HOSTOWNED   |
511 				AAC_FIBSTATE_INITIALISED |
512 				AAC_FIBSTATE_EMPTY	 |
513 				AAC_FIBSTATE_FROMHOST	 |
514 				AAC_FIBSTATE_REXPECTED   |
515 				AAC_FIBSTATE_NORM	 |
516 				AAC_FIBSTATE_ASYNC	 |
517 				AAC_FIBSTATE_FAST_RESPONSE;
518 			fib->Header.Command = ContainerCommand;
519 			rval = aacraid_wait_command(cm);
520 			aac_cnt_config_toh(ccfg);
521 			if (rval == 0 && ccfg->Command == ST_OK &&
522 				ccfg->CTCommand.param[0] == CT_OK &&
523 				mir->MntTable[0].VolType != CT_PASSTHRU)
524 				*uid = ccfg->CTCommand.param[1];
525 			aacraid_release_command(cm);
526 		}
527 	}
528 
529 	return (0);
530 }
531 
532 /*
533  * Create a device to represent a new container
534  */
535 static void
536 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
537 		  u_int32_t uid)
538 {
539 	struct aac_container *co;
540 
541 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
542 
543 	/*
544 	 * Check container volume type for validity.  Note that many of
545 	 * the possible types may never show up.
546 	 */
547 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
548 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
549 		       M_NOWAIT | M_ZERO);
550 		if (co == NULL) {
551 			panic("Out of memory?!");
552 		}
553 
554 		co->co_found = f;
555 		bcopy(&mir->MntTable[0], &co->co_mntobj,
556 		      sizeof(struct aac_mntobj));
557 		co->co_uid = uid;
558 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
559 	}
560 }
561 
562 /*
563  * Allocate resources associated with (sc)
564  */
565 static int
566 aac_alloc(struct aac_softc *sc)
567 {
568 	bus_size_t maxsize;
569 
570 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
571 
572 	/*
573 	 * Create DMA tag for mapping buffers into controller-addressable space.
574 	 */
575 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
576 			       1, 0, 			/* algnmnt, boundary */
577 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
578 			       BUS_SPACE_MAXADDR :
579 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
580 			       BUS_SPACE_MAXADDR, 	/* highaddr */
581 			       NULL, NULL, 		/* filter, filterarg */
582 			       sc->aac_max_sectors << 9, /* maxsize */
583 			       sc->aac_sg_tablesize,	/* nsegments */
584 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
585 			       BUS_DMA_ALLOCNOW,	/* flags */
586 			       busdma_lock_mutex,	/* lockfunc */
587 			       &sc->aac_io_lock,	/* lockfuncarg */
588 			       &sc->aac_buffer_dmat)) {
589 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
590 		return (ENOMEM);
591 	}
592 
593 	/*
594 	 * Create DMA tag for mapping FIBs into controller-addressable space.
595 	 */
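	/*
	 * Allow 31 bytes of slack per FIB so each one can be aligned on a
	 * 32-byte boundary; NEW_COMM_TYPE1 also needs room for a transport
	 * header in front of each FIB (see aac_alloc_commands()).
	 */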
596 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
597 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
598 			sizeof(struct aac_fib_xporthdr) + 31);
599 	else
600 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
601 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
602 			       1, 0, 			/* algnmnt, boundary */
603 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
604 			       BUS_SPACE_MAXADDR_32BIT :
605 			       0x7fffffff,		/* lowaddr */
606 			       BUS_SPACE_MAXADDR, 	/* highaddr */
607 			       NULL, NULL, 		/* filter, filterarg */
608 			       maxsize,  		/* maxsize */
609 			       1,			/* nsegments */
610 			       maxsize,			/* maxsegsize */
611 			       0,			/* flags */
612 			       NULL, NULL,		/* No locking needed */
613 			       &sc->aac_fib_dmat)) {
614 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
615 		return (ENOMEM);
616 	}
617 
618 	/*
619 	 * Create DMA tag for the common structure and allocate it.
620 	 */
621 	maxsize = sizeof(struct aac_common);
622 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
623 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
624 			       1, 0,			/* algnmnt, boundary */
625 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
626 			       BUS_SPACE_MAXADDR_32BIT :
627 			       0x7fffffff,		/* lowaddr */
628 			       BUS_SPACE_MAXADDR, 	/* highaddr */
629 			       NULL, NULL, 		/* filter, filterarg */
630 			       maxsize, 		/* maxsize */
631 			       1,			/* nsegments */
632 			       maxsize,			/* maxsegsize */
633 			       0,			/* flags */
634 			       NULL, NULL,		/* No locking needed */
635 			       &sc->aac_common_dmat)) {
636 		device_printf(sc->aac_dev,
637 			      "can't allocate common structure DMA tag\n");
638 		return (ENOMEM);
639 	}
640 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
641 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
642 		device_printf(sc->aac_dev, "can't allocate common structure\n");
643 		return (ENOMEM);
644 	}
645 
646 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
647 			sc->aac_common, maxsize,
648 			aac_common_map, sc, 0);
649 	bzero(sc->aac_common, maxsize);
650 
651 	/* Allocate some FIBs and associated command structs */
652 	TAILQ_INIT(&sc->aac_fibmap_tqh);
653 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
654 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
655 	mtx_lock(&sc->aac_io_lock);
656 	while (sc->total_fibs < sc->aac_max_fibs) {
657 		if (aac_alloc_commands(sc) != 0)
658 			break;
659 	}
660 	mtx_unlock(&sc->aac_io_lock);
661 	if (sc->total_fibs == 0)
662 		return (ENOMEM);
663 
664 	return (0);
665 }
666 
667 /*
668  * Free all of the resources associated with (sc)
669  *
670  * Should not be called if the controller is active.
671  */
672 void
673 aacraid_free(struct aac_softc *sc)
674 {
675 	int i;
676 
677 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
678 
679 	/* remove the control device */
680 	if (sc->aac_dev_t != NULL)
681 		destroy_dev(sc->aac_dev_t);
682 
683 	/* throw away any FIB buffers, discard the FIB DMA tag */
684 	aac_free_commands(sc);
685 	if (sc->aac_fib_dmat)
686 		bus_dma_tag_destroy(sc->aac_fib_dmat);
687 
688 	free(sc->aac_commands, M_AACRAIDBUF);
689 
690 	/* destroy the common area */
691 	if (sc->aac_common) {
692 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
693 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
694 				sc->aac_common_dmamap);
695 	}
696 	if (sc->aac_common_dmat)
697 		bus_dma_tag_destroy(sc->aac_common_dmat);
698 
699 	/* disconnect the interrupt handler */
700 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
701 		if (sc->aac_intr[i])
702 			bus_teardown_intr(sc->aac_dev,
703 				sc->aac_irq[i], sc->aac_intr[i]);
704 		if (sc->aac_irq[i])
705 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
706 				sc->aac_irq_rid[i], sc->aac_irq[i]);
707 		else
708 			break;
709 	}
710 	if (sc->msi_enabled || sc->msi_tupelo)
711 		pci_release_msi(sc->aac_dev);
712 
713 	/* destroy data-transfer DMA tag */
714 	if (sc->aac_buffer_dmat)
715 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
716 
717 	/* destroy the parent DMA tag */
718 	if (sc->aac_parent_dmat)
719 		bus_dma_tag_destroy(sc->aac_parent_dmat);
720 
721 	/* release the register window mapping */
722 	if (sc->aac_regs_res0 != NULL)
723 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
724 				     sc->aac_regs_rid0, sc->aac_regs_res0);
725 	if (sc->aac_regs_res1 != NULL)
726 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
727 				     sc->aac_regs_rid1, sc->aac_regs_res1);
728 }
729 
730 /*
731  * Disconnect from the controller completely, in preparation for unload.
732  */
733 int
734 aacraid_detach(device_t dev)
735 {
736 	struct aac_softc *sc;
737 	struct aac_container *co;
738 	struct aac_sim	*sim;
739 	int error;
740 
741 	sc = device_get_softc(dev);
742 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
743 
744 	callout_drain(&sc->aac_daemontime);
745 	/* Remove the child containers */
746 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
747 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
748 		free(co, M_AACRAIDBUF);
749 	}
750 
751 	/* Remove the CAM SIMs */
752 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
753 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
754 		error = device_delete_child(dev, sim->sim_dev);
755 		if (error)
756 			return (error);
757 		free(sim, M_AACRAIDBUF);
758 	}
759 
760 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
761 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
762 		wakeup(sc->aifthread);
763 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
764 	}
765 
766 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
767 		panic("Cannot shutdown AIF thread");
768 
769 	if ((error = aacraid_shutdown(dev)))
770 		return(error);
771 
772 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
773 
774 	aacraid_free(sc);
775 
776 	mtx_destroy(&sc->aac_io_lock);
777 
778 	return(0);
779 }
780 
781 /*
782  * Bring the controller down to a dormant state and detach all child devices.
783  *
784  * This function is called before detach or system shutdown.
785  *
786  * Note that we can assume that the bioq on the controller is empty, as we won't
787  * allow shutdown if any device is open.
788  */
789 int
790 aacraid_shutdown(device_t dev)
791 {
792 	struct aac_softc *sc;
793 	struct aac_fib *fib;
794 	struct aac_close_command *cc;
795 
796 	sc = device_get_softc(dev);
797 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
798 
799 	sc->aac_state |= AAC_STATE_SUSPEND;
800 
801 	/*
802 	 * Send a VM_CloseAll (container shutdown) FIB to the controller to
803 	 * convince it that we don't want to talk to it anymore.  We've been
804 	 * closed and all I/O has completed already.
805 	 */
806 	device_printf(sc->aac_dev, "shutting down controller...");
807 
808 	mtx_lock(&sc->aac_io_lock);
809 	aac_alloc_sync_fib(sc, &fib);
810 	cc = (struct aac_close_command *)&fib->data[0];
811 
812 	bzero(cc, sizeof(struct aac_close_command));
813 	cc->Command = htole32(VM_CloseAll);
814 	cc->ContainerId = htole32(0xfffffffe);
815 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
816 	    sizeof(struct aac_close_command)))
817 		printf("FAILED.\n");
818 	else
819 		printf("done\n");
820 
821 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
822 	aac_release_sync_fib(sc);
823 	mtx_unlock(&sc->aac_io_lock);
824 
825 	return(0);
826 }
827 
828 /*
829  * Bring the controller to a quiescent state, ready for system suspend.
830  */
831 int
832 aacraid_suspend(device_t dev)
833 {
834 	struct aac_softc *sc;
835 
836 	sc = device_get_softc(dev);
837 
838 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
839 	sc->aac_state |= AAC_STATE_SUSPEND;
840 
841 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
842 	return(0);
843 }
844 
845 /*
846  * Bring the controller back to a state ready for operation.
847  */
848 int
849 aacraid_resume(device_t dev)
850 {
851 	struct aac_softc *sc;
852 
853 	sc = device_get_softc(dev);
854 
855 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
856 	sc->aac_state &= ~AAC_STATE_SUSPEND;
857 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
858 	return(0);
859 }
860 
861 /*
862  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
863  */
864 void
865 aacraid_new_intr_type1(void *arg)
866 {
867 	struct aac_msix_ctx *ctx;
868 	struct aac_softc *sc;
869 	int vector_no;
870 	struct aac_command *cm;
871 	struct aac_fib *fib;
872 	u_int32_t bellbits, bellbits_shifted, index, handle;
873 	int isFastResponse, isAif, noMoreAif, mode;
874 
875 	ctx = (struct aac_msix_ctx *)arg;
876 	sc = ctx->sc;
877 	vector_no = ctx->vector_no;
878 
879 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
880 	mtx_lock(&sc->aac_io_lock);
881 
882 	if (sc->msi_enabled) {
883 		mode = AAC_INT_MODE_MSI;
884 		if (vector_no == 0) {
885 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
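			/*
			 * MSI doorbell bits: 0x40000 indicates a pending AIF,
			 * 0x1000 a synchronous command completion.
			 */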
886 			if (bellbits & 0x40000)
887 				mode |= AAC_INT_MODE_AIF;
888 			else if (bellbits & 0x1000)
889 				mode |= AAC_INT_MODE_SYNC;
890 		}
891 	} else {
892 		mode = AAC_INT_MODE_INTX;
893 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
894 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
895 			bellbits = AAC_DB_RESPONSE_SENT_NS;
896 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
897 		} else {
898 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
899 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
900 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
901 				mode |= AAC_INT_MODE_AIF;
902 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
903 				mode |= AAC_INT_MODE_SYNC;
904 		}
905 		/* ODR readback, Prep #238630 */
906 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
907 	}
908 
909 	if (mode & AAC_INT_MODE_SYNC) {
910 		if (sc->aac_sync_cm) {
911 			cm = sc->aac_sync_cm;
912 			aac_unmap_command(cm);
913 			cm->cm_flags |= AAC_CMD_COMPLETED;
914 			aac_fib_header_toh(&cm->cm_fib->Header);
915 
916 			/* is there a completion handler? */
917 			if (cm->cm_complete != NULL) {
918 				cm->cm_complete(cm);
919 			} else {
920 				/* assume that someone is sleeping on this command */
921 				wakeup(cm);
922 			}
923 			sc->flags &= ~AAC_QUEUE_FRZN;
924 			sc->aac_sync_cm = NULL;
925 		}
926 		mode = 0;
927 	}
928 
929 	if (mode & AAC_INT_MODE_AIF) {
930 		if (mode & AAC_INT_MODE_INTX) {
931 			aac_request_aif(sc);
932 			mode = 0;
933 		}
934 	}
935 
936 	if (mode) {
937 		/* handle async. status */
938 		index = sc->aac_host_rrq_idx[vector_no];
939 		for (;;) {
940 			isFastResponse = isAif = noMoreAif = 0;
941 			/* remove toggle bit (31) */
942 			handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
943 			    0x7fffffff);
944 			/* check fast response bit (30) */
945 			if (handle & 0x40000000)
946 				isFastResponse = 1;
947 			/* check AIF bit (23) */
948 			else if (handle & 0x00800000)
949 				isAif = TRUE;
950 			handle &= 0x0000ffff;
951 			if (handle == 0)
952 				break;
953 
954 			cm = sc->aac_commands + (handle - 1);
955 			fib = cm->cm_fib;
956 			aac_fib_header_toh(&fib->Header);
957 			sc->aac_rrq_outstanding[vector_no]--;
958 			if (isAif) {
959 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
960 				if (!noMoreAif)
961 					aac_handle_aif(sc, fib);
962 				aac_remove_busy(cm);
963 				aacraid_release_command(cm);
964 			} else {
965 				if (isFastResponse) {
966 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
967 					*((u_int32_t *)(fib->data)) = htole32(ST_OK);
968 					cm->cm_flags |= AAC_CMD_FASTRESP;
969 				}
970 				aac_remove_busy(cm);
971 				aac_unmap_command(cm);
972 				cm->cm_flags |= AAC_CMD_COMPLETED;
973 
974 				/* is there a completion handler? */
975 				if (cm->cm_complete != NULL) {
976 					cm->cm_complete(cm);
977 				} else {
978 					/* assume that someone is sleeping on this command */
979 					wakeup(cm);
980 				}
981 				sc->flags &= ~AAC_QUEUE_FRZN;
982 			}
983 
984 			sc->aac_common->ac_host_rrq[index++] = 0;
985 			if (index == (vector_no + 1) * sc->aac_vector_cap)
986 				index = vector_no * sc->aac_vector_cap;
987 			sc->aac_host_rrq_idx[vector_no] = index;
988 
989 			if ((isAif && !noMoreAif) || sc->aif_pending)
990 				aac_request_aif(sc);
991 		}
992 	}
993 
994 	if (mode & AAC_INT_MODE_AIF) {
995 		aac_request_aif(sc);
996 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
997 		mode = 0;
998 	}
999 
1000 	/* see if we can start some more I/O */
1001 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1002 		aacraid_startio(sc);
1003 	mtx_unlock(&sc->aac_io_lock);
1004 }
1005 
1006 /*
1007  * Handle notification of one or more FIBs coming from the controller.
1008  */
1009 static void
1010 aac_command_thread(struct aac_softc *sc)
1011 {
1012 	int retval;
1013 
1014 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1015 
1016 	mtx_lock(&sc->aac_io_lock);
1017 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1018 
1019 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1020 
1021 		retval = 0;
1022 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1023 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1024 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1025 
1026 		/*
1027 		 * First see if any FIBs need to be allocated.
1028 		 */
1029 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1030 			aac_alloc_commands(sc);
1031 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1032 			aacraid_startio(sc);
1033 		}
1034 
1035 		/*
1036 		 * While we're here, check to see if any commands are stuck.
1037 		 * This is pretty low-priority, so it's ok if it doesn't
1038 		 * always fire.
1039 		 */
1040 		if (retval == EWOULDBLOCK)
1041 			aac_timeout(sc);
1042 
1043 		/* Check the hardware printf message buffer */
1044 		if (sc->aac_common->ac_printf[0] != 0)
1045 			aac_print_printf(sc);
1046 	}
1047 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1048 	mtx_unlock(&sc->aac_io_lock);
1049 	wakeup(sc->aac_dev);
1050 
1051 	aac_kthread_exit(0);
1052 }
1053 
1054 /*
1055  * Submit a command to the controller, return when it completes.
1056  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1057  *     be stuck here forever.  At the same time, signals are not caught
1058  *     because there is a risk that a signal could wakeup the sleep before
1059  *     the card has a chance to complete the command.  Since there is no way
1060  *     to cancel a command that is in progress, we can't protect against the
1061  *     card completing a command late and spamming the command and data
1062  *     memory.  So, we are held hostage until the command completes.
1063  */
1064 int
1065 aacraid_wait_command(struct aac_command *cm)
1066 {
1067 	struct aac_softc *sc;
1068 	int error;
1069 
1070 	sc = cm->cm_sc;
1071 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1072 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1073 
1074 	/* Put the command on the ready queue and get things going */
1075 	aac_enqueue_ready(cm);
1076 	aacraid_startio(sc);
1077 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1078 	return(error);
1079 }
1080 
1081 /*
1082  * Command Buffer Management
1083  */
1084 
1085 /*
1086  * Allocate a command.
1087  */
1088 int
1089 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1090 {
1091 	struct aac_command *cm;
1092 
1093 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1094 
1095 	if ((cm = aac_dequeue_free(sc)) == NULL) {
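		/*
		 * No free commands: if we still have headroom, ask the AIF
		 * thread to allocate more FIBs, and let the caller retry.
		 */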
1096 		if (sc->total_fibs < sc->aac_max_fibs) {
1097 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1098 			wakeup(sc->aifthread);
1099 		}
1100 		return (EBUSY);
1101 	}
1102 
1103 	*cmp = cm;
1104 	return(0);
1105 }
1106 
1107 /*
1108  * Release a command back to the freelist.
1109  */
1110 void
1111 aacraid_release_command(struct aac_command *cm)
1112 {
1113 	struct aac_event *event;
1114 	struct aac_softc *sc;
1115 
1116 	sc = cm->cm_sc;
1117 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1118 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1119 
1120 	/* (re)initialize the command/FIB */
1121 	cm->cm_sgtable = NULL;
1122 	cm->cm_flags = 0;
1123 	cm->cm_complete = NULL;
1124 	cm->cm_ccb = NULL;
1125 	cm->cm_passthr_dmat = 0;
1126 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1127 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1128 	cm->cm_fib->Header.Unused = 0;
1129 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1130 
1131 	/*
1132 	 * These are duplicated in aacraid_map_command_sg to cover the case
1133 	 * where an intermediate stage may have destroyed them.  They're left
1134 	 * initialized here for debugging purposes only.
1135 	 */
1136 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1137 	cm->cm_fib->Header.Handle = 0;
1138 
1139 	aac_enqueue_free(cm);
1140 
1141 	/*
1142 	 * Dequeue all events so that there's no risk of events getting
1143 	 * stranded.
1144 	 */
1145 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1146 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1147 		event->ev_callback(sc, event, event->ev_arg);
1148 	}
1149 }
1150 
1151 /*
1152  * Map helper for command/FIB allocation.
1153  */
1154 static void
1155 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1156 {
1157 	uint64_t	*fibphys;
1158 
1159 	fibphys = (uint64_t *)arg;
1160 
1161 	*fibphys = segs[0].ds_addr;
1162 }
1163 
1164 /*
1165  * Allocate and initialize commands/FIBs for this adapter.
1166  */
1167 static int
1168 aac_alloc_commands(struct aac_softc *sc)
1169 {
1170 	struct aac_command *cm;
1171 	struct aac_fibmap *fm;
1172 	uint64_t fibphys;
1173 	int i, error;
1174 	u_int32_t maxsize;
1175 
1176 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1177 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1178 
1179 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1180 		return (ENOMEM);
1181 
1182 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1183 	if (fm == NULL)
1184 		return (ENOMEM);
1185 
1186 	mtx_unlock(&sc->aac_io_lock);
1187 	/* allocate the FIBs in DMAable memory and load them */
1188 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1189 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1190 		device_printf(sc->aac_dev,
1191 			      "Not enough contiguous memory available.\n");
1192 		free(fm, M_AACRAIDBUF);
1193 		mtx_lock(&sc->aac_io_lock);
1194 		return (ENOMEM);
1195 	}
1196 
1197 	maxsize = sc->aac_max_fib_size + 31;
1198 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1199 		maxsize += sizeof(struct aac_fib_xporthdr);
1200 	/* Ignore errors since this doesn't bounce */
1201 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1202 			      sc->aac_max_fibs_alloc * maxsize,
1203 			      aac_map_command_helper, &fibphys, 0);
1204 	mtx_lock(&sc->aac_io_lock);
1205 
1206 	/* initialize constant fields in the command structure */
1207 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1208 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1209 		cm = sc->aac_commands + sc->total_fibs;
1210 		fm->aac_commands = cm;
1211 		cm->cm_sc = sc;
1212 		cm->cm_fib = (struct aac_fib *)
1213 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1214 		cm->cm_fibphys = fibphys + i * maxsize;
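		/*
		 * Align each FIB on a 32-byte boundary; with NEW_COMM_TYPE1
		 * leave room for the transport header in front of the FIB.
		 */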
1215 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1216 			u_int64_t fibphys_aligned;
1217 			fibphys_aligned =
1218 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1219 			cm->cm_fib = (struct aac_fib *)
1220 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1221 			cm->cm_fibphys = fibphys_aligned;
1222 		} else {
1223 			u_int64_t fibphys_aligned;
1224 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1225 			cm->cm_fib = (struct aac_fib *)
1226 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1227 			cm->cm_fibphys = fibphys_aligned;
1228 		}
1229 		cm->cm_index = sc->total_fibs;
1230 
1231 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1232 					       &cm->cm_datamap)) != 0)
1233 			break;
1234 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1235 			aacraid_release_command(cm);
1236 		sc->total_fibs++;
1237 	}
1238 
1239 	if (i > 0) {
1240 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1241 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1242 		return (0);
1243 	}
1244 
1245 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1246 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1247 	free(fm, M_AACRAIDBUF);
1248 	return (ENOMEM);
1249 }
1250 
1251 /*
1252  * Free FIBs owned by this adapter.
1253  */
1254 static void
1255 aac_free_commands(struct aac_softc *sc)
1256 {
1257 	struct aac_fibmap *fm;
1258 	struct aac_command *cm;
1259 	int i;
1260 
1261 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1262 
1263 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1264 
1265 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1266 		/*
1267 		 * We check against total_fibs to handle partially
1268 		 * allocated blocks.
1269 		 */
1270 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1271 			cm = fm->aac_commands + i;
1272 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1273 		}
1274 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1275 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1276 		free(fm, M_AACRAIDBUF);
1277 	}
1278 }
1279 
1280 /*
1281  * Command-mapping helper function - populate this command's s/g table.
1282  */
1283 void
1284 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1285 {
1286 	struct aac_softc *sc;
1287 	struct aac_command *cm;
1288 	struct aac_fib *fib;
1289 	int i;
1290 
1291 	cm = (struct aac_command *)arg;
1292 	sc = cm->cm_sc;
1293 	fib = cm->cm_fib;
1294 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1295 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1296 
1297 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1298 		return;
1299 
1300 	/* copy into the FIB */
1301 	if (cm->cm_sgtable != NULL) {
1302 		if (fib->Header.Command == RawIo2) {
1303 			struct aac_raw_io2 *raw;
1304 			struct aac_sge_ieee1212 *sg;
1305 			u_int32_t min_size = PAGE_SIZE, cur_size;
1306 			int conformable = TRUE;
1307 
1308 			raw = (struct aac_raw_io2 *)&fib->data[0];
1309 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1310 			raw->sgeCnt = nseg;
1311 
1312 			for (i = 0; i < nseg; i++) {
1313 				cur_size = segs[i].ds_len;
1314 				sg[i].addrHigh = 0;
1315 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1316 				sg[i].length = cur_size;
1317 				sg[i].flags = 0;
1318 				if (i == 0) {
1319 					raw->sgeFirstSize = cur_size;
1320 				} else if (i == 1) {
1321 					raw->sgeNominalSize = cur_size;
1322 					min_size = cur_size;
1323 				} else if ((i+1) < nseg &&
1324 					cur_size != raw->sgeNominalSize) {
1325 					conformable = FALSE;
1326 					if (cur_size < min_size)
1327 						min_size = cur_size;
1328 				}
1329 			}
1330 
1331 			/* not conformable: evaluate required sg elements */
1332 			if (!conformable) {
1333 				int j, err_found, nseg_new = nseg;
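				/*
				 * Find the largest multiple of PAGE_SIZE that
				 * evenly divides every middle segment;
				 * splitting on that size yields a conformant
				 * list.
				 */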
1334 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1335 					err_found = FALSE;
1336 					nseg_new = 2;
1337 					for (j = 1; j < nseg - 1; ++j) {
1338 						if (sg[j].length % (i*PAGE_SIZE)) {
1339 							err_found = TRUE;
1340 							break;
1341 						}
1342 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1343 					}
1344 					if (!err_found)
1345 						break;
1346 				}
1347 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1348 					!(sc->hint_flags & 4))
1349 					nseg = aac_convert_sgraw2(sc,
1350 						raw, i, nseg, nseg_new);
1351 			} else {
1352 				raw->flags |= RIO2_SGL_CONFORMANT;
1353 			}
1354 
1355 			for (i = 0; i < nseg; i++)
1356 				aac_sge_ieee1212_tole(sg + i);
1357 			aac_raw_io2_tole(raw);
1358 
1359 			/* update the FIB size for the s/g count */
1360 			fib->Header.Size += nseg *
1361 				sizeof(struct aac_sge_ieee1212);
1362 
1363 		} else if (fib->Header.Command == RawIo) {
1364 			struct aac_sg_tableraw *sg;
1365 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1366 			sg->SgCount = htole32(nseg);
1367 			for (i = 0; i < nseg; i++) {
1368 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1369 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1370 				sg->SgEntryRaw[i].Next = 0;
1371 				sg->SgEntryRaw[i].Prev = 0;
1372 				sg->SgEntryRaw[i].Flags = 0;
1373 				aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1374 			}
1375 			aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1376 			/* update the FIB size for the s/g count */
1377 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1378 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1379 			struct aac_sg_table *sg;
1380 			sg = cm->cm_sgtable;
1381 			sg->SgCount = htole32(nseg);
1382 			for (i = 0; i < nseg; i++) {
1383 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1384 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1385 				aac_sg_entry_tole(&sg->SgEntry[i]);
1386 			}
1387 			/* update the FIB size for the s/g count */
1388 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1389 		} else {
1390 			struct aac_sg_table64 *sg;
1391 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1392 			sg->SgCount = htole32(nseg);
1393 			for (i = 0; i < nseg; i++) {
1394 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1395 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1396 				aac_sg_entry64_tole(&sg->SgEntry64[i]);
1397 			}
1398 			/* update the FIB size for the s/g count */
1399 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1400 		}
1401 	}
1402 
1403 	/* Fix up the address values in the FIB.  Use the command array index
1404 	 * instead of a pointer since these fields are only 32 bits.  Shift
1405 	 * the SenderFibAddress over to make room for the fast response bit
1406 	 * and for the AIF bit.
1407 	 */
1408 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1409 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1410 
1411 	/* save a pointer to the command for speedy reverse-lookup */
1412 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1413 
1414 	if (cm->cm_passthr_dmat == 0) {
1415 		if (cm->cm_flags & AAC_CMD_DATAIN)
1416 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1417 							BUS_DMASYNC_PREREAD);
1418 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1419 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1420 							BUS_DMASYNC_PREWRITE);
1421 	}
1422 
1423 	cm->cm_flags |= AAC_CMD_MAPPED;
1424 
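	/*
	 * Dispatch the command: AAC_CMD_WAIT FIBs and controllers running in
	 * sync mode go through the synchronous mailbox interface; everything
	 * else is handed to the adapter's queue, retrying briefly while the
	 * adapter is busy.
	 */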
1425 	if (cm->cm_flags & AAC_CMD_WAIT) {
1426 		aac_fib_header_tole(&fib->Header);
1427 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1428 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1429 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1430 		u_int32_t wait = 0;
1431 		sc->aac_sync_cm = cm;
1432 		aac_fib_header_tole(&fib->Header);
1433 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1434 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1435 	} else {
1436 		int count = 10000000L;
1437 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1438 			if (--count == 0) {
1439 				aac_unmap_command(cm);
1440 				sc->flags |= AAC_QUEUE_FRZN;
1441 				aac_requeue_ready(cm);
1442 			}
1443 			DELAY(5);			/* wait 5 usec. */
1444 		}
1445 	}
1446 }
1447 
1448 
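/*
 * Convert a non-conformant RawIo2 s/g list into nseg_new entries of
 * 'pages' pages each so that the list satisfies the RIO2 conformant
 * SGL layout.
 */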
1449 static int
1450 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1451 				   int pages, int nseg, int nseg_new)
1452 {
1453 	struct aac_sge_ieee1212 *sge;
1454 	int i, j, pos;
1455 	u_int32_t addr_low;
1456 
1457 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1458 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1459 	if (sge == NULL)
1460 		return nseg;
1461 
1462 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1463 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1464 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1465 			sge[pos].addrLow = addr_low;
1466 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1467 			if (addr_low < raw->sge[i].addrLow)
1468 				sge[pos].addrHigh++;
1469 			sge[pos].length = pages * PAGE_SIZE;
1470 			sge[pos].flags = 0;
1471 			pos++;
1472 		}
1473 	}
1474 	sge[pos] = raw->sge[nseg-1];
1475 	for (i = 1; i < nseg_new; ++i)
1476 		raw->sge[i] = sge[i];
1477 
1478 	free(sge, M_AACRAIDBUF);
1479 	raw->sgeCnt = nseg_new;
1480 	raw->flags |= RIO2_SGL_CONFORMANT;
1481 	raw->sgeNominalSize = pages * PAGE_SIZE;
1482 	return nseg_new;
1483 }
1484 
1485 
1486 /*
1487  * Unmap a command from controller-visible space.
1488  */
1489 static void
1490 aac_unmap_command(struct aac_command *cm)
1491 {
1492 	struct aac_softc *sc;
1493 
1494 	sc = cm->cm_sc;
1495 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1496 
1497 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1498 		return;
1499 
1500 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1501 		if (cm->cm_flags & AAC_CMD_DATAIN)
1502 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1503 					BUS_DMASYNC_POSTREAD);
1504 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1505 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1506 					BUS_DMASYNC_POSTWRITE);
1507 
1508 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1509 	}
1510 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1511 }
1512 
1513 /*
1514  * Hardware Interface
1515  */
1516 
1517 /*
1518  * Map callback for the common area: record the bus address at which it was loaded.
1519  */
1520 static void
1521 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1522 {
1523 	struct aac_softc *sc;
1524 
1525 	sc = (struct aac_softc *)arg;
1526 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1527 
1528 	sc->aac_common_busaddr = segs[0].ds_addr;
1529 }
1530 
1531 static int
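/*
 * Wait for the controller to come ready, check that its firmware is
 * supported, and read back the supported options and preferred settings
 * (FIB size, s/g table size, maximum outstanding FIBs, MSI-X vectors).
 */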
1532 aac_check_firmware(struct aac_softc *sc)
1533 {
1534 	u_int32_t code, major, minor, maxsize;
1535 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1536 	time_t then;
1537 
1538 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1539 
1540 	/* check if flash update is running */
1541 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1542 		then = time_uptime;
1543 		do {
1544 			code = AAC_GET_FWSTATUS(sc);
1545 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1546 				device_printf(sc->aac_dev,
1547 						  "FATAL: controller not coming ready, "
1548 						   "status %x\n", code);
1549 				return(ENXIO);
1550 			}
1551 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1552 		/*
1553 		 * Delay 10 seconds.  The firmware is doing a soft reset right
1554 		 * now, so do not read the scratch pad register at this time.
1555 		 */
1556 		waitCount = 10 * 10000;
1557 		while (waitCount) {
1558 			DELAY(100);		/* delay 100 microseconds */
1559 			waitCount--;
1560 		}
1561 	}
1562 
1563 	/*
1564 	 * Wait for the adapter to come ready.
1565 	 */
1566 	then = time_uptime;
1567 	do {
1568 		code = AAC_GET_FWSTATUS(sc);
1569 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1570 			device_printf(sc->aac_dev,
1571 				      "FATAL: controller not coming ready, "
1572 					   "status %x\n", code);
1573 			return(ENXIO);
1574 		}
1575 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1576 
1577 	/*
1578 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1579 	 * firmware version 1.x are not compatible with this driver.
1580 	 */
1581 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1582 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1583 				     NULL, NULL)) {
1584 			device_printf(sc->aac_dev,
1585 				      "Error reading firmware version\n");
1586 			return (EIO);
1587 		}
1588 
1589 		/* These numbers are stored as ASCII! */
1590 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1591 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1592 		if (major == 1) {
1593 			device_printf(sc->aac_dev,
1594 			    "Firmware version %d.%d is not supported.\n",
1595 			    major, minor);
1596 			return (EINVAL);
1597 		}
1598 	}
1599 	/*
1600 	 * Retrieve the capabilities/supported options word so we know what
1601 	 * work-arounds to enable.  Some firmware revs don't support this
1602 	 * command.
1603 	 */
1604 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1605 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1606 			device_printf(sc->aac_dev,
1607 			     "RequestAdapterInfo failed\n");
1608 			return (EIO);
1609 		}
1610 	} else {
1611 		options = AAC_GET_MAILBOX(sc, 1);
1612 		atu_size = AAC_GET_MAILBOX(sc, 2);
1613 		sc->supported_options = options;
1614 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1615 
1616 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1617 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1618 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1619 		if (options & AAC_SUPPORTED_NONDASD)
1620 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1621 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1622 			&& (sizeof(bus_addr_t) > 4)
1623 			&& (sc->hint_flags & 0x1)) {
1624 			device_printf(sc->aac_dev,
1625 			    "Enabling 64-bit address support\n");
1626 			sc->flags |= AAC_FLAGS_SG_64BIT;
1627 		}
1628 		if (sc->aac_if.aif_send_command) {
1629 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1630 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1631 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1632 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1633 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1634 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1635 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1636 		}
1637 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1638 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1639 	}
1640 
1641 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1642 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1643 		return (ENXIO);
1644 	}
1645 
1646 	if (sc->hint_flags & 2) {
1647 		device_printf(sc->aac_dev,
1648 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1649 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1650 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1651 		device_printf(sc->aac_dev,
1652 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1653 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1654 	}
1655 
1656 	/* Check for broken hardware that does a lower number of commands */
1657 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1658 
1659 	/* Remap mem. resource, if required */
1660 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1661 		bus_release_resource(
1662 			sc->aac_dev, SYS_RES_MEMORY,
1663 			sc->aac_regs_rid0, sc->aac_regs_res0);
1664 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1665 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1666 			atu_size, RF_ACTIVE);
1667 		if (sc->aac_regs_res0 == NULL) {
1668 			sc->aac_regs_res0 = bus_alloc_resource_any(
1669 				sc->aac_dev, SYS_RES_MEMORY,
1670 				&sc->aac_regs_rid0, RF_ACTIVE);
1671 			if (sc->aac_regs_res0 == NULL) {
1672 				device_printf(sc->aac_dev,
1673 					"couldn't allocate register window\n");
1674 				return (ENXIO);
1675 			}
1676 		}
1677 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1678 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1679 	}
1680 
1681 	/* Read preferred settings */
1682 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1683 	sc->aac_max_sectors = 128;				/* 64KB */
1684 	sc->aac_max_aif = 1;
1685 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1686 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1687 		 - sizeof(struct aac_blockwrite64))
1688 		 / sizeof(struct aac_sg_entry64);
1689 	else
1690 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1691 		 - sizeof(struct aac_blockwrite))
1692 		 / sizeof(struct aac_sg_entry);
1693 
1694 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1695 		options = AAC_GET_MAILBOX(sc, 1);
1696 		sc->aac_max_fib_size = (options & 0xFFFF);
1697 		sc->aac_max_sectors = (options >> 16) << 1;
1698 		options = AAC_GET_MAILBOX(sc, 2);
1699 		sc->aac_sg_tablesize = (options >> 16);
1700 		options = AAC_GET_MAILBOX(sc, 3);
1701 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1702 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1703 			sc->aac_max_fibs = (options & 0xFFFF);
1704 		options = AAC_GET_MAILBOX(sc, 4);
1705 		sc->aac_max_aif = (options & 0xFFFF);
1706 		options = AAC_GET_MAILBOX(sc, 5);
1707 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1708 	}
1709 
1710 	maxsize = sc->aac_max_fib_size + 31;
1711 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1712 		maxsize += sizeof(struct aac_fib_xporthdr);
1713 	if (maxsize > PAGE_SIZE) {
1714 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1715 		maxsize = PAGE_SIZE;
1716 	}
1717 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1718 
1719 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1720 		sc->flags |= AAC_FLAGS_RAW_IO;
1721 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1722 	}
1723 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1724 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1725 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1726 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1727 	}
1728 
1729 #ifdef AACRAID_DEBUG
1730 	aacraid_get_fw_debug_buffer(sc);
1731 #endif
1732 	return (0);
1733 }
1734 
1735 static int
1736 aac_init(struct aac_softc *sc)
1737 {
1738 	struct aac_adapter_init	*ip;
1739 	int i, error;
1740 
1741 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1742 
1743 	/* reset rrq index */
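	/* each vector owns an aac_vector_cap-sized slice of the host RRQ */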
1744 	sc->aac_fibs_pushed_no = 0;
1745 	for (i = 0; i < sc->aac_max_msix; i++)
1746 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1747 
1748 	/*
1749 	 * Fill in the init structure.  This tells the adapter about the
1750 	 * physical location of various important shared data structures.
1751 	 */
1752 	ip = &sc->aac_common->ac_init;
1753 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1754 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1755 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1756 		sc->flags |= AAC_FLAGS_RAW_IO;
1757 	}
1758 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1759 
1760 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1761 					 offsetof(struct aac_common, ac_fibs);
1762 	ip->AdapterFibsVirtualAddress = 0;
1763 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1764 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1765 
1766 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1767 				  offsetof(struct aac_common, ac_printf);
1768 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1769 
1770 	/*
1771 	 * The adapter assumes that pages are 4K in size, except on some
1772  	 * broken firmware versions that do the page->byte conversion twice,
1773 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1774 	 * Round up since the granularity is so high.
1775 	 */
1776 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1777 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1778 		ip->HostPhysMemPages =
1779 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1780 	}
1781 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1782 
1783 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1784 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1785 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1786 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1787 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1788 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1789 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1790 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1791 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1792 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1793 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1794 	}
1795 	ip->MaxNumAif = sc->aac_max_aif;
1796 	ip->HostRRQ_AddrLow =
1797 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1798 	/* always 32-bit address */
1799 	ip->HostRRQ_AddrHigh = 0;
1800 
1801 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1802 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1803 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1804 		device_printf(sc->aac_dev, "Power Management enabled\n");
1805 	}
1806 
1807 	ip->MaxIoCommands = sc->aac_max_fibs;
1808 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1809 	ip->MaxFibSize = sc->aac_max_fib_size;
1810 
1811 	aac_adapter_init_tole(ip);
1812 
1813 	/*
1814 	 * Do controller-type-specific initialisation
1815 	 */
1816 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1817 
1818 	/*
1819 	 * Give the init structure to the controller.
1820 	 */
1821 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1822 			     sc->aac_common_busaddr +
1823 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1824 			     NULL, NULL)) {
1825 		device_printf(sc->aac_dev,
1826 			      "error establishing init structure\n");
1827 		error = EIO;
1828 		goto out;
1829 	}
1830 
1831 	/*
1832 	 * Check configuration issues
1833 	 */
1834 	if ((error = aac_check_config(sc)) != 0)
1835 		goto out;
1836 
1837 	error = 0;
1838 out:
1839 	return(error);
1840 }
1841 
1842 static void
1843 aac_define_int_mode(struct aac_softc *sc)
1844 {
1845 	device_t dev;
1846 	int cap, msi_count, error = 0;
1847 	uint32_t val;
1848 
1849 	dev = sc->aac_dev;
1850 
1851 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1852 		device_printf(dev, "using line interrupts\n");
1853 		sc->aac_max_msix = 1;
1854 		sc->aac_vector_cap = sc->aac_max_fibs;
1855 		return;
1856 	}
1857 
1858 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1859 	if (sc->aac_max_msix == 0) {
1860 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1861 			msi_count = 1;
1862 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1863 				device_printf(dev, "alloc msi failed - err=%d; "
1864 				    "will use INTx\n", error);
1865 				pci_release_msi(dev);
1866 			} else {
1867 				sc->msi_tupelo = TRUE;
1868 			}
1869 		}
1870 		if (sc->msi_tupelo)
1871 			device_printf(dev, "using MSI interrupts\n");
1872 		else
1873 			device_printf(dev, "using line interrupts\n");
1874 
1875 		sc->aac_max_msix = 1;
1876 		sc->aac_vector_cap = sc->aac_max_fibs;
1877 		return;
1878 	}
1879 
1880 	/* OS capability */
1881 	msi_count = pci_msix_count(dev);
1882 	if (msi_count > AAC_MAX_MSIX)
1883 		msi_count = AAC_MAX_MSIX;
1884 	if (msi_count > sc->aac_max_msix)
1885 		msi_count = sc->aac_max_msix;
1886 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1887 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1888 				   "will try MSI\n", msi_count, error);
1889 		pci_release_msi(dev);
1890 	} else {
1891 		sc->msi_enabled = TRUE;
1892 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1893 			msi_count);
1894 	}
1895 
1896 	if (!sc->msi_enabled) {
1897 		msi_count = 1;
1898 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1899 			device_printf(dev, "alloc msi failed - err=%d; "
1900 				           "will use INTx\n", error);
1901 			pci_release_msi(dev);
1902 		} else {
1903 			sc->msi_enabled = TRUE;
1904 			device_printf(dev, "using MSI interrupts\n");
1905 		}
1906 	}
1907 
1908 	if (sc->msi_enabled) {
1909 		/* now read controller capability from PCI config. space */
1910 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1911 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1912 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1913 			pci_release_msi(dev);
1914 			sc->msi_enabled = FALSE;
1915 		}
1916 	}
1917 
1918 	if (!sc->msi_enabled) {
1919 		device_printf(dev, "using legacy interrupts\n");
1920 		sc->aac_max_msix = 1;
1921 	} else {
1922 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1923 		if (sc->aac_max_msix > msi_count)
1924 			sc->aac_max_msix = msi_count;
1925 	}
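	/* split the outstanding-FIB budget evenly across the enabled vectors */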
1926 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1927 
1928 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1929 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1930 }
1931 
1932 static int
1933 aac_find_pci_capability(struct aac_softc *sc, int cap)
1934 {
1935 	device_t dev;
1936 	uint32_t status;
1937 	uint8_t ptr;
1938 
1939 	dev = sc->aac_dev;
1940 
1941 	status = pci_read_config(dev, PCIR_STATUS, 2);
1942 	if (!(status & PCIM_STATUS_CAPPRESENT))
1943 		return (0);
1944 
1945 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1946 	switch (status & PCIM_HDRTYPE) {
1947 	case 0:
1948 	case 1:
1949 		ptr = PCIR_CAP_PTR;
1950 		break;
1951 	case 2:
1952 		ptr = PCIR_CAP_PTR_2;
1953 		break;
1954 	default:
1955 		return (0);
1956 		break;
1957 	}
1958 	ptr = pci_read_config(dev, ptr, 1);
1959 
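	/* walk the capability list until an entry with the requested ID is found */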
1960 	while (ptr != 0) {
1961 		int next, val;
1962 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1963 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1964 		if (val == cap)
1965 			return (ptr);
1966 		ptr = next;
1967 	}
1968 
1969 	return (0);
1970 }
1971 
1972 static int
1973 aac_setup_intr(struct aac_softc *sc)
1974 {
1975 	int i, msi_count, rid;
1976 	struct resource *res;
1977 	void *tag;
1978 
1979 	msi_count = sc->aac_max_msix;
1980 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1981 
1982 	for (i = 0; i < msi_count; i++, rid++) {
1983 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1984 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1985 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1986 			return (EINVAL);
1987 		}
1988 		sc->aac_irq_rid[i] = rid;
1989 		sc->aac_irq[i] = res;
1990 		if (aac_bus_setup_intr(sc->aac_dev, res,
1991 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1992 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1993 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1994 			return (EINVAL);
1995 		}
1996 		sc->aac_msix[i].vector_no = i;
1997 		sc->aac_msix[i].sc = sc;
1998 		sc->aac_intr[i] = tag;
1999 	}
2000 
2001 	return (0);
2002 }
2003 
2004 static int
2005 aac_check_config(struct aac_softc *sc)
2006 {
2007 	struct aac_fib *fib;
2008 	struct aac_cnt_config *ccfg;
2009 	struct aac_cf_status_hdr *cf_shdr;
2010 	int rval;
2011 
2012 	mtx_lock(&sc->aac_io_lock);
2013 	aac_alloc_sync_fib(sc, &fib);
2014 
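	/*
	 * Ask the adapter for its configuration status; if it reports that
	 * the saved configuration is safe to use (action <= CFACT_PAUSE),
	 * commit it automatically below.
	 */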
2015 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2016 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2017 	ccfg->Command = VM_ContainerConfig;
2018 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2019 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2020 
2021 	aac_cnt_config_tole(ccfg);
2022 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2023 		sizeof (struct aac_cnt_config));
2024 	aac_cnt_config_toh(ccfg);
2025 
2026 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2027 	if (rval == 0 && ccfg->Command == ST_OK &&
2028 		ccfg->CTCommand.param[0] == CT_OK) {
2029 		if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2030 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2031 			ccfg->Command = VM_ContainerConfig;
2032 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2033 
2034 			aac_cnt_config_tole(ccfg);
2035 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2036 				sizeof (struct aac_cnt_config));
2037 			aac_cnt_config_toh(ccfg);
2038 
2039 			if (rval == 0 && ccfg->Command == ST_OK &&
2040 				ccfg->CTCommand.param[0] == CT_OK) {
2041 				/* successful completion */
2042 				rval = 0;
2043 			} else {
2044 				/* auto commit aborted due to error(s) */
2045 				rval = -2;
2046 			}
2047 		} else {
2048 			/* auto commit aborted due to adapter indicating
2049 			   config. issues too dangerous to auto commit  */
2050 			rval = -3;
2051 		}
2052 	} else {
2053 		/* error */
2054 		rval = -1;
2055 	}
2056 
2057 	aac_release_sync_fib(sc);
2058 	mtx_unlock(&sc->aac_io_lock);
2059 	return(rval);
2060 }
2061 
2062 /*
2063  * Send a synchronous command to the controller and wait for a result.
2064  * Indicate if the controller completed the command with an error status.
2065  */
2066 int
2067 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2068 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2069 		 u_int32_t *sp, u_int32_t *r1)
2070 {
2071 	time_t then;
2072 	u_int32_t status;
2073 
2074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2075 
2076 	/* populate the mailbox */
2077 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2078 
2079 	/* ensure the sync command doorbell flag is cleared */
2080 	if (!sc->msi_enabled)
2081 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2082 
2083 	/* then set it to signal the adapter */
2084 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2085 
2086 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
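	/*
	 * Wait for completion unless this is a SYNCFIB issued with a status
	 * word preset to zero, in which case the command is not waited on.
	 */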
2087 		/* spin waiting for the command to complete */
2088 		then = time_uptime;
2089 		do {
2090 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2091 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2092 				return(EIO);
2093 			}
2094 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2095 
2096 		/* clear the completion flag */
2097 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2098 
2099 		/* get the command status */
2100 		status = AAC_GET_MAILBOX(sc, 0);
2101 		if (sp != NULL)
2102 			*sp = status;
2103 
2104 		/* return parameter */
2105 		if (r1 != NULL)
2106 			*r1 = AAC_GET_MAILBOX(sc, 1);
2107 
2108 		if (status != AAC_SRB_STS_SUCCESS)
2109 			return (-1);
2110 	}
2111 	return(0);
2112 }
2113 
2114 static int
2115 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2116 		 struct aac_fib *fib, u_int16_t datasize)
2117 {
2118 	uint32_t ReceiverFibAddress;
2119 
2120 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2121 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2122 
2123 	if (datasize > AAC_FIB_DATASIZE)
2124 		return(EINVAL);
2125 
2126 	/*
2127 	 * Set up the sync FIB
2128 	 */
2129 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2130 				AAC_FIBSTATE_INITIALISED |
2131 				AAC_FIBSTATE_EMPTY;
2132 	fib->Header.XferState |= xferstate;
2133 	fib->Header.Command = command;
2134 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2135 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2136 	fib->Header.SenderSize = sizeof(struct aac_fib);
2137 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2138 	ReceiverFibAddress = sc->aac_common_busaddr +
2139 		offsetof(struct aac_common, ac_sync_fib);
2140 	fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2141 	aac_fib_header_tole(&fib->Header);
2142 
2143 	/*
2144 	 * Give the FIB to the controller, wait for a response.
2145 	 */
2146 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2147 		ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2148 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2149 		aac_fib_header_toh(&fib->Header);
2150 		return(EIO);
2151 	}
2152 
2153 	aac_fib_header_toh(&fib->Header);
2154 	return (0);
2155 }
2156 
2157 /*
2158  * Check for commands that have been outstanding for a suspiciously long time,
2159  * and complain about them.
2160  */
2161 static void
2162 aac_timeout(struct aac_softc *sc)
2163 {
2164 	struct aac_command *cm;
2165 	time_t deadline;
2166 	int timedout;
2167 
2168 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2169 	/*
2170 	 * Traverse the busy command list and complain about late commands,
2171 	 * once only.
2172 	 */
2173 	timedout = 0;
2174 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2175 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2176 		if (cm->cm_timestamp < deadline) {
2177 			device_printf(sc->aac_dev,
2178 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2179 				      cm, (int)(time_uptime-cm->cm_timestamp));
2180 			AAC_PRINT_FIB(sc, cm->cm_fib);
2181 			timedout++;
2182 		}
2183 	}
2184 
2185 	if (timedout)
2186 		aac_reset_adapter(sc);
2187 	aacraid_print_queues(sc);
2188 }
2189 
2190 /*
2191  * Interface Function Vectors
2192  */
2193 
2194 /*
2195  * Read the current firmware status word.
2196  */
2197 static int
2198 aac_src_get_fwstatus(struct aac_softc *sc)
2199 {
2200 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2201 
2202 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2203 }
2204 
2205 /*
2206  * Notify the controller of a change in a given queue
2207  */
2208 static void
2209 aac_src_qnotify(struct aac_softc *sc, int qbit)
2210 {
2211 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2212 
2213 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2214 }
2215 
2216 /*
2217  * Get the interrupt reason bits
2218  */
2219 static int
2220 aac_src_get_istatus(struct aac_softc *sc)
2221 {
2222 	int val;
2223 
2224 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2225 
2226 	if (sc->msi_enabled) {
2227 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2228 		if (val & AAC_MSI_SYNC_STATUS)
2229 			val = AAC_DB_SYNC_COMMAND;
2230 		else
2231 			val = 0;
2232 	} else {
2233 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2234 	}
2235 	return(val);
2236 }
2237 
2238 /*
2239  * Clear some interrupt reason bits
2240  */
2241 static void
2242 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2243 {
2244 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2245 
2246 	if (sc->msi_enabled) {
2247 		if (mask == AAC_DB_SYNC_COMMAND)
2248 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2249 	} else {
2250 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2251 	}
2252 }
2253 
2254 /*
2255  * Populate the mailbox and set the command word
2256  */
2257 static void
2258 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2259 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2260 {
2261 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2262 
2263 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2264 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2265 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2266 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2267 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2268 }
2269 
2270 static void
2271 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2272 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2273 {
2274 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2275 
2276 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2277 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2278 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2279 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2280 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2281 }
2282 
2283 /*
2284  * Fetch the immediate command status word
2285  */
2286 static int
2287 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2288 {
2289 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2290 
2291 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2292 }
2293 
2294 static int
2295 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2296 {
2297 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2298 
2299 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2300 }
2301 
2302 /*
2303  * Set/clear interrupt masks
2304  */
2305 static void
2306 aac_src_access_devreg(struct aac_softc *sc, int mode)
2307 {
2308 	u_int32_t val;
2309 
2310 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2311 
2312 	switch (mode) {
2313 	case AAC_ENABLE_INTERRUPT:
2314 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2315 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2316 				           AAC_INT_ENABLE_TYPE1_INTX));
2317 		break;
2318 
2319 	case AAC_DISABLE_INTERRUPT:
2320 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2321 		break;
2322 
2323 	case AAC_ENABLE_MSIX:
2324 		/* set bit 6 */
2325 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2326 		val |= 0x40;
2327 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2328 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2329 		/* unmask int. */
2330 		val = PMC_ALL_INTERRUPT_BITS;
2331 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2332 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2334 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2335 		break;
2336 
2337 	case AAC_DISABLE_MSIX:
2338 		/* reset bit 6 */
2339 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2340 		val &= ~0x40;
2341 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2342 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2343 		break;
2344 
2345 	case AAC_CLEAR_AIF_BIT:
2346 		/* set bit 5 */
2347 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2348 		val |= 0x20;
2349 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2350 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2351 		break;
2352 
2353 	case AAC_CLEAR_SYNC_BIT:
2354 		/* set bit 4 */
2355 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2356 		val |= 0x10;
2357 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2358 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2359 		break;
2360 
2361 	case AAC_ENABLE_INTX:
2362 		/* set bit 7 */
2363 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2364 		val |= 0x80;
2365 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2366 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2367 		/* unmask int. */
2368 		val = PMC_ALL_INTERRUPT_BITS;
2369 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2370 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2371 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2372 			val & (~(PMC_GLOBAL_INT_BIT2)));
2373 		break;
2374 
2375 	default:
2376 		break;
2377 	}
2378 }
2379 
2380 /*
2381  * New comm. interface: Send command functions
2382  */
2383 static int
2384 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2385 {
2386 	struct aac_fib_xporthdr *pFibX;
2387 	u_int32_t fibsize, high_addr;
2388 	u_int64_t address;
2389 
2390 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2391 
2392 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2393 		sc->aac_max_msix > 1) {
2394 		u_int16_t vector_no, first_choice = 0xffff;
2395 
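		/*
		 * Round-robin over vectors 1..aac_max_msix-1, skipping vectors
		 * that already have aac_vector_cap FIBs outstanding; if every
		 * vector is full, fall back to vector 0.  The chosen vector is
		 * encoded in the upper 16 bits of the FIB handle.
		 */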
2396 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2397 		do {
2398 			vector_no += 1;
2399 			if (vector_no == sc->aac_max_msix)
2400 				vector_no = 1;
2401 			if (sc->aac_rrq_outstanding[vector_no] <
2402 				sc->aac_vector_cap)
2403 				break;
2404 			if (0xffff == first_choice)
2405 				first_choice = vector_no;
2406 			else if (vector_no == first_choice)
2407 				break;
2408 		} while (1);
2409 		if (vector_no == first_choice)
2410 			vector_no = 0;
2411 		sc->aac_rrq_outstanding[vector_no]++;
2412 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2413 			sc->aac_fibs_pushed_no = 0;
2414 		else
2415 			sc->aac_fibs_pushed_no++;
2416 
2417 		cm->cm_fib->Header.Handle += (vector_no << 16);
2418 	}
2419 
2420 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2421 		/* Encode the FIB size in 128-byte units, minus one */
2422 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2423 		/* Fill new FIB header */
2424 		address = cm->cm_fibphys;
2425 		high_addr = (u_int32_t)(address >> 32);
2426 		if (high_addr == 0L) {
2427 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2428 			cm->cm_fib->Header.u.TimeStamp = 0L;
2429 		} else {
2430 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2431 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2432 		}
2433 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2434 	} else {
2435 		/* Encode the transport header plus FIB size in 128-byte units, minus one */
2436 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2437 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2438 		/* Fill XPORT header */
2439 		pFibX = (struct aac_fib_xporthdr *)
2440 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2441 		pFibX->Handle = cm->cm_fib->Header.Handle;
2442 		pFibX->HostAddress = cm->cm_fibphys;
2443 		pFibX->Size = cm->cm_fib->Header.Size;
2444 		aac_fib_xporthdr_tole(pFibX);
2445 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2446 		high_addr = (u_int32_t)(address >> 32);
2447 	}
2448 
2449 	aac_fib_header_tole(&cm->cm_fib->Header);
2450 
2451 	if (fibsize > 31)
2452 		fibsize = 31;
2453 	aac_enqueue_busy(cm);
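	/* the size code (0-31) rides in the low bits of the FIB bus address
	 * written to the inbound queue register */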
2454 	if (high_addr) {
2455 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2456 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2457 	} else {
2458 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2459 	}
2460 	return 0;
2461 }
2462 
2463 /*
2464  * New comm. interface: get, set outbound queue index
2465  */
2466 static int
2467 aac_src_get_outb_queue(struct aac_softc *sc)
2468 {
2469 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2470 
2471 	return(-1);
2472 }
2473 
2474 static void
2475 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2476 {
2477 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2478 }
2479 
2480 /*
2481  * Debugging and Diagnostics
2482  */
2483 
2484 /*
2485  * Print some information about the controller.
2486  */
2487 static void
2488 aac_describe_controller(struct aac_softc *sc)
2489 {
2490 	struct aac_fib *fib;
2491 	struct aac_adapter_info	*info;
2492 	char *adapter_type = "Adaptec RAID controller";
2493 
2494 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2495 
2496 	mtx_lock(&sc->aac_io_lock);
2497 	aac_alloc_sync_fib(sc, &fib);
2498 
2499 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2500 		fib->data[0] = 0;
2501 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2502 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2503 		else {
2504 			struct aac_supplement_adapter_info *supp_info;
2505 
2506 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2507 			adapter_type = (char *)supp_info->AdapterTypeText;
2508 			sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2509 			sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2510 		}
2511 	}
2512 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2513 		adapter_type,
2514 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2515 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2516 
2517 	fib->data[0] = 0;
2518 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2519 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2520 		aac_release_sync_fib(sc);
2521 		mtx_unlock(&sc->aac_io_lock);
2522 		return;
2523 	}
2524 
2525 	/* save the kernel revision structure for later use */
2526 	info = (struct aac_adapter_info *)&fib->data[0];
2527 	aac_adapter_info_toh(info);
2528 	sc->aac_revision = info->KernelRevision;
2529 
2530 	if (bootverbose) {
2531 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2532 		    "(%dMB cache, %dMB execution), %s\n",
2533 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2534 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2535 		    info->BufferMem / (1024 * 1024),
2536 		    info->ExecutionMem / (1024 * 1024),
2537 		    aac_describe_code(aac_battery_platform,
2538 		    info->batteryPlatform));
2539 
2540 		device_printf(sc->aac_dev,
2541 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2542 		    info->KernelRevision.external.comp.major,
2543 		    info->KernelRevision.external.comp.minor,
2544 		    info->KernelRevision.external.comp.dash,
2545 		    info->KernelRevision.buildNumber,
2546 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2547 
2548 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2549 			      sc->supported_options,
2550 			      "\20"
2551 			      "\1SNAPSHOT"
2552 			      "\2CLUSTERS"
2553 			      "\3WCACHE"
2554 			      "\4DATA64"
2555 			      "\5HOSTTIME"
2556 			      "\6RAID50"
2557 			      "\7WINDOW4GB"
2558 			      "\10SCSIUPGD"
2559 			      "\11SOFTERR"
2560 			      "\12NORECOND"
2561 			      "\13SGMAP64"
2562 			      "\14ALARM"
2563 			      "\15NONDASD"
2564 			      "\16SCSIMGT"
2565 			      "\17RAIDSCSI"
2566 			      "\21ADPTINFO"
2567 			      "\22NEWCOMM"
2568 			      "\23ARRAY64BIT"
2569 			      "\24HEATSENSOR");
2570 	}
2571 
2572 	aac_release_sync_fib(sc);
2573 	mtx_unlock(&sc->aac_io_lock);
2574 }
2575 
2576 /*
2577  * Look up a text description of a numeric error code and return a pointer to
2578  * same.
2579  */
2580 static char *
2581 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2582 {
2583 	int i;
2584 
2585 	for (i = 0; table[i].string != NULL; i++)
2586 		if (table[i].code == code)
2587 			return(table[i].string);
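	/* not found: the entry after the NULL sentinel holds the catch-all string */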
2588 	return(table[i + 1].string);
2589 }
2590 
2591 /*
2592  * Management Interface
2593  */
2594 
2595 static int
2596 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2597 {
2598 	struct aac_softc *sc;
2599 
2600 	sc = dev->si_drv1;
2601 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2602 	device_busy(sc->aac_dev);
2603 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2604 	return 0;
2605 }
2606 
2607 static int
2608 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2609 {
2610 	union aac_statrequest *as;
2611 	struct aac_softc *sc;
2612 	int error = 0;
2613 
2614 	as = (union aac_statrequest *)arg;
2615 	sc = dev->si_drv1;
2616 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2617 
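	/*
	 * The native FSACTL_* requests pass a pointer to the user buffer
	 * pointer; dereference it once and fall through to the shared
	 * (Linux-style) handler for each command.
	 */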
2618 	switch (cmd) {
2619 	case AACIO_STATS:
2620 		switch (as->as_item) {
2621 		case AACQ_FREE:
2622 		case AACQ_READY:
2623 		case AACQ_BUSY:
2624 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2625 			      sizeof(struct aac_qstat));
2626 			break;
2627 		default:
2628 			error = ENOENT;
2629 			break;
2630 		}
2631 		break;
2632 
2633 	case FSACTL_SENDFIB:
2634 	case FSACTL_SEND_LARGE_FIB:
2635 		arg = *(caddr_t*)arg;
2636 	case FSACTL_LNX_SENDFIB:
2637 	case FSACTL_LNX_SEND_LARGE_FIB:
2638 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2639 		error = aac_ioctl_sendfib(sc, arg);
2640 		break;
2641 	case FSACTL_SEND_RAW_SRB:
2642 		arg = *(caddr_t*)arg;
2643 	case FSACTL_LNX_SEND_RAW_SRB:
2644 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2645 		error = aac_ioctl_send_raw_srb(sc, arg);
2646 		break;
2647 	case FSACTL_AIF_THREAD:
2648 	case FSACTL_LNX_AIF_THREAD:
2649 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2650 		error = EINVAL;
2651 		break;
2652 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2653 		arg = *(caddr_t*)arg;
2654 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2655 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2656 		error = aac_open_aif(sc, arg);
2657 		break;
2658 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2662 		error = aac_getnext_aif(sc, arg);
2663 		break;
2664 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2665 		arg = *(caddr_t*)arg;
2666 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2668 		error = aac_close_aif(sc, arg);
2669 		break;
2670 	case FSACTL_MINIPORT_REV_CHECK:
2671 		arg = *(caddr_t*)arg;
2672 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2673 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2674 		error = aac_rev_check(sc, arg);
2675 		break;
2676 	case FSACTL_QUERY_DISK:
2677 		arg = *(caddr_t*)arg;
2678 	case FSACTL_LNX_QUERY_DISK:
2679 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2680 		error = aac_query_disk(sc, arg);
2681 		break;
2682 	case FSACTL_DELETE_DISK:
2683 	case FSACTL_LNX_DELETE_DISK:
2684 		/*
2685 		 * We don't trust userland to tell us when to delete a
2686 		 * container; rather, we rely on an AIF coming from the
2687 		 * controller.
2688 		 */
2689 		error = 0;
2690 		break;
2691 	case FSACTL_GET_PCI_INFO:
2692 		arg = *(caddr_t*)arg;
2693 	case FSACTL_LNX_GET_PCI_INFO:
2694 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2695 		error = aac_get_pci_info(sc, arg);
2696 		break;
2697 	case FSACTL_GET_FEATURES:
2698 		arg = *(caddr_t*)arg;
2699 	case FSACTL_LNX_GET_FEATURES:
2700 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2701 		error = aac_supported_features(sc, arg);
2702 		break;
2703 	default:
2704 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2705 		error = EINVAL;
2706 		break;
2707 	}
2708 	return(error);
2709 }
2710 
2711 static int
2712 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2713 {
2714 	struct aac_softc *sc;
2715 	struct aac_fib_context *ctx;
2716 	int revents;
2717 
2718 	sc = dev->si_drv1;
2719 	revents = 0;
2720 
2721 	mtx_lock(&sc->aac_io_lock);
2722 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
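		/* an AIF is readable for a context if its read index trails the
		 * producer index or the queue has wrapped past it */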
2723 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2724 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2725 				revents |= poll_events & (POLLIN | POLLRDNORM);
2726 				break;
2727 			}
2728 		}
2729 	}
2730 	mtx_unlock(&sc->aac_io_lock);
2731 
2732 	if (revents == 0) {
2733 		if (poll_events & (POLLIN | POLLRDNORM))
2734 			selrecord(td, &sc->rcv_select);
2735 	}
2736 
2737 	return (revents);
2738 }
2739 
2740 static void
2741 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2742 {
2743 
2744 	switch (event->ev_type) {
2745 	case AAC_EVENT_CMFREE:
2746 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2747 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2748 			aacraid_add_event(sc, event);
2749 			return;
2750 		}
2751 		free(event, M_AACRAIDBUF);
2752 		wakeup(arg);
2753 		break;
2754 	default:
2755 		break;
2756 	}
2757 }
2758 
2759 /*
2760  * Send a FIB supplied from userspace
2761  *
2762  * Currently, sending a FIB from userspace on BE hosts is not supported.
2763  * There are several things that need to be considered in order to
2764  * support this, such as:
2765  * - At least the FIB data part from userspace should already be in LE,
2766  *   or else the kernel would need to know all FIB types to be able to
2767  *   correctly convert it to BE.
2768  * - SG tables are converted to BE by aacraid_map_command_sg(). This
2769  *   conversion should be suppressed if the FIB comes from userspace.
2770  * - aacraid_wait_command() calls functions that convert the FIB header
2771  *   to LE. But if the header is already in LE, the conversion should not
2772  *   be performed.
2773  */
2774 static int
2775 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2776 {
2777 	struct aac_command *cm;
2778 	int size, error;
2779 
2780 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2781 
2782 	cm = NULL;
2783 
2784 	/*
2785 	 * Get a command
2786 	 */
2787 	mtx_lock(&sc->aac_io_lock);
2788 	if (aacraid_alloc_command(sc, &cm)) {
2789 		struct aac_event *event;
2790 
2791 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2792 		    M_NOWAIT | M_ZERO);
2793 		if (event == NULL) {
2794 			error = EBUSY;
2795 			mtx_unlock(&sc->aac_io_lock);
2796 			goto out;
2797 		}
2798 		event->ev_type = AAC_EVENT_CMFREE;
2799 		event->ev_callback = aac_ioctl_event;
2800 		event->ev_arg = &cm;
2801 		aacraid_add_event(sc, event);
2802 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2803 	}
2804 	mtx_unlock(&sc->aac_io_lock);
2805 
2806 	/*
2807 	 * Fetch the FIB header, then re-copy to get data as well.
2808 	 */
2809 	if ((error = copyin(ufib, cm->cm_fib,
2810 			    sizeof(struct aac_fib_header))) != 0)
2811 		goto out;
2812 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2813 	if (size > sc->aac_max_fib_size) {
2814 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2815 			      size, sc->aac_max_fib_size);
2816 		size = sc->aac_max_fib_size;
2817 	}
2818 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2819 		goto out;
2820 	cm->cm_fib->Header.Size = size;
2821 	cm->cm_timestamp = time_uptime;
2822 	cm->cm_datalen = 0;
2823 
2824 	/*
2825 	 * Pass the FIB to the controller, wait for it to complete.
2826 	 */
2827 	mtx_lock(&sc->aac_io_lock);
2828 	error = aacraid_wait_command(cm);
2829 	mtx_unlock(&sc->aac_io_lock);
2830 	if (error != 0) {
2831 		device_printf(sc->aac_dev,
2832 			      "aacraid_wait_command return %d\n", error);
2833 		goto out;
2834 	}
2835 
2836 	/*
2837 	 * Copy the FIB and data back out to the caller.
2838 	 */
2839 	size = cm->cm_fib->Header.Size;
2840 	if (size > sc->aac_max_fib_size) {
2841 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2842 			      size, sc->aac_max_fib_size);
2843 		size = sc->aac_max_fib_size;
2844 	}
2845 	error = copyout(cm->cm_fib, ufib, size);
2846 
2847 out:
2848 	if (cm != NULL) {
2849 		mtx_lock(&sc->aac_io_lock);
2850 		aacraid_release_command(cm);
2851 		mtx_unlock(&sc->aac_io_lock);
2852 	}
2853 	return(error);
2854 }
2855 
2856 /*
2857  * Send a passthrough FIB supplied from userspace
2858  */
2859 static int
2860 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2861 {
2862 	struct aac_command *cm;
2863 	struct aac_fib *fib;
2864 	struct aac_srb *srbcmd;
2865 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2866 	void *user_reply;
2867 	int error, transfer_data = 0;
2868 	bus_dmamap_t orig_map = 0;
2869 	u_int32_t fibsize = 0;
2870 	u_int64_t srb_sg_address;
2871 	u_int32_t srb_sg_bytecount;
2872 
2873 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2874 
2875 	cm = NULL;
2876 
2877 	mtx_lock(&sc->aac_io_lock);
2878 	if (aacraid_alloc_command(sc, &cm)) {
2879 		struct aac_event *event;
2880 
2881 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2882 		    M_NOWAIT | M_ZERO);
2883 		if (event == NULL) {
2884 			error = EBUSY;
2885 			mtx_unlock(&sc->aac_io_lock);
2886 			goto out;
2887 		}
2888 		event->ev_type = AAC_EVENT_CMFREE;
2889 		event->ev_callback = aac_ioctl_event;
2890 		event->ev_arg = &cm;
2891 		aacraid_add_event(sc, event);
2892 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2893 	}
2894 	mtx_unlock(&sc->aac_io_lock);
2895 
2896 	cm->cm_data = NULL;
2897 	/* save original dma map */
2898 	orig_map = cm->cm_datamap;
2899 
2900 	fib = cm->cm_fib;
2901 	srbcmd = (struct aac_srb *)fib->data;
2902 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2903 	    sizeof (u_int32_t))) != 0)
2904 		goto out;
2905 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2906 		error = EINVAL;
2907 		goto out;
2908 	}
2909 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2910 		goto out;
2911 
2912 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2913 	srbcmd->retry_limit = 0;	/* obsolete */
2914 
2915 	/* only one sg element from userspace supported */
2916 	if (srbcmd->sg_map.SgCount > 1) {
2917 		error = EINVAL;
2918 		goto out;
2919 	}
2920 	/* check fibsize */
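	/* the total SRB size tells whether the single SG entry is 32- or 64-bit */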
2921 	if (fibsize == (sizeof(struct aac_srb) +
2922 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2923 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2924 		struct aac_sg_entry sg;
2925 
2926 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2927 			goto out;
2928 
2929 		srb_sg_bytecount = sg.SgByteCount;
2930 		srb_sg_address = (u_int64_t)sg.SgAddress;
2931 	} else if (fibsize == (sizeof(struct aac_srb) +
2932 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2933 #ifdef __LP64__
2934 		struct aac_sg_entry64 *sgp =
2935 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2936 		struct aac_sg_entry64 sg;
2937 
2938 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2939 			goto out;
2940 
2941 		srb_sg_bytecount = sg.SgByteCount;
2942 		srb_sg_address = sg.SgAddress;
2943 #else
2944 		error = EINVAL;
2945 		goto out;
2946 #endif
2947 	} else {
2948 		error = EINVAL;
2949 		goto out;
2950 	}
2951 	user_reply = (char *)arg + fibsize;
2952 	srbcmd->data_len = srb_sg_bytecount;
2953 	if (srbcmd->sg_map.SgCount == 1)
2954 		transfer_data = 1;
2955 
2956 	if (transfer_data) {
2957 		/*
2958 		 * Create DMA tag for the passthr. data buffer and allocate it.
2959 		 */
2960 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2961 			1, 0,			/* algnmnt, boundary */
2962 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2963 			BUS_SPACE_MAXADDR_32BIT :
2964 			0x7fffffff,		/* lowaddr */
2965 			BUS_SPACE_MAXADDR, 	/* highaddr */
2966 			NULL, NULL, 		/* filter, filterarg */
2967 			srb_sg_bytecount, 	/* size */
2968 			sc->aac_sg_tablesize,	/* nsegments */
2969 			srb_sg_bytecount, 	/* maxsegsize */
2970 			0,			/* flags */
2971 			NULL, NULL,		/* No locking needed */
2972 			&cm->cm_passthr_dmat)) {
2973 			error = ENOMEM;
2974 			goto out;
2975 		}
2976 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2977 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2978 			error = ENOMEM;
2979 			goto out;
2980 		}
2981 		/* fill some cm variables */
2982 		cm->cm_datalen = srb_sg_bytecount;
2983 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2984 			cm->cm_flags |= AAC_CMD_DATAIN;
2985 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2986 			cm->cm_flags |= AAC_CMD_DATAOUT;
2987 
2988 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2989 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2990 				cm->cm_data, cm->cm_datalen)) != 0)
2991 				goto out;
2992 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2993 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2994 				BUS_DMASYNC_PREWRITE);
2995 		}
2996 	}
2997 
2998 	/* build the FIB */
2999 	fib->Header.Size = sizeof(struct aac_fib_header) +
3000 		sizeof(struct aac_srb);
3001 	fib->Header.XferState =
3002 		AAC_FIBSTATE_HOSTOWNED   |
3003 		AAC_FIBSTATE_INITIALISED |
3004 		AAC_FIBSTATE_EMPTY	 |
3005 		AAC_FIBSTATE_FROMHOST	 |
3006 		AAC_FIBSTATE_REXPECTED   |
3007 		AAC_FIBSTATE_NORM	 |
3008 		AAC_FIBSTATE_ASYNC;
3009 
3010 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3011 		ScsiPortCommandU64 : ScsiPortCommand;
3012 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3013 
3014 	aac_srb_tole(srbcmd);
3015 
3016 	/* send command */
3017 	if (transfer_data) {
3018 		bus_dmamap_load(cm->cm_passthr_dmat,
3019 			cm->cm_datamap, cm->cm_data,
3020 			cm->cm_datalen,
3021 			aacraid_map_command_sg, cm, 0);
3022 	} else {
3023 		aacraid_map_command_sg(cm, NULL, 0, 0);
3024 	}
3025 
3026 	/* wait for completion */
3027 	mtx_lock(&sc->aac_io_lock);
3028 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3029 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3030 	mtx_unlock(&sc->aac_io_lock);
3031 
3032 	/* copy data */
3033 	if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3034 		if ((error = copyout(cm->cm_data,
3035 			(void *)(uintptr_t)srb_sg_address,
3036 			cm->cm_datalen)) != 0)
3037 			goto out;
3038 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3039 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3040 				BUS_DMASYNC_POSTREAD);
3041 	}
3042 
3043 	/* status */
3044 	aac_srb_response_toh((struct aac_srb_response *)fib->data);
3045 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3046 
3047 out:
3048 	if (cm && cm->cm_data) {
3049 		if (transfer_data)
3050 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3051 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3052 		cm->cm_datamap = orig_map;
3053 	}
3054 	if (cm && cm->cm_passthr_dmat)
3055 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3056 	if (cm) {
3057 		mtx_lock(&sc->aac_io_lock);
3058 		aacraid_release_command(cm);
3059 		mtx_unlock(&sc->aac_io_lock);
3060 	}
3061 	return(error);
3062 }
3063 
3064 /*
3065  * Request an AIF from the controller (new comm. type1)
3066  */
3067 static void
3068 aac_request_aif(struct aac_softc *sc)
3069 {
3070 	struct aac_command *cm;
3071 	struct aac_fib *fib;
3072 
3073 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3074 
3075 	if (aacraid_alloc_command(sc, &cm)) {
3076 		sc->aif_pending = 1;
3077 		return;
3078 	}
3079 	sc->aif_pending = 0;
3080 
3081 	/* build the FIB */
3082 	fib = cm->cm_fib;
3083 	fib->Header.Size = sizeof(struct aac_fib);
3084 	fib->Header.XferState =
3085 		AAC_FIBSTATE_HOSTOWNED   |
3086 		AAC_FIBSTATE_INITIALISED |
3087 		AAC_FIBSTATE_EMPTY	 |
3088 		AAC_FIBSTATE_FROMHOST	 |
3089 		AAC_FIBSTATE_REXPECTED   |
3090 		AAC_FIBSTATE_NORM	 |
3091 		AAC_FIBSTATE_ASYNC;
3092 	/* set AIF marker */
3093 	fib->Header.Handle = 0x00800000;
3094 	fib->Header.Command = AifRequest;
3095 	((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
3096 
3097 	aacraid_map_command_sg(cm, NULL, 0, 0);
3098 }
3099 
3100 
3101 /*
3102  * cdevpriv interface private destructor.
3103  */
3104 static void
3105 aac_cdevpriv_dtor(void *arg)
3106 {
3107 	struct aac_softc *sc;
3108 
3109 	sc = arg;
3110 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3111 	device_unbusy(sc->aac_dev);
3112 }
3113 
3114 /*
3115  * Handle an AIF sent to us by the controller; queue it for later reference.
3116  * If the queue fills up, then drop the older entries.
3117  */
3118 static void
3119 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3120 {
3121 	struct aac_aif_command *aif;
3122 	struct aac_container *co, *co_next;
3123 	struct aac_fib_context *ctx;
3124 	struct aac_fib *sync_fib;
3125 	struct aac_mntinforesp mir;
3126 	int next, current, found;
3127 	int count = 0, changed = 0, i = 0;
3128 	u_int32_t channel, uid;
3129 
3130 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3131 
3132 	aif = (struct aac_aif_command*)&fib->data[0];
3133 	aacraid_print_aif(sc, aif);
3134 
3135 	/* Is it an event that we should care about? */
3136 	switch (le32toh(aif->command)) {
3137 	case AifCmdEventNotify:
3138 		switch (le32toh(aif->data.EN.type)) {
3139 		case AifEnAddContainer:
3140 		case AifEnDeleteContainer:
3141 			/*
3142 			 * A container was added or deleted, but the message
3143 			 * doesn't tell us anything else!  Re-enumerate the
3144 			 * containers and sort things out.
3145 			 */
3146 			aac_alloc_sync_fib(sc, &sync_fib);
3147 			do {
3148 				/*
3149 				 * Ask the controller for its containers one at
3150 				 * a time.
3151 				 * XXX What if the controller's list changes
3152 				 * midway through this enumeration?
3153 				 * XXX This should be done async.
3154 				 */
3155 				if (aac_get_container_info(sc, sync_fib, i,
3156 					&mir, &uid) != 0)
3157 					continue;
3158 				if (i == 0)
3159 					count = mir.MntRespCount;
3160 				/*
3161 				 * Check the container against our list.
3162 				 * co->co_found was already set to 0 in a
3163 				 * previous run.
3164 				 */
3165 				if ((mir.Status == ST_OK) &&
3166 				    (mir.MntTable[0].VolType != CT_NONE)) {
3167 					found = 0;
3168 					TAILQ_FOREACH(co,
3169 						      &sc->aac_container_tqh,
3170 						      co_link) {
3171 						if (co->co_mntobj.ObjectId ==
3172 						    mir.MntTable[0].ObjectId) {
3173 							co->co_found = 1;
3174 							found = 1;
3175 							break;
3176 						}
3177 					}
3178 					/*
3179 					 * If the container matched, continue
3180 					 * in the list.
3181 					 */
3182 					if (found) {
3183 						i++;
3184 						continue;
3185 					}
3186 
3187 					/*
3188 					 * This is a new container.  Do all the
3189 					 * appropriate things to set it up.
3190 					 */
3191 					aac_add_container(sc, &mir, 1, uid);
3192 					changed = 1;
3193 				}
3194 				i++;
3195 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3196 			aac_release_sync_fib(sc);
3197 
3198 			/*
3199 			 * Go through our list of containers and see which ones
3200 			 * were not marked 'found'.  Since the controller didn't
3201 			 * list them they must have been deleted.  Do the
3202 			 * appropriate steps to destroy the device.  Also reset
3203 			 * the co->co_found field.
3204 			 */
3205 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3206 			while (co != NULL) {
3207 				if (co->co_found == 0) {
3208 					co_next = TAILQ_NEXT(co, co_link);
3209 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3210 						     co_link);
3211 					free(co, M_AACRAIDBUF);
3212 					changed = 1;
3213 					co = co_next;
3214 				} else {
3215 					co->co_found = 0;
3216 					co = TAILQ_NEXT(co, co_link);
3217 				}
3218 			}
3219 
3220 			/* Attach the newly created containers */
3221 			if (changed) {
3222 				if (sc->cam_rescan_cb != NULL)
3223 					sc->cam_rescan_cb(sc, 0,
3224 				    	AAC_CAM_TARGET_WILDCARD);
3225 			}
3226 
3227 			break;
3228 
3229 		case AifEnEnclosureManagement:
3230 			switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3231 			case AIF_EM_DRIVE_INSERTION:
3232 			case AIF_EM_DRIVE_REMOVAL:
3233 				channel = le32toh(aif->data.EN.data.EEE.unitID);
3234 				if (sc->cam_rescan_cb != NULL)
3235 					sc->cam_rescan_cb(sc,
3236 					    ((channel>>24) & 0xF) + 1,
3237 					    (channel & 0xFFFF));
3238 				break;
3239 			}
3240 			break;
3241 
3242 		case AifEnAddJBOD:
3243 		case AifEnDeleteJBOD:
3244 		case AifRawDeviceRemove:
3245 			channel = le32toh(aif->data.EN.data.ECE.container);
3246 			if (sc->cam_rescan_cb != NULL)
3247 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3248 				    AAC_CAM_TARGET_WILDCARD);
3249 			break;
3250 
3251 		default:
3252 			break;
3253 		}
3254 
3255 	default:
3256 		break;
3257 	}
3258 
3259 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3260 	current = sc->aifq_idx;
3261 	next = (current + 1) % AAC_AIFQ_LENGTH;
3262 	if (next == 0)
3263 		sc->aifq_filled = 1;
3264 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3265 	/* Make aifq's FIB header and data LE */
3266 	aac_fib_header_tole(&sc->aac_aifq[current].Header);
3267 	/* modify AIF contexts */
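	/*
	 * Once the ring has wrapped, flag any reader the producer has just
	 * lapped and push already-lapped readers past the overwritten slot.
	 */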
3268 	if (sc->aifq_filled) {
3269 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3270 			if (next == ctx->ctx_idx)
3271 				ctx->ctx_wrap = 1;
3272 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3273 				ctx->ctx_idx = next;
3274 		}
3275 	}
3276 	sc->aifq_idx = next;
3277 	/* On the off chance that someone is sleeping for an aif... */
3278 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3279 		wakeup(sc->aac_aifq);
3280 	/* Wakeup any poll()ers */
3281 	selwakeuppri(&sc->rcv_select, PRIBIO);
3282 
3283 	return;
3284 }
3285 
3286 /*
3287  * Return the Revision of the driver to userspace and check to see if the
3288  * userspace app is possibly compatible.  This is extremely bogus since
3289  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3290  * returning what the card reported.
3291  */
3292 static int
3293 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3294 {
3295 	struct aac_rev_check rev_check;
3296 	struct aac_rev_check_resp rev_check_resp;
3297 	int error = 0;
3298 
3299 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3300 
3301 	/*
3302 	 * Copyin the revision struct from userspace
3303 	 */
3304 	if ((error = copyin(udata, (caddr_t)&rev_check,
3305 			sizeof(struct aac_rev_check))) != 0) {
3306 		return error;
3307 	}
3308 
3309 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3310 	      rev_check.callingRevision.buildNumber);
3311 
3312 	/*
3313 	 * Doctor up the response struct.
3314 	 */
3315 	rev_check_resp.possiblyCompatible = 1;
3316 	rev_check_resp.adapterSWRevision.external.comp.major =
3317 	    AAC_DRIVER_MAJOR_VERSION;
3318 	rev_check_resp.adapterSWRevision.external.comp.minor =
3319 	    AAC_DRIVER_MINOR_VERSION;
3320 	rev_check_resp.adapterSWRevision.external.comp.type =
3321 	    AAC_DRIVER_TYPE;
3322 	rev_check_resp.adapterSWRevision.external.comp.dash =
3323 	    AAC_DRIVER_BUGFIX_LEVEL;
3324 	rev_check_resp.adapterSWRevision.buildNumber =
3325 	    AAC_DRIVER_BUILD;
3326 
3327 	return(copyout((caddr_t)&rev_check_resp, udata,
3328 			sizeof(struct aac_rev_check_resp)));
3329 }
3330 
3331 /*
3332  * Pass the fib context to the caller
3333  */
3334 static int
3335 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3336 {
3337 	struct aac_fib_context *fibctx, *ctx;
3338 	int error = 0;
3339 
3340 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3341 
3342 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3343 	if (fibctx == NULL)
3344 		return (ENOMEM);
3345 
3346 	mtx_lock(&sc->aac_io_lock);
3347 	/* all elements are already 0, add to queue */
3348 	if (sc->fibctx == NULL)
3349 		sc->fibctx = fibctx;
3350 	else {
3351 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3352 			;
3353 		ctx->next = fibctx;
3354 		fibctx->prev = ctx;
3355 	}
3356 
3357 	/* evaluate unique value */
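	/* derive an id from the context's kernel address and bump it until it
	 * collides with no other open context */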
3358 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3359 	ctx = sc->fibctx;
3360 	while (ctx != fibctx) {
3361 		if (ctx->unique == fibctx->unique) {
3362 			fibctx->unique++;
3363 			ctx = sc->fibctx;
3364 		} else {
3365 			ctx = ctx->next;
3366 		}
3367 	}
3368 
3369 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3370 	mtx_unlock(&sc->aac_io_lock);
3371 	if (error)
3372 		aac_close_aif(sc, (caddr_t)ctx);
3373 	return error;
3374 }
3375 
3376 /*
3377  * Close the caller's fib context
3378  */
3379 static int
3380 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3381 {
3382 	struct aac_fib_context *ctx;
3383 
3384 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3385 
3386 	mtx_lock(&sc->aac_io_lock);
3387 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3388 		if (ctx->unique == *(uint32_t *)&arg) {
3389 			if (ctx == sc->fibctx)
3390 				sc->fibctx = NULL;
3391 			else {
3392 				ctx->prev->next = ctx->next;
3393 				if (ctx->next)
3394 					ctx->next->prev = ctx->prev;
3395 			}
3396 			break;
3397 		}
3398 	}
3399 	if (ctx)
3400 		free(ctx, M_AACRAIDBUF);
3401 
3402 	mtx_unlock(&sc->aac_io_lock);
3403 	return 0;
3404 }
3405 
3406 /*
3407  * Pass the caller the next AIF in their queue
3408  */
3409 static int
3410 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3411 {
3412 	struct get_adapter_fib_ioctl agf;
3413 	struct aac_fib_context *ctx;
3414 	int error;
3415 
3416 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3417 
3418 	mtx_lock(&sc->aac_io_lock);
3419 #ifdef COMPAT_FREEBSD32
3420 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3421 		struct get_adapter_fib_ioctl32 agf32;
3422 		error = copyin(arg, &agf32, sizeof(agf32));
3423 		if (error == 0) {
3424 			agf.AdapterFibContext = agf32.AdapterFibContext;
3425 			agf.Wait = agf32.Wait;
3426 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3427 		}
3428 	} else
3429 #endif
3430 		error = copyin(arg, &agf, sizeof(agf));
3431 	if (error == 0) {
3432 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3433 			if (agf.AdapterFibContext == ctx->unique)
3434 				break;
3435 		}
3436 		if (!ctx) {
3437 			mtx_unlock(&sc->aac_io_lock);
3438 			return (EFAULT);
3439 		}
3440 
3441 		error = aac_return_aif(sc, ctx, agf.AifFib);
3442 		if (error == EAGAIN && agf.Wait) {
3443 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3444 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3445 			while (error == EAGAIN) {
3446 				mtx_unlock(&sc->aac_io_lock);
3447 				error = tsleep(sc->aac_aifq, PRIBIO |
3448 					       PCATCH, "aacaif", 0);
3449 				mtx_lock(&sc->aac_io_lock);
3450 				if (error == 0)
3451 					error = aac_return_aif(sc, ctx, agf.AifFib);
3452 			}
3453 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3454 		}
3455 	}
3456 	mtx_unlock(&sc->aac_io_lock);
3457 	return(error);
3458 }
3459 
3460 /*
3461  * Hand the next AIF off the top of the queue out to userspace.
3462  */
3463 static int
3464 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3465 {
3466 	int current, error;
3467 
3468 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3469 
3470 	current = ctx->ctx_idx;
3471 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3472 		/* empty */
3473 		return (EAGAIN);
3474 	}
3475 	error =
3476 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3477 	if (error)
3478 		device_printf(sc->aac_dev,
3479 		    "aac_return_aif: copyout returned %d\n", error);
3480 	else {
3481 		ctx->ctx_wrap = 0;
3482 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3483 	}
3484 	return(error);
3485 }
3486 
3487 static int
3488 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3489 {
3490 	struct aac_pci_info {
3491 		u_int32_t bus;
3492 		u_int32_t slot;
3493 	} pciinf;
3494 	int error;
3495 
3496 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3497 
3498 	pciinf.bus = pci_get_bus(sc->aac_dev);
3499 	pciinf.slot = pci_get_slot(sc->aac_dev);
3500 
3501 	error = copyout((caddr_t)&pciinf, uptr,
3502 			sizeof(struct aac_pci_info));
3503 
3504 	return (error);
3505 }
3506 
3507 static int
3508 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3509 {
3510 	struct aac_features f;
3511 	int error;
3512 
3513 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3514 
3515 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3516 		return (error);
3517 
3518 	/*
3519 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3520 	 * ALL zero in the featuresState, the driver will return the current
3521 	 * state of all the supported features; the data field will not be
3522 	 * valid.
3523 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3524 	 * a specific bit set in the featuresState, the driver will return the
3525 	 * current state of this specific feature and whatever data is
3526 	 * associated with the feature in the data field, or perform whatever
3527 	 * action the data field indicates.
3528 	 */
3529 	if (f.feat.fValue == 0) {
3530 		f.feat.fBits.largeLBA =
3531 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3532 		f.feat.fBits.JBODSupport = 1;
3533 		/* TODO: In the future, add other features state here as well */
3534 	} else {
3535 		if (f.feat.fBits.largeLBA)
3536 			f.feat.fBits.largeLBA =
3537 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3538 		/* TODO: Add other features state and data in the future */
3539 	}
3540 
3541 	error = copyout(&f, uptr, sizeof (f));
3542 	return (error);
3543 }
3544 
3545 /*
3546  * Give the userland some information about the container.  The AAC arch
3547  * expects the driver to be a SCSI passthrough type driver, so it expects
3548  * the containers to have b:t:l numbers.  Fake it.
3549  */
3550 static int
3551 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3552 {
3553 	struct aac_query_disk query_disk;
3554 	struct aac_container *co;
3555 	int error, id;
3556 
3557 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3558 
3559 	mtx_lock(&sc->aac_io_lock);
3560 	error = copyin(uptr, (caddr_t)&query_disk,
3561 		       sizeof(struct aac_query_disk));
3562 	if (error) {
3563 		mtx_unlock(&sc->aac_io_lock);
3564 		return (error);
3565 	}
3566 
3567 	id = query_disk.ContainerNumber;
3568 	if (id == -1) {
3569 		mtx_unlock(&sc->aac_io_lock);
3570 		return (EINVAL);
3571 	}
3572 
3573 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3574 		if (co->co_mntobj.ObjectId == id)
3575 			break;
3576 	}
3577 
3578 	if (co == NULL) {
3579 		query_disk.Valid = 0;
3580 		query_disk.Locked = 0;
3581 		query_disk.Deleted = 1;		/* XXX is this right? */
3582 	} else {
3583 		query_disk.Valid = 1;
3584 		query_disk.Locked = 1;
3585 		query_disk.Deleted = 0;
3586 		query_disk.Bus = device_get_unit(sc->aac_dev);
3587 		query_disk.Target = 0;
3588 		query_disk.Lun = 0;
3589 		query_disk.UnMapped = 0;
3590 	}
3591 
3592 	error = copyout((caddr_t)&query_disk, uptr,
3593 			sizeof(struct aac_query_disk));
3594 
3595 	mtx_unlock(&sc->aac_io_lock);
3596 	return (error);
3597 }
3598 
3599 static void
3600 aac_container_bus(struct aac_softc *sc)
3601 {
3602 	struct aac_sim *sim;
3603 	device_t child;
3604 
3605 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3606 	    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3607 	if (sim == NULL) {
3608 		device_printf(sc->aac_dev,
3609 		    "No memory to add container bus\n");
3610 		panic("Out of memory?!");
3611 	}
3612 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3613 	if (child == NULL) {
3614 		device_printf(sc->aac_dev,
3615 		    "device_add_child failed for container bus\n");
3616 		free(sim, M_AACRAIDBUF);
3617 		panic("Out of memory?!");
3618 	}
3619 
3620 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3621 	sim->BusNumber = 0;
3622 	sim->BusType = CONTAINER_BUS;
3623 	sim->InitiatorBusId = -1;
3624 	sim->aac_sc = sc;
3625 	sim->sim_dev = child;
3626 	sim->aac_cam = NULL;
3627 
3628 	device_set_ivars(child, sim);
3629 	device_set_desc(child, "Container Bus");
3630 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3631 	/*
3632 	device_set_desc(child, aac_describe_code(aac_container_types,
3633 			mir->MntTable[0].VolType));
3634 	*/
3635 	bus_generic_attach(sc->aac_dev);
3636 }
3637 
3638 static void
3639 aac_get_bus_info(struct aac_softc *sc)
3640 {
3641 	struct aac_fib *fib;
3642 	struct aac_ctcfg *c_cmd;
3643 	struct aac_ctcfg_resp *c_resp;
3644 	struct aac_vmioctl *vmi;
3645 	struct aac_vmi_businf_resp *vmi_resp;
3646 	struct aac_getbusinf businfo;
3647 	struct aac_sim *caminf;
3648 	device_t child;
3649 	int i, error;
3650 
3651 	mtx_lock(&sc->aac_io_lock);
3652 	aac_alloc_sync_fib(sc, &fib);
3653 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3654 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3655 
3656 	c_cmd->Command = VM_ContainerConfig;
3657 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3658 	c_cmd->param = 0;
3659 
3660 	aac_ctcfg_tole(c_cmd);
3661 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3662 	    sizeof(struct aac_ctcfg));
3663 	if (error) {
3664 		device_printf(sc->aac_dev, "Error %d sending "
3665 		    "VM_ContainerConfig command\n", error);
3666 		aac_release_sync_fib(sc);
3667 		mtx_unlock(&sc->aac_io_lock);
3668 		return;
3669 	}
3670 
3671 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3672 	aac_ctcfg_resp_toh(c_resp);
3673 	if (c_resp->Status != ST_OK) {
3674 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3675 		    c_resp->Status);
3676 		aac_release_sync_fib(sc);
3677 		mtx_unlock(&sc->aac_io_lock);
3678 		return;
3679 	}
3680 
3681 	sc->scsi_method_id = c_resp->param;
3682 
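	/*
	 * With the SCSI method id in hand, issue a VM_Ioctl/GetBusInfo
	 * request to learn how many physical buses the adapter exposes and
	 * which of them are valid for SCSI passthrough.
	 */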
3683 	vmi = (struct aac_vmioctl *)&fib->data[0];
3684 	bzero(vmi, sizeof(struct aac_vmioctl));
3685 
3686 	vmi->Command = VM_Ioctl;
3687 	vmi->ObjType = FT_DRIVE;
3688 	vmi->MethId = sc->scsi_method_id;
3689 	vmi->ObjId = 0;
3690 	vmi->IoctlCmd = GetBusInfo;
3691 
3692 	aac_vmioctl_tole(vmi);
3693 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3694 	    sizeof(struct aac_vmi_businf_resp));
3695 	if (error) {
3696 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3697 		    error);
3698 		aac_release_sync_fib(sc);
3699 		mtx_unlock(&sc->aac_io_lock);
3700 		return;
3701 	}
3702 
3703 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3704 	aac_vmi_businf_resp_toh(vmi_resp);
3705 	if (vmi_resp->Status != ST_OK) {
3706 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3707 		    vmi_resp->Status);
3708 		aac_release_sync_fib(sc);
3709 		mtx_unlock(&sc->aac_io_lock);
3710 		return;
3711 	}
3712 
3713 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3714 	aac_release_sync_fib(sc);
3715 	mtx_unlock(&sc->aac_io_lock);
3716 
3717 	for (i = 0; i < businfo.BusCount; i++) {
3718 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3719 			continue;
3720 
3721 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3722 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3723 		if (caminf == NULL) {
3724 			device_printf(sc->aac_dev,
3725 			    "No memory to add passthrough bus %d\n", i);
3726 			break;
3727 		}
3728 
3729 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3730 		if (child == NULL) {
3731 			device_printf(sc->aac_dev,
3732 			    "device_add_child failed for passthrough bus %d\n",
3733 			    i);
3734 			free(caminf, M_AACRAIDBUF);
3735 			break;
3736 		}
3737 
3738 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3739 		caminf->BusNumber = i+1;
3740 		caminf->BusType = PASSTHROUGH_BUS;
3741 		caminf->InitiatorBusId = -1;
3742 		caminf->aac_sc = sc;
3743 		caminf->sim_dev = child;
3744 		caminf->aac_cam = NULL;
3745 
3746 		device_set_ivars(child, caminf);
3747 		device_set_desc(child, "SCSI Passthrough Bus");
3748 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3749 	}
3750 }
3751 
3752 /*
3753  * Check to see if the kernel is up and running. If we are in a
3754  * BlinkLED state, return the BlinkLED code.
3755  */
3756 static u_int32_t
3757 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3758 {
3759 	u_int32_t ret;
3760 
3761 	ret = AAC_GET_FWSTATUS(sc);
3762 
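	/*
	 * The firmware status word is used directly here: AAC_UP_AND_RUNNING
	 * and AAC_KERNEL_PANIC are flag bits, and when the firmware has
	 * panicked the BlinkLED code is carried in bits 23:16 of the same
	 * word.
	 */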
3763 	if (ret & AAC_UP_AND_RUNNING)
3764 		ret = 0;
3765 	else if (ret & AAC_KERNEL_PANIC && bled)
3766 		*bled = (ret >> 16) & 0xff;
3767 
3768 	return (ret);
3769 }
3770 
3771 /*
3772  * After an IOP reset we basically have to re-initialize the card as
3773  * if it were coming up from a cold boot, and the driver is responsible
3774  * for any I/O that was outstanding to the adapter at the time of the
3775  * IOP RESET.  Prepare for this by keeping the init code modular so it
3776  * can be called from multiple places.
3777  */
3778 static int
3779 aac_reset_adapter(struct aac_softc *sc)
3780 {
3781 	struct aac_command *cm;
3782 	struct aac_fib *fib;
3783 	struct aac_pause_command *pc;
3784 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3785 	int ret, msi_enabled_orig;
3786 
3787 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3788 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3789 
3790 	if (sc->aac_state & AAC_STATE_RESET) {
3791 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3792 		return (EINVAL);
3793 	}
3794 	sc->aac_state |= AAC_STATE_RESET;
3795 
3796 	/* disable interrupt */
3797 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3798 
3799 	/*
3800 	 * Abort all pending commands:
3801 	 * a) on the controller
3802 	 */
3803 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3804 		cm->cm_flags |= AAC_CMD_RESET;
3805 
3806 		/* is there a completion handler? */
3807 		if (cm->cm_complete != NULL) {
3808 			cm->cm_complete(cm);
3809 		} else {
3810 			/* assume that someone is sleeping on this
3811 			 * command
3812 			 */
3813 			wakeup(cm);
3814 		}
3815 	}
3816 
3817 	/* b) in the waiting queues */
3818 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3819 		cm->cm_flags |= AAC_CMD_RESET;
3820 
3821 		/* is there a completion handler? */
3822 		if (cm->cm_complete != NULL) {
3823 			cm->cm_complete(cm);
3824 		} else {
3825 			/* assume that someone is sleeping on this
3826 			 * command
3827 			 */
3828 			wakeup(cm);
3829 		}
3830 	}
3831 
3832 	/* flush drives */
3833 	if (aac_check_adapter_health(sc, NULL) == 0) {
3834 		mtx_unlock(&sc->aac_io_lock);
3835 		(void) aacraid_shutdown(sc->aac_dev);
3836 		mtx_lock(&sc->aac_io_lock);
3837 	}
3838 
3839 	/* execute IOP reset */
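	/*
	 * Three paths are possible below: a direct core reset through the
	 * AAC_IRCSR register when MU reset is supported, an
	 * AAC_IOP_RESET_ALWAYS sync command that hands back a doorbell reset
	 * mask, or the legacy AAC_IOP_RESET sync command as a fallback for
	 * older firmware.
	 */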
3840 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3841 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3842 
3843 		/* We need to wait for 5 seconds before accessing the MU again:
3844 		 * 10000 iterations * 100us = 1,000,000us = 1s, so 5 * 10000 = 5s.
3845 		 */
3846 		waitCount = 5 * 10000;
3847 		while (waitCount) {
3848 			DELAY(100);			/* delay 100 microseconds */
3849 			waitCount--;
3850 		}
3851 	} else {
3852 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3853 			0, 0, 0, 0, &status, &reset_mask);
3854 		if (ret && !sc->doorbell_mask) {
3855 			/* call IOP_RESET for older firmware */
3856 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3857 			    &status, NULL)) != 0) {
3858 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3859 					device_printf(sc->aac_dev,
3860 					    "IOP_RESET not supported\n");
3861 				} else {
3862 					/* probably timeout */
3863 					device_printf(sc->aac_dev,
3864 					    "IOP_RESET failed\n");
3865 				}
3866 
3867 				/* unwind aac_shutdown() */
3868 				aac_alloc_sync_fib(sc, &fib);
3869 				pc = (struct aac_pause_command *)&fib->data[0];
3870 				pc->Command = VM_ContainerConfig;
3871 				pc->Type = CT_PAUSE_IO;
3872 				pc->Timeout = 1;
3873 				pc->Min = 1;
3874 				pc->NoRescan = 1;
3875 
3876 				aac_pause_command_tole(pc);
3877 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3878 				    fib, sizeof (struct aac_pause_command));
3879 				aac_release_sync_fib(sc);
3880 
3881 				goto finish;
3882 			}
3883 		} else if (sc->doorbell_mask) {
3884 			ret = 0;
3885 			reset_mask = sc->doorbell_mask;
3886 		}
3887 		if (!ret &&
3888 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3889 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3890 			/*
3891 			 * We need to wait for 5 seconds before accessing the
3892 			 * doorbell again:
3893 			 * 10000 iterations * 100us = 1s, so 5 * 10000 = 5s.
3894 			 */
3895 			waitCount = 5 * 10000;
3896 			while (waitCount) {
3897 				DELAY(100);	/* delay 100 microseconds */
3898 				waitCount--;
3899 			}
3900 		}
3901 	}
3902 
3903 	/*
3904 	 * Initialize the adapter.
3905 	 */
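	/*
	 * aac_check_firmware() runs with MSI temporarily disabled; the saved
	 * MSI-X/MSI settings are restored just before aac_init(), which is
	 * called without the I/O lock held.
	 */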
3906 	max_msix_orig = sc->aac_max_msix;
3907 	msi_enabled_orig = sc->msi_enabled;
3908 	sc->msi_enabled = FALSE;
3909 	if (aac_check_firmware(sc) != 0)
3910 		goto finish;
3911 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3912 		sc->aac_max_msix = max_msix_orig;
3913 		if (msi_enabled_orig) {
3914 			sc->msi_enabled = msi_enabled_orig;
3915 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3916 		}
3917 		mtx_unlock(&sc->aac_io_lock);
3918 		aac_init(sc);
3919 		mtx_lock(&sc->aac_io_lock);
3920 	}
3921 
3922 finish:
3923 	sc->aac_state &= ~AAC_STATE_RESET;
3924 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3925 	aacraid_startio(sc);
3926 	return (0);
3927 }
3928