xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 8ddb146abcdf061be9f2c0db7e391697dafad85c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 #include <dev/aacraid/aacraid_endian.h>
73 
74 #ifndef FILTER_HANDLED
75 #define FILTER_HANDLED	0x02
76 #endif
77 
78 static void	aac_add_container(struct aac_softc *sc,
79 				  struct aac_mntinforesp *mir, int f,
80 				  u_int32_t uid);
81 static void	aac_get_bus_info(struct aac_softc *sc);
82 static void	aac_container_bus(struct aac_softc *sc);
83 static void	aac_daemon(void *arg);
84 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
85 							  int pages, int nseg, int nseg_new);
86 
87 /* Command Processing */
88 static void	aac_timeout(struct aac_softc *sc);
89 static void	aac_command_thread(struct aac_softc *sc);
90 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
91 				     u_int32_t xferstate, struct aac_fib *fib,
92 				     u_int16_t datasize);
93 /* Command Buffer Management */
94 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
95 				       int nseg, int error);
96 static int	aac_alloc_commands(struct aac_softc *sc);
97 static void	aac_free_commands(struct aac_softc *sc);
98 static void	aac_unmap_command(struct aac_command *cm);
99 
100 /* Hardware Interface */
101 static int	aac_alloc(struct aac_softc *sc);
102 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
103 			       int error);
104 static int	aac_check_firmware(struct aac_softc *sc);
105 static void	aac_define_int_mode(struct aac_softc *sc);
106 static int	aac_init(struct aac_softc *sc);
107 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
108 static int	aac_setup_intr(struct aac_softc *sc);
109 static int	aac_check_config(struct aac_softc *sc);
110 
111 /* PMC SRC interface */
112 static int	aac_src_get_fwstatus(struct aac_softc *sc);
113 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
114 static int	aac_src_get_istatus(struct aac_softc *sc);
115 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
116 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
117 				    u_int32_t arg0, u_int32_t arg1,
118 				    u_int32_t arg2, u_int32_t arg3);
119 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
120 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
121 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
122 static int aac_src_get_outb_queue(struct aac_softc *sc);
123 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
124 
125 struct aac_interface aacraid_src_interface = {
126 	aac_src_get_fwstatus,
127 	aac_src_qnotify,
128 	aac_src_get_istatus,
129 	aac_src_clear_istatus,
130 	aac_src_set_mailbox,
131 	aac_src_get_mailbox,
132 	aac_src_access_devreg,
133 	aac_src_send_command,
134 	aac_src_get_outb_queue,
135 	aac_src_set_outb_queue
136 };
137 
138 /* PMC SRCv interface */
139 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 				    u_int32_t arg0, u_int32_t arg1,
141 				    u_int32_t arg2, u_int32_t arg3);
142 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
143 
144 struct aac_interface aacraid_srcv_interface = {
145 	aac_src_get_fwstatus,
146 	aac_src_qnotify,
147 	aac_src_get_istatus,
148 	aac_src_clear_istatus,
149 	aac_srcv_set_mailbox,
150 	aac_srcv_get_mailbox,
151 	aac_src_access_devreg,
152 	aac_src_send_command,
153 	aac_src_get_outb_queue,
154 	aac_src_set_outb_queue
155 };
156 
157 /* Debugging and Diagnostics */
158 static struct aac_code_lookup aac_cpu_variant[] = {
159 	{"i960JX",		CPUI960_JX},
160 	{"i960CX",		CPUI960_CX},
161 	{"i960HX",		CPUI960_HX},
162 	{"i960RX",		CPUI960_RX},
163 	{"i960 80303",		CPUI960_80303},
164 	{"StrongARM SA110",	CPUARM_SA110},
165 	{"PPC603e",		CPUPPC_603e},
166 	{"XScale 80321",	CPU_XSCALE_80321},
167 	{"MIPS 4KC",		CPU_MIPS_4KC},
168 	{"MIPS 5KC",		CPU_MIPS_5KC},
169 	{"Unknown StrongARM",	CPUARM_xxx},
170 	{"Unknown PowerPC",	CPUPPC_xxx},
171 	{NULL, 0},
172 	{"Unknown processor",	0}
173 };
174 
175 static struct aac_code_lookup aac_battery_platform[] = {
176 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
177 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
178 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
179 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
180 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
181 	{NULL, 0},
182 	{"unknown battery platform",		0}
183 };
184 static void	aac_describe_controller(struct aac_softc *sc);
185 static char	*aac_describe_code(struct aac_code_lookup *table,
186 				   u_int32_t code);
187 
188 /* Management Interface */
189 static d_open_t		aac_open;
190 static d_ioctl_t	aac_ioctl;
191 static d_poll_t		aac_poll;
192 static void		aac_cdevpriv_dtor(void *arg);
193 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void	aac_request_aif(struct aac_softc *sc);
197 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int	aac_return_aif(struct aac_softc *sc,
202 			       struct aac_fib_context *ctx, caddr_t uptr);
203 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void	aac_ioctl_event(struct aac_softc *sc,
207 				struct aac_event *event, void *arg);
208 static int	aac_reset_adapter(struct aac_softc *sc);
209 static int	aac_get_container_info(struct aac_softc *sc,
210 				       struct aac_fib *fib, int cid,
211 				       struct aac_mntinforesp *mir,
212 				       u_int32_t *uid);
213 static u_int32_t
214 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
215 
216 static struct cdevsw aacraid_cdevsw = {
217 	.d_version =	D_VERSION,
218 	.d_flags =	0,
219 	.d_open =	aac_open,
220 	.d_ioctl =	aac_ioctl,
221 	.d_poll =	aac_poll,
222 	.d_name =	"aacraid",
223 };
224 
225 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
226 
227 /* sysctl node */
228 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
229     "AACRAID driver parameters");
230 
231 /*
232  * Device Interface
233  */
234 
235 /*
236  * Initialize the controller and softc
237  */
238 int
239 aacraid_attach(struct aac_softc *sc)
240 {
241 	int error, unit;
242 	struct aac_fib *fib;
243 	struct aac_mntinforesp mir;
244 	int count = 0, i = 0;
245 	u_int32_t uid;
246 
247 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
248 	sc->hint_flags = device_get_flags(sc->aac_dev);
249 	/*
250 	 * Initialize per-controller queues.
251 	 */
252 	aac_initq_free(sc);
253 	aac_initq_ready(sc);
254 	aac_initq_busy(sc);
255 
256 	/* mark controller as suspended until we get ourselves organised */
257 	sc->aac_state |= AAC_STATE_SUSPEND;
258 
259 	/*
260 	 * Check that the firmware on the card is supported.
261 	 */
262 	sc->msi_enabled = sc->msi_tupelo = FALSE;
263 	if ((error = aac_check_firmware(sc)) != 0)
264 		return(error);
265 
266 	/*
267 	 * Initialize locks
268 	 */
269 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
270 	TAILQ_INIT(&sc->aac_container_tqh);
271 	TAILQ_INIT(&sc->aac_ev_cmfree);
272 
273 	/* Initialize the clock daemon callout. */
274 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
275 
276 	/*
277 	 * Initialize the adapter.
278 	 */
279 	if ((error = aac_alloc(sc)) != 0)
280 		return(error);
281 	aac_define_int_mode(sc);
282 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
283 		if ((error = aac_init(sc)) != 0)
284 			return(error);
285 	}
286 
287 	/*
288 	 * Allocate and connect our interrupt.
289 	 */
290 	if ((error = aac_setup_intr(sc)) != 0)
291 		return(error);
292 
293 	/*
294 	 * Print a little information about the controller.
295 	 */
296 	aac_describe_controller(sc);
297 
298 	/*
299 	 * Make the control device.
300 	 */
301 	unit = device_get_unit(sc->aac_dev);
302 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
303 				 0640, "aacraid%d", unit);
304 	sc->aac_dev_t->si_drv1 = sc;
305 
306 	/* Create the AIF thread */
307 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
308 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
309 		panic("Could not create AIF thread");
310 
311 	/* Register the shutdown method to only be called post-dump */
312 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
313 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
314 		device_printf(sc->aac_dev,
315 			      "shutdown event registration failed\n");
316 
317 	/* Find containers */
318 	mtx_lock(&sc->aac_io_lock);
319 	aac_alloc_sync_fib(sc, &fib);
320 	/* loop over possible containers */
321 	do {
322 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
323 			continue;
324 		if (i == 0)
325 			count = mir.MntRespCount;
326 		aac_add_container(sc, &mir, 0, uid);
327 		i++;
328 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
329 	aac_release_sync_fib(sc);
330 	mtx_unlock(&sc->aac_io_lock);
331 
332 	/* Register with CAM for the containers */
333 	TAILQ_INIT(&sc->aac_sim_tqh);
334 	aac_container_bus(sc);
335 	/* Register with CAM for the non-DASD devices */
336 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
337 		aac_get_bus_info(sc);
338 
339 	/* poke the bus to actually attach the child devices */
340 	bus_generic_attach(sc->aac_dev);
341 
342 	/* mark the controller up */
343 	sc->aac_state &= ~AAC_STATE_SUSPEND;
344 
345 	/* enable interrupts now */
346 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
347 
348 	mtx_lock(&sc->aac_io_lock);
349 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
350 	mtx_unlock(&sc->aac_io_lock);
351 
352 	return(0);
353 }
354 
355 static void
356 aac_daemon(void *arg)
357 {
358 	struct aac_softc *sc;
359 	struct timeval tv;
360 	struct aac_command *cm;
361 	struct aac_fib *fib;
362 
363 	sc = arg;
364 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
365 
366 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
367 	if (callout_pending(&sc->aac_daemontime) ||
368 	    callout_active(&sc->aac_daemontime) == 0)
369 		return;
370 	getmicrotime(&tv);
371 
372 	if (!aacraid_alloc_command(sc, &cm)) {
373 		fib = cm->cm_fib;
374 		cm->cm_timestamp = time_uptime;
375 		cm->cm_datalen = 0;
376 		cm->cm_flags |= AAC_CMD_WAIT;
377 
378 		fib->Header.Size =
379 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
380 		fib->Header.XferState =
381 			AAC_FIBSTATE_HOSTOWNED   |
382 			AAC_FIBSTATE_INITIALISED |
383 			AAC_FIBSTATE_EMPTY	 |
384 			AAC_FIBSTATE_FROMHOST	 |
385 			AAC_FIBSTATE_REXPECTED   |
386 			AAC_FIBSTATE_NORM	 |
387 			AAC_FIBSTATE_ASYNC	 |
388 			AAC_FIBSTATE_FAST_RESPONSE;
389 		fib->Header.Command = SendHostTime;
390 		*(uint32_t *)fib->data = htole32(tv.tv_sec);
391 
392 		aacraid_map_command_sg(cm, NULL, 0, 0);
393 		aacraid_release_command(cm);
394 	}
395 
396 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
397 }
398 
399 void
400 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
401 {
402 
403 	switch (event->ev_type & AAC_EVENT_MASK) {
404 	case AAC_EVENT_CMFREE:
405 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
406 		break;
407 	default:
408 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
409 		    event->ev_type);
410 		break;
411 	}
412 
413 	return;
414 }
415 
416 /*
417  * Request information of container #cid
418  */
419 static int
420 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
421 		       struct aac_mntinforesp *mir, u_int32_t *uid)
422 {
423 	struct aac_command *cm;
424 	struct aac_fib *fib;
425 	struct aac_mntinfo *mi;
426 	struct aac_cnt_config *ccfg;
427 	int rval;
428 
429 	if (sync_fib == NULL) {
430 		if (aacraid_alloc_command(sc, &cm)) {
431 			device_printf(sc->aac_dev,
432 				"Warning, no free command available\n");
433 			return (-1);
434 		}
435 		fib = cm->cm_fib;
436 	} else {
437 		fib = sync_fib;
438 	}
439 
440 	mi = (struct aac_mntinfo *)&fib->data[0];
441 	/* 4KB support?, 64-bit LBA? */
442 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
443 		mi->Command = VM_NameServeAllBlk;
444 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
445 		mi->Command = VM_NameServe64;
446 	else
447 		mi->Command = VM_NameServe;
448 	mi->MntType = FT_FILESYS;
449 	mi->MntCount = cid;
450 	aac_mntinfo_tole(mi);
451 
452 	if (sync_fib) {
453 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
454 			 sizeof(struct aac_mntinfo))) {
455 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
456 			return (-1);
457 		}
458 	} else {
459 		cm->cm_timestamp = time_uptime;
460 		cm->cm_datalen = 0;
461 
462 		fib->Header.Size =
463 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
464 		fib->Header.XferState =
465 			AAC_FIBSTATE_HOSTOWNED   |
466 			AAC_FIBSTATE_INITIALISED |
467 			AAC_FIBSTATE_EMPTY	 |
468 			AAC_FIBSTATE_FROMHOST	 |
469 			AAC_FIBSTATE_REXPECTED   |
470 			AAC_FIBSTATE_NORM	 |
471 			AAC_FIBSTATE_ASYNC	 |
472 			AAC_FIBSTATE_FAST_RESPONSE;
473 		fib->Header.Command = ContainerCommand;
474 		if (aacraid_wait_command(cm) != 0) {
475 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
476 			aacraid_release_command(cm);
477 			return (-1);
478 		}
479 	}
480 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
481 	aac_mntinforesp_toh(mir);
482 
483 	/* UID */
484 	*uid = cid;
485 	if (mir->MntTable[0].VolType != CT_NONE &&
486 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
487 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
488 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
489 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
490 		}
491 		ccfg = (struct aac_cnt_config *)&fib->data[0];
492 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
493 		ccfg->Command = VM_ContainerConfig;
494 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
495 		ccfg->CTCommand.param[0] = cid;
496 		aac_cnt_config_tole(ccfg);
497 
498 		if (sync_fib) {
499 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
500 				sizeof(struct aac_cnt_config));
501 			aac_cnt_config_toh(ccfg);
502 			if (rval == 0 && ccfg->Command == ST_OK &&
503 				ccfg->CTCommand.param[0] == CT_OK &&
504 				mir->MntTable[0].VolType != CT_PASSTHRU)
505 				*uid = ccfg->CTCommand.param[1];
506 		} else {
507 			fib->Header.Size =
508 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
509 			fib->Header.XferState =
510 				AAC_FIBSTATE_HOSTOWNED   |
511 				AAC_FIBSTATE_INITIALISED |
512 				AAC_FIBSTATE_EMPTY	 |
513 				AAC_FIBSTATE_FROMHOST	 |
514 				AAC_FIBSTATE_REXPECTED   |
515 				AAC_FIBSTATE_NORM	 |
516 				AAC_FIBSTATE_ASYNC	 |
517 				AAC_FIBSTATE_FAST_RESPONSE;
518 			fib->Header.Command = ContainerCommand;
519 			rval = aacraid_wait_command(cm);
520 			aac_cnt_config_toh(ccfg);
521 			if (rval == 0 && ccfg->Command == ST_OK &&
522 				ccfg->CTCommand.param[0] == CT_OK &&
523 				mir->MntTable[0].VolType != CT_PASSTHRU)
524 				*uid = ccfg->CTCommand.param[1];
525 			aacraid_release_command(cm);
526 		}
527 	}
528 
529 	return (0);
530 }
531 
532 /*
533  * Create a device to represent a new container
534  */
535 static void
536 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
537 		  u_int32_t uid)
538 {
539 	struct aac_container *co;
540 
541 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
542 
543 	/*
544 	 * Check container volume type for validity.  Note that many of
545 	 * the possible types may never show up.
546 	 */
547 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
548 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
549 		       M_NOWAIT | M_ZERO);
550 		if (co == NULL) {
551 			panic("Out of memory?!");
552 		}
553 
554 		co->co_found = f;
555 		bcopy(&mir->MntTable[0], &co->co_mntobj,
556 		      sizeof(struct aac_mntobj));
557 		co->co_uid = uid;
558 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
559 	}
560 }
561 
562 /*
563  * Allocate resources associated with (sc)
564  */
565 static int
566 aac_alloc(struct aac_softc *sc)
567 {
568 	bus_size_t maxsize;
569 
570 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
571 
572 	/*
573 	 * Create DMA tag for mapping buffers into controller-addressable space.
574 	 */
575 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
576 			       1, 0, 			/* algnmnt, boundary */
577 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
578 			       BUS_SPACE_MAXADDR :
579 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
580 			       BUS_SPACE_MAXADDR, 	/* highaddr */
581 			       NULL, NULL, 		/* filter, filterarg */
582 			       AAC_MAXIO_SIZE(sc),	/* maxsize */
583 			       sc->aac_sg_tablesize,	/* nsegments */
584 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
585 			       BUS_DMA_ALLOCNOW,	/* flags */
586 			       busdma_lock_mutex,	/* lockfunc */
587 			       &sc->aac_io_lock,	/* lockfuncarg */
588 			       &sc->aac_buffer_dmat)) {
589 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
590 		return (ENOMEM);
591 	}
592 
593 	/*
594 	 * Create DMA tag for mapping FIBs into controller-addressable space..
595 	 */
596 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
597 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
598 			sizeof(struct aac_fib_xporthdr) + 31);
599 	else
600 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
601 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
602 			       1, 0, 			/* algnmnt, boundary */
603 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
604 			       BUS_SPACE_MAXADDR_32BIT :
605 			       0x7fffffff,		/* lowaddr */
606 			       BUS_SPACE_MAXADDR, 	/* highaddr */
607 			       NULL, NULL, 		/* filter, filterarg */
608 			       maxsize,  		/* maxsize */
609 			       1,			/* nsegments */
610 			       maxsize,			/* maxsize */
611 			       0,			/* flags */
612 			       NULL, NULL,		/* No locking needed */
613 			       &sc->aac_fib_dmat)) {
614 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
615 		return (ENOMEM);
616 	}
617 
618 	/*
619 	 * Create DMA tag for the common structure and allocate it.
620 	 */
621 	maxsize = sizeof(struct aac_common);
622 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
623 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
624 			       1, 0,			/* algnmnt, boundary */
625 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
626 			       BUS_SPACE_MAXADDR_32BIT :
627 			       0x7fffffff,		/* lowaddr */
628 			       BUS_SPACE_MAXADDR, 	/* highaddr */
629 			       NULL, NULL, 		/* filter, filterarg */
630 			       maxsize, 		/* maxsize */
631 			       1,			/* nsegments */
632 			       maxsize,			/* maxsegsize */
633 			       0,			/* flags */
634 			       NULL, NULL,		/* No locking needed */
635 			       &sc->aac_common_dmat)) {
636 		device_printf(sc->aac_dev,
637 			      "can't allocate common structure DMA tag\n");
638 		return (ENOMEM);
639 	}
640 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
641 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
642 		device_printf(sc->aac_dev, "can't allocate common structure\n");
643 		return (ENOMEM);
644 	}
645 
646 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
647 			sc->aac_common, maxsize,
648 			aac_common_map, sc, 0);
649 	bzero(sc->aac_common, maxsize);
650 
651 	/* Allocate some FIBs and associated command structs */
652 	TAILQ_INIT(&sc->aac_fibmap_tqh);
653 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
654 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
655 	mtx_lock(&sc->aac_io_lock);
656 	while (sc->total_fibs < sc->aac_max_fibs) {
657 		if (aac_alloc_commands(sc) != 0)
658 			break;
659 	}
660 	mtx_unlock(&sc->aac_io_lock);
661 	if (sc->total_fibs == 0)
662 		return (ENOMEM);
663 
664 	return (0);
665 }
666 
667 /*
668  * Free all of the resources associated with (sc)
669  *
670  * Should not be called if the controller is active.
671  */
672 void
673 aacraid_free(struct aac_softc *sc)
674 {
675 	int i;
676 
677 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
678 
679 	/* remove the control device */
680 	if (sc->aac_dev_t != NULL)
681 		destroy_dev(sc->aac_dev_t);
682 
683 	/* throw away any FIB buffers, discard the FIB DMA tag */
684 	aac_free_commands(sc);
685 	if (sc->aac_fib_dmat)
686 		bus_dma_tag_destroy(sc->aac_fib_dmat);
687 
688 	free(sc->aac_commands, M_AACRAIDBUF);
689 
690 	/* destroy the common area */
691 	if (sc->aac_common) {
692 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
693 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
694 				sc->aac_common_dmamap);
695 	}
696 	if (sc->aac_common_dmat)
697 		bus_dma_tag_destroy(sc->aac_common_dmat);
698 
699 	/* disconnect the interrupt handler */
700 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
701 		if (sc->aac_intr[i])
702 			bus_teardown_intr(sc->aac_dev,
703 				sc->aac_irq[i], sc->aac_intr[i]);
704 		if (sc->aac_irq[i])
705 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
706 				sc->aac_irq_rid[i], sc->aac_irq[i]);
707 		else
708 			break;
709 	}
710 	if (sc->msi_enabled || sc->msi_tupelo)
711 		pci_release_msi(sc->aac_dev);
712 
713 	/* destroy data-transfer DMA tag */
714 	if (sc->aac_buffer_dmat)
715 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
716 
717 	/* destroy the parent DMA tag */
718 	if (sc->aac_parent_dmat)
719 		bus_dma_tag_destroy(sc->aac_parent_dmat);
720 
721 	/* release the register window mapping */
722 	if (sc->aac_regs_res0 != NULL)
723 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
724 				     sc->aac_regs_rid0, sc->aac_regs_res0);
725 	if (sc->aac_regs_res1 != NULL)
726 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
727 				     sc->aac_regs_rid1, sc->aac_regs_res1);
728 }
729 
730 /*
731  * Disconnect from the controller completely, in preparation for unload.
732  */
733 int
734 aacraid_detach(device_t dev)
735 {
736 	struct aac_softc *sc;
737 	struct aac_container *co;
738 	struct aac_sim	*sim;
739 	int error;
740 
741 	sc = device_get_softc(dev);
742 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
743 
744 	callout_drain(&sc->aac_daemontime);
745 	/* Remove the child containers */
746 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
747 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
748 		free(co, M_AACRAIDBUF);
749 	}
750 
751 	/* Remove the CAM SIMs */
752 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
753 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
754 		error = device_delete_child(dev, sim->sim_dev);
755 		if (error)
756 			return (error);
757 		free(sim, M_AACRAIDBUF);
758 	}
759 
760 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
761 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
762 		wakeup(sc->aifthread);
763 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
764 	}
765 
766 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
767 		panic("Cannot shutdown AIF thread");
768 
769 	if ((error = aacraid_shutdown(dev)))
770 		return(error);
771 
772 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
773 
774 	aacraid_free(sc);
775 
776 	mtx_destroy(&sc->aac_io_lock);
777 
778 	return(0);
779 }
780 
781 /*
782  * Bring the controller down to a dormant state and detach all child devices.
783  *
784  * This function is called before detach or system shutdown.
785  *
786  * Note that we can assume that the bioq on the controller is empty, as we won't
787  * allow shutdown if any device is open.
788  */
789 int
790 aacraid_shutdown(device_t dev)
791 {
792 	struct aac_softc *sc;
793 	struct aac_fib *fib;
794 	struct aac_close_command *cc;
795 
796 	sc = device_get_softc(dev);
797 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
798 
799 	sc->aac_state |= AAC_STATE_SUSPEND;
800 
801 	/*
802 	 * Send a Container shutdown followed by a HostShutdown FIB to the
803 	 * controller to convince it that we don't want to talk to it anymore.
804 	 * We've been closed and all I/O completed already
805 	 */
806 	device_printf(sc->aac_dev, "shutting down controller...");
807 
808 	mtx_lock(&sc->aac_io_lock);
809 	aac_alloc_sync_fib(sc, &fib);
810 	cc = (struct aac_close_command *)&fib->data[0];
811 
812 	bzero(cc, sizeof(struct aac_close_command));
813 	cc->Command = htole32(VM_CloseAll);
814 	cc->ContainerId = htole32(0xfffffffe);
815 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
816 	    sizeof(struct aac_close_command)))
817 		printf("FAILED.\n");
818 	else
819 		printf("done\n");
820 
821 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
822 	aac_release_sync_fib(sc);
823 	mtx_unlock(&sc->aac_io_lock);
824 
825 	return(0);
826 }
827 
828 /*
829  * Bring the controller to a quiescent state, ready for system suspend.
830  */
831 int
832 aacraid_suspend(device_t dev)
833 {
834 	struct aac_softc *sc;
835 
836 	sc = device_get_softc(dev);
837 
838 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
839 	sc->aac_state |= AAC_STATE_SUSPEND;
840 
841 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
842 	return(0);
843 }
844 
845 /*
846  * Bring the controller back to a state ready for operation.
847  */
848 int
849 aacraid_resume(device_t dev)
850 {
851 	struct aac_softc *sc;
852 
853 	sc = device_get_softc(dev);
854 
855 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
856 	sc->aac_state &= ~AAC_STATE_SUSPEND;
857 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
858 	return(0);
859 }
860 
861 /*
862  * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
863  */
864 void
865 aacraid_new_intr_type1(void *arg)
866 {
867 	struct aac_msix_ctx *ctx;
868 	struct aac_softc *sc;
869 	int vector_no;
870 	struct aac_command *cm;
871 	struct aac_fib *fib;
872 	u_int32_t bellbits, bellbits_shifted, index, handle;
873 	int isFastResponse, isAif, noMoreAif, mode;
874 
875 	ctx = (struct aac_msix_ctx *)arg;
876 	sc = ctx->sc;
877 	vector_no = ctx->vector_no;
878 
879 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
880 	mtx_lock(&sc->aac_io_lock);
881 
882 	if (sc->msi_enabled) {
883 		mode = AAC_INT_MODE_MSI;
884 		if (vector_no == 0) {
885 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
886 			if (bellbits & 0x40000)
887 				mode |= AAC_INT_MODE_AIF;
888 			else if (bellbits & 0x1000)
889 				mode |= AAC_INT_MODE_SYNC;
890 		}
891 	} else {
892 		mode = AAC_INT_MODE_INTX;
893 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
894 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
895 			bellbits = AAC_DB_RESPONSE_SENT_NS;
896 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
897 		} else {
898 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
899 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
900 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
901 				mode |= AAC_INT_MODE_AIF;
902 			if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
903 				mode |= AAC_INT_MODE_SYNC;
904 		}
905 		/* ODR readback, Prep #238630 */
906 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
907 	}
908 
909 	if (mode & AAC_INT_MODE_SYNC) {
910 		if (sc->aac_sync_cm) {
911 			cm = sc->aac_sync_cm;
912 			aac_unmap_command(cm);
913 			cm->cm_flags |= AAC_CMD_COMPLETED;
914 			aac_fib_header_toh(&cm->cm_fib->Header);
915 
916 			/* is there a completion handler? */
917 			if (cm->cm_complete != NULL) {
918 				cm->cm_complete(cm);
919 			} else {
920 				/* assume that someone is sleeping on this command */
921 				wakeup(cm);
922 			}
923 			sc->flags &= ~AAC_QUEUE_FRZN;
924 			sc->aac_sync_cm = NULL;
925 		}
926 		if (mode & AAC_INT_MODE_INTX)
927 			mode &= ~AAC_INT_MODE_SYNC;
928 		else
929 			mode = 0;
930 	}
931 
932 	if (mode & AAC_INT_MODE_AIF) {
933 		if (mode & AAC_INT_MODE_INTX) {
934 			aac_request_aif(sc);
935 			mode = 0;
936 		}
937 	}
938 
939 	if (sc->flags & AAC_FLAGS_SYNC_MODE)
940 		mode = 0;
941 
942 	if (mode) {
943 		/* handle async. status */
944 		index = sc->aac_host_rrq_idx[vector_no];
945 		for (;;) {
946 			isFastResponse = isAif = noMoreAif = 0;
947 			/* remove toggle bit (31) */
948 			handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
949 			    0x7fffffff);
950 			/* check fast response bit (30) */
951 			if (handle & 0x40000000)
952 				isFastResponse = 1;
953 			/* check AIF bit (23) */
954 			else if (handle & 0x00800000)
955 				isAif = TRUE;
956 			handle &= 0x0000ffff;
957 			if (handle == 0)
958 				break;
959 
960 			cm = sc->aac_commands + (handle - 1);
961 			fib = cm->cm_fib;
962 			aac_fib_header_toh(&fib->Header);
963 			sc->aac_rrq_outstanding[vector_no]--;
964 			if (isAif) {
965 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
966 				if (!noMoreAif)
967 					aac_handle_aif(sc, fib);
968 				aac_remove_busy(cm);
969 				aacraid_release_command(cm);
970 			} else {
971 				if (isFastResponse) {
972 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
973 					*((u_int32_t *)(fib->data)) = htole32(ST_OK);
974 					cm->cm_flags |= AAC_CMD_FASTRESP;
975 				}
976 				aac_remove_busy(cm);
977 				aac_unmap_command(cm);
978 				cm->cm_flags |= AAC_CMD_COMPLETED;
979 
980 				/* is there a completion handler? */
981 				if (cm->cm_complete != NULL) {
982 					cm->cm_complete(cm);
983 				} else {
984 					/* assume that someone is sleeping on this command */
985 					wakeup(cm);
986 				}
987 				sc->flags &= ~AAC_QUEUE_FRZN;
988 			}
989 
990 			sc->aac_common->ac_host_rrq[index++] = 0;
991 			if (index == (vector_no + 1) * sc->aac_vector_cap)
992 				index = vector_no * sc->aac_vector_cap;
993 			sc->aac_host_rrq_idx[vector_no] = index;
994 
995 			if ((isAif && !noMoreAif) || sc->aif_pending)
996 				aac_request_aif(sc);
997 		}
998 	}
999 
1000 	if (mode & AAC_INT_MODE_AIF) {
1001 		aac_request_aif(sc);
1002 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1003 		mode = 0;
1004 	}
1005 
1006 	/* see if we can start some more I/O */
1007 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1008 		aacraid_startio(sc);
1009 	mtx_unlock(&sc->aac_io_lock);
1010 }
1011 
1012 /*
1013  * Handle notification of one or more FIBs coming from the controller.
1014  */
1015 static void
1016 aac_command_thread(struct aac_softc *sc)
1017 {
1018 	int retval;
1019 
1020 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1021 
1022 	mtx_lock(&sc->aac_io_lock);
1023 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1024 
1025 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1026 		retval = 0;
1027 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1028 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1029 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1030 
1031 		/*
1032 		 * First see if any FIBs need to be allocated.
1033 		 */
1034 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1035 			aac_alloc_commands(sc);
1036 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1037 			aacraid_startio(sc);
1038 		}
1039 
1040 		/*
1041 		 * While we're here, check to see if any commands are stuck.
1042 		 * This is pretty low-priority, so it's ok if it doesn't
1043 		 * always fire.
1044 		 */
1045 		if (retval == EWOULDBLOCK)
1046 			aac_timeout(sc);
1047 
1048 		/* Check the hardware printf message buffer */
1049 		if (sc->aac_common->ac_printf[0] != 0)
1050 			aac_print_printf(sc);
1051 	}
1052 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1053 	mtx_unlock(&sc->aac_io_lock);
1054 	wakeup(sc->aac_dev);
1055 
1056 	aac_kthread_exit(0);
1057 }
1058 
1059 /*
1060  * Submit a command to the controller, return when it completes.
1061  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1062  *     be stuck here forever.  At the same time, signals are not caught
1063  *     because there is a risk that a signal could wakeup the sleep before
1064  *     the card has a chance to complete the command.  Since there is no way
1065  *     to cancel a command that is in progress, we can't protect against the
1066  *     card completing a command late and spamming the command and data
1067  *     memory.  So, we are held hostage until the command completes.
1068  */
1069 int
1070 aacraid_wait_command(struct aac_command *cm)
1071 {
1072 	struct aac_softc *sc;
1073 	int error;
1074 
1075 	sc = cm->cm_sc;
1076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1077 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1078 
1079 	/* Put the command on the ready queue and get things going */
1080 	aac_enqueue_ready(cm);
1081 	aacraid_startio(sc);
1082 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1083 	return(error);
1084 }
1085 
1086 /*
1087  *Command Buffer Management
1088  */
1089 
1090 /*
1091  * Allocate a command.
1092  */
1093 int
1094 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1095 {
1096 	struct aac_command *cm;
1097 
1098 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1099 
1100 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1101 		if (sc->total_fibs < sc->aac_max_fibs) {
1102 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1103 			wakeup(sc->aifthread);
1104 		}
1105 		return (EBUSY);
1106 	}
1107 
1108 	*cmp = cm;
1109 	return(0);
1110 }
1111 
1112 /*
1113  * Release a command back to the freelist.
1114  */
1115 void
1116 aacraid_release_command(struct aac_command *cm)
1117 {
1118 	struct aac_event *event;
1119 	struct aac_softc *sc;
1120 
1121 	sc = cm->cm_sc;
1122 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1123 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1124 
1125 	/* (re)initialize the command/FIB */
1126 	cm->cm_sgtable = NULL;
1127 	cm->cm_flags = 0;
1128 	cm->cm_complete = NULL;
1129 	cm->cm_ccb = NULL;
1130 	cm->cm_passthr_dmat = 0;
1131 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1132 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1133 	cm->cm_fib->Header.Unused = 0;
1134 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1135 
1136 	/*
1137 	 * These are duplicated in aac_start to cover the case where an
1138 	 * intermediate stage may have destroyed them.  They're left
1139 	 * initialized here for debugging purposes only.
1140 	 */
1141 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1142 	cm->cm_fib->Header.Handle = 0;
1143 
1144 	aac_enqueue_free(cm);
1145 
1146 	/*
1147 	 * Dequeue all events so that there's no risk of events getting
1148 	 * stranded.
1149 	 */
1150 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1151 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1152 		event->ev_callback(sc, event, event->ev_arg);
1153 	}
1154 }
1155 
1156 /*
1157  * Map helper for command/FIB allocation.
1158  */
1159 static void
1160 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1161 {
1162 	uint64_t	*fibphys;
1163 
1164 	fibphys = (uint64_t *)arg;
1165 
1166 	*fibphys = segs[0].ds_addr;
1167 }
1168 
1169 /*
1170  * Allocate and initialize commands/FIBs for this adapter.
1171  */
1172 static int
1173 aac_alloc_commands(struct aac_softc *sc)
1174 {
1175 	struct aac_command *cm;
1176 	struct aac_fibmap *fm;
1177 	uint64_t fibphys;
1178 	int i, error;
1179 	u_int32_t maxsize;
1180 
1181 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1182 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1183 
1184 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1185 		return (ENOMEM);
1186 
1187 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1188 	if (fm == NULL)
1189 		return (ENOMEM);
1190 
1191 	mtx_unlock(&sc->aac_io_lock);
1192 	/* allocate the FIBs in DMAable memory and load them */
1193 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1194 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1195 		device_printf(sc->aac_dev,
1196 			      "Not enough contiguous memory available.\n");
1197 		free(fm, M_AACRAIDBUF);
1198 		mtx_lock(&sc->aac_io_lock);
1199 		return (ENOMEM);
1200 	}
1201 
1202 	maxsize = sc->aac_max_fib_size + 31;
1203 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1204 		maxsize += sizeof(struct aac_fib_xporthdr);
1205 	/* Ignore errors since this doesn't bounce */
1206 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1207 			      sc->aac_max_fibs_alloc * maxsize,
1208 			      aac_map_command_helper, &fibphys, 0);
1209 	mtx_lock(&sc->aac_io_lock);
1210 
1211 	/* initialize constant fields in the command structure */
1212 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1213 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1214 		cm = sc->aac_commands + sc->total_fibs;
1215 		fm->aac_commands = cm;
1216 		cm->cm_sc = sc;
1217 		cm->cm_fib = (struct aac_fib *)
1218 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1219 		cm->cm_fibphys = fibphys + i * maxsize;
1220 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1221 			u_int64_t fibphys_aligned;
1222 			fibphys_aligned =
1223 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1224 			cm->cm_fib = (struct aac_fib *)
1225 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1226 			cm->cm_fibphys = fibphys_aligned;
1227 		} else {
1228 			u_int64_t fibphys_aligned;
1229 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1230 			cm->cm_fib = (struct aac_fib *)
1231 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1232 			cm->cm_fibphys = fibphys_aligned;
1233 		}
1234 		cm->cm_index = sc->total_fibs;
1235 
1236 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1237 					       &cm->cm_datamap)) != 0)
1238 			break;
1239 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1240 			aacraid_release_command(cm);
1241 		sc->total_fibs++;
1242 	}
1243 
1244 	if (i > 0) {
1245 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1246 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1247 		return (0);
1248 	}
1249 
1250 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1251 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1252 	free(fm, M_AACRAIDBUF);
1253 	return (ENOMEM);
1254 }
1255 
1256 /*
1257  * Free FIBs owned by this adapter.
1258  */
1259 static void
1260 aac_free_commands(struct aac_softc *sc)
1261 {
1262 	struct aac_fibmap *fm;
1263 	struct aac_command *cm;
1264 	int i;
1265 
1266 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1267 
1268 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1269 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1270 		/*
1271 		 * We check against total_fibs to handle partially
1272 		 * allocated blocks.
1273 		 */
1274 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1275 			cm = fm->aac_commands + i;
1276 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1277 		}
1278 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1279 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1280 		free(fm, M_AACRAIDBUF);
1281 	}
1282 }
1283 
1284 /*
1285  * Command-mapping helper function - populate this command's s/g table.
1286  */
1287 void
1288 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1289 {
1290 	struct aac_softc *sc;
1291 	struct aac_command *cm;
1292 	struct aac_fib *fib;
1293 	int i;
1294 
1295 	cm = (struct aac_command *)arg;
1296 	sc = cm->cm_sc;
1297 	fib = cm->cm_fib;
1298 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1299 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1300 
1301 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1302 		return;
1303 
1304 	/* copy into the FIB */
1305 	if (cm->cm_sgtable != NULL) {
1306 		if (fib->Header.Command == RawIo2) {
1307 			struct aac_raw_io2 *raw;
1308 			struct aac_sge_ieee1212 *sg;
1309 			u_int32_t min_size = PAGE_SIZE, cur_size;
1310 			int conformable = TRUE;
1311 
1312 			raw = (struct aac_raw_io2 *)&fib->data[0];
1313 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1314 			raw->sgeCnt = nseg;
1315 
1316 			for (i = 0; i < nseg; i++) {
1317 				cur_size = segs[i].ds_len;
1318 				sg[i].addrHigh = 0;
1319 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1320 				sg[i].length = cur_size;
1321 				sg[i].flags = 0;
1322 				if (i == 0) {
1323 					raw->sgeFirstSize = cur_size;
1324 				} else if (i == 1) {
1325 					raw->sgeNominalSize = cur_size;
1326 					min_size = cur_size;
1327 				} else if ((i+1) < nseg &&
1328 					cur_size != raw->sgeNominalSize) {
1329 					conformable = FALSE;
1330 					if (cur_size < min_size)
1331 						min_size = cur_size;
1332 				}
1333 			}
1334 
1335 			/* not conformable: evaluate required sg elements */
1336 			if (!conformable) {
1337 				int j, err_found, nseg_new = nseg;
1338 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1339 					err_found = FALSE;
1340 					nseg_new = 2;
1341 					for (j = 1; j < nseg - 1; ++j) {
1342 						if (sg[j].length % (i*PAGE_SIZE)) {
1343 							err_found = TRUE;
1344 							break;
1345 						}
1346 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1347 					}
1348 					if (!err_found)
1349 						break;
1350 				}
1351 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1352 					!(sc->hint_flags & 4))
1353 					nseg = aac_convert_sgraw2(sc,
1354 						raw, i, nseg, nseg_new);
1355 			} else {
1356 				raw->flags |= RIO2_SGL_CONFORMANT;
1357 			}
1358 
1359 			for (i = 0; i < nseg; i++)
1360 				aac_sge_ieee1212_tole(sg + i);
1361 			aac_raw_io2_tole(raw);
1362 
1363 			/* update the FIB size for the s/g count */
1364 			fib->Header.Size += nseg *
1365 				sizeof(struct aac_sge_ieee1212);
1366 
1367 		} else if (fib->Header.Command == RawIo) {
1368 			struct aac_sg_tableraw *sg;
1369 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1370 			sg->SgCount = htole32(nseg);
1371 			for (i = 0; i < nseg; i++) {
1372 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1373 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1374 				sg->SgEntryRaw[i].Next = 0;
1375 				sg->SgEntryRaw[i].Prev = 0;
1376 				sg->SgEntryRaw[i].Flags = 0;
1377 				aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1378 			}
1379 			aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1380 			/* update the FIB size for the s/g count */
1381 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1382 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1383 			struct aac_sg_table *sg;
1384 			sg = cm->cm_sgtable;
1385 			sg->SgCount = htole32(nseg);
1386 			for (i = 0; i < nseg; i++) {
1387 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1388 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1389 				aac_sg_entry_tole(&sg->SgEntry[i]);
1390 			}
1391 			/* update the FIB size for the s/g count */
1392 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1393 		} else {
1394 			struct aac_sg_table64 *sg;
1395 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1396 			sg->SgCount = htole32(nseg);
1397 			for (i = 0; i < nseg; i++) {
1398 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1399 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1400 				aac_sg_entry64_tole(&sg->SgEntry64[i]);
1401 			}
1402 			/* update the FIB size for the s/g count */
1403 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1404 		}
1405 	}
1406 
1407 	/* Fix up the address values in the FIB.  Use the command array index
1408 	 * instead of a pointer since these fields are only 32 bits.  Shift
1409 	 * the SenderFibAddress over to make room for the fast response bit
1410 	 * and for the AIF bit
1411 	 */
1412 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1413 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1414 
1415 	/* save a pointer to the command for speedy reverse-lookup */
1416 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1417 
1418 	if (cm->cm_passthr_dmat == 0) {
1419 		if (cm->cm_flags & AAC_CMD_DATAIN)
1420 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1421 							BUS_DMASYNC_PREREAD);
1422 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1423 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1424 							BUS_DMASYNC_PREWRITE);
1425 	}
1426 
1427 	cm->cm_flags |= AAC_CMD_MAPPED;
1428 
1429 	if (cm->cm_flags & AAC_CMD_WAIT) {
1430 		aac_fib_header_tole(&fib->Header);
1431 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1432 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1433 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1434 		u_int32_t wait = 0;
1435 		sc->aac_sync_cm = cm;
1436 		aac_fib_header_tole(&fib->Header);
1437 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1438 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1439 	} else {
1440 		int count = 10000000L;
1441 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1442 			if (--count == 0) {
1443 				aac_unmap_command(cm);
1444 				sc->flags |= AAC_QUEUE_FRZN;
1445 				aac_requeue_ready(cm);
1446 			}
1447 			DELAY(5);			/* wait 5 usec. */
1448 		}
1449 	}
1450 }
1451 
1452 static int
1453 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1454 				   int pages, int nseg, int nseg_new)
1455 {
1456 	struct aac_sge_ieee1212 *sge;
1457 	int i, j, pos;
1458 	u_int32_t addr_low;
1459 
1460 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1461 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1462 	if (sge == NULL)
1463 		return nseg;
1464 
1465 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1466 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1467 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1468 			sge[pos].addrLow = addr_low;
1469 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1470 			if (addr_low < raw->sge[i].addrLow)
1471 				sge[pos].addrHigh++;
1472 			sge[pos].length = pages * PAGE_SIZE;
1473 			sge[pos].flags = 0;
1474 			pos++;
1475 		}
1476 	}
1477 	sge[pos] = raw->sge[nseg-1];
1478 	for (i = 1; i < nseg_new; ++i)
1479 		raw->sge[i] = sge[i];
1480 
1481 	free(sge, M_AACRAIDBUF);
1482 	raw->sgeCnt = nseg_new;
1483 	raw->flags |= RIO2_SGL_CONFORMANT;
1484 	raw->sgeNominalSize = pages * PAGE_SIZE;
1485 	return nseg_new;
1486 }
1487 
1488 /*
1489  * Unmap a command from controller-visible space.
1490  */
1491 static void
1492 aac_unmap_command(struct aac_command *cm)
1493 {
1494 	struct aac_softc *sc;
1495 
1496 	sc = cm->cm_sc;
1497 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1498 
1499 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1500 		return;
1501 
1502 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1503 		if (cm->cm_flags & AAC_CMD_DATAIN)
1504 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1505 					BUS_DMASYNC_POSTREAD);
1506 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1507 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1508 					BUS_DMASYNC_POSTWRITE);
1509 
1510 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1511 	}
1512 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1513 }
1514 
1515 /*
1516  * Hardware Interface
1517  */
1518 
1519 /*
1520  * Initialize the adapter.
1521  */
1522 static void
1523 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1524 {
1525 	struct aac_softc *sc;
1526 
1527 	sc = (struct aac_softc *)arg;
1528 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1529 
1530 	sc->aac_common_busaddr = segs[0].ds_addr;
1531 }
1532 
1533 static int
1534 aac_check_firmware(struct aac_softc *sc)
1535 {
1536 	u_int32_t code, major, minor, maxsize;
1537 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1538 	time_t then;
1539 
1540 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1541 
1542 	/* check if flash update is running */
1543 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1544 		then = time_uptime;
1545 		do {
1546 			code = AAC_GET_FWSTATUS(sc);
1547 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1548 				device_printf(sc->aac_dev,
1549 						  "FATAL: controller not coming ready, "
1550 						   "status %x\n", code);
1551 				return(ENXIO);
1552 			}
1553 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1554 		/*
1555 		 * Delay 10 seconds. Because right now FW is doing a soft reset,
1556 		 * do not read scratch pad register at this time
1557 		 */
1558 		waitCount = 10 * 10000;
1559 		while (waitCount) {
1560 			DELAY(100);		/* delay 100 microseconds */
1561 			waitCount--;
1562 		}
1563 	}
1564 
1565 	/*
1566 	 * Wait for the adapter to come ready.
1567 	 */
1568 	then = time_uptime;
1569 	do {
1570 		code = AAC_GET_FWSTATUS(sc);
1571 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1572 			device_printf(sc->aac_dev,
1573 				      "FATAL: controller not coming ready, "
1574 					   "status %x\n", code);
1575 			return(ENXIO);
1576 		}
1577 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1578 
1579 	/*
1580 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1581 	 * firmware version 1.x are not compatible with this driver.
1582 	 */
1583 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1584 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1585 				     NULL, NULL)) {
1586 			device_printf(sc->aac_dev,
1587 				      "Error reading firmware version\n");
1588 			return (EIO);
1589 		}
1590 
1591 		/* These numbers are stored as ASCII! */
1592 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1593 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1594 		if (major == 1) {
1595 			device_printf(sc->aac_dev,
1596 			    "Firmware version %d.%d is not supported.\n",
1597 			    major, minor);
1598 			return (EINVAL);
1599 		}
1600 	}
1601 	/*
1602 	 * Retrieve the capabilities/supported options word so we know what
1603 	 * work-arounds to enable.  Some firmware revs don't support this
1604 	 * command.
1605 	 */
1606 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1607 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1608 			device_printf(sc->aac_dev,
1609 			     "RequestAdapterInfo failed\n");
1610 			return (EIO);
1611 		}
1612 	} else {
1613 		options = AAC_GET_MAILBOX(sc, 1);
1614 		atu_size = AAC_GET_MAILBOX(sc, 2);
1615 		sc->supported_options = options;
1616 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1617 
1618 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1619 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1620 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1621 		if (options & AAC_SUPPORTED_NONDASD)
1622 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1623 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1624 			&& (sizeof(bus_addr_t) > 4)
1625 			&& (sc->hint_flags & 0x1)) {
1626 			device_printf(sc->aac_dev,
1627 			    "Enabling 64-bit address support\n");
1628 			sc->flags |= AAC_FLAGS_SG_64BIT;
1629 		}
1630 		if (sc->aac_if.aif_send_command) {
1631 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1632 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1633 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1634 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1635 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1636 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1637 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1638 		}
1639 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1640 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1641 	}
1642 
1643 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1644 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1645 		return (ENXIO);
1646 	}
1647 
1648 	if (sc->hint_flags & 2) {
1649 		device_printf(sc->aac_dev,
1650 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1651 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1652 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1653 		device_printf(sc->aac_dev,
1654 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1655 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1656 	}
1657 
1658 	/* Check for broken hardware that does a lower number of commands */
1659 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1660 
1661 	/* Remap mem. resource, if required */
1662 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1663 		bus_release_resource(
1664 			sc->aac_dev, SYS_RES_MEMORY,
1665 			sc->aac_regs_rid0, sc->aac_regs_res0);
1666 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1667 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1668 			atu_size, RF_ACTIVE);
1669 		if (sc->aac_regs_res0 == NULL) {
1670 			sc->aac_regs_res0 = bus_alloc_resource_any(
1671 				sc->aac_dev, SYS_RES_MEMORY,
1672 				&sc->aac_regs_rid0, RF_ACTIVE);
1673 			if (sc->aac_regs_res0 == NULL) {
1674 				device_printf(sc->aac_dev,
1675 					"couldn't allocate register window\n");
1676 				return (ENXIO);
1677 			}
1678 		}
1679 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1680 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1681 	}
1682 
1683 	/* Read preferred settings */
1684 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1685 	sc->aac_max_sectors = 128;				/* 64KB */
1686 	sc->aac_max_aif = 1;
1687 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1688 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1689 		 - sizeof(struct aac_blockwrite64))
1690 		 / sizeof(struct aac_sg_entry64);
1691 	else
1692 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 		 - sizeof(struct aac_blockwrite))
1694 		 / sizeof(struct aac_sg_entry);
1695 
1696 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1697 		options = AAC_GET_MAILBOX(sc, 1);
1698 		sc->aac_max_fib_size = (options & 0xFFFF);
1699 		sc->aac_max_sectors = (options >> 16) << 1;
1700 		options = AAC_GET_MAILBOX(sc, 2);
1701 		sc->aac_sg_tablesize = (options >> 16);
1702 		options = AAC_GET_MAILBOX(sc, 3);
1703 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1704 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1705 			sc->aac_max_fibs = (options & 0xFFFF);
1706 		options = AAC_GET_MAILBOX(sc, 4);
1707 		sc->aac_max_aif = (options & 0xFFFF);
1708 		options = AAC_GET_MAILBOX(sc, 5);
1709 		sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1710 	}
1711 
1712 	maxsize = sc->aac_max_fib_size + 31;
1713 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1714 		maxsize += sizeof(struct aac_fib_xporthdr);
1715 	if (maxsize > PAGE_SIZE) {
1716     	sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1717 		maxsize = PAGE_SIZE;
1718 	}
1719 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1720 
1721 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1722 		sc->flags |= AAC_FLAGS_RAW_IO;
1723 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1724 	}
1725 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1726 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1727 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1728 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1729 	}
1730 
1731 #ifdef AACRAID_DEBUG
1732 	aacraid_get_fw_debug_buffer(sc);
1733 #endif
1734 	return (0);
1735 }
1736 
1737 static int
1738 aac_init(struct aac_softc *sc)
1739 {
1740 	struct aac_adapter_init	*ip;
1741 	int i, error;
1742 
1743 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1744 
1745 	/* reset rrq index */
1746 	sc->aac_fibs_pushed_no = 0;
1747 	for (i = 0; i < sc->aac_max_msix; i++)
1748 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1749 
1750 	/*
1751 	 * Fill in the init structure.  This tells the adapter about the
1752 	 * physical location of various important shared data structures.
1753 	 */
1754 	ip = &sc->aac_common->ac_init;
1755 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1756 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1757 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1758 		sc->flags |= AAC_FLAGS_RAW_IO;
1759 	}
1760 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1761 
1762 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1763 					 offsetof(struct aac_common, ac_fibs);
1764 	ip->AdapterFibsVirtualAddress = 0;
1765 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1766 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1767 
1768 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1769 				  offsetof(struct aac_common, ac_printf);
1770 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1771 
1772 	/*
1773 	 * The adapter assumes that pages are 4K in size, except on some
1774  	 * broken firmware versions that do the page->byte conversion twice,
1775 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1776 	 * Round up since the granularity is so high.
1777 	 */
1778 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1779 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1780 		ip->HostPhysMemPages =
1781 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1782 	}
1783 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1784 
1785 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1786 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1787 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1788 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1789 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1790 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1791 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1792 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1793 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1794 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1795 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1796 	}
1797 	ip->MaxNumAif = sc->aac_max_aif;
1798 	ip->HostRRQ_AddrLow =
1799 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1800 	/* always 32-bit address */
1801 	ip->HostRRQ_AddrHigh = 0;
1802 
1803 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1804 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1805 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1806 		device_printf(sc->aac_dev, "Power Management enabled\n");
1807 	}
1808 
1809 	ip->MaxIoCommands = sc->aac_max_fibs;
1810 	ip->MaxIoSize = AAC_MAXIO_SIZE(sc);
1811 	ip->MaxFibSize = sc->aac_max_fib_size;
1812 
1813 	aac_adapter_init_tole(ip);
1814 
1815 	/*
1816 	 * Do controller-type-specific initialisation
1817 	 */
1818 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1819 
1820 	/*
1821 	 * Give the init structure to the controller.
1822 	 */
1823 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1824 			     sc->aac_common_busaddr +
1825 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1826 			     NULL, NULL)) {
1827 		device_printf(sc->aac_dev,
1828 			      "error establishing init structure\n");
1829 		error = EIO;
1830 		goto out;
1831 	}
1832 
1833 	/*
1834 	 * Check configuration issues
1835 	 */
1836 	if ((error = aac_check_config(sc)) != 0)
1837 		goto out;
1838 
1839 	error = 0;
1840 out:
1841 	return(error);
1842 }
1843 
1844 static void
1845 aac_define_int_mode(struct aac_softc *sc)
1846 {
1847 	device_t dev;
1848 	int cap, msi_count, error = 0;
1849 	uint32_t val;
1850 
1851 	dev = sc->aac_dev;
1852 
1853 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1854 		device_printf(dev, "using line interrupts\n");
1855 		sc->aac_max_msix = 1;
1856 		sc->aac_vector_cap = sc->aac_max_fibs;
1857 		return;
1858 	}
1859 
1860 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1861 	if (sc->aac_max_msix == 0) {
1862 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1863 			msi_count = 1;
1864 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1865 				device_printf(dev, "alloc msi failed - err=%d; "
1866 				    "will use INTx\n", error);
1867 				pci_release_msi(dev);
1868 			} else {
1869 				sc->msi_tupelo = TRUE;
1870 			}
1871 		}
1872 		if (sc->msi_tupelo)
1873 			device_printf(dev, "using MSI interrupts\n");
1874 		else
1875 			device_printf(dev, "using line interrupts\n");
1876 
1877 		sc->aac_max_msix = 1;
1878 		sc->aac_vector_cap = sc->aac_max_fibs;
1879 		return;
1880 	}
1881 
1882 	/* OS capability */
1883 	msi_count = pci_msix_count(dev);
1884 	if (msi_count > AAC_MAX_MSIX)
1885 		msi_count = AAC_MAX_MSIX;
1886 	if (msi_count > sc->aac_max_msix)
1887 		msi_count = sc->aac_max_msix;
1888 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1889 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1890 				   "will try MSI\n", msi_count, error);
1891 		pci_release_msi(dev);
1892 	} else {
1893 		sc->msi_enabled = TRUE;
1894 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1895 			msi_count);
1896 	}
1897 
1898 	if (!sc->msi_enabled) {
1899 		msi_count = 1;
1900 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1901 			device_printf(dev, "alloc msi failed - err=%d; "
1902 				           "will use INTx\n", error);
1903 			pci_release_msi(dev);
1904 		} else {
1905 			sc->msi_enabled = TRUE;
1906 			device_printf(dev, "using MSI interrupts\n");
1907 		}
1908 	}
1909 
1910 	if (sc->msi_enabled) {
1911 		/* now read controller capability from PCI config. space */
1912 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1913 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1914 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1915 			pci_release_msi(dev);
1916 			sc->msi_enabled = FALSE;
1917 		}
1918 	}
1919 
1920 	if (!sc->msi_enabled) {
1921 		device_printf(dev, "using legacy interrupts\n");
1922 		sc->aac_max_msix = 1;
1923 	} else {
1924 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1925 		if (sc->aac_max_msix > msi_count)
1926 			sc->aac_max_msix = msi_count;
1927 	}
1928 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1929 
1930 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1931 		sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1932 }
1933 
1934 static int
1935 aac_find_pci_capability(struct aac_softc *sc, int cap)
1936 {
1937 	device_t dev;
1938 	uint32_t status;
1939 	uint8_t ptr;
1940 
1941 	dev = sc->aac_dev;
1942 
1943 	status = pci_read_config(dev, PCIR_STATUS, 2);
1944 	if (!(status & PCIM_STATUS_CAPPRESENT))
1945 		return (0);
1946 
1947 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1948 	switch (status & PCIM_HDRTYPE) {
1949 	case 0:
1950 	case 1:
1951 		ptr = PCIR_CAP_PTR;
1952 		break;
1953 	case 2:
1954 		ptr = PCIR_CAP_PTR_2;
1955 		break;
1956 	default:
1957 		return (0);
1958 		break;
1959 	}
1960 	ptr = pci_read_config(dev, ptr, 1);
1961 
1962 	while (ptr != 0) {
1963 		int next, val;
1964 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1965 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1966 		if (val == cap)
1967 			return (ptr);
1968 		ptr = next;
1969 	}
1970 
1971 	return (0);
1972 }
1973 
1974 static int
1975 aac_setup_intr(struct aac_softc *sc)
1976 {
1977 	int i, msi_count, rid;
1978 	struct resource *res;
1979 	void *tag;
1980 
1981 	msi_count = sc->aac_max_msix;
1982 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1983 
1984 	for (i = 0; i < msi_count; i++, rid++) {
1985 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1986 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1987 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1988 			return (EINVAL);
1989 		}
1990 		sc->aac_irq_rid[i] = rid;
1991 		sc->aac_irq[i] = res;
1992 		if (aac_bus_setup_intr(sc->aac_dev, res,
1993 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1994 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1995 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1996 			return (EINVAL);
1997 		}
1998 		sc->aac_msix[i].vector_no = i;
1999 		sc->aac_msix[i].sc = sc;
2000 		sc->aac_intr[i] = tag;
2001 	}
2002 
2003 	return (0);
2004 }
2005 
2006 static int
2007 aac_check_config(struct aac_softc *sc)
2008 {
2009 	struct aac_fib *fib;
2010 	struct aac_cnt_config *ccfg;
2011 	struct aac_cf_status_hdr *cf_shdr;
2012 	int rval;
2013 
2014 	mtx_lock(&sc->aac_io_lock);
2015 	aac_alloc_sync_fib(sc, &fib);
2016 
2017 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2018 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2019 	ccfg->Command = VM_ContainerConfig;
2020 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2021 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2022 
2023 	aac_cnt_config_tole(ccfg);
2024 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2025 		sizeof (struct aac_cnt_config));
2026 	aac_cnt_config_toh(ccfg);
2027 
2028 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2029 	if (rval == 0 && ccfg->Command == ST_OK &&
2030 		ccfg->CTCommand.param[0] == CT_OK) {
2031 		if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2032 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2033 			ccfg->Command = VM_ContainerConfig;
2034 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2035 
2036 			aac_cnt_config_tole(ccfg);
2037 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2038 				sizeof (struct aac_cnt_config));
2039 			aac_cnt_config_toh(ccfg);
2040 
2041 			if (rval == 0 && ccfg->Command == ST_OK &&
2042 				ccfg->CTCommand.param[0] == CT_OK) {
2043 				/* successful completion */
2044 				rval = 0;
2045 			} else {
2046 				/* auto commit aborted due to error(s) */
2047 				rval = -2;
2048 			}
2049 		} else {
2050 			/* auto commit aborted due to adapter indicating
2051 			   config. issues too dangerous to auto commit  */
2052 			rval = -3;
2053 		}
2054 	} else {
2055 		/* error */
2056 		rval = -1;
2057 	}
2058 
2059 	aac_release_sync_fib(sc);
2060 	mtx_unlock(&sc->aac_io_lock);
2061 	return(rval);
2062 }
2063 
2064 /*
2065  * Send a synchronous command to the controller and wait for a result.
2066  * Indicate if the controller completed the command with an error status.
2067  */
2068 int
2069 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2070 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2071 		 u_int32_t *sp, u_int32_t *r1)
2072 {
2073 	time_t then;
2074 	u_int32_t status;
2075 
2076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2077 
2078 	/* populate the mailbox */
2079 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2080 
2081 	/* ensure the sync command doorbell flag is cleared */
2082 	if (!sc->msi_enabled)
2083 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2084 
2085 	/* then set it to signal the adapter */
2086 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2087 
2088 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2089 		/* spin waiting for the command to complete */
2090 		then = time_uptime;
2091 		do {
2092 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2093 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2094 				return(EIO);
2095 			}
2096 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2097 
2098 		/* clear the completion flag */
2099 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2100 
2101 		/* get the command status */
2102 		status = AAC_GET_MAILBOX(sc, 0);
2103 		if (sp != NULL)
2104 			*sp = status;
2105 
2106 		/* return parameter */
2107 		if (r1 != NULL)
2108 			*r1 = AAC_GET_MAILBOX(sc, 1);
2109 
2110 		if (status != AAC_SRB_STS_SUCCESS)
2111 			return (-1);
2112 	}
2113 	return(0);
2114 }
2115 
2116 static int
2117 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2118 		 struct aac_fib *fib, u_int16_t datasize)
2119 {
2120 	uint32_t ReceiverFibAddress;
2121 
2122 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2123 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2124 
2125 	if (datasize > AAC_FIB_DATASIZE)
2126 		return(EINVAL);
2127 
2128 	/*
2129 	 * Set up the sync FIB
2130 	 */
2131 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2132 				AAC_FIBSTATE_INITIALISED |
2133 				AAC_FIBSTATE_EMPTY;
2134 	fib->Header.XferState |= xferstate;
2135 	fib->Header.Command = command;
2136 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2137 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2138 	fib->Header.SenderSize = sizeof(struct aac_fib);
2139 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2140 	ReceiverFibAddress = sc->aac_common_busaddr +
2141 		offsetof(struct aac_common, ac_sync_fib);
2142 	fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2143 	aac_fib_header_tole(&fib->Header);
2144 
2145 	/*
2146 	 * Give the FIB to the controller, wait for a response.
2147 	 */
2148 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2149 		ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2150 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2151 		aac_fib_header_toh(&fib->Header);
2152 		return(EIO);
2153 	}
2154 
2155 	aac_fib_header_toh(&fib->Header);
2156 	return (0);
2157 }
2158 
2159 /*
2160  * Check for commands that have been outstanding for a suspiciously long time,
2161  * and complain about them.
2162  */
2163 static void
2164 aac_timeout(struct aac_softc *sc)
2165 {
2166 	struct aac_command *cm;
2167 	time_t deadline;
2168 	int timedout;
2169 
2170 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2171 	/*
2172 	 * Traverse the busy command list, bitch about late commands once
2173 	 * only.
2174 	 */
2175 	timedout = 0;
2176 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2177 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2178 		if (cm->cm_timestamp < deadline) {
2179 			device_printf(sc->aac_dev,
2180 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2181 				      cm, (int)(time_uptime-cm->cm_timestamp));
2182 			AAC_PRINT_FIB(sc, cm->cm_fib);
2183 			timedout++;
2184 		}
2185 	}
2186 
2187 	if (timedout)
2188 		aac_reset_adapter(sc);
2189 	aacraid_print_queues(sc);
2190 }
2191 
2192 /*
2193  * Interface Function Vectors
2194  */
2195 
2196 /*
2197  * Read the current firmware status word.
2198  */
2199 static int
2200 aac_src_get_fwstatus(struct aac_softc *sc)
2201 {
2202 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2203 
2204 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2205 }
2206 
2207 /*
2208  * Notify the controller of a change in a given queue
2209  */
2210 static void
2211 aac_src_qnotify(struct aac_softc *sc, int qbit)
2212 {
2213 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2214 
2215 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2216 }
2217 
2218 /*
2219  * Get the interrupt reason bits
2220  */
2221 static int
2222 aac_src_get_istatus(struct aac_softc *sc)
2223 {
2224 	int val;
2225 
2226 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2227 
2228 	if (sc->msi_enabled) {
2229 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2230 		if (val & AAC_MSI_SYNC_STATUS)
2231 			val = AAC_DB_SYNC_COMMAND;
2232 		else
2233 			val = 0;
2234 	} else {
2235 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2236 	}
2237 	return(val);
2238 }
2239 
2240 /*
2241  * Clear some interrupt reason bits
2242  */
2243 static void
2244 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2245 {
2246 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2247 
2248 	if (sc->msi_enabled) {
2249 		if (mask == AAC_DB_SYNC_COMMAND)
2250 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2251 	} else {
2252 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2253 	}
2254 }
2255 
2256 /*
2257  * Populate the mailbox and set the command word
2258  */
2259 static void
2260 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2261 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2262 {
2263 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2264 
2265 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2266 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2267 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2268 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2269 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2270 }
2271 
2272 static void
2273 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2274 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2275 {
2276 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2277 
2278 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2279 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2280 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2281 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2282 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2283 }
2284 
2285 /*
2286  * Fetch the immediate command status word
2287  */
2288 static int
2289 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2290 {
2291 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2292 
2293 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2294 }
2295 
2296 static int
2297 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2298 {
2299 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2300 
2301 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2302 }
2303 
2304 /*
2305  * Set/clear interrupt masks
2306  */
2307 static void
2308 aac_src_access_devreg(struct aac_softc *sc, int mode)
2309 {
2310 	u_int32_t val;
2311 
2312 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2313 
2314 	switch (mode) {
2315 	case AAC_ENABLE_INTERRUPT:
2316 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2317 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2318 				           AAC_INT_ENABLE_TYPE1_INTX));
2319 		break;
2320 
2321 	case AAC_DISABLE_INTERRUPT:
2322 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2323 		break;
2324 
2325 	case AAC_ENABLE_MSIX:
2326 		/* set bit 6 */
2327 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2328 		val |= 0x40;
2329 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2330 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 		/* unmask int. */
2332 		val = PMC_ALL_INTERRUPT_BITS;
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2334 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2335 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2336 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2337 		break;
2338 
2339 	case AAC_DISABLE_MSIX:
2340 		/* reset bit 6 */
2341 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2342 		val &= ~0x40;
2343 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2344 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2345 		break;
2346 
2347 	case AAC_CLEAR_AIF_BIT:
2348 		/* set bit 5 */
2349 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2350 		val |= 0x20;
2351 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2352 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2353 		break;
2354 
2355 	case AAC_CLEAR_SYNC_BIT:
2356 		/* set bit 4 */
2357 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2358 		val |= 0x10;
2359 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2360 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2361 		break;
2362 
2363 	case AAC_ENABLE_INTX:
2364 		/* set bit 7 */
2365 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2366 		val |= 0x80;
2367 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2368 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2369 		/* unmask int. */
2370 		val = PMC_ALL_INTERRUPT_BITS;
2371 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2372 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2373 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2374 			val & (~(PMC_GLOBAL_INT_BIT2)));
2375 		break;
2376 
2377 	default:
2378 		break;
2379 	}
2380 }
2381 
2382 /*
2383  * New comm. interface: Send command functions
2384  */
2385 static int
2386 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2387 {
2388 	struct aac_fib_xporthdr *pFibX;
2389 	u_int32_t fibsize, high_addr;
2390 	u_int64_t address;
2391 
2392 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2393 
2394 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2395 		sc->aac_max_msix > 1) {
2396 		u_int16_t vector_no, first_choice = 0xffff;
2397 
2398 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2399 		do {
2400 			vector_no += 1;
2401 			if (vector_no == sc->aac_max_msix)
2402 				vector_no = 1;
2403 			if (sc->aac_rrq_outstanding[vector_no] <
2404 				sc->aac_vector_cap)
2405 				break;
2406 			if (0xffff == first_choice)
2407 				first_choice = vector_no;
2408 			else if (vector_no == first_choice)
2409 				break;
2410 		} while (1);
2411 		if (vector_no == first_choice)
2412 			vector_no = 0;
2413 		sc->aac_rrq_outstanding[vector_no]++;
2414 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2415 			sc->aac_fibs_pushed_no = 0;
2416 		else
2417 			sc->aac_fibs_pushed_no++;
2418 
2419 		cm->cm_fib->Header.Handle += (vector_no << 16);
2420 	}
2421 
2422 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2423 		/* Calculate the amount to the fibsize bits */
2424 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2425 		/* Fill new FIB header */
2426 		address = cm->cm_fibphys;
2427 		high_addr = (u_int32_t)(address >> 32);
2428 		if (high_addr == 0L) {
2429 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2430 			cm->cm_fib->Header.u.TimeStamp = 0L;
2431 		} else {
2432 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2433 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2434 		}
2435 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2436 	} else {
2437 		/* Calculate the amount to the fibsize bits */
2438 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2439 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2440 		/* Fill XPORT header */
2441 		pFibX = (struct aac_fib_xporthdr *)
2442 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2443 		pFibX->Handle = cm->cm_fib->Header.Handle;
2444 		pFibX->HostAddress = cm->cm_fibphys;
2445 		pFibX->Size = cm->cm_fib->Header.Size;
2446 		aac_fib_xporthdr_tole(pFibX);
2447 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2448 		high_addr = (u_int32_t)(address >> 32);
2449 	}
2450 
2451 	aac_fib_header_tole(&cm->cm_fib->Header);
2452 
2453 	if (fibsize > 31)
2454 		fibsize = 31;
2455 	aac_enqueue_busy(cm);
2456 	if (high_addr) {
2457 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2458 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2459 	} else {
2460 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2461 	}
2462 	return 0;
2463 }
2464 
2465 /*
2466  * New comm. interface: get, set outbound queue index
2467  */
2468 static int
2469 aac_src_get_outb_queue(struct aac_softc *sc)
2470 {
2471 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2472 
2473 	return(-1);
2474 }
2475 
2476 static void
2477 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2478 {
2479 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2480 }
2481 
2482 /*
2483  * Debugging and Diagnostics
2484  */
2485 
2486 /*
2487  * Print some information about the controller.
2488  */
2489 static void
2490 aac_describe_controller(struct aac_softc *sc)
2491 {
2492 	struct aac_fib *fib;
2493 	struct aac_adapter_info	*info;
2494 	char *adapter_type = "Adaptec RAID controller";
2495 
2496 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2497 
2498 	mtx_lock(&sc->aac_io_lock);
2499 	aac_alloc_sync_fib(sc, &fib);
2500 
2501 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2502 		fib->data[0] = 0;
2503 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2504 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2505 		else {
2506 			struct aac_supplement_adapter_info *supp_info;
2507 
2508 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2509 			adapter_type = (char *)supp_info->AdapterTypeText;
2510 			sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2511 			sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2512 		}
2513 	}
2514 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2515 		adapter_type,
2516 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2517 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2518 
2519 	fib->data[0] = 0;
2520 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2521 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2522 		aac_release_sync_fib(sc);
2523 		mtx_unlock(&sc->aac_io_lock);
2524 		return;
2525 	}
2526 
2527 	/* save the kernel revision structure for later use */
2528 	info = (struct aac_adapter_info *)&fib->data[0];
2529 	aac_adapter_info_toh(info);
2530 	sc->aac_revision = info->KernelRevision;
2531 
2532 	if (bootverbose) {
2533 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2534 		    "(%dMB cache, %dMB execution), %s\n",
2535 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2536 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2537 		    info->BufferMem / (1024 * 1024),
2538 		    info->ExecutionMem / (1024 * 1024),
2539 		    aac_describe_code(aac_battery_platform,
2540 		    info->batteryPlatform));
2541 
2542 		device_printf(sc->aac_dev,
2543 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2544 		    info->KernelRevision.external.comp.major,
2545 		    info->KernelRevision.external.comp.minor,
2546 		    info->KernelRevision.external.comp.dash,
2547 		    info->KernelRevision.buildNumber,
2548 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2549 
2550 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2551 			      sc->supported_options,
2552 			      "\20"
2553 			      "\1SNAPSHOT"
2554 			      "\2CLUSTERS"
2555 			      "\3WCACHE"
2556 			      "\4DATA64"
2557 			      "\5HOSTTIME"
2558 			      "\6RAID50"
2559 			      "\7WINDOW4GB"
2560 			      "\10SCSIUPGD"
2561 			      "\11SOFTERR"
2562 			      "\12NORECOND"
2563 			      "\13SGMAP64"
2564 			      "\14ALARM"
2565 			      "\15NONDASD"
2566 			      "\16SCSIMGT"
2567 			      "\17RAIDSCSI"
2568 			      "\21ADPTINFO"
2569 			      "\22NEWCOMM"
2570 			      "\23ARRAY64BIT"
2571 			      "\24HEATSENSOR");
2572 	}
2573 
2574 	aac_release_sync_fib(sc);
2575 	mtx_unlock(&sc->aac_io_lock);
2576 }
2577 
2578 /*
2579  * Look up a text description of a numeric error code and return a pointer to
2580  * same.
2581  */
2582 static char *
2583 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2584 {
2585 	int i;
2586 
2587 	for (i = 0; table[i].string != NULL; i++)
2588 		if (table[i].code == code)
2589 			return(table[i].string);
2590 	return(table[i + 1].string);
2591 }
2592 
2593 /*
2594  * Management Interface
2595  */
2596 
2597 static int
2598 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2599 {
2600 	struct aac_softc *sc;
2601 
2602 	sc = dev->si_drv1;
2603 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2604 	device_busy(sc->aac_dev);
2605 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2606 	return 0;
2607 }
2608 
2609 static int
2610 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2611 {
2612 	union aac_statrequest *as;
2613 	struct aac_softc *sc;
2614 	int error = 0;
2615 
2616 	as = (union aac_statrequest *)arg;
2617 	sc = dev->si_drv1;
2618 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2619 
2620 	switch (cmd) {
2621 	case AACIO_STATS:
2622 		switch (as->as_item) {
2623 		case AACQ_FREE:
2624 		case AACQ_READY:
2625 		case AACQ_BUSY:
2626 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2627 			      sizeof(struct aac_qstat));
2628 			break;
2629 		default:
2630 			error = ENOENT;
2631 			break;
2632 		}
2633 	break;
2634 
2635 	case FSACTL_SENDFIB:
2636 	case FSACTL_SEND_LARGE_FIB:
2637 		arg = *(caddr_t*)arg;
2638 	case FSACTL_LNX_SENDFIB:
2639 	case FSACTL_LNX_SEND_LARGE_FIB:
2640 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2641 		error = aac_ioctl_sendfib(sc, arg);
2642 		break;
2643 	case FSACTL_SEND_RAW_SRB:
2644 		arg = *(caddr_t*)arg;
2645 	case FSACTL_LNX_SEND_RAW_SRB:
2646 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2647 		error = aac_ioctl_send_raw_srb(sc, arg);
2648 		break;
2649 	case FSACTL_AIF_THREAD:
2650 	case FSACTL_LNX_AIF_THREAD:
2651 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2652 		error = EINVAL;
2653 		break;
2654 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2655 		arg = *(caddr_t*)arg;
2656 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2657 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2658 		error = aac_open_aif(sc, arg);
2659 		break;
2660 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2661 		arg = *(caddr_t*)arg;
2662 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2663 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2664 		error = aac_getnext_aif(sc, arg);
2665 		break;
2666 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2667 		arg = *(caddr_t*)arg;
2668 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2669 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2670 		error = aac_close_aif(sc, arg);
2671 		break;
2672 	case FSACTL_MINIPORT_REV_CHECK:
2673 		arg = *(caddr_t*)arg;
2674 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2675 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2676 		error = aac_rev_check(sc, arg);
2677 		break;
2678 	case FSACTL_QUERY_DISK:
2679 		arg = *(caddr_t*)arg;
2680 	case FSACTL_LNX_QUERY_DISK:
2681 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2682 		error = aac_query_disk(sc, arg);
2683 		break;
2684 	case FSACTL_DELETE_DISK:
2685 	case FSACTL_LNX_DELETE_DISK:
2686 		/*
2687 		 * We don't trust the underland to tell us when to delete a
2688 		 * container, rather we rely on an AIF coming from the
2689 		 * controller
2690 		 */
2691 		error = 0;
2692 		break;
2693 	case FSACTL_GET_PCI_INFO:
2694 		arg = *(caddr_t*)arg;
2695 	case FSACTL_LNX_GET_PCI_INFO:
2696 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2697 		error = aac_get_pci_info(sc, arg);
2698 		break;
2699 	case FSACTL_GET_FEATURES:
2700 		arg = *(caddr_t*)arg;
2701 	case FSACTL_LNX_GET_FEATURES:
2702 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2703 		error = aac_supported_features(sc, arg);
2704 		break;
2705 	default:
2706 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2707 		error = EINVAL;
2708 		break;
2709 	}
2710 	return(error);
2711 }
2712 
2713 static int
2714 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2715 {
2716 	struct aac_softc *sc;
2717 	struct aac_fib_context *ctx;
2718 	int revents;
2719 
2720 	sc = dev->si_drv1;
2721 	revents = 0;
2722 
2723 	mtx_lock(&sc->aac_io_lock);
2724 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2725 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2726 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2727 				revents |= poll_events & (POLLIN | POLLRDNORM);
2728 				break;
2729 			}
2730 		}
2731 	}
2732 	mtx_unlock(&sc->aac_io_lock);
2733 
2734 	if (revents == 0) {
2735 		if (poll_events & (POLLIN | POLLRDNORM))
2736 			selrecord(td, &sc->rcv_select);
2737 	}
2738 
2739 	return (revents);
2740 }
2741 
2742 static void
2743 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2744 {
2745 
2746 	switch (event->ev_type) {
2747 	case AAC_EVENT_CMFREE:
2748 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2749 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2750 			aacraid_add_event(sc, event);
2751 			return;
2752 		}
2753 		free(event, M_AACRAIDBUF);
2754 		wakeup(arg);
2755 		break;
2756 	default:
2757 		break;
2758 	}
2759 }
2760 
2761 /*
2762  * Send a FIB supplied from userspace
2763  *
2764  * Currently, sending a FIB from userspace in BE hosts is not supported.
2765  * There are several things that need to be considered in order to
2766  * support this, such as:
2767  * - At least the FIB data part from userspace should already be in LE,
2768  *   or else the kernel would need to know all FIB types to be able to
2769  *   correctly convert it to BE.
2770  * - SG tables are converted to BE by aacraid_map_command_sg(). This
2771  *   conversion should be supressed if the FIB comes from userspace.
2772  * - aacraid_wait_command() calls functions that convert the FIB header
2773  *   to LE. But if the header is already in LE, the conversion should not
2774  *   be performed.
2775  */
2776 static int
2777 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2778 {
2779 	struct aac_command *cm;
2780 	int size, error;
2781 
2782 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2783 
2784 	cm = NULL;
2785 
2786 	/*
2787 	 * Get a command
2788 	 */
2789 	mtx_lock(&sc->aac_io_lock);
2790 	if (aacraid_alloc_command(sc, &cm)) {
2791 		struct aac_event *event;
2792 
2793 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2794 		    M_NOWAIT | M_ZERO);
2795 		if (event == NULL) {
2796 			error = EBUSY;
2797 			mtx_unlock(&sc->aac_io_lock);
2798 			goto out;
2799 		}
2800 		event->ev_type = AAC_EVENT_CMFREE;
2801 		event->ev_callback = aac_ioctl_event;
2802 		event->ev_arg = &cm;
2803 		aacraid_add_event(sc, event);
2804 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2805 	}
2806 	mtx_unlock(&sc->aac_io_lock);
2807 
2808 	/*
2809 	 * Fetch the FIB header, then re-copy to get data as well.
2810 	 */
2811 	if ((error = copyin(ufib, cm->cm_fib,
2812 			    sizeof(struct aac_fib_header))) != 0)
2813 		goto out;
2814 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2815 	if (size > sc->aac_max_fib_size) {
2816 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2817 			      size, sc->aac_max_fib_size);
2818 		size = sc->aac_max_fib_size;
2819 	}
2820 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2821 		goto out;
2822 	cm->cm_fib->Header.Size = size;
2823 	cm->cm_timestamp = time_uptime;
2824 	cm->cm_datalen = 0;
2825 
2826 	/*
2827 	 * Pass the FIB to the controller, wait for it to complete.
2828 	 */
2829 	mtx_lock(&sc->aac_io_lock);
2830 	error = aacraid_wait_command(cm);
2831 	mtx_unlock(&sc->aac_io_lock);
2832 	if (error != 0) {
2833 		device_printf(sc->aac_dev,
2834 			      "aacraid_wait_command return %d\n", error);
2835 		goto out;
2836 	}
2837 
2838 	/*
2839 	 * Copy the FIB and data back out to the caller.
2840 	 */
2841 	size = cm->cm_fib->Header.Size;
2842 	if (size > sc->aac_max_fib_size) {
2843 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2844 			      size, sc->aac_max_fib_size);
2845 		size = sc->aac_max_fib_size;
2846 	}
2847 	error = copyout(cm->cm_fib, ufib, size);
2848 
2849 out:
2850 	if (cm != NULL) {
2851 		mtx_lock(&sc->aac_io_lock);
2852 		aacraid_release_command(cm);
2853 		mtx_unlock(&sc->aac_io_lock);
2854 	}
2855 	return(error);
2856 }
2857 
2858 /*
2859  * Send a passthrough FIB supplied from userspace
2860  */
2861 static int
2862 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2863 {
2864 	struct aac_command *cm;
2865 	struct aac_fib *fib;
2866 	struct aac_srb *srbcmd;
2867 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2868 	void *user_reply;
2869 	int error, transfer_data = 0;
2870 	bus_dmamap_t orig_map = 0;
2871 	u_int32_t fibsize = 0;
2872 	u_int64_t srb_sg_address;
2873 	u_int32_t srb_sg_bytecount;
2874 
2875 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2876 
2877 	cm = NULL;
2878 
2879 	mtx_lock(&sc->aac_io_lock);
2880 	if (aacraid_alloc_command(sc, &cm)) {
2881 		struct aac_event *event;
2882 
2883 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2884 		    M_NOWAIT | M_ZERO);
2885 		if (event == NULL) {
2886 			error = EBUSY;
2887 			mtx_unlock(&sc->aac_io_lock);
2888 			goto out;
2889 		}
2890 		event->ev_type = AAC_EVENT_CMFREE;
2891 		event->ev_callback = aac_ioctl_event;
2892 		event->ev_arg = &cm;
2893 		aacraid_add_event(sc, event);
2894 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2895 	}
2896 	mtx_unlock(&sc->aac_io_lock);
2897 
2898 	cm->cm_data = NULL;
2899 	/* save original dma map */
2900 	orig_map = cm->cm_datamap;
2901 
2902 	fib = cm->cm_fib;
2903 	srbcmd = (struct aac_srb *)fib->data;
2904 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2905 	    sizeof (u_int32_t))) != 0)
2906 		goto out;
2907 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2908 		error = EINVAL;
2909 		goto out;
2910 	}
2911 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2912 		goto out;
2913 
2914 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2915 	srbcmd->retry_limit = 0;	/* obsolete */
2916 
2917 	/* only one sg element from userspace supported */
2918 	if (srbcmd->sg_map.SgCount > 1) {
2919 		error = EINVAL;
2920 		goto out;
2921 	}
2922 	/* check fibsize */
2923 	if (fibsize == (sizeof(struct aac_srb) +
2924 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2925 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2926 		struct aac_sg_entry sg;
2927 
2928 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2929 			goto out;
2930 
2931 		srb_sg_bytecount = sg.SgByteCount;
2932 		srb_sg_address = (u_int64_t)sg.SgAddress;
2933 	} else if (fibsize == (sizeof(struct aac_srb) +
2934 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2935 #ifdef __LP64__
2936 		struct aac_sg_entry64 *sgp =
2937 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2938 		struct aac_sg_entry64 sg;
2939 
2940 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2941 			goto out;
2942 
2943 		srb_sg_bytecount = sg.SgByteCount;
2944 		srb_sg_address = sg.SgAddress;
2945 #else
2946 		error = EINVAL;
2947 		goto out;
2948 #endif
2949 	} else {
2950 		error = EINVAL;
2951 		goto out;
2952 	}
2953 	user_reply = (char *)arg + fibsize;
2954 	srbcmd->data_len = srb_sg_bytecount;
2955 	if (srbcmd->sg_map.SgCount == 1)
2956 		transfer_data = 1;
2957 
2958 	if (transfer_data) {
2959 		/*
2960 		 * Create DMA tag for the passthr. data buffer and allocate it.
2961 		 */
2962 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2963 			1, 0,			/* algnmnt, boundary */
2964 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2965 			BUS_SPACE_MAXADDR_32BIT :
2966 			0x7fffffff,		/* lowaddr */
2967 			BUS_SPACE_MAXADDR, 	/* highaddr */
2968 			NULL, NULL, 		/* filter, filterarg */
2969 			srb_sg_bytecount, 	/* size */
2970 			sc->aac_sg_tablesize,	/* nsegments */
2971 			srb_sg_bytecount, 	/* maxsegsize */
2972 			0,			/* flags */
2973 			NULL, NULL,		/* No locking needed */
2974 			&cm->cm_passthr_dmat)) {
2975 			error = ENOMEM;
2976 			goto out;
2977 		}
2978 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2979 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2980 			error = ENOMEM;
2981 			goto out;
2982 		}
2983 		/* fill some cm variables */
2984 		cm->cm_datalen = srb_sg_bytecount;
2985 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2986 			cm->cm_flags |= AAC_CMD_DATAIN;
2987 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2988 			cm->cm_flags |= AAC_CMD_DATAOUT;
2989 
2990 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2991 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2992 				cm->cm_data, cm->cm_datalen)) != 0)
2993 				goto out;
2994 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2995 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2996 				BUS_DMASYNC_PREWRITE);
2997 		}
2998 	}
2999 
3000 	/* build the FIB */
3001 	fib->Header.Size = sizeof(struct aac_fib_header) +
3002 		sizeof(struct aac_srb);
3003 	fib->Header.XferState =
3004 		AAC_FIBSTATE_HOSTOWNED   |
3005 		AAC_FIBSTATE_INITIALISED |
3006 		AAC_FIBSTATE_EMPTY	 |
3007 		AAC_FIBSTATE_FROMHOST	 |
3008 		AAC_FIBSTATE_REXPECTED   |
3009 		AAC_FIBSTATE_NORM	 |
3010 		AAC_FIBSTATE_ASYNC;
3011 
3012 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3013 		ScsiPortCommandU64 : ScsiPortCommand;
3014 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3015 
3016 	aac_srb_tole(srbcmd);
3017 
3018 	/* send command */
3019 	if (transfer_data) {
3020 		bus_dmamap_load(cm->cm_passthr_dmat,
3021 			cm->cm_datamap, cm->cm_data,
3022 			cm->cm_datalen,
3023 			aacraid_map_command_sg, cm, 0);
3024 	} else {
3025 		aacraid_map_command_sg(cm, NULL, 0, 0);
3026 	}
3027 
3028 	/* wait for completion */
3029 	mtx_lock(&sc->aac_io_lock);
3030 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3031 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3032 	mtx_unlock(&sc->aac_io_lock);
3033 
3034 	/* copy data */
3035 	if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3036 		if ((error = copyout(cm->cm_data,
3037 			(void *)(uintptr_t)srb_sg_address,
3038 			cm->cm_datalen)) != 0)
3039 			goto out;
3040 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3041 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3042 				BUS_DMASYNC_POSTREAD);
3043 	}
3044 
3045 	/* status */
3046 	aac_srb_response_toh((struct aac_srb_response *)fib->data);
3047 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3048 
3049 out:
3050 	if (cm && cm->cm_data) {
3051 		if (transfer_data)
3052 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3053 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3054 		cm->cm_datamap = orig_map;
3055 	}
3056 	if (cm && cm->cm_passthr_dmat)
3057 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3058 	if (cm) {
3059 		mtx_lock(&sc->aac_io_lock);
3060 		aacraid_release_command(cm);
3061 		mtx_unlock(&sc->aac_io_lock);
3062 	}
3063 	return(error);
3064 }
3065 
3066 /*
3067  * Request an AIF from the controller (new comm. type1)
3068  */
3069 static void
3070 aac_request_aif(struct aac_softc *sc)
3071 {
3072 	struct aac_command *cm;
3073 	struct aac_fib *fib;
3074 
3075 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3076 
3077 	if (aacraid_alloc_command(sc, &cm)) {
3078 		sc->aif_pending = 1;
3079 		return;
3080 	}
3081 	sc->aif_pending = 0;
3082 
3083 	/* build the FIB */
3084 	fib = cm->cm_fib;
3085 	fib->Header.Size = sizeof(struct aac_fib);
3086 	fib->Header.XferState =
3087         AAC_FIBSTATE_HOSTOWNED   |
3088         AAC_FIBSTATE_INITIALISED |
3089         AAC_FIBSTATE_EMPTY	 |
3090         AAC_FIBSTATE_FROMHOST	 |
3091         AAC_FIBSTATE_REXPECTED   |
3092         AAC_FIBSTATE_NORM	 |
3093         AAC_FIBSTATE_ASYNC;
3094 	/* set AIF marker */
3095 	fib->Header.Handle = 0x00800000;
3096 	fib->Header.Command = AifRequest;
3097 	((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
3098 
3099 	aacraid_map_command_sg(cm, NULL, 0, 0);
3100 }
3101 
3102 /*
3103  * cdevpriv interface private destructor.
3104  */
3105 static void
3106 aac_cdevpriv_dtor(void *arg)
3107 {
3108 	struct aac_softc *sc;
3109 
3110 	sc = arg;
3111 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3112 	device_unbusy(sc->aac_dev);
3113 }
3114 
3115 /*
3116  * Handle an AIF sent to us by the controller; queue it for later reference.
3117  * If the queue fills up, then drop the older entries.
3118  */
3119 static void
3120 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3121 {
3122 	struct aac_aif_command *aif;
3123 	struct aac_container *co, *co_next;
3124 	struct aac_fib_context *ctx;
3125 	struct aac_fib *sync_fib;
3126 	struct aac_mntinforesp mir;
3127 	int next, current, found;
3128 	int count = 0, changed = 0, i = 0;
3129 	u_int32_t channel, uid;
3130 
3131 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3132 
3133 	aif = (struct aac_aif_command*)&fib->data[0];
3134 	aacraid_print_aif(sc, aif);
3135 
3136 	/* Is it an event that we should care about? */
3137 	switch (le32toh(aif->command)) {
3138 	case AifCmdEventNotify:
3139 		switch (le32toh(aif->data.EN.type)) {
3140 		case AifEnAddContainer:
3141 		case AifEnDeleteContainer:
3142 			/*
3143 			 * A container was added or deleted, but the message
3144 			 * doesn't tell us anything else!  Re-enumerate the
3145 			 * containers and sort things out.
3146 			 */
3147 			aac_alloc_sync_fib(sc, &sync_fib);
3148 			do {
3149 				/*
3150 				 * Ask the controller for its containers one at
3151 				 * a time.
3152 				 * XXX What if the controller's list changes
3153 				 * midway through this enumaration?
3154 				 * XXX This should be done async.
3155 				 */
3156 				if (aac_get_container_info(sc, sync_fib, i,
3157 					&mir, &uid) != 0)
3158 					continue;
3159 				if (i == 0)
3160 					count = mir.MntRespCount;
3161 				/*
3162 				 * Check the container against our list.
3163 				 * co->co_found was already set to 0 in a
3164 				 * previous run.
3165 				 */
3166 				if ((mir.Status == ST_OK) &&
3167 				    (mir.MntTable[0].VolType != CT_NONE)) {
3168 					found = 0;
3169 					TAILQ_FOREACH(co,
3170 						      &sc->aac_container_tqh,
3171 						      co_link) {
3172 						if (co->co_mntobj.ObjectId ==
3173 						    mir.MntTable[0].ObjectId) {
3174 							co->co_found = 1;
3175 							found = 1;
3176 							break;
3177 						}
3178 					}
3179 					/*
3180 					 * If the container matched, continue
3181 					 * in the list.
3182 					 */
3183 					if (found) {
3184 						i++;
3185 						continue;
3186 					}
3187 
3188 					/*
3189 					 * This is a new container.  Do all the
3190 					 * appropriate things to set it up.
3191 					 */
3192 					aac_add_container(sc, &mir, 1, uid);
3193 					changed = 1;
3194 				}
3195 				i++;
3196 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3197 			aac_release_sync_fib(sc);
3198 
3199 			/*
3200 			 * Go through our list of containers and see which ones
3201 			 * were not marked 'found'.  Since the controller didn't
3202 			 * list them they must have been deleted.  Do the
3203 			 * appropriate steps to destroy the device.  Also reset
3204 			 * the co->co_found field.
3205 			 */
3206 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3207 			while (co != NULL) {
3208 				if (co->co_found == 0) {
3209 					co_next = TAILQ_NEXT(co, co_link);
3210 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3211 						     co_link);
3212 					free(co, M_AACRAIDBUF);
3213 					changed = 1;
3214 					co = co_next;
3215 				} else {
3216 					co->co_found = 0;
3217 					co = TAILQ_NEXT(co, co_link);
3218 				}
3219 			}
3220 
3221 			/* Attach the newly created containers */
3222 			if (changed) {
3223 				if (sc->cam_rescan_cb != NULL)
3224 					sc->cam_rescan_cb(sc, 0,
3225 				    	AAC_CAM_TARGET_WILDCARD);
3226 			}
3227 
3228 			break;
3229 
3230 		case AifEnEnclosureManagement:
3231 			switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3232 			case AIF_EM_DRIVE_INSERTION:
3233 			case AIF_EM_DRIVE_REMOVAL:
3234 				channel = le32toh(aif->data.EN.data.EEE.unitID);
3235 				if (sc->cam_rescan_cb != NULL)
3236 					sc->cam_rescan_cb(sc,
3237 					    ((channel>>24) & 0xF) + 1,
3238 					    (channel & 0xFFFF));
3239 				break;
3240 			}
3241 			break;
3242 
3243 		case AifEnAddJBOD:
3244 		case AifEnDeleteJBOD:
3245 		case AifRawDeviceRemove:
3246 			channel = le32toh(aif->data.EN.data.ECE.container);
3247 			if (sc->cam_rescan_cb != NULL)
3248 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3249 				    AAC_CAM_TARGET_WILDCARD);
3250 			break;
3251 
3252 		default:
3253 			break;
3254 		}
3255 
3256 	default:
3257 		break;
3258 	}
3259 
3260 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3261 	current = sc->aifq_idx;
3262 	next = (current + 1) % AAC_AIFQ_LENGTH;
3263 	if (next == 0)
3264 		sc->aifq_filled = 1;
3265 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3266 	/* Make aifq's FIB header and data LE */
3267 	aac_fib_header_tole(&sc->aac_aifq[current].Header);
3268 	/* modify AIF contexts */
3269 	if (sc->aifq_filled) {
3270 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3271 			if (next == ctx->ctx_idx)
3272 				ctx->ctx_wrap = 1;
3273 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3274 				ctx->ctx_idx = next;
3275 		}
3276 	}
3277 	sc->aifq_idx = next;
3278 	/* On the off chance that someone is sleeping for an aif... */
3279 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3280 		wakeup(sc->aac_aifq);
3281 	/* Wakeup any poll()ers */
3282 	selwakeuppri(&sc->rcv_select, PRIBIO);
3283 
3284 	return;
3285 }
3286 
3287 /*
3288  * Return the Revision of the driver to userspace and check to see if the
3289  * userspace app is possibly compatible.  This is extremely bogus since
3290  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3291  * returning what the card reported.
3292  */
3293 static int
3294 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3295 {
3296 	struct aac_rev_check rev_check;
3297 	struct aac_rev_check_resp rev_check_resp;
3298 	int error = 0;
3299 
3300 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3301 
3302 	/*
3303 	 * Copyin the revision struct from userspace
3304 	 */
3305 	if ((error = copyin(udata, (caddr_t)&rev_check,
3306 			sizeof(struct aac_rev_check))) != 0) {
3307 		return error;
3308 	}
3309 
3310 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3311 	      rev_check.callingRevision.buildNumber);
3312 
3313 	/*
3314 	 * Doctor up the response struct.
3315 	 */
3316 	rev_check_resp.possiblyCompatible = 1;
3317 	rev_check_resp.adapterSWRevision.external.comp.major =
3318 	    AAC_DRIVER_MAJOR_VERSION;
3319 	rev_check_resp.adapterSWRevision.external.comp.minor =
3320 	    AAC_DRIVER_MINOR_VERSION;
3321 	rev_check_resp.adapterSWRevision.external.comp.type =
3322 	    AAC_DRIVER_TYPE;
3323 	rev_check_resp.adapterSWRevision.external.comp.dash =
3324 	    AAC_DRIVER_BUGFIX_LEVEL;
3325 	rev_check_resp.adapterSWRevision.buildNumber =
3326 	    AAC_DRIVER_BUILD;
3327 
3328 	return(copyout((caddr_t)&rev_check_resp, udata,
3329 			sizeof(struct aac_rev_check_resp)));
3330 }
3331 
3332 /*
3333  * Pass the fib context to the caller
3334  */
3335 static int
3336 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3337 {
3338 	struct aac_fib_context *fibctx, *ctx;
3339 	int error = 0;
3340 
3341 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3342 
3343 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3344 	if (fibctx == NULL)
3345 		return (ENOMEM);
3346 
3347 	mtx_lock(&sc->aac_io_lock);
3348 	/* all elements are already 0, add to queue */
3349 	if (sc->fibctx == NULL)
3350 		sc->fibctx = fibctx;
3351 	else {
3352 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3353 			;
3354 		ctx->next = fibctx;
3355 		fibctx->prev = ctx;
3356 	}
3357 
3358 	/* evaluate unique value */
3359 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3360 	ctx = sc->fibctx;
3361 	while (ctx != fibctx) {
3362 		if (ctx->unique == fibctx->unique) {
3363 			fibctx->unique++;
3364 			ctx = sc->fibctx;
3365 		} else {
3366 			ctx = ctx->next;
3367 		}
3368 	}
3369 
3370 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3371 	mtx_unlock(&sc->aac_io_lock);
3372 	if (error)
3373 		aac_close_aif(sc, (caddr_t)ctx);
3374 	return error;
3375 }
3376 
3377 /*
3378  * Close the caller's fib context
3379  */
3380 static int
3381 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3382 {
3383 	struct aac_fib_context *ctx;
3384 
3385 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3386 
3387 	mtx_lock(&sc->aac_io_lock);
3388 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3389 		if (ctx->unique == *(uint32_t *)&arg) {
3390 			if (ctx == sc->fibctx)
3391 				sc->fibctx = NULL;
3392 			else {
3393 				ctx->prev->next = ctx->next;
3394 				if (ctx->next)
3395 					ctx->next->prev = ctx->prev;
3396 			}
3397 			break;
3398 		}
3399 	}
3400 	if (ctx)
3401 		free(ctx, M_AACRAIDBUF);
3402 
3403 	mtx_unlock(&sc->aac_io_lock);
3404 	return 0;
3405 }
3406 
3407 /*
3408  * Pass the caller the next AIF in their queue
3409  */
3410 static int
3411 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3412 {
3413 	struct get_adapter_fib_ioctl agf;
3414 	struct aac_fib_context *ctx;
3415 	int error;
3416 
3417 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3418 
3419 	mtx_lock(&sc->aac_io_lock);
3420 #ifdef COMPAT_FREEBSD32
3421 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3422 		struct get_adapter_fib_ioctl32 agf32;
3423 		error = copyin(arg, &agf32, sizeof(agf32));
3424 		if (error == 0) {
3425 			agf.AdapterFibContext = agf32.AdapterFibContext;
3426 			agf.Wait = agf32.Wait;
3427 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3428 		}
3429 	} else
3430 #endif
3431 		error = copyin(arg, &agf, sizeof(agf));
3432 	if (error == 0) {
3433 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3434 			if (agf.AdapterFibContext == ctx->unique)
3435 				break;
3436 		}
3437 		if (!ctx) {
3438 			mtx_unlock(&sc->aac_io_lock);
3439 			return (EFAULT);
3440 		}
3441 
3442 		error = aac_return_aif(sc, ctx, agf.AifFib);
3443 		if (error == EAGAIN && agf.Wait) {
3444 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3445 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3446 			while (error == EAGAIN) {
3447 				mtx_unlock(&sc->aac_io_lock);
3448 				error = tsleep(sc->aac_aifq, PRIBIO |
3449 					       PCATCH, "aacaif", 0);
3450 				mtx_lock(&sc->aac_io_lock);
3451 				if (error == 0)
3452 					error = aac_return_aif(sc, ctx, agf.AifFib);
3453 			}
3454 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3455 		}
3456 	}
3457 	mtx_unlock(&sc->aac_io_lock);
3458 	return(error);
3459 }
3460 
3461 /*
3462  * Hand the next AIF off the top of the queue out to userspace.
3463  */
3464 static int
3465 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3466 {
3467 	int current, error;
3468 
3469 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3470 
3471 	current = ctx->ctx_idx;
3472 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3473 		/* empty */
3474 		return (EAGAIN);
3475 	}
3476 	error =
3477 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3478 	if (error)
3479 		device_printf(sc->aac_dev,
3480 		    "aac_return_aif: copyout returned %d\n", error);
3481 	else {
3482 		ctx->ctx_wrap = 0;
3483 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3484 	}
3485 	return(error);
3486 }
3487 
3488 static int
3489 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3490 {
3491 	struct aac_pci_info {
3492 		u_int32_t bus;
3493 		u_int32_t slot;
3494 	} pciinf;
3495 	int error;
3496 
3497 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3498 
3499 	pciinf.bus = pci_get_bus(sc->aac_dev);
3500 	pciinf.slot = pci_get_slot(sc->aac_dev);
3501 
3502 	error = copyout((caddr_t)&pciinf, uptr,
3503 			sizeof(struct aac_pci_info));
3504 
3505 	return (error);
3506 }
3507 
3508 static int
3509 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3510 {
3511 	struct aac_features f;
3512 	int error;
3513 
3514 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3515 
3516 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3517 		return (error);
3518 
3519 	/*
3520 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3521 	 * ALL zero in the featuresState, the driver will return the current
3522 	 * state of all the supported features, the data field will not be
3523 	 * valid.
3524 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3525 	 * a specific bit set in the featuresState, the driver will return the
3526 	 * current state of this specific feature and whatever data that are
3527 	 * associated with the feature in the data field or perform whatever
3528 	 * action needed indicates in the data field.
3529 	 */
3530 	 if (f.feat.fValue == 0) {
3531 		f.feat.fBits.largeLBA =
3532 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3533 		f.feat.fBits.JBODSupport = 1;
3534 		/* TODO: In the future, add other features state here as well */
3535 	} else {
3536 		if (f.feat.fBits.largeLBA)
3537 			f.feat.fBits.largeLBA =
3538 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3539 		/* TODO: Add other features state and data in the future */
3540 	}
3541 
3542 	error = copyout(&f, uptr, sizeof (f));
3543 	return (error);
3544 }
3545 
3546 /*
3547  * Give the userland some information about the container.  The AAC arch
3548  * expects the driver to be a SCSI passthrough type driver, so it expects
3549  * the containers to have b:t:l numbers.  Fake it.
3550  */
3551 static int
3552 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3553 {
3554 	struct aac_query_disk query_disk;
3555 	struct aac_container *co;
3556 	int error, id;
3557 
3558 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3559 
3560 	mtx_lock(&sc->aac_io_lock);
3561 	error = copyin(uptr, (caddr_t)&query_disk,
3562 		       sizeof(struct aac_query_disk));
3563 	if (error) {
3564 		mtx_unlock(&sc->aac_io_lock);
3565 		return (error);
3566 	}
3567 
3568 	id = query_disk.ContainerNumber;
3569 	if (id == -1) {
3570 		mtx_unlock(&sc->aac_io_lock);
3571 		return (EINVAL);
3572 	}
3573 
3574 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3575 		if (co->co_mntobj.ObjectId == id)
3576 			break;
3577 		}
3578 
3579 	if (co == NULL) {
3580 			query_disk.Valid = 0;
3581 			query_disk.Locked = 0;
3582 			query_disk.Deleted = 1;		/* XXX is this right? */
3583 	} else {
3584 		query_disk.Valid = 1;
3585 		query_disk.Locked = 1;
3586 		query_disk.Deleted = 0;
3587 		query_disk.Bus = device_get_unit(sc->aac_dev);
3588 		query_disk.Target = 0;
3589 		query_disk.Lun = 0;
3590 		query_disk.UnMapped = 0;
3591 	}
3592 
3593 	error = copyout((caddr_t)&query_disk, uptr,
3594 			sizeof(struct aac_query_disk));
3595 
3596 	mtx_unlock(&sc->aac_io_lock);
3597 	return (error);
3598 }
3599 
3600 static void
3601 aac_container_bus(struct aac_softc *sc)
3602 {
3603 	struct aac_sim *sim;
3604 	device_t child;
3605 
3606 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3607 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3608 	if (sim == NULL) {
3609 		device_printf(sc->aac_dev,
3610 	    	"No memory to add container bus\n");
3611 		panic("Out of memory?!");
3612 	}
3613 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3614 	if (child == NULL) {
3615 		device_printf(sc->aac_dev,
3616 	    	"device_add_child failed for container bus\n");
3617 		free(sim, M_AACRAIDBUF);
3618 		panic("Out of memory?!");
3619 	}
3620 
3621 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3622 	sim->BusNumber = 0;
3623 	sim->BusType = CONTAINER_BUS;
3624 	sim->InitiatorBusId = -1;
3625 	sim->aac_sc = sc;
3626 	sim->sim_dev = child;
3627 	sim->aac_cam = NULL;
3628 
3629 	device_set_ivars(child, sim);
3630 	device_set_desc(child, "Container Bus");
3631 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3632 	/*
3633 	device_set_desc(child, aac_describe_code(aac_container_types,
3634 			mir->MntTable[0].VolType));
3635 	*/
3636 	bus_generic_attach(sc->aac_dev);
3637 }
3638 
3639 static void
3640 aac_get_bus_info(struct aac_softc *sc)
3641 {
3642 	struct aac_fib *fib;
3643 	struct aac_ctcfg *c_cmd;
3644 	struct aac_ctcfg_resp *c_resp;
3645 	struct aac_vmioctl *vmi;
3646 	struct aac_vmi_businf_resp *vmi_resp;
3647 	struct aac_getbusinf businfo;
3648 	struct aac_sim *caminf;
3649 	device_t child;
3650 	int i, error;
3651 
3652 	mtx_lock(&sc->aac_io_lock);
3653 	aac_alloc_sync_fib(sc, &fib);
3654 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3655 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3656 
3657 	c_cmd->Command = VM_ContainerConfig;
3658 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3659 	c_cmd->param = 0;
3660 
3661 	aac_ctcfg_tole(c_cmd);
3662 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3663 	    sizeof(struct aac_ctcfg));
3664 	if (error) {
3665 		device_printf(sc->aac_dev, "Error %d sending "
3666 		    "VM_ContainerConfig command\n", error);
3667 		aac_release_sync_fib(sc);
3668 		mtx_unlock(&sc->aac_io_lock);
3669 		return;
3670 	}
3671 
3672 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3673 	aac_ctcfg_resp_toh(c_resp);
3674 	if (c_resp->Status != ST_OK) {
3675 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3676 		    c_resp->Status);
3677 		aac_release_sync_fib(sc);
3678 		mtx_unlock(&sc->aac_io_lock);
3679 		return;
3680 	}
3681 
3682 	sc->scsi_method_id = c_resp->param;
3683 
3684 	vmi = (struct aac_vmioctl *)&fib->data[0];
3685 	bzero(vmi, sizeof(struct aac_vmioctl));
3686 
3687 	vmi->Command = VM_Ioctl;
3688 	vmi->ObjType = FT_DRIVE;
3689 	vmi->MethId = sc->scsi_method_id;
3690 	vmi->ObjId = 0;
3691 	vmi->IoctlCmd = GetBusInfo;
3692 
3693 	aac_vmioctl_tole(vmi);
3694 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3695 	    sizeof(struct aac_vmi_businf_resp));
3696 	if (error) {
3697 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3698 		    error);
3699 		aac_release_sync_fib(sc);
3700 		mtx_unlock(&sc->aac_io_lock);
3701 		return;
3702 	}
3703 
3704 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3705 	aac_vmi_businf_resp_toh(vmi_resp);
3706 	if (vmi_resp->Status != ST_OK) {
3707 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3708 		    vmi_resp->Status);
3709 		aac_release_sync_fib(sc);
3710 		mtx_unlock(&sc->aac_io_lock);
3711 		return;
3712 	}
3713 
3714 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3715 	aac_release_sync_fib(sc);
3716 	mtx_unlock(&sc->aac_io_lock);
3717 
3718 	for (i = 0; i < businfo.BusCount; i++) {
3719 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3720 			continue;
3721 
3722 		caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3723 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3724 		if (caminf == NULL) {
3725 			device_printf(sc->aac_dev,
3726 			    "No memory to add passthrough bus %d\n", i);
3727 			break;
3728 		}
3729 
3730 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3731 		if (child == NULL) {
3732 			device_printf(sc->aac_dev,
3733 			    "device_add_child failed for passthrough bus %d\n",
3734 			    i);
3735 			free(caminf, M_AACRAIDBUF);
3736 			break;
3737 		}
3738 
3739 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3740 		caminf->BusNumber = i+1;
3741 		caminf->BusType = PASSTHROUGH_BUS;
3742 		caminf->InitiatorBusId = -1;
3743 		caminf->aac_sc = sc;
3744 		caminf->sim_dev = child;
3745 		caminf->aac_cam = NULL;
3746 
3747 		device_set_ivars(child, caminf);
3748 		device_set_desc(child, "SCSI Passthrough Bus");
3749 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3750 	}
3751 }
3752 
3753 /*
3754  * Check to see if the kernel is up and running. If we are in a
3755  * BlinkLED state, return the BlinkLED code.
3756  */
3757 static u_int32_t
3758 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3759 {
3760 	u_int32_t ret;
3761 
3762 	ret = AAC_GET_FWSTATUS(sc);
3763 
3764 	if (ret & AAC_UP_AND_RUNNING)
3765 		ret = 0;
3766 	else if (ret & AAC_KERNEL_PANIC && bled)
3767 		*bled = (ret >> 16) & 0xff;
3768 
3769 	return (ret);
3770 }
3771 
3772 /*
3773  * Once do an IOP reset, basically have to re-initialize the card as
3774  * if coming up from a cold boot, and the driver is responsible for
3775  * any IO that was outstanding to the adapter at the time of the IOP
3776  * RESET. And prepare the driver for IOP RESET by making the init code
3777  * modular with the ability to call it from multiple places.
3778  */
3779 static int
3780 aac_reset_adapter(struct aac_softc *sc)
3781 {
3782 	struct aac_command *cm;
3783 	struct aac_fib *fib;
3784 	struct aac_pause_command *pc;
3785 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3786 	int ret, msi_enabled_orig;
3787 
3788 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3789 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3790 
3791 	if (sc->aac_state & AAC_STATE_RESET) {
3792 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3793 		return (EINVAL);
3794 	}
3795 	sc->aac_state |= AAC_STATE_RESET;
3796 
3797 	/* disable interrupt */
3798 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3799 
3800 	/*
3801 	 * Abort all pending commands:
3802 	 * a) on the controller
3803 	 */
3804 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3805 		cm->cm_flags |= AAC_CMD_RESET;
3806 
3807 		/* is there a completion handler? */
3808 		if (cm->cm_complete != NULL) {
3809 			cm->cm_complete(cm);
3810 		} else {
3811 			/* assume that someone is sleeping on this
3812 			 * command
3813 			 */
3814 			wakeup(cm);
3815 		}
3816 	}
3817 
3818 	/* b) in the waiting queues */
3819 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3820 		cm->cm_flags |= AAC_CMD_RESET;
3821 
3822 		/* is there a completion handler? */
3823 		if (cm->cm_complete != NULL) {
3824 			cm->cm_complete(cm);
3825 		} else {
3826 			/* assume that someone is sleeping on this
3827 			 * command
3828 			 */
3829 			wakeup(cm);
3830 		}
3831 	}
3832 
3833 	/* flush drives */
3834 	if (aac_check_adapter_health(sc, NULL) == 0) {
3835 		mtx_unlock(&sc->aac_io_lock);
3836 		(void) aacraid_shutdown(sc->aac_dev);
3837 		mtx_lock(&sc->aac_io_lock);
3838 	}
3839 
3840 	/* execute IOP reset */
3841 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3842 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3843 
3844 		/* We need to wait for 5 seconds before accessing the MU again
3845 		 * 10000 * 100us = 1000,000us = 1000ms = 1s
3846 		 */
3847 		waitCount = 5 * 10000;
3848 		while (waitCount) {
3849 			DELAY(100);			/* delay 100 microseconds */
3850 			waitCount--;
3851 		}
3852 	} else {
3853 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3854 			0, 0, 0, 0, &status, &reset_mask);
3855 		if (ret && !sc->doorbell_mask) {
3856 			/* call IOP_RESET for older firmware */
3857 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3858 			    &status, NULL)) != 0) {
3859 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3860 					device_printf(sc->aac_dev,
3861 					    "IOP_RESET not supported\n");
3862 				} else {
3863 					/* probably timeout */
3864 					device_printf(sc->aac_dev,
3865 					    "IOP_RESET failed\n");
3866 				}
3867 
3868 				/* unwind aac_shutdown() */
3869 				aac_alloc_sync_fib(sc, &fib);
3870 				pc = (struct aac_pause_command *)&fib->data[0];
3871 				pc->Command = VM_ContainerConfig;
3872 				pc->Type = CT_PAUSE_IO;
3873 				pc->Timeout = 1;
3874 				pc->Min = 1;
3875 				pc->NoRescan = 1;
3876 
3877 				aac_pause_command_tole(pc);
3878 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3879 				    fib, sizeof (struct aac_pause_command));
3880 				aac_release_sync_fib(sc);
3881 
3882 				goto finish;
3883 			}
3884 		} else if (sc->doorbell_mask) {
3885 			ret = 0;
3886 			reset_mask = sc->doorbell_mask;
3887 		}
3888 		if (!ret &&
3889 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3890 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3891 			/*
3892 			 * We need to wait for 5 seconds before accessing the
3893 			 * doorbell again;
3894 			 * 10000 * 100us = 1000,000us = 1000ms = 1s
3895 			 */
3896 			waitCount = 5 * 10000;
3897 			while (waitCount) {
3898 				DELAY(100);	/* delay 100 microseconds */
3899 				waitCount--;
3900 			}
3901 		}
3902 	}
3903 
3904 	/*
3905 	 * Initialize the adapter.
3906 	 */
3907 	max_msix_orig = sc->aac_max_msix;
3908 	msi_enabled_orig = sc->msi_enabled;
3909 	sc->msi_enabled = FALSE;
3910 	if (aac_check_firmware(sc) != 0)
3911 		goto finish;
3912 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3913 		sc->aac_max_msix = max_msix_orig;
3914 		if (msi_enabled_orig) {
3915 			sc->msi_enabled = msi_enabled_orig;
3916 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3917 		}
3918 		mtx_unlock(&sc->aac_io_lock);
3919 		aac_init(sc);
3920 		mtx_lock(&sc->aac_io_lock);
3921 	}
3922 
3923 finish:
3924 	sc->aac_state &= ~AAC_STATE_RESET;
3925 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3926 	aacraid_startio(sc);
3927 	return (0);
3928 }
3929