1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 #if __FreeBSD_version >= 702000
192 static void		aac_cdevpriv_dtor(void *arg);
193 #else
194 static d_close_t	aac_close;
195 #endif
196 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
197 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
198 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
199 static void	aac_request_aif(struct aac_softc *sc);
200 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
201 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
202 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
203 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
204 static int	aac_return_aif(struct aac_softc *sc,
205 			       struct aac_fib_context *ctx, caddr_t uptr);
206 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
207 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
208 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
209 static void	aac_ioctl_event(struct aac_softc *sc,
210 				struct aac_event *event, void *arg);
211 static int	aac_reset_adapter(struct aac_softc *sc);
212 static int	aac_get_container_info(struct aac_softc *sc,
213 				       struct aac_fib *fib, int cid,
214 				       struct aac_mntinforesp *mir,
215 				       u_int32_t *uid);
216 static u_int32_t
217 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
218 
219 static struct cdevsw aacraid_cdevsw = {
220 	.d_version =	D_VERSION,
221 	.d_flags =	D_NEEDGIANT,
222 	.d_open =	aac_open,
223 #if __FreeBSD_version < 702000
224 	.d_close =	aac_close,
225 #endif
226 	.d_ioctl =	aac_ioctl,
227 	.d_poll =	aac_poll,
228 	.d_name =	"aacraid",
229 };
230 
231 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
232 
233 /* sysctl node */
234 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
235 
236 /*
237  * Device Interface
238  */
239 
240 /*
241  * Initialize the controller and softc
242  */
243 int
244 aacraid_attach(struct aac_softc *sc)
245 {
246 	int error, unit;
247 	struct aac_fib *fib;
248 	struct aac_mntinforesp mir;
249 	int count = 0, i = 0;
250 	u_int32_t uid;
251 
252 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
253 	sc->hint_flags = device_get_flags(sc->aac_dev);
254 	/*
255 	 * Initialize per-controller queues.
256 	 */
257 	aac_initq_free(sc);
258 	aac_initq_ready(sc);
259 	aac_initq_busy(sc);
260 
261 	/* mark controller as suspended until we get ourselves organised */
262 	sc->aac_state |= AAC_STATE_SUSPEND;
263 
264 	/*
265 	 * Check that the firmware on the card is supported.
266 	 */
267 	sc->msi_enabled = sc->msi_tupelo = FALSE;
268 	if ((error = aac_check_firmware(sc)) != 0)
269 		return(error);
270 
271 	/*
272 	 * Initialize locks
273 	 */
274 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
275 	TAILQ_INIT(&sc->aac_container_tqh);
276 	TAILQ_INIT(&sc->aac_ev_cmfree);
277 
278 #if __FreeBSD_version >= 800000
279 	/* Initialize the clock daemon callout. */
280 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
281 #endif
282 	/*
283 	 * Initialize the adapter.
284 	 */
285 	if ((error = aac_alloc(sc)) != 0)
286 		return(error);
287 	aac_define_int_mode(sc);
288 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
289 		if ((error = aac_init(sc)) != 0)
290 			return(error);
291 	}
292 
293 	/*
294 	 * Allocate and connect our interrupt.
295 	 */
296 	if ((error = aac_setup_intr(sc)) != 0)
297 		return(error);
298 
299 	/*
300 	 * Print a little information about the controller.
301 	 */
302 	aac_describe_controller(sc);
303 
304 	/*
305 	 * Make the control device.
306 	 */
307 	unit = device_get_unit(sc->aac_dev);
308 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
309 				 0640, "aacraid%d", unit);
310 	sc->aac_dev_t->si_drv1 = sc;
311 
312 	/* Create the AIF thread */
313 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
314 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
315 		panic("Could not create AIF thread");
316 
317 	/* Register the shutdown method to only be called post-dump */
318 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
319 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
320 		device_printf(sc->aac_dev,
321 			      "shutdown event registration failed\n");
322 
323 	/* Find containers */
324 	mtx_lock(&sc->aac_io_lock);
325 	aac_alloc_sync_fib(sc, &fib);
326 	/* loop over possible containers */
327 	do {
328 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
329 			continue;
330 		if (i == 0)
331 			count = mir.MntRespCount;
332 		aac_add_container(sc, &mir, 0, uid);
333 		i++;
334 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
335 	aac_release_sync_fib(sc);
336 	mtx_unlock(&sc->aac_io_lock);
337 
338 	/* Register with CAM for the containers */
339 	TAILQ_INIT(&sc->aac_sim_tqh);
340 	aac_container_bus(sc);
341 	/* Register with CAM for the non-DASD devices */
342 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
343 		aac_get_bus_info(sc);
344 
345 	/* poke the bus to actually attach the child devices */
346 	bus_generic_attach(sc->aac_dev);
347 
348 	/* mark the controller up */
349 	sc->aac_state &= ~AAC_STATE_SUSPEND;
350 
351 	/* enable interrupts now */
352 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
353 
354 #if __FreeBSD_version >= 800000
355 	mtx_lock(&sc->aac_io_lock);
356 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
357 	mtx_unlock(&sc->aac_io_lock);
358 #else
359 	{
360 		struct timeval tv;
361 		tv.tv_sec = 60;
362 		tv.tv_usec = 0;
363 		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
364 	}
365 #endif
366 
367 	return(0);
368 }
369 
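/*
 * Periodic timer: send the current host time to the controller via a
 * SendHostTime FIB, then reschedule itself.
 */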
370 static void
371 aac_daemon(void *arg)
372 {
373 	struct aac_softc *sc;
374 	struct timeval tv;
375 	struct aac_command *cm;
376 	struct aac_fib *fib;
377 
378 	sc = arg;
379 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
380 
381 #if __FreeBSD_version >= 800000
382 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
383 	if (callout_pending(&sc->aac_daemontime) ||
384 	    callout_active(&sc->aac_daemontime) == 0)
385 		return;
386 #else
387 	mtx_lock(&sc->aac_io_lock);
388 #endif
389 	getmicrotime(&tv);
390 
391 	if (!aacraid_alloc_command(sc, &cm)) {
392 		fib = cm->cm_fib;
393 		cm->cm_timestamp = time_uptime;
394 		cm->cm_datalen = 0;
395 		cm->cm_flags |= AAC_CMD_WAIT;
396 
397 		fib->Header.Size =
398 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
399 		fib->Header.XferState =
400 			AAC_FIBSTATE_HOSTOWNED   |
401 			AAC_FIBSTATE_INITIALISED |
402 			AAC_FIBSTATE_EMPTY	 |
403 			AAC_FIBSTATE_FROMHOST	 |
404 			AAC_FIBSTATE_REXPECTED   |
405 			AAC_FIBSTATE_NORM	 |
406 			AAC_FIBSTATE_ASYNC	 |
407 			AAC_FIBSTATE_FAST_RESPONSE;
408 		fib->Header.Command = SendHostTime;
409 		*(uint32_t *)fib->data = tv.tv_sec;
410 
411 		aacraid_map_command_sg(cm, NULL, 0, 0);
412 		aacraid_release_command(cm);
413 	}
414 
415 #if __FreeBSD_version >= 800000
416 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
417 #else
418 	mtx_unlock(&sc->aac_io_lock);
419 	tv.tv_sec = 30 * 60;
420 	tv.tv_usec = 0;
421 	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
422 #endif
423 }
424 
425 void
426 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
427 {
428 
429 	switch (event->ev_type & AAC_EVENT_MASK) {
430 	case AAC_EVENT_CMFREE:
431 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
432 		break;
433 	default:
434 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
435 		    event->ev_type);
436 		break;
437 	}
438 
439 	return;
440 }
441 
442 /*
443  * Request information of container #cid
444  */
445 static int
446 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
447 		       struct aac_mntinforesp *mir, u_int32_t *uid)
448 {
449 	struct aac_command *cm;
450 	struct aac_fib *fib;
451 	struct aac_mntinfo *mi;
452 	struct aac_cnt_config *ccfg;
453 	int rval;
454 
455 	if (sync_fib == NULL) {
456 		if (aacraid_alloc_command(sc, &cm)) {
457 			device_printf(sc->aac_dev,
458 				"Warning, no free command available\n");
459 			return (-1);
460 		}
461 		fib = cm->cm_fib;
462 	} else {
463 		fib = sync_fib;
464 	}
465 
466 	mi = (struct aac_mntinfo *)&fib->data[0];
467 	/* 4KB block support? 64-bit LBA? */
468 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
469 		mi->Command = VM_NameServeAllBlk;
470 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
471 		mi->Command = VM_NameServe64;
472 	else
473 		mi->Command = VM_NameServe;
474 	mi->MntType = FT_FILESYS;
475 	mi->MntCount = cid;
476 
477 	if (sync_fib) {
478 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
479 			 sizeof(struct aac_mntinfo))) {
480 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
481 			return (-1);
482 		}
483 	} else {
484 		cm->cm_timestamp = time_uptime;
485 		cm->cm_datalen = 0;
486 
487 		fib->Header.Size =
488 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
489 		fib->Header.XferState =
490 			AAC_FIBSTATE_HOSTOWNED   |
491 			AAC_FIBSTATE_INITIALISED |
492 			AAC_FIBSTATE_EMPTY	 |
493 			AAC_FIBSTATE_FROMHOST	 |
494 			AAC_FIBSTATE_REXPECTED   |
495 			AAC_FIBSTATE_NORM	 |
496 			AAC_FIBSTATE_ASYNC	 |
497 			AAC_FIBSTATE_FAST_RESPONSE;
498 		fib->Header.Command = ContainerCommand;
499 		if (aacraid_wait_command(cm) != 0) {
500 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
501 			aacraid_release_command(cm);
502 			return (-1);
503 		}
504 	}
505 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
506 
507 	/* UID */
508 	*uid = cid;
509 	if (mir->MntTable[0].VolType != CT_NONE &&
510 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
511 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
512 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
513 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
514 		}
515 		ccfg = (struct aac_cnt_config *)&fib->data[0];
516 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
517 		ccfg->Command = VM_ContainerConfig;
518 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
519 		ccfg->CTCommand.param[0] = cid;
520 
521 		if (sync_fib) {
522 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
523 				sizeof(struct aac_cnt_config));
524 			if (rval == 0 && ccfg->Command == ST_OK &&
525 				ccfg->CTCommand.param[0] == CT_OK &&
526 				mir->MntTable[0].VolType != CT_PASSTHRU)
527 				*uid = ccfg->CTCommand.param[1];
528 		} else {
529 			fib->Header.Size =
530 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
531 			fib->Header.XferState =
532 				AAC_FIBSTATE_HOSTOWNED   |
533 				AAC_FIBSTATE_INITIALISED |
534 				AAC_FIBSTATE_EMPTY	 |
535 				AAC_FIBSTATE_FROMHOST	 |
536 				AAC_FIBSTATE_REXPECTED   |
537 				AAC_FIBSTATE_NORM	 |
538 				AAC_FIBSTATE_ASYNC	 |
539 				AAC_FIBSTATE_FAST_RESPONSE;
540 			fib->Header.Command = ContainerCommand;
541 			rval = aacraid_wait_command(cm);
542 			if (rval == 0 && ccfg->Command == ST_OK &&
543 				ccfg->CTCommand.param[0] == CT_OK &&
544 				mir->MntTable[0].VolType != CT_PASSTHRU)
545 				*uid = ccfg->CTCommand.param[1];
546 			aacraid_release_command(cm);
547 		}
548 	}
549 
550 	return (0);
551 }
552 
553 /*
554  * Create a device to represent a new container
555  */
556 static void
557 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
558 		  u_int32_t uid)
559 {
560 	struct aac_container *co;
561 
562 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
563 
564 	/*
565 	 * Check container volume type for validity.  Note that many of
566 	 * the possible types may never show up.
567 	 */
568 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
569 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
570 		       M_NOWAIT | M_ZERO);
571 		if (co == NULL) {
572 			panic("Out of memory?!");
573 		}
574 
575 		co->co_found = f;
576 		bcopy(&mir->MntTable[0], &co->co_mntobj,
577 		      sizeof(struct aac_mntobj));
578 		co->co_uid = uid;
579 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
580 	}
581 }
582 
583 /*
584  * Allocate resources associated with (sc)
585  */
586 static int
587 aac_alloc(struct aac_softc *sc)
588 {
589 	bus_size_t maxsize;
590 
591 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
592 
593 	/*
594 	 * Create DMA tag for mapping buffers into controller-addressable space.
595 	 */
596 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
597 			       1, 0, 			/* algnmnt, boundary */
598 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
599 			       BUS_SPACE_MAXADDR :
600 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
601 			       BUS_SPACE_MAXADDR, 	/* highaddr */
602 			       NULL, NULL, 		/* filter, filterarg */
603 			       sc->aac_max_sectors << 9, /* maxsize */
604 			       sc->aac_sg_tablesize,	/* nsegments */
605 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
606 			       BUS_DMA_ALLOCNOW,	/* flags */
607 			       busdma_lock_mutex,	/* lockfunc */
608 			       &sc->aac_io_lock,	/* lockfuncarg */
609 			       &sc->aac_buffer_dmat)) {
610 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
611 		return (ENOMEM);
612 	}
613 
614 	/*
615 	 * Create DMA tag for mapping FIBs into controller-addressable space.
616 	 */
617 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
618 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
619 			sizeof(struct aac_fib_xporthdr) + 31);
620 	else
621 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
622 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
623 			       1, 0, 			/* algnmnt, boundary */
624 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
625 			       BUS_SPACE_MAXADDR_32BIT :
626 			       0x7fffffff,		/* lowaddr */
627 			       BUS_SPACE_MAXADDR, 	/* highaddr */
628 			       NULL, NULL, 		/* filter, filterarg */
629 			       maxsize,  		/* maxsize */
630 			       1,			/* nsegments */
631 			       maxsize,			/* maxsegsize */
632 			       0,			/* flags */
633 			       NULL, NULL,		/* No locking needed */
634 			       &sc->aac_fib_dmat)) {
635 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
636 		return (ENOMEM);
637 	}
638 
639 	/*
640 	 * Create DMA tag for the common structure and allocate it.
641 	 */
642 	maxsize = sizeof(struct aac_common);
643 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
644 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
645 			       1, 0,			/* algnmnt, boundary */
646 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
647 			       BUS_SPACE_MAXADDR_32BIT :
648 			       0x7fffffff,		/* lowaddr */
649 			       BUS_SPACE_MAXADDR, 	/* highaddr */
650 			       NULL, NULL, 		/* filter, filterarg */
651 			       maxsize, 		/* maxsize */
652 			       1,			/* nsegments */
653 			       maxsize,			/* maxsegsize */
654 			       0,			/* flags */
655 			       NULL, NULL,		/* No locking needed */
656 			       &sc->aac_common_dmat)) {
657 		device_printf(sc->aac_dev,
658 			      "can't allocate common structure DMA tag\n");
659 		return (ENOMEM);
660 	}
661 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
662 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
663 		device_printf(sc->aac_dev, "can't allocate common structure\n");
664 		return (ENOMEM);
665 	}
666 
667 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
668 			sc->aac_common, maxsize,
669 			aac_common_map, sc, 0);
670 	bzero(sc->aac_common, maxsize);
671 
672 	/* Allocate some FIBs and associated command structs */
673 	TAILQ_INIT(&sc->aac_fibmap_tqh);
674 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
675 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
676 	mtx_lock(&sc->aac_io_lock);
677 	while (sc->total_fibs < sc->aac_max_fibs) {
678 		if (aac_alloc_commands(sc) != 0)
679 			break;
680 	}
681 	mtx_unlock(&sc->aac_io_lock);
682 	if (sc->total_fibs == 0)
683 		return (ENOMEM);
684 
685 	return (0);
686 }
687 
688 /*
689  * Free all of the resources associated with (sc)
690  *
691  * Should not be called if the controller is active.
692  */
693 void
694 aacraid_free(struct aac_softc *sc)
695 {
696 	int i;
697 
698 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
699 
700 	/* remove the control device */
701 	if (sc->aac_dev_t != NULL)
702 		destroy_dev(sc->aac_dev_t);
703 
704 	/* throw away any FIB buffers, discard the FIB DMA tag */
705 	aac_free_commands(sc);
706 	if (sc->aac_fib_dmat)
707 		bus_dma_tag_destroy(sc->aac_fib_dmat);
708 
709 	free(sc->aac_commands, M_AACRAIDBUF);
710 
711 	/* destroy the common area */
712 	if (sc->aac_common) {
713 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
714 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
715 				sc->aac_common_dmamap);
716 	}
717 	if (sc->aac_common_dmat)
718 		bus_dma_tag_destroy(sc->aac_common_dmat);
719 
720 	/* disconnect the interrupt handler */
721 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
722 		if (sc->aac_intr[i])
723 			bus_teardown_intr(sc->aac_dev,
724 				sc->aac_irq[i], sc->aac_intr[i]);
725 		if (sc->aac_irq[i])
726 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
727 				sc->aac_irq_rid[i], sc->aac_irq[i]);
728 		else
729 			break;
730 	}
731 	if (sc->msi_enabled || sc->msi_tupelo)
732 		pci_release_msi(sc->aac_dev);
733 
734 	/* destroy data-transfer DMA tag */
735 	if (sc->aac_buffer_dmat)
736 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
737 
738 	/* destroy the parent DMA tag */
739 	if (sc->aac_parent_dmat)
740 		bus_dma_tag_destroy(sc->aac_parent_dmat);
741 
742 	/* release the register window mapping */
743 	if (sc->aac_regs_res0 != NULL)
744 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 				     sc->aac_regs_rid0, sc->aac_regs_res0);
746 	if (sc->aac_regs_res1 != NULL)
747 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
748 				     sc->aac_regs_rid1, sc->aac_regs_res1);
749 }
750 
751 /*
752  * Disconnect from the controller completely, in preparation for unload.
753  */
754 int
755 aacraid_detach(device_t dev)
756 {
757 	struct aac_softc *sc;
758 	struct aac_container *co;
759 	struct aac_sim	*sim;
760 	int error;
761 
762 	sc = device_get_softc(dev);
763 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
764 
765 #if __FreeBSD_version >= 800000
766 	callout_drain(&sc->aac_daemontime);
767 #else
768 	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
769 #endif
770 	/* Remove the child containers */
771 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
772 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
773 		free(co, M_AACRAIDBUF);
774 	}
775 
776 	/* Remove the CAM SIMs */
777 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
778 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
779 		error = device_delete_child(dev, sim->sim_dev);
780 		if (error)
781 			return (error);
782 		free(sim, M_AACRAIDBUF);
783 	}
784 
785 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
786 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
787 		wakeup(sc->aifthread);
788 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
789 	}
790 
791 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
792 		panic("Cannot shutdown AIF thread");
793 
794 	if ((error = aacraid_shutdown(dev)))
795 		return(error);
796 
797 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
798 
799 	aacraid_free(sc);
800 
801 	mtx_destroy(&sc->aac_io_lock);
802 
803 	return(0);
804 }
805 
806 /*
807  * Bring the controller down to a dormant state and detach all child devices.
808  *
809  * This function is called before detach or system shutdown.
810  *
811  * Note that we can assume that the bioq on the controller is empty, as we won't
812  * allow shutdown if any device is open.
813  */
814 int
815 aacraid_shutdown(device_t dev)
816 {
817 	struct aac_softc *sc;
818 	struct aac_fib *fib;
819 	struct aac_close_command *cc;
820 
821 	sc = device_get_softc(dev);
822 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
823 
824 	sc->aac_state |= AAC_STATE_SUSPEND;
825 
826 	/*
827 	 * Send a Container shutdown followed by a HostShutdown FIB to the
828 	 * controller to convince it that we don't want to talk to it anymore.
829 	 * We've been closed and all I/O completed already
830 	 * We've already been closed and all I/O has completed.
831 	device_printf(sc->aac_dev, "shutting down controller...");
832 
833 	mtx_lock(&sc->aac_io_lock);
834 	aac_alloc_sync_fib(sc, &fib);
835 	cc = (struct aac_close_command *)&fib->data[0];
836 
837 	bzero(cc, sizeof(struct aac_close_command));
838 	cc->Command = VM_CloseAll;
839 	cc->ContainerId = 0xfffffffe;
840 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
841 	    sizeof(struct aac_close_command)))
842 		printf("FAILED.\n");
843 	else
844 		printf("done\n");
845 
846 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
847 	aac_release_sync_fib(sc);
848 	mtx_unlock(&sc->aac_io_lock);
849 
850 	return(0);
851 }
852 
853 /*
854  * Bring the controller to a quiescent state, ready for system suspend.
855  */
856 int
857 aacraid_suspend(device_t dev)
858 {
859 	struct aac_softc *sc;
860 
861 	sc = device_get_softc(dev);
862 
863 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
864 	sc->aac_state |= AAC_STATE_SUSPEND;
865 
866 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
867 	return(0);
868 }
869 
870 /*
871  * Bring the controller back to a state ready for operation.
872  */
873 int
874 aacraid_resume(device_t dev)
875 {
876 	struct aac_softc *sc;
877 
878 	sc = device_get_softc(dev);
879 
880 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
881 	sc->aac_state &= ~AAC_STATE_SUSPEND;
882 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
883 	return(0);
884 }
885 
886 /*
887  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
888  */
889 void
890 aacraid_new_intr_type1(void *arg)
891 {
892 	struct aac_msix_ctx *ctx;
893 	struct aac_softc *sc;
894 	int vector_no;
895 	struct aac_command *cm;
896 	struct aac_fib *fib;
897 	u_int32_t bellbits, bellbits_shifted, index, handle;
898 	int isFastResponse, isAif, noMoreAif, mode;
899 
900 	ctx = (struct aac_msix_ctx *)arg;
901 	sc = ctx->sc;
902 	vector_no = ctx->vector_no;
903 
904 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
905 	mtx_lock(&sc->aac_io_lock);
906 
907 	if (sc->msi_enabled) {
908 		mode = AAC_INT_MODE_MSI;
909 		if (vector_no == 0) {
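			/*
			 * The AIF (0x40000) and sync command (0x1000)
			 * doorbell bits are only checked on vector 0.
			 */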
910 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
911 			if (bellbits & 0x40000)
912 				mode |= AAC_INT_MODE_AIF;
913 			else if (bellbits & 0x1000)
914 				mode |= AAC_INT_MODE_SYNC;
915 		}
916 	} else {
917 		mode = AAC_INT_MODE_INTX;
918 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
919 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
920 			bellbits = AAC_DB_RESPONSE_SENT_NS;
921 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922 		} else {
923 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
924 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
925 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
926 				mode |= AAC_INT_MODE_AIF;
927 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
928 				mode |= AAC_INT_MODE_SYNC;
929 		}
930 		/* ODR readback, Prep #238630 */
931 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
932 	}
933 
934 	if (mode & AAC_INT_MODE_SYNC) {
935 		if (sc->aac_sync_cm) {
936 			cm = sc->aac_sync_cm;
937 			cm->cm_flags |= AAC_CMD_COMPLETED;
938 			/* is there a completion handler? */
939 			if (cm->cm_complete != NULL) {
940 				cm->cm_complete(cm);
941 			} else {
942 				/* assume that someone is sleeping on this command */
943 				wakeup(cm);
944 			}
945 			sc->flags &= ~AAC_QUEUE_FRZN;
946 			sc->aac_sync_cm = NULL;
947 		}
948 		mode = 0;
949 	}
950 
951 	if (mode & AAC_INT_MODE_AIF) {
952 		if (mode & AAC_INT_MODE_INTX) {
953 			aac_request_aif(sc);
954 			mode = 0;
955 		}
956 	}
957 
958 	if (mode) {
959 		/* handle async. status */
960 		index = sc->aac_host_rrq_idx[vector_no];
961 		for (;;) {
962 			isFastResponse = isAif = noMoreAif = 0;
963 			/* remove toggle bit (31) */
964 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
965 			/* check fast response bit (30) */
966 			if (handle & 0x40000000)
967 				isFastResponse = 1;
968 			/* check AIF bit (23) */
969 			else if (handle & 0x00800000)
970 				isAif = TRUE;
971 			handle &= 0x0000ffff;
972 			if (handle == 0)
973 				break;
974 
975 			cm = sc->aac_commands + (handle - 1);
976 			fib = cm->cm_fib;
977 			sc->aac_rrq_outstanding[vector_no]--;
978 			if (isAif) {
979 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
980 				if (!noMoreAif)
981 					aac_handle_aif(sc, fib);
982 				aac_remove_busy(cm);
983 				aacraid_release_command(cm);
984 			} else {
985 				if (isFastResponse) {
986 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
987 					*((u_int32_t *)(fib->data)) = ST_OK;
988 					cm->cm_flags |= AAC_CMD_FASTRESP;
989 				}
990 				aac_remove_busy(cm);
991 				aac_unmap_command(cm);
992 				cm->cm_flags |= AAC_CMD_COMPLETED;
993 
994 				/* is there a completion handler? */
995 				if (cm->cm_complete != NULL) {
996 					cm->cm_complete(cm);
997 				} else {
998 					/* assume that someone is sleeping on this command */
999 					wakeup(cm);
1000 				}
1001 				sc->flags &= ~AAC_QUEUE_FRZN;
1002 			}
1003 
1004 			sc->aac_common->ac_host_rrq[index++] = 0;
1005 			if (index == (vector_no + 1) * sc->aac_vector_cap)
1006 				index = vector_no * sc->aac_vector_cap;
1007 			sc->aac_host_rrq_idx[vector_no] = index;
1008 
1009 			if ((isAif && !noMoreAif) || sc->aif_pending)
1010 				aac_request_aif(sc);
1011 		}
1012 	}
1013 
1014 	if (mode & AAC_INT_MODE_AIF) {
1015 		aac_request_aif(sc);
1016 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1017 		mode = 0;
1018 	}
1019 
1020 	/* see if we can start some more I/O */
1021 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1022 		aacraid_startio(sc);
1023 	mtx_unlock(&sc->aac_io_lock);
1024 }
1025 
1026 /*
1027  * Handle notification of one or more FIBs coming from the controller.
1028  */
1029 static void
1030 aac_command_thread(struct aac_softc *sc)
1031 {
1032 	int retval;
1033 
1034 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1035 
1036 	mtx_lock(&sc->aac_io_lock);
1037 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1038 
1039 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1040 
1041 		retval = 0;
1042 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1043 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1044 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1045 
1046 		/*
1047 		 * First see if any FIBs need to be allocated.  This needs
1048 		 * to be called without the driver lock because contigmalloc
1049 		 * will grab Giant, and would result in an LOR.
1050 		 */
1051 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1052 			aac_alloc_commands(sc);
1053 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1054 			aacraid_startio(sc);
1055 		}
1056 
1057 		/*
1058 		 * While we're here, check to see if any commands are stuck.
1059 		 * This is pretty low-priority, so it's ok if it doesn't
1060 		 * always fire.
1061 		 */
1062 		if (retval == EWOULDBLOCK)
1063 			aac_timeout(sc);
1064 
1065 		/* Check the hardware printf message buffer */
1066 		if (sc->aac_common->ac_printf[0] != 0)
1067 			aac_print_printf(sc);
1068 	}
1069 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1070 	mtx_unlock(&sc->aac_io_lock);
1071 	wakeup(sc->aac_dev);
1072 
1073 	aac_kthread_exit(0);
1074 }
1075 
1076 /*
1077  * Submit a command to the controller, return when it completes.
1078  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1079  *     be stuck here forever.  At the same time, signals are not caught
1080  *     because there is a risk that a signal could wakeup the sleep before
1081  *     the card has a chance to complete the command.  Since there is no way
1082  *     to cancel a command that is in progress, we can't protect against the
1083  *     card completing a command late and spamming the command and data
1084  *     memory.  So, we are held hostage until the command completes.
1085  */
1086 int
1087 aacraid_wait_command(struct aac_command *cm)
1088 {
1089 	struct aac_softc *sc;
1090 	int error;
1091 
1092 	sc = cm->cm_sc;
1093 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1094 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1095 
1096 	/* Put the command on the ready queue and get things going */
1097 	aac_enqueue_ready(cm);
1098 	aacraid_startio(sc);
1099 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1100 	return(error);
1101 }
1102 
1103 /*
1104  * Command Buffer Management
1105  */
1106 
1107 /*
1108  * Allocate a command.
1109  */
1110 int
1111 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1112 {
1113 	struct aac_command *cm;
1114 
1115 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1116 
1117 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1118 		if (sc->total_fibs < sc->aac_max_fibs) {
1119 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1120 			wakeup(sc->aifthread);
1121 		}
1122 		return (EBUSY);
1123 	}
1124 
1125 	*cmp = cm;
1126 	return(0);
1127 }
1128 
1129 /*
1130  * Release a command back to the freelist.
1131  */
1132 void
1133 aacraid_release_command(struct aac_command *cm)
1134 {
1135 	struct aac_event *event;
1136 	struct aac_softc *sc;
1137 
1138 	sc = cm->cm_sc;
1139 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1140 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1141 
1142 	/* (re)initialize the command/FIB */
1143 	cm->cm_sgtable = NULL;
1144 	cm->cm_flags = 0;
1145 	cm->cm_complete = NULL;
1146 	cm->cm_ccb = NULL;
1147 	cm->cm_passthr_dmat = 0;
1148 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1149 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1150 	cm->cm_fib->Header.Unused = 0;
1151 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1152 
1153 	/*
1154 	 * These are duplicated in aac_start to cover the case where an
1155 	 * intermediate stage may have destroyed them.  They're left
1156 	 * initialized here for debugging purposes only.
1157 	 */
1158 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1159 	cm->cm_fib->Header.Handle = 0;
1160 
1161 	aac_enqueue_free(cm);
1162 
1163 	/*
1164 	 * Dequeue all events so that there's no risk of events getting
1165 	 * stranded.
1166 	 */
1167 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1168 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1169 		event->ev_callback(sc, event, event->ev_arg);
1170 	}
1171 }
1172 
1173 /*
1174  * Map helper for command/FIB allocation.
1175  */
1176 static void
1177 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1178 {
1179 	uint64_t	*fibphys;
1180 
1181 	fibphys = (uint64_t *)arg;
1182 
1183 	*fibphys = segs[0].ds_addr;
1184 }
1185 
1186 /*
1187  * Allocate and initialize commands/FIBs for this adapter.
1188  */
1189 static int
1190 aac_alloc_commands(struct aac_softc *sc)
1191 {
1192 	struct aac_command *cm;
1193 	struct aac_fibmap *fm;
1194 	uint64_t fibphys;
1195 	int i, error;
1196 	u_int32_t maxsize;
1197 
1198 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1199 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1200 
1201 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1202 		return (ENOMEM);
1203 
1204 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1205 	if (fm == NULL)
1206 		return (ENOMEM);
1207 
1208 	mtx_unlock(&sc->aac_io_lock);
1209 	/* allocate the FIBs in DMAable memory and load them */
1210 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1211 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1212 		device_printf(sc->aac_dev,
1213 			      "Not enough contiguous memory available.\n");
1214 		free(fm, M_AACRAIDBUF);
1215 		mtx_lock(&sc->aac_io_lock);
1216 		return (ENOMEM);
1217 	}
1218 
1219 	maxsize = sc->aac_max_fib_size + 31;
1220 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1221 		maxsize += sizeof(struct aac_fib_xporthdr);
1222 	/* Ignore errors since this doesn't bounce */
1223 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1224 			      sc->aac_max_fibs_alloc * maxsize,
1225 			      aac_map_command_helper, &fibphys, 0);
1226 	mtx_lock(&sc->aac_io_lock);
1227 
1228 	/* initialize constant fields in the command structure */
1229 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1230 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1231 		cm = sc->aac_commands + sc->total_fibs;
1232 		fm->aac_commands = cm;
1233 		cm->cm_sc = sc;
1234 		cm->cm_fib = (struct aac_fib *)
1235 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1236 		cm->cm_fibphys = fibphys + i * maxsize;
1237 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1238 			u_int64_t fibphys_aligned;
1239 			fibphys_aligned =
1240 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1241 			cm->cm_fib = (struct aac_fib *)
1242 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1243 			cm->cm_fibphys = fibphys_aligned;
1244 		} else {
1245 			u_int64_t fibphys_aligned;
1246 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1247 			cm->cm_fib = (struct aac_fib *)
1248 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1249 			cm->cm_fibphys = fibphys_aligned;
1250 		}
1251 		cm->cm_index = sc->total_fibs;
1252 
1253 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1254 					       &cm->cm_datamap)) != 0)
1255 			break;
1256 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1257 			aacraid_release_command(cm);
1258 		sc->total_fibs++;
1259 	}
1260 
1261 	if (i > 0) {
1262 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1263 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1264 		return (0);
1265 	}
1266 
1267 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1268 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1269 	free(fm, M_AACRAIDBUF);
1270 	return (ENOMEM);
1271 }
1272 
1273 /*
1274  * Free FIBs owned by this adapter.
1275  */
1276 static void
1277 aac_free_commands(struct aac_softc *sc)
1278 {
1279 	struct aac_fibmap *fm;
1280 	struct aac_command *cm;
1281 	int i;
1282 
1283 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1284 
1285 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1286 
1287 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1288 		/*
1289 		 * We check against total_fibs to handle partially
1290 		 * allocated blocks.
1291 		 */
1292 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1293 			cm = fm->aac_commands + i;
1294 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1295 		}
1296 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1297 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1298 		free(fm, M_AACRAIDBUF);
1299 	}
1300 }
1301 
1302 /*
1303  * Command-mapping helper function - populate this command's s/g table.
1304  */
1305 void
1306 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1307 {
1308 	struct aac_softc *sc;
1309 	struct aac_command *cm;
1310 	struct aac_fib *fib;
1311 	int i;
1312 
1313 	cm = (struct aac_command *)arg;
1314 	sc = cm->cm_sc;
1315 	fib = cm->cm_fib;
1316 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1317 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1318 
1319 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1320 		return;
1321 
1322 	/* copy into the FIB */
1323 	if (cm->cm_sgtable != NULL) {
1324 		if (fib->Header.Command == RawIo2) {
1325 			struct aac_raw_io2 *raw;
1326 			struct aac_sge_ieee1212 *sg;
1327 			u_int32_t min_size = PAGE_SIZE, cur_size;
1328 			int conformable = TRUE;
1329 
1330 			raw = (struct aac_raw_io2 *)&fib->data[0];
1331 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1332 			raw->sgeCnt = nseg;
1333 
1334 			for (i = 0; i < nseg; i++) {
1335 				cur_size = segs[i].ds_len;
1336 				sg[i].addrHigh = 0;
1337 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1338 				sg[i].length = cur_size;
1339 				sg[i].flags = 0;
1340 				if (i == 0) {
1341 					raw->sgeFirstSize = cur_size;
1342 				} else if (i == 1) {
1343 					raw->sgeNominalSize = cur_size;
1344 					min_size = cur_size;
1345 				} else if ((i+1) < nseg &&
1346 					cur_size != raw->sgeNominalSize) {
1347 					conformable = FALSE;
1348 					if (cur_size < min_size)
1349 						min_size = cur_size;
1350 				}
1351 			}
1352 
1353 			/* not conformable: evaluate required sg elements */
1354 			if (!conformable) {
1355 				int j, err_found, nseg_new = nseg;
1356 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1357 					err_found = FALSE;
1358 					nseg_new = 2;
1359 					for (j = 1; j < nseg - 1; ++j) {
1360 						if (sg[j].length % (i*PAGE_SIZE)) {
1361 							err_found = TRUE;
1362 							break;
1363 						}
1364 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1365 					}
1366 					if (!err_found)
1367 						break;
1368 				}
1369 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1370 					!(sc->hint_flags & 4))
1371 					nseg = aac_convert_sgraw2(sc,
1372 						raw, i, nseg, nseg_new);
1373 			} else {
1374 				raw->flags |= RIO2_SGL_CONFORMANT;
1375 			}
1376 
1377 			/* update the FIB size for the s/g count */
1378 			fib->Header.Size += nseg *
1379 				sizeof(struct aac_sge_ieee1212);
1380 
1381 		} else if (fib->Header.Command == RawIo) {
1382 			struct aac_sg_tableraw *sg;
1383 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1384 			sg->SgCount = nseg;
1385 			for (i = 0; i < nseg; i++) {
1386 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1387 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1388 				sg->SgEntryRaw[i].Next = 0;
1389 				sg->SgEntryRaw[i].Prev = 0;
1390 				sg->SgEntryRaw[i].Flags = 0;
1391 			}
1392 			/* update the FIB size for the s/g count */
1393 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1394 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1395 			struct aac_sg_table *sg;
1396 			sg = cm->cm_sgtable;
1397 			sg->SgCount = nseg;
1398 			for (i = 0; i < nseg; i++) {
1399 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1400 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1401 			}
1402 			/* update the FIB size for the s/g count */
1403 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1404 		} else {
1405 			struct aac_sg_table64 *sg;
1406 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1407 			sg->SgCount = nseg;
1408 			for (i = 0; i < nseg; i++) {
1409 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1410 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1411 			}
1412 			/* update the FIB size for the s/g count */
1413 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1414 		}
1415 	}
1416 
1417 	/* Fix up the address values in the FIB.  Use the command array index
1418 	 * instead of a pointer since these fields are only 32 bits.  Shift
1419 	 * the SenderFibAddress over to make room for the fast response bit
1420 	 * and for the AIF bit
1421 	 */
1422 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1423 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1424 
1425 	/* save a pointer to the command for speedy reverse-lookup */
1426 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1427 
1428 	if (cm->cm_passthr_dmat == 0) {
1429 		if (cm->cm_flags & AAC_CMD_DATAIN)
1430 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1431 							BUS_DMASYNC_PREREAD);
1432 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1433 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1434 							BUS_DMASYNC_PREWRITE);
1435 	}
1436 
1437 	cm->cm_flags |= AAC_CMD_MAPPED;
1438 
1439 	if (cm->cm_flags & AAC_CMD_WAIT) {
1440 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1441 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1442 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1443 		u_int32_t wait = 0;
1444 		sc->aac_sync_cm = cm;
1445 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1446 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1447 	} else {
1448 		int count = 10000000L;
1449 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1450 			if (--count == 0) {
1451 				aac_unmap_command(cm);
1452 				sc->flags |= AAC_QUEUE_FRZN;
1453 				aac_requeue_ready(cm);
1454 			}
1455 			DELAY(5);			/* wait 5 usec. */
1456 		}
1457 	}
1458 }
1459 
1460 
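/*
 * Rebuild a non-conformant RAW2 scatter/gather list so that all middle
 * elements are uniform chunks of (pages * PAGE_SIZE) bytes, mark the list
 * RIO2_SGL_CONFORMANT and return the new element count (or the original
 * count if the temporary list cannot be allocated).
 */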
1461 static int
1462 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1463 				   int pages, int nseg, int nseg_new)
1464 {
1465 	struct aac_sge_ieee1212 *sge;
1466 	int i, j, pos;
1467 	u_int32_t addr_low;
1468 
1469 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1470 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1471 	if (sge == NULL)
1472 		return nseg;
1473 
1474 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1475 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1476 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1477 			sge[pos].addrLow = addr_low;
1478 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1479 			if (addr_low < raw->sge[i].addrLow)
1480 				sge[pos].addrHigh++;
1481 			sge[pos].length = pages * PAGE_SIZE;
1482 			sge[pos].flags = 0;
1483 			pos++;
1484 		}
1485 	}
1486 	sge[pos] = raw->sge[nseg-1];
1487 	for (i = 1; i < nseg_new; ++i)
1488 		raw->sge[i] = sge[i];
1489 
1490 	free(sge, M_AACRAIDBUF);
1491 	raw->sgeCnt = nseg_new;
1492 	raw->flags |= RIO2_SGL_CONFORMANT;
1493 	raw->sgeNominalSize = pages * PAGE_SIZE;
1494 	return nseg_new;
1495 }
1496 
1497 
1498 /*
1499  * Unmap a command from controller-visible space.
1500  */
1501 static void
1502 aac_unmap_command(struct aac_command *cm)
1503 {
1504 	struct aac_softc *sc;
1505 
1506 	sc = cm->cm_sc;
1507 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1508 
1509 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1510 		return;
1511 
1512 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1513 		if (cm->cm_flags & AAC_CMD_DATAIN)
1514 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1515 					BUS_DMASYNC_POSTREAD);
1516 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1517 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1518 					BUS_DMASYNC_POSTWRITE);
1519 
1520 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1521 	}
1522 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1523 }
1524 
1525 /*
1526  * Hardware Interface
1527  */
1528 
1529 /*
1530  * Map callback for the common area: record its bus address in the softc.
1531  */
1532 static void
1533 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1534 {
1535 	struct aac_softc *sc;
1536 
1537 	sc = (struct aac_softc *)arg;
1538 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1539 
1540 	sc->aac_common_busaddr = segs[0].ds_addr;
1541 }
1542 
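/*
 * Wait for the adapter to come ready, then query its firmware: supported
 * options, communication interface type and preferred FIB/sg/sector limits,
 * remapping the register window if the firmware asks for a larger one.
 */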
1543 static int
1544 aac_check_firmware(struct aac_softc *sc)
1545 {
1546 	u_int32_t code, major, minor, maxsize;
1547 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1548 	time_t then;
1549 
1550 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1551 
1552 	/* check if flash update is running */
1553 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1554 		then = time_uptime;
1555 		do {
1556 			code = AAC_GET_FWSTATUS(sc);
1557 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1558 				device_printf(sc->aac_dev,
1559 						  "FATAL: controller not coming ready, "
1560 						   "status %x\n", code);
1561 				return(ENXIO);
1562 			}
1563 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1564 		/*
1565 		 * Delay 10 seconds. The firmware is doing a soft reset right
1566 		 * now, so do not read the scratch pad register at this time.
1567 		 */
1568 		waitCount = 10 * 10000;
1569 		while (waitCount) {
1570 			DELAY(100);		/* delay 100 microseconds */
1571 			waitCount--;
1572 		}
1573 	}
1574 
1575 	/*
1576 	 * Wait for the adapter to come ready.
1577 	 */
1578 	then = time_uptime;
1579 	do {
1580 		code = AAC_GET_FWSTATUS(sc);
1581 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1582 			device_printf(sc->aac_dev,
1583 				      "FATAL: controller not coming ready, "
1584 					   "status %x\n", code);
1585 			return(ENXIO);
1586 		}
1587 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1588 
1589 	/*
1590 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1591 	 * firmware version 1.x are not compatible with this driver.
1592 	 */
1593 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1594 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1595 				     NULL, NULL)) {
1596 			device_printf(sc->aac_dev,
1597 				      "Error reading firmware version\n");
1598 			return (EIO);
1599 		}
1600 
1601 		/* These numbers are stored as ASCII! */
1602 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1603 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1604 		if (major == 1) {
1605 			device_printf(sc->aac_dev,
1606 			    "Firmware version %d.%d is not supported.\n",
1607 			    major, minor);
1608 			return (EINVAL);
1609 		}
1610 	}
1611 	/*
1612 	 * Retrieve the capabilities/supported options word so we know what
1613 	 * work-arounds to enable.  Some firmware revs don't support this
1614 	 * command.
1615 	 */
1616 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1617 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1618 			device_printf(sc->aac_dev,
1619 			     "RequestAdapterInfo failed\n");
1620 			return (EIO);
1621 		}
1622 	} else {
1623 		options = AAC_GET_MAILBOX(sc, 1);
1624 		atu_size = AAC_GET_MAILBOX(sc, 2);
1625 		sc->supported_options = options;
1626 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1627 
1628 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1629 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1630 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1631 		if (options & AAC_SUPPORTED_NONDASD)
1632 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1633 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1634 			&& (sizeof(bus_addr_t) > 4)
1635 			&& (sc->hint_flags & 0x1)) {
1636 			device_printf(sc->aac_dev,
1637 			    "Enabling 64-bit address support\n");
1638 			sc->flags |= AAC_FLAGS_SG_64BIT;
1639 		}
1640 		if (sc->aac_if.aif_send_command) {
1641 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1642 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1643 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1644 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1645 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1646 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1647 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1648 		}
1649 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1650 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1651 	}
1652 
1653 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1654 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1655 		return (ENXIO);
1656 	}
1657 
1658 	if (sc->hint_flags & 2) {
1659 		device_printf(sc->aac_dev,
1660 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1661 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1662 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1663 		device_printf(sc->aac_dev,
1664 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1665 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1666 	}
1667 
1668 	/* Check for broken hardware that does a lower number of commands */
1669 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1670 
1671 	/* Remap mem. resource, if required */
1672 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1673 		bus_release_resource(
1674 			sc->aac_dev, SYS_RES_MEMORY,
1675 			sc->aac_regs_rid0, sc->aac_regs_res0);
1676 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1677 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1678 			atu_size, RF_ACTIVE);
1679 		if (sc->aac_regs_res0 == NULL) {
1680 			sc->aac_regs_res0 = bus_alloc_resource_any(
1681 				sc->aac_dev, SYS_RES_MEMORY,
1682 				&sc->aac_regs_rid0, RF_ACTIVE);
1683 			if (sc->aac_regs_res0 == NULL) {
1684 				device_printf(sc->aac_dev,
1685 					"couldn't allocate register window\n");
1686 				return (ENXIO);
1687 			}
1688 		}
1689 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1690 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1691 	}
1692 
1693 	/* Read preferred settings */
1694 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1695 	sc->aac_max_sectors = 128;				/* 64KB */
1696 	sc->aac_max_aif = 1;
1697 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1698 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1699 		 - sizeof(struct aac_blockwrite64))
1700 		 / sizeof(struct aac_sg_entry64);
1701 	else
1702 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1703 		 - sizeof(struct aac_blockwrite))
1704 		 / sizeof(struct aac_sg_entry);
1705 
1706 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1707 		options = AAC_GET_MAILBOX(sc, 1);
1708 		sc->aac_max_fib_size = (options & 0xFFFF);
1709 		sc->aac_max_sectors = (options >> 16) << 1;
1710 		options = AAC_GET_MAILBOX(sc, 2);
1711 		sc->aac_sg_tablesize = (options >> 16);
1712 		options = AAC_GET_MAILBOX(sc, 3);
1713 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1714 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1715 			sc->aac_max_fibs = (options & 0xFFFF);
1716 		options = AAC_GET_MAILBOX(sc, 4);
1717 		sc->aac_max_aif = (options & 0xFFFF);
1718 		options = AAC_GET_MAILBOX(sc, 5);
1719 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1720 	}
1721 
1722 	maxsize = sc->aac_max_fib_size + 31;
1723 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1724 		maxsize += sizeof(struct aac_fib_xporthdr);
1725 	if (maxsize > PAGE_SIZE) {
1726 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1727 		maxsize = PAGE_SIZE;
1728 	}
1729 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1730 
1731 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1732 		sc->flags |= AAC_FLAGS_RAW_IO;
1733 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1734 	}
1735 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1736 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1737 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1738 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1739 	}
1740 
1741 #ifdef AACRAID_DEBUG
1742 	aacraid_get_fw_debug_buffer(sc);
1743 #endif
1744 	return (0);
1745 }
1746 
1747 static int
1748 aac_init(struct aac_softc *sc)
1749 {
1750 	struct aac_adapter_init	*ip;
1751 	int i, error;
1752 
1753 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1754 
1755 	/* reset rrq index */
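	/* each vector owns a slice of aac_vector_cap entries in the host RRQ */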
1756 	sc->aac_fibs_pushed_no = 0;
1757 	for (i = 0; i < sc->aac_max_msix; i++)
1758 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1759 
1760 	/*
1761 	 * Fill in the init structure.  This tells the adapter about the
1762 	 * physical location of various important shared data structures.
1763 	 */
1764 	ip = &sc->aac_common->ac_init;
1765 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1766 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1767 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1768 		sc->flags |= AAC_FLAGS_RAW_IO;
1769 	}
1770 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1771 
1772 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1773 					 offsetof(struct aac_common, ac_fibs);
1774 	ip->AdapterFibsVirtualAddress = 0;
1775 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1776 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1777 
1778 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1779 				  offsetof(struct aac_common, ac_printf);
1780 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1781 
1782 	/*
1783 	 * The adapter assumes that pages are 4K in size, except on some
1784 	 * broken firmware versions that do the page->byte conversion twice,
1785 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1786 	 * Round up since the granularity is so high.
1787 	 */
1788 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1789 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1790 		ip->HostPhysMemPages =
1791 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1792 	}
1793 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1794 
1795 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1796 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1797 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1798 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1799 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1800 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1801 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1802 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1803 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1804 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1805 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1806 	}
1807 	ip->MaxNumAif = sc->aac_max_aif;
1808 	ip->HostRRQ_AddrLow =
1809 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1810 	/* always 32-bit address */
1811 	ip->HostRRQ_AddrHigh = 0;
1812 
1813 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1814 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1815 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1816 		device_printf(sc->aac_dev, "Power Management enabled\n");
1817 	}
1818 
1819 	ip->MaxIoCommands = sc->aac_max_fibs;
1820 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1821 	ip->MaxFibSize = sc->aac_max_fib_size;
1822 
1823 	/*
1824 	 * Do controller-type-specific initialisation (clear the outbound doorbell)
1825 	 */
1826 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1827 
1828 	/*
1829 	 * Give the init structure to the controller.
1830 	 */
1831 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1832 			     sc->aac_common_busaddr +
1833 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1834 			     NULL, NULL)) {
1835 		device_printf(sc->aac_dev,
1836 			      "error establishing init structure\n");
1837 		error = EIO;
1838 		goto out;
1839 	}
1840 
1841 	/*
1842 	 * Check configuration issues
1843 	 */
1844 	if ((error = aac_check_config(sc)) != 0)
1845 		goto out;
1846 
1847 	error = 0;
1848 out:
1849 	return(error);
1850 }
1851 
1852 static void
1853 aac_define_int_mode(struct aac_softc *sc)
1854 {
1855 	device_t dev;
1856 	int cap, msi_count, error = 0;
1857 	uint32_t val;
1858 
1859 	dev = sc->aac_dev;
1860 
1861 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1862 		device_printf(dev, "using line interrupts\n");
1863 		sc->aac_max_msix = 1;
1864 		sc->aac_vector_cap = sc->aac_max_fibs;
1865 		return;
1866 	}
1867 
1868 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1869 	if (sc->aac_max_msix == 0) {
1870 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1871 			msi_count = 1;
1872 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1873 				device_printf(dev, "alloc msi failed - err=%d; "
1874 				    "will use INTx\n", error);
1875 				pci_release_msi(dev);
1876 			} else {
1877 				sc->msi_tupelo = TRUE;
1878 			}
1879 		}
1880 		if (sc->msi_tupelo)
1881 			device_printf(dev, "using MSI interrupts\n");
1882 		else
1883 			device_printf(dev, "using line interrupts\n");
1884 
1885 		sc->aac_max_msix = 1;
1886 		sc->aac_vector_cap = sc->aac_max_fibs;
1887 		return;
1888 	}
1889 
1890 	/* OS capability */
1891 	msi_count = pci_msix_count(dev);
1892 	if (msi_count > AAC_MAX_MSIX)
1893 		msi_count = AAC_MAX_MSIX;
1894 	if (msi_count > sc->aac_max_msix)
1895 		msi_count = sc->aac_max_msix;
1896 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1897 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1898 				   "will try MSI\n", msi_count, error);
1899 		pci_release_msi(dev);
1900 	} else {
1901 		sc->msi_enabled = TRUE;
1902 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1903 			msi_count);
1904 	}
1905 
1906 	if (!sc->msi_enabled) {
1907 		msi_count = 1;
1908 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1909 			device_printf(dev, "alloc msi failed - err=%d; "
1910 				           "will use INTx\n", error);
1911 			pci_release_msi(dev);
1912 		} else {
1913 			sc->msi_enabled = TRUE;
1914 			device_printf(dev, "using MSI interrupts\n");
1915 		}
1916 	}
1917 
1918 	if (sc->msi_enabled) {
1919 		/* now read controller capability from PCI config. space */
1920 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1921 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1922 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1923 			pci_release_msi(dev);
1924 			sc->msi_enabled = FALSE;
1925 		}
1926 	}
1927 
1928 	if (!sc->msi_enabled) {
1929 		device_printf(dev, "using legacy interrupts\n");
1930 		sc->aac_max_msix = 1;
1931 	} else {
1932 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1933 		if (sc->aac_max_msix > msi_count)
1934 			sc->aac_max_msix = msi_count;
1935 	}
1936 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1937 
1938 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1939 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1940 }
1941 
1942 static int
1943 aac_find_pci_capability(struct aac_softc *sc, int cap)
1944 {
1945 	device_t dev;
1946 	uint32_t status;
1947 	uint8_t ptr;
1948 
1949 	dev = sc->aac_dev;
1950 
1951 	status = pci_read_config(dev, PCIR_STATUS, 2);
1952 	if (!(status & PCIM_STATUS_CAPPRESENT))
1953 		return (0);
1954 
1955 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1956 	switch (status & PCIM_HDRTYPE) {
1957 	case 0:
1958 	case 1:
1959 		ptr = PCIR_CAP_PTR;
1960 		break;
1961 	case 2:
1962 		ptr = PCIR_CAP_PTR_2;
1963 		break;
1964 	default:
1965 		return (0);
1966 		break;
1967 	}
1968 	ptr = pci_read_config(dev, ptr, 1);
1969 
1970 	while (ptr != 0) {
1971 		int next, val;
1972 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1973 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1974 		if (val == cap)
1975 			return (ptr);
1976 		ptr = next;
1977 	}
1978 
1979 	return (0);
1980 }
1981 
1982 static int
1983 aac_setup_intr(struct aac_softc *sc)
1984 {
1985 	int i, msi_count, rid;
1986 	struct resource *res;
1987 	void *tag;
1988 
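	/* IRQ resource IDs start at 1 for MSI/MSI-X, at 0 for legacy INTx */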
1989 	msi_count = sc->aac_max_msix;
1990 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1991 
1992 	for (i = 0; i < msi_count; i++, rid++) {
1993 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1994 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1995 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1996 			return (EINVAL);
1997 		}
1998 		sc->aac_irq_rid[i] = rid;
1999 		sc->aac_irq[i] = res;
2000 		if (aac_bus_setup_intr(sc->aac_dev, res,
2001 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
2002 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
2003 			device_printf(sc->aac_dev, "can't set up interrupt\n");
2004 			return (EINVAL);
2005 		}
2006 		sc->aac_msix[i].vector_no = i;
2007 		sc->aac_msix[i].sc = sc;
2008 		sc->aac_intr[i] = tag;
2009 	}
2010 
2011 	return (0);
2012 }
2013 
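/*
 * Query the controller's configuration status and, if the reported action is
 * benign (<= CFACT_PAUSE), auto-commit the configuration.  Returns 0 on
 * success, -1 if the status query fails, -2 if the auto commit fails and -3
 * if the adapter considers the configuration too dangerous to auto-commit.
 */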
2014 static int
2015 aac_check_config(struct aac_softc *sc)
2016 {
2017 	struct aac_fib *fib;
2018 	struct aac_cnt_config *ccfg;
2019 	struct aac_cf_status_hdr *cf_shdr;
2020 	int rval;
2021 
2022 	mtx_lock(&sc->aac_io_lock);
2023 	aac_alloc_sync_fib(sc, &fib);
2024 
2025 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2026 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2027 	ccfg->Command = VM_ContainerConfig;
2028 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2029 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2030 
2031 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2032 		sizeof (struct aac_cnt_config));
2033 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2034 	if (rval == 0 && ccfg->Command == ST_OK &&
2035 		ccfg->CTCommand.param[0] == CT_OK) {
2036 		if (cf_shdr->action <= CFACT_PAUSE) {
2037 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2038 			ccfg->Command = VM_ContainerConfig;
2039 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2040 
2041 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2042 				sizeof (struct aac_cnt_config));
2043 			if (rval == 0 && ccfg->Command == ST_OK &&
2044 				ccfg->CTCommand.param[0] == CT_OK) {
2045 				/* successful completion */
2046 				rval = 0;
2047 			} else {
2048 				/* auto commit aborted due to error(s) */
2049 				rval = -2;
2050 			}
2051 		} else {
2052 			/* auto commit aborted due to adapter indicating
2053 			   config. issues too dangerous to auto commit  */
2054 			rval = -3;
2055 		}
2056 	} else {
2057 		/* error */
2058 		rval = -1;
2059 	}
2060 
2061 	aac_release_sync_fib(sc);
2062 	mtx_unlock(&sc->aac_io_lock);
2063 	return(rval);
2064 }
2065 
2066 /*
2067  * Send a synchronous command to the controller and wait for a result.
2068  * Indicate if the controller completed the command with an error status.
2069  */
2070 int
2071 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2072 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2073 		 u_int32_t *sp, u_int32_t *r1)
2074 {
2075 	time_t then;
2076 	u_int32_t status;
2077 
2078 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2079 
2080 	/* populate the mailbox */
2081 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2082 
2083 	/* ensure the sync command doorbell flag is cleared */
2084 	if (!sc->msi_enabled)
2085 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2086 
2087 	/* then set it to signal the adapter */
2088 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2089 
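	/*
	 * A SYNCFIB issued with a caller-supplied status of 0 is treated as
	 * fire-and-forget; every other command spins below until the adapter
	 * signals completion.
	 */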
2090 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2091 		/* spin waiting for the command to complete */
2092 		then = time_uptime;
2093 		do {
2094 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2095 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2096 				return(EIO);
2097 			}
2098 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2099 
2100 		/* clear the completion flag */
2101 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2102 
2103 		/* get the command status */
2104 		status = AAC_GET_MAILBOX(sc, 0);
2105 		if (sp != NULL)
2106 			*sp = status;
2107 
2108 		/* return parameter */
2109 		if (r1 != NULL)
2110 			*r1 = AAC_GET_MAILBOX(sc, 1);
2111 
2112 		if (status != AAC_SRB_STS_SUCCESS)
2113 			return (-1);
2114 	}
2115 	return(0);
2116 }
2117 
2118 static int
2119 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2120 		 struct aac_fib *fib, u_int16_t datasize)
2121 {
2122 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2123 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2124 
2125 	if (datasize > AAC_FIB_DATASIZE)
2126 		return(EINVAL);
2127 
2128 	/*
2129 	 * Set up the sync FIB
2130 	 */
2131 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2132 				AAC_FIBSTATE_INITIALISED |
2133 				AAC_FIBSTATE_EMPTY;
2134 	fib->Header.XferState |= xferstate;
2135 	fib->Header.Command = command;
2136 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2137 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2138 	fib->Header.SenderSize = sizeof(struct aac_fib);
2139 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2140 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2141 		offsetof(struct aac_common, ac_sync_fib);
2142 
2143 	/*
2144 	 * Give the FIB to the controller, wait for a response.
2145 	 */
2146 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2147 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2148 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2149 		return(EIO);
2150 	}
2151 
2152 	return (0);
2153 }
2154 
2155 /*
2156  * Check for commands that have been outstanding for a suspiciously long time,
2157  * and complain about them.
2158  */
2159 static void
2160 aac_timeout(struct aac_softc *sc)
2161 {
2162 	struct aac_command *cm;
2163 	time_t deadline;
2164 	int timedout;
2165 
2166 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2167 	/*
2168 	 * Traverse the busy command list, bitch about late commands once
2169 	 * only.
2170 	 */
2171 	timedout = 0;
2172 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2173 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2174 		if (cm->cm_timestamp < deadline) {
2175 			device_printf(sc->aac_dev,
2176 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2177 				      cm, (int)(time_uptime-cm->cm_timestamp));
2178 			AAC_PRINT_FIB(sc, cm->cm_fib);
2179 			timedout++;
2180 		}
2181 	}
2182 
2183 	if (timedout)
2184 		aac_reset_adapter(sc);
2185 	aacraid_print_queues(sc);
2186 }
2187 
2188 /*
2189  * Interface Function Vectors
2190  */
2191 
2192 /*
2193  * Read the current firmware status word.
2194  */
2195 static int
2196 aac_src_get_fwstatus(struct aac_softc *sc)
2197 {
2198 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2199 
2200 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2201 }
2202 
2203 /*
2204  * Notify the controller of a change in a given queue
2205  */
2206 static void
2207 aac_src_qnotify(struct aac_softc *sc, int qbit)
2208 {
2209 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2210 
2211 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2212 }
2213 
2214 /*
2215  * Get the interrupt reason bits
2216  */
2217 static int
2218 aac_src_get_istatus(struct aac_softc *sc)
2219 {
2220 	int val;
2221 
2222 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2223 
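	/*
	 * In MSI mode only the sync command status is visible through the
	 * MSI doorbell; in INTx mode the full outbound doorbell is returned.
	 */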
2224 	if (sc->msi_enabled) {
2225 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2226 		if (val & AAC_MSI_SYNC_STATUS)
2227 			val = AAC_DB_SYNC_COMMAND;
2228 		else
2229 			val = 0;
2230 	} else {
2231 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2232 	}
2233 	return(val);
2234 }
2235 
2236 /*
2237  * Clear some interrupt reason bits
2238  */
2239 static void
2240 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2241 {
2242 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2243 
2244 	if (sc->msi_enabled) {
2245 		if (mask == AAC_DB_SYNC_COMMAND)
2246 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2247 	} else {
2248 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2249 	}
2250 }
2251 
2252 /*
2253  * Populate the mailbox and set the command word
2254  */
2255 static void
2256 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2257 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2258 {
2259 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2260 
2261 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2262 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2263 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2264 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2265 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2266 }
2267 
2268 static void
2269 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2270 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2271 {
2272 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2273 
2274 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2275 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2276 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2277 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2278 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2279 }
2280 
2281 /*
2282  * Fetch the immediate command status word
2283  */
2284 static int
2285 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2286 {
2287 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2288 
2289 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2290 }
2291 
2292 static int
2293 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2294 {
2295 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2296 
2297 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2298 }
2299 
2300 /*
2301  * Set/clear interrupt masks
2302  */
2303 static void
2304 aac_src_access_devreg(struct aac_softc *sc, int mode)
2305 {
2306 	u_int32_t val;
2307 
2308 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2309 
2310 	switch (mode) {
2311 	case AAC_ENABLE_INTERRUPT:
2312 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2313 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2314 				           AAC_INT_ENABLE_TYPE1_INTX));
2315 		break;
2316 
2317 	case AAC_DISABLE_INTERRUPT:
2318 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2319 		break;
2320 
2321 	case AAC_ENABLE_MSIX:
2322 		/* set bit 6 */
2323 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2324 		val |= 0x40;
2325 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2326 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2327 		/* unmask int. */
2328 		val = PMC_ALL_INTERRUPT_BITS;
2329 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2330 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2331 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2332 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2333 		break;
2334 
2335 	case AAC_DISABLE_MSIX:
2336 		/* reset bit 6 */
2337 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2338 		val &= ~0x40;
2339 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2340 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2341 		break;
2342 
2343 	case AAC_CLEAR_AIF_BIT:
2344 		/* set bit 5 */
2345 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2346 		val |= 0x20;
2347 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2348 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2349 		break;
2350 
2351 	case AAC_CLEAR_SYNC_BIT:
2352 		/* set bit 4 */
2353 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2354 		val |= 0x10;
2355 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2356 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2357 		break;
2358 
2359 	case AAC_ENABLE_INTX:
2360 		/* set bit 7 */
2361 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2362 		val |= 0x80;
2363 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2364 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2365 		/* unmask int. */
2366 		val = PMC_ALL_INTERRUPT_BITS;
2367 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2368 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2369 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2370 			val & (~(PMC_GLOBAL_INT_BIT2)));
2371 		break;
2372 
2373 	default:
2374 		break;
2375 	}
2376 }
2377 
2378 /*
2379  * New comm. interface: Send command functions
2380  */
2381 static int
2382 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2383 {
2384 	struct aac_fib_xporthdr *pFibX;
2385 	u_int32_t fibsize, high_addr;
2386 	u_int64_t address;
2387 
2388 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2389 
2390 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2391 		sc->aac_max_msix > 1) {
2392 		u_int16_t vector_no, first_choice = 0xffff;
2393 
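		/*
		 * Round-robin over the MSI-X vectors, skipping vector 0
		 * (the fallback, also used implicitly by AIF FIBs, which
		 * bypass this selection): pick the next vector whose
		 * outstanding count is below the per-vector cap.  The chosen
		 * vector is encoded in the upper 16 bits of the FIB handle.
		 */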
2394 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2395 		do {
2396 			vector_no += 1;
2397 			if (vector_no == sc->aac_max_msix)
2398 				vector_no = 1;
2399 			if (sc->aac_rrq_outstanding[vector_no] <
2400 				sc->aac_vector_cap)
2401 				break;
2402 			if (0xffff == first_choice)
2403 				first_choice = vector_no;
2404 			else if (vector_no == first_choice)
2405 				break;
2406 		} while (1);
2407 		if (vector_no == first_choice)
2408 			vector_no = 0;
2409 		sc->aac_rrq_outstanding[vector_no]++;
2410 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2411 			sc->aac_fibs_pushed_no = 0;
2412 		else
2413 			sc->aac_fibs_pushed_no++;
2414 
2415 		cm->cm_fib->Header.Handle += (vector_no << 16);
2416 	}
2417 
2418 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2419 		/* Encode the FIB size in 128-byte units, minus one, for the fibsize bits */
2420 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2421 		/* Fill new FIB header */
2422 		address = cm->cm_fibphys;
2423 		high_addr = (u_int32_t)(address >> 32);
2424 		if (high_addr == 0L) {
2425 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2426 			cm->cm_fib->Header.u.TimeStamp = 0L;
2427 		} else {
2428 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2429 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2430 		}
2431 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2432 	} else {
2433 		/* Encode the FIB plus transport header size in 128-byte units, minus one */
2434 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2435 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2436 		/* Fill XPORT header */
2437 		pFibX = (struct aac_fib_xporthdr *)
2438 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2439 		pFibX->Handle = cm->cm_fib->Header.Handle;
2440 		pFibX->HostAddress = cm->cm_fibphys;
2441 		pFibX->Size = cm->cm_fib->Header.Size;
2442 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2443 		high_addr = (u_int32_t)(address >> 32);
2444 	}
2445 
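	/*
	 * The encoded size is capped at 31 and added to the FIB physical
	 * address before the result is written to the inbound queue register.
	 */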
2446 	if (fibsize > 31)
2447 		fibsize = 31;
2448 	aac_enqueue_busy(cm);
2449 	if (high_addr) {
2450 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2451 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2452 	} else {
2453 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2454 	}
2455 	return 0;
2456 }
2457 
2458 /*
2459  * New comm. interface: get, set outbound queue index
2460  */
2461 static int
2462 aac_src_get_outb_queue(struct aac_softc *sc)
2463 {
2464 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2465 
2466 	return(-1);
2467 }
2468 
2469 static void
2470 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2471 {
2472 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2473 }
2474 
2475 /*
2476  * Debugging and Diagnostics
2477  */
2478 
2479 /*
2480  * Print some information about the controller.
2481  */
2482 static void
2483 aac_describe_controller(struct aac_softc *sc)
2484 {
2485 	struct aac_fib *fib;
2486 	struct aac_adapter_info	*info;
2487 	char *adapter_type = "Adaptec RAID controller";
2488 
2489 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2490 
2491 	mtx_lock(&sc->aac_io_lock);
2492 	aac_alloc_sync_fib(sc, &fib);
2493 
2494 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2495 		fib->data[0] = 0;
2496 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2497 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2498 		else {
2499 			struct aac_supplement_adapter_info *supp_info;
2500 
2501 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2502 			adapter_type = (char *)supp_info->AdapterTypeText;
2503 			sc->aac_feature_bits = supp_info->FeatureBits;
2504 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2505 		}
2506 	}
2507 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2508 		adapter_type,
2509 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2510 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2511 
2512 	fib->data[0] = 0;
2513 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2514 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2515 		aac_release_sync_fib(sc);
2516 		mtx_unlock(&sc->aac_io_lock);
2517 		return;
2518 	}
2519 
2520 	/* save the kernel revision structure for later use */
2521 	info = (struct aac_adapter_info *)&fib->data[0];
2522 	sc->aac_revision = info->KernelRevision;
2523 
2524 	if (bootverbose) {
2525 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2526 		    "(%dMB cache, %dMB execution), %s\n",
2527 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2528 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2529 		    info->BufferMem / (1024 * 1024),
2530 		    info->ExecutionMem / (1024 * 1024),
2531 		    aac_describe_code(aac_battery_platform,
2532 		    info->batteryPlatform));
2533 
2534 		device_printf(sc->aac_dev,
2535 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2536 		    info->KernelRevision.external.comp.major,
2537 		    info->KernelRevision.external.comp.minor,
2538 		    info->KernelRevision.external.comp.dash,
2539 		    info->KernelRevision.buildNumber,
2540 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2541 
2542 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2543 			      sc->supported_options,
2544 			      "\20"
2545 			      "\1SNAPSHOT"
2546 			      "\2CLUSTERS"
2547 			      "\3WCACHE"
2548 			      "\4DATA64"
2549 			      "\5HOSTTIME"
2550 			      "\6RAID50"
2551 			      "\7WINDOW4GB"
2552 			      "\10SCSIUPGD"
2553 			      "\11SOFTERR"
2554 			      "\12NORECOND"
2555 			      "\13SGMAP64"
2556 			      "\14ALARM"
2557 			      "\15NONDASD"
2558 			      "\16SCSIMGT"
2559 			      "\17RAIDSCSI"
2560 			      "\21ADPTINFO"
2561 			      "\22NEWCOMM"
2562 			      "\23ARRAY64BIT"
2563 			      "\24HEATSENSOR");
2564 	}
2565 
2566 	aac_release_sync_fib(sc);
2567 	mtx_unlock(&sc->aac_io_lock);
2568 }
2569 
2570 /*
2571  * Look up a text description of a numeric error code and return a pointer to
2572  * same.
2573  */
2574 static char *
2575 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2576 {
2577 	int i;
2578 
2579 	for (i = 0; table[i].string != NULL; i++)
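	/*
	 * Tables are expected to end with a NULL-string sentinel followed by
	 * a catch-all entry, which is returned when no code matches.
	 */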
2580 		if (table[i].code == code)
2581 			return(table[i].string);
2582 	return(table[i + 1].string);
2583 }
2584 
2585 /*
2586  * Management Interface
2587  */
2588 
2589 static int
2590 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2591 {
2592 	struct aac_softc *sc;
2593 
2594 	sc = dev->si_drv1;
2595 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2596 #if __FreeBSD_version >= 702000
2597 	device_busy(sc->aac_dev);
2598 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2599 #endif
2600 	return 0;
2601 }
2602 
2603 static int
2604 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2605 {
2606 	union aac_statrequest *as;
2607 	struct aac_softc *sc;
2608 	int error = 0;
2609 
2610 	as = (union aac_statrequest *)arg;
2611 	sc = dev->si_drv1;
2612 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2613 
2614 	switch (cmd) {
2615 	case AACIO_STATS:
2616 		switch (as->as_item) {
2617 		case AACQ_FREE:
2618 		case AACQ_READY:
2619 		case AACQ_BUSY:
2620 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2621 			      sizeof(struct aac_qstat));
2622 			break;
2623 		default:
2624 			error = ENOENT;
2625 			break;
2626 		}
2627 	break;
2628 
2629 	case FSACTL_SENDFIB:
2630 	case FSACTL_SEND_LARGE_FIB:
2631 		arg = *(caddr_t*)arg;
2632 	case FSACTL_LNX_SENDFIB:
2633 	case FSACTL_LNX_SEND_LARGE_FIB:
2634 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2635 		error = aac_ioctl_sendfib(sc, arg);
2636 		break;
2637 	case FSACTL_SEND_RAW_SRB:
2638 		arg = *(caddr_t*)arg;
2639 	case FSACTL_LNX_SEND_RAW_SRB:
2640 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2641 		error = aac_ioctl_send_raw_srb(sc, arg);
2642 		break;
2643 	case FSACTL_AIF_THREAD:
2644 	case FSACTL_LNX_AIF_THREAD:
2645 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2646 		error = EINVAL;
2647 		break;
2648 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2649 		arg = *(caddr_t*)arg;
2650 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2651 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2652 		error = aac_open_aif(sc, arg);
2653 		break;
2654 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2655 		arg = *(caddr_t*)arg;
2656 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2657 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2658 		error = aac_getnext_aif(sc, arg);
2659 		break;
2660 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2661 		arg = *(caddr_t*)arg;
2662 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2663 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2664 		error = aac_close_aif(sc, arg);
2665 		break;
2666 	case FSACTL_MINIPORT_REV_CHECK:
2667 		arg = *(caddr_t*)arg;
2668 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2669 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2670 		error = aac_rev_check(sc, arg);
2671 		break;
2672 	case FSACTL_QUERY_DISK:
2673 		arg = *(caddr_t*)arg;
2674 	case FSACTL_LNX_QUERY_DISK:
2675 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2676 		error = aac_query_disk(sc, arg);
2677 		break;
2678 	case FSACTL_DELETE_DISK:
2679 	case FSACTL_LNX_DELETE_DISK:
2680 		/*
2681 		 * We don't trust userland to tell us when to delete a
2682 		 * container, rather we rely on an AIF coming from the
2683 		 * controller
2684 		 */
2685 		error = 0;
2686 		break;
2687 	case FSACTL_GET_PCI_INFO:
2688 		arg = *(caddr_t*)arg;
2689 	case FSACTL_LNX_GET_PCI_INFO:
2690 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2691 		error = aac_get_pci_info(sc, arg);
2692 		break;
2693 	case FSACTL_GET_FEATURES:
2694 		arg = *(caddr_t*)arg;
2695 	case FSACTL_LNX_GET_FEATURES:
2696 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2697 		error = aac_supported_features(sc, arg);
2698 		break;
2699 	default:
2700 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2701 		error = EINVAL;
2702 		break;
2703 	}
2704 	return(error);
2705 }
2706 
2707 static int
2708 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2709 {
2710 	struct aac_softc *sc;
2711 	struct aac_fib_context *ctx;
2712 	int revents;
2713 
2714 	sc = dev->si_drv1;
2715 	revents = 0;
2716 
2717 	mtx_lock(&sc->aac_io_lock);
2718 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2719 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2720 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2721 				revents |= poll_events & (POLLIN | POLLRDNORM);
2722 				break;
2723 			}
2724 		}
2725 	}
2726 	mtx_unlock(&sc->aac_io_lock);
2727 
2728 	if (revents == 0) {
2729 		if (poll_events & (POLLIN | POLLRDNORM))
2730 			selrecord(td, &sc->rcv_select);
2731 	}
2732 
2733 	return (revents);
2734 }
2735 
2736 static void
2737 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2738 {
2739 
2740 	switch (event->ev_type) {
2741 	case AAC_EVENT_CMFREE:
2742 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2743 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2744 			aacraid_add_event(sc, event);
2745 			return;
2746 		}
2747 		free(event, M_AACRAIDBUF);
2748 		wakeup(arg);
2749 		break;
2750 	default:
2751 		break;
2752 	}
2753 }
2754 
2755 /*
2756  * Send a FIB supplied from userspace
2757  */
2758 static int
2759 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2760 {
2761 	struct aac_command *cm;
2762 	int size, error;
2763 
2764 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2765 
2766 	cm = NULL;
2767 
2768 	/*
2769 	 * Get a command
2770 	 */
2771 	mtx_lock(&sc->aac_io_lock);
2772 	if (aacraid_alloc_command(sc, &cm)) {
2773 		struct aac_event *event;
2774 
2775 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2776 		    M_NOWAIT | M_ZERO);
2777 		if (event == NULL) {
2778 			error = EBUSY;
2779 			mtx_unlock(&sc->aac_io_lock);
2780 			goto out;
2781 		}
2782 		event->ev_type = AAC_EVENT_CMFREE;
2783 		event->ev_callback = aac_ioctl_event;
2784 		event->ev_arg = &cm;
2785 		aacraid_add_event(sc, event);
2786 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2787 	}
2788 	mtx_unlock(&sc->aac_io_lock);
2789 
2790 	/*
2791 	 * Fetch the FIB header, then re-copy to get data as well.
2792 	 */
2793 	if ((error = copyin(ufib, cm->cm_fib,
2794 			    sizeof(struct aac_fib_header))) != 0)
2795 		goto out;
2796 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2797 	if (size > sc->aac_max_fib_size) {
2798 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2799 			      size, sc->aac_max_fib_size);
2800 		size = sc->aac_max_fib_size;
2801 	}
2802 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2803 		goto out;
2804 	cm->cm_fib->Header.Size = size;
2805 	cm->cm_timestamp = time_uptime;
2806 	cm->cm_datalen = 0;
2807 
2808 	/*
2809 	 * Pass the FIB to the controller, wait for it to complete.
2810 	 */
2811 	mtx_lock(&sc->aac_io_lock);
2812 	error = aacraid_wait_command(cm);
2813 	mtx_unlock(&sc->aac_io_lock);
2814 	if (error != 0) {
2815 		device_printf(sc->aac_dev,
2816 			      "aacraid_wait_command return %d\n", error);
2817 		goto out;
2818 	}
2819 
2820 	/*
2821 	 * Copy the FIB and data back out to the caller.
2822 	 */
2823 	size = cm->cm_fib->Header.Size;
2824 	if (size > sc->aac_max_fib_size) {
2825 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2826 			      size, sc->aac_max_fib_size);
2827 		size = sc->aac_max_fib_size;
2828 	}
2829 	error = copyout(cm->cm_fib, ufib, size);
2830 
2831 out:
2832 	if (cm != NULL) {
2833 		mtx_lock(&sc->aac_io_lock);
2834 		aacraid_release_command(cm);
2835 		mtx_unlock(&sc->aac_io_lock);
2836 	}
2837 	return(error);
2838 }
2839 
2840 /*
2841  * Send a passthrough FIB supplied from userspace
2842  */
2843 static int
2844 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2845 {
2846 	struct aac_command *cm;
2847 	struct aac_fib *fib;
2848 	struct aac_srb *srbcmd;
2849 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2850 	void *user_reply;
2851 	int error, transfer_data = 0;
2852 	bus_dmamap_t orig_map = 0;
2853 	u_int32_t fibsize = 0;
2854 	u_int64_t srb_sg_address;
2855 	u_int32_t srb_sg_bytecount;
2856 
2857 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2858 
2859 	cm = NULL;
2860 
2861 	mtx_lock(&sc->aac_io_lock);
2862 	if (aacraid_alloc_command(sc, &cm)) {
2863 		struct aac_event *event;
2864 
2865 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2866 		    M_NOWAIT | M_ZERO);
2867 		if (event == NULL) {
2868 			error = EBUSY;
2869 			mtx_unlock(&sc->aac_io_lock);
2870 			goto out;
2871 		}
2872 		event->ev_type = AAC_EVENT_CMFREE;
2873 		event->ev_callback = aac_ioctl_event;
2874 		event->ev_arg = &cm;
2875 		aacraid_add_event(sc, event);
2876 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2877 	}
2878 	mtx_unlock(&sc->aac_io_lock);
2879 
2880 	cm->cm_data = NULL;
2881 	/* save original dma map */
2882 	orig_map = cm->cm_datamap;
2883 
2884 	fib = cm->cm_fib;
2885 	srbcmd = (struct aac_srb *)fib->data;
2886 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2887 	    sizeof (u_int32_t))) != 0)
2888 		goto out;
2889 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2890 		error = EINVAL;
2891 		goto out;
2892 	}
2893 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2894 		goto out;
2895 
2896 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2897 	srbcmd->retry_limit = 0;	/* obsolete */
2898 
2899 	/* only one sg element from userspace supported */
2900 	if (srbcmd->sg_map.SgCount > 1) {
2901 		error = EINVAL;
2902 		goto out;
2903 	}
2904 	/* the fibsize tells us whether the single SG entry is 32- or 64-bit */
2905 	if (fibsize == (sizeof(struct aac_srb) +
2906 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2907 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2908 		struct aac_sg_entry sg;
2909 
2910 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2911 			goto out;
2912 
2913 		srb_sg_bytecount = sg.SgByteCount;
2914 		srb_sg_address = (u_int64_t)sg.SgAddress;
2915 	} else if (fibsize == (sizeof(struct aac_srb) +
2916 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2917 #ifdef __LP64__
2918 		struct aac_sg_entry64 *sgp =
2919 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2920 		struct aac_sg_entry64 sg;
2921 
2922 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2923 			goto out;
2924 
2925 		srb_sg_bytecount = sg.SgByteCount;
2926 		srb_sg_address = sg.SgAddress;
2927 #else
2928 		error = EINVAL;
2929 		goto out;
2930 #endif
2931 	} else {
2932 		error = EINVAL;
2933 		goto out;
2934 	}
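	/* the user's reply buffer immediately follows the SRB and its SG list */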
2935 	user_reply = (char *)arg + fibsize;
2936 	srbcmd->data_len = srb_sg_bytecount;
2937 	if (srbcmd->sg_map.SgCount == 1)
2938 		transfer_data = 1;
2939 
2940 	if (transfer_data) {
2941 		/*
2942 		 * Create DMA tag for the passthr. data buffer and allocate it.
2943 		 */
2944 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2945 			1, 0,			/* algnmnt, boundary */
2946 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2947 			BUS_SPACE_MAXADDR_32BIT :
2948 			0x7fffffff,		/* lowaddr */
2949 			BUS_SPACE_MAXADDR, 	/* highaddr */
2950 			NULL, NULL, 		/* filter, filterarg */
2951 			srb_sg_bytecount, 	/* size */
2952 			sc->aac_sg_tablesize,	/* nsegments */
2953 			srb_sg_bytecount, 	/* maxsegsize */
2954 			0,			/* flags */
2955 			NULL, NULL,		/* No locking needed */
2956 			&cm->cm_passthr_dmat)) {
2957 			error = ENOMEM;
2958 			goto out;
2959 		}
2960 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2961 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2962 			error = ENOMEM;
2963 			goto out;
2964 		}
2965 		/* fill some cm variables */
2966 		cm->cm_datalen = srb_sg_bytecount;
2967 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2968 			cm->cm_flags |= AAC_CMD_DATAIN;
2969 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2970 			cm->cm_flags |= AAC_CMD_DATAOUT;
2971 
2972 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2973 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2974 				cm->cm_data, cm->cm_datalen)) != 0)
2975 				goto out;
2976 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2977 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2978 				BUS_DMASYNC_PREWRITE);
2979 		}
2980 	}
2981 
2982 	/* build the FIB */
2983 	fib->Header.Size = sizeof(struct aac_fib_header) +
2984 		sizeof(struct aac_srb);
2985 	fib->Header.XferState =
2986 		AAC_FIBSTATE_HOSTOWNED   |
2987 		AAC_FIBSTATE_INITIALISED |
2988 		AAC_FIBSTATE_EMPTY	 |
2989 		AAC_FIBSTATE_FROMHOST	 |
2990 		AAC_FIBSTATE_REXPECTED   |
2991 		AAC_FIBSTATE_NORM	 |
2992 		AAC_FIBSTATE_ASYNC;
2993 
2994 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2995 		ScsiPortCommandU64 : ScsiPortCommand;
2996 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2997 
2998 	/* send command */
2999 	if (transfer_data) {
3000 		bus_dmamap_load(cm->cm_passthr_dmat,
3001 			cm->cm_datamap, cm->cm_data,
3002 			cm->cm_datalen,
3003 			aacraid_map_command_sg, cm, 0);
3004 	} else {
3005 		aacraid_map_command_sg(cm, NULL, 0, 0);
3006 	}
3007 
3008 	/* wait for completion */
3009 	mtx_lock(&sc->aac_io_lock);
3010 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3011 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3012 	mtx_unlock(&sc->aac_io_lock);
3013 
3014 	/* copy data */
3015 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
3016 		if ((error = copyout(cm->cm_data,
3017 			(void *)(uintptr_t)srb_sg_address,
3018 			cm->cm_datalen)) != 0)
3019 			goto out;
3020 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3021 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3022 				BUS_DMASYNC_POSTREAD);
3023 	}
3024 
3025 	/* status */
3026 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3027 
3028 out:
3029 	if (cm && cm->cm_data) {
3030 		if (transfer_data)
3031 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3032 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3033 		cm->cm_datamap = orig_map;
3034 	}
3035 	if (cm && cm->cm_passthr_dmat)
3036 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3037 	if (cm) {
3038 		mtx_lock(&sc->aac_io_lock);
3039 		aacraid_release_command(cm);
3040 		mtx_unlock(&sc->aac_io_lock);
3041 	}
3042 	return(error);
3043 }
3044 
3045 /*
3046  * Request an AIF from the controller (new comm. type1)
3047  */
3048 static void
3049 aac_request_aif(struct aac_softc *sc)
3050 {
3051 	struct aac_command *cm;
3052 	struct aac_fib *fib;
3053 
3054 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3055 
3056 	if (aacraid_alloc_command(sc, &cm)) {
3057 		sc->aif_pending = 1;
3058 		return;
3059 	}
3060 	sc->aif_pending = 0;
3061 
3062 	/* build the FIB */
3063 	fib = cm->cm_fib;
3064 	fib->Header.Size = sizeof(struct aac_fib);
3065 	fib->Header.XferState =
3066 		AAC_FIBSTATE_HOSTOWNED   |
3067 		AAC_FIBSTATE_INITIALISED |
3068 		AAC_FIBSTATE_EMPTY	 |
3069 		AAC_FIBSTATE_FROMHOST	 |
3070 		AAC_FIBSTATE_REXPECTED   |
3071 		AAC_FIBSTATE_NORM	 |
3072 		AAC_FIBSTATE_ASYNC;
3073 	/* set AIF marker */
3074 	fib->Header.Handle = 0x00800000;
3075 	fib->Header.Command = AifRequest;
3076 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3077 
3078 	aacraid_map_command_sg(cm, NULL, 0, 0);
3079 }
3080 
3081 
3082 #if __FreeBSD_version >= 702000
3083 /*
3084  * cdevpriv interface private destructor.
3085  */
3086 static void
3087 aac_cdevpriv_dtor(void *arg)
3088 {
3089 	struct aac_softc *sc;
3090 
3091 	sc = arg;
3092 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3093 	mtx_lock(&Giant);
3094 	device_unbusy(sc->aac_dev);
3095 	mtx_unlock(&Giant);
3096 }
3097 #else
3098 static int
3099 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3100 {
3101 	struct aac_softc *sc;
3102 
3103 	sc = dev->si_drv1;
3104 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3105 	return 0;
3106 }
3107 #endif
3108 
3109 /*
3110  * Handle an AIF sent to us by the controller; queue it for later reference.
3111  * If the queue fills up, then drop the older entries.
3112  */
3113 static void
3114 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3115 {
3116 	struct aac_aif_command *aif;
3117 	struct aac_container *co, *co_next;
3118 	struct aac_fib_context *ctx;
3119 	struct aac_fib *sync_fib;
3120 	struct aac_mntinforesp mir;
3121 	int next, current, found;
3122 	int count = 0, changed = 0, i = 0;
3123 	u_int32_t channel, uid;
3124 
3125 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3126 
3127 	aif = (struct aac_aif_command*)&fib->data[0];
3128 	aacraid_print_aif(sc, aif);
3129 
3130 	/* Is it an event that we should care about? */
3131 	switch (aif->command) {
3132 	case AifCmdEventNotify:
3133 		switch (aif->data.EN.type) {
3134 		case AifEnAddContainer:
3135 		case AifEnDeleteContainer:
3136 			/*
3137 			 * A container was added or deleted, but the message
3138 			 * doesn't tell us anything else!  Re-enumerate the
3139 			 * containers and sort things out.
3140 			 */
3141 			aac_alloc_sync_fib(sc, &sync_fib);
3142 			do {
3143 				/*
3144 				 * Ask the controller for its containers one at
3145 				 * a time.
3146 				 * XXX What if the controller's list changes
3147 				 * midway through this enumeration?
3148 				 * XXX This should be done async.
3149 				 */
3150 				if (aac_get_container_info(sc, sync_fib, i,
3151 					&mir, &uid) != 0)
3152 					continue;
3153 				if (i == 0)
3154 					count = mir.MntRespCount;
3155 				/*
3156 				 * Check the container against our list.
3157 				 * co->co_found was already set to 0 in a
3158 				 * previous run.
3159 				 */
3160 				if ((mir.Status == ST_OK) &&
3161 				    (mir.MntTable[0].VolType != CT_NONE)) {
3162 					found = 0;
3163 					TAILQ_FOREACH(co,
3164 						      &sc->aac_container_tqh,
3165 						      co_link) {
3166 						if (co->co_mntobj.ObjectId ==
3167 						    mir.MntTable[0].ObjectId) {
3168 							co->co_found = 1;
3169 							found = 1;
3170 							break;
3171 						}
3172 					}
3173 					/*
3174 					 * If the container matched, continue
3175 					 * in the list.
3176 					 */
3177 					if (found) {
3178 						i++;
3179 						continue;
3180 					}
3181 
3182 					/*
3183 					 * This is a new container.  Do all the
3184 					 * appropriate things to set it up.
3185 					 */
3186 					aac_add_container(sc, &mir, 1, uid);
3187 					changed = 1;
3188 				}
3189 				i++;
3190 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3191 			aac_release_sync_fib(sc);
3192 
3193 			/*
3194 			 * Go through our list of containers and see which ones
3195 			 * were not marked 'found'.  Since the controller didn't
3196 			 * list them they must have been deleted.  Do the
3197 			 * appropriate steps to destroy the device.  Also reset
3198 			 * the co->co_found field.
3199 			 */
3200 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3201 			while (co != NULL) {
3202 				if (co->co_found == 0) {
3203 					co_next = TAILQ_NEXT(co, co_link);
3204 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3205 						     co_link);
3206 					free(co, M_AACRAIDBUF);
3207 					changed = 1;
3208 					co = co_next;
3209 				} else {
3210 					co->co_found = 0;
3211 					co = TAILQ_NEXT(co, co_link);
3212 				}
3213 			}
3214 
3215 			/* Attach the newly created containers */
3216 			if (changed) {
3217 				if (sc->cam_rescan_cb != NULL)
3218 					sc->cam_rescan_cb(sc, 0,
3219 				    	AAC_CAM_TARGET_WILDCARD);
3220 			}
3221 
3222 			break;
3223 
3224 		case AifEnEnclosureManagement:
3225 			switch (aif->data.EN.data.EEE.eventType) {
3226 			case AIF_EM_DRIVE_INSERTION:
3227 			case AIF_EM_DRIVE_REMOVAL:
3228 				channel = aif->data.EN.data.EEE.unitID;
3229 				if (sc->cam_rescan_cb != NULL)
3230 					sc->cam_rescan_cb(sc,
3231 					    ((channel>>24) & 0xF) + 1,
3232 					    (channel & 0xFFFF));
3233 				break;
3234 			}
3235 			break;
3236 
3237 		case AifEnAddJBOD:
3238 		case AifEnDeleteJBOD:
3239 		case AifRawDeviceRemove:
3240 			channel = aif->data.EN.data.ECE.container;
3241 			if (sc->cam_rescan_cb != NULL)
3242 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3243 				    AAC_CAM_TARGET_WILDCARD);
3244 			break;
3245 
3246 		default:
3247 			break;
3248 		}
3249 
3250 	default:
3251 		break;
3252 	}
3253 
3254 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3255 	current = sc->aifq_idx;
3256 	next = (current + 1) % AAC_AIFQ_LENGTH;
3257 	if (next == 0)
3258 		sc->aifq_filled = 1;
3259 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3260 	/* modify AIF contexts */
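	/*
	 * A reader whose next-read index collides with the new write position
	 * has been lapped, so flag it as wrapped; a reader that was already
	 * wrapped and pointed at the slot just overwritten is pushed forward,
	 * dropping its oldest entries.
	 */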
3261 	if (sc->aifq_filled) {
3262 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3263 			if (next == ctx->ctx_idx)
3264 				ctx->ctx_wrap = 1;
3265 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3266 				ctx->ctx_idx = next;
3267 		}
3268 	}
3269 	sc->aifq_idx = next;
3270 	/* On the off chance that someone is sleeping for an aif... */
3271 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3272 		wakeup(sc->aac_aifq);
3273 	/* Wakeup any poll()ers */
3274 	selwakeuppri(&sc->rcv_select, PRIBIO);
3275 
3276 	return;
3277 }
3278 
3279 /*
3280  * Return the Revision of the driver to userspace and check to see if the
3281  * userspace app is possibly compatible.  This is extremely bogus since
3282  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3283  * returning what the card reported.
3284  */
3285 static int
3286 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3287 {
3288 	struct aac_rev_check rev_check;
3289 	struct aac_rev_check_resp rev_check_resp;
3290 	int error = 0;
3291 
3292 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3293 
3294 	/*
3295 	 * Copyin the revision struct from userspace
3296 	 */
3297 	if ((error = copyin(udata, (caddr_t)&rev_check,
3298 			sizeof(struct aac_rev_check))) != 0) {
3299 		return error;
3300 	}
3301 
3302 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3303 	      rev_check.callingRevision.buildNumber);
3304 
3305 	/*
3306 	 * Doctor up the response struct.
3307 	 */
3308 	rev_check_resp.possiblyCompatible = 1;
3309 	rev_check_resp.adapterSWRevision.external.comp.major =
3310 	    AAC_DRIVER_MAJOR_VERSION;
3311 	rev_check_resp.adapterSWRevision.external.comp.minor =
3312 	    AAC_DRIVER_MINOR_VERSION;
3313 	rev_check_resp.adapterSWRevision.external.comp.type =
3314 	    AAC_DRIVER_TYPE;
3315 	rev_check_resp.adapterSWRevision.external.comp.dash =
3316 	    AAC_DRIVER_BUGFIX_LEVEL;
3317 	rev_check_resp.adapterSWRevision.buildNumber =
3318 	    AAC_DRIVER_BUILD;
3319 
3320 	return(copyout((caddr_t)&rev_check_resp, udata,
3321 			sizeof(struct aac_rev_check_resp)));
3322 }
3323 
3324 /*
3325  * Pass the fib context to the caller
3326  */
3327 static int
3328 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3329 {
3330 	struct aac_fib_context *fibctx, *ctx;
3331 	int error = 0;
3332 
3333 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3334 
3335 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3336 	if (fibctx == NULL)
3337 		return (ENOMEM);
3338 
3339 	mtx_lock(&sc->aac_io_lock);
3340 	/* all elements are already 0, add to queue */
3341 	if (sc->fibctx == NULL)
3342 		sc->fibctx = fibctx;
3343 	else {
3344 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3345 			;
3346 		ctx->next = fibctx;
3347 		fibctx->prev = ctx;
3348 	}
3349 
3350 	/* derive a unique token for this context, bumping it past any collision */
3351 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3352 	ctx = sc->fibctx;
3353 	while (ctx != fibctx) {
3354 		if (ctx->unique == fibctx->unique) {
3355 			fibctx->unique++;
3356 			ctx = sc->fibctx;
3357 		} else {
3358 			ctx = ctx->next;
3359 		}
3360 	}
3361 
3362 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3363 	mtx_unlock(&sc->aac_io_lock);
3364 	if (error)
3365 		aac_close_aif(sc, (caddr_t)ctx);
3366 	return error;
3367 }
3368 
3369 /*
3370  * Close the caller's fib context
3371  */
3372 static int
3373 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3374 {
3375 	struct aac_fib_context *ctx;
3376 
3377 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3378 
3379 	mtx_lock(&sc->aac_io_lock);
3380 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3381 		if (ctx->unique == *(uint32_t *)&arg) {
3382 			if (ctx == sc->fibctx)
3383 				sc->fibctx = NULL;
3384 			else {
3385 				ctx->prev->next = ctx->next;
3386 				if (ctx->next)
3387 					ctx->next->prev = ctx->prev;
3388 			}
3389 			break;
3390 		}
3391 	}
3392 	if (ctx)
3393 		free(ctx, M_AACRAIDBUF);
3394 
3395 	mtx_unlock(&sc->aac_io_lock);
3396 	return 0;
3397 }
3398 
3399 /*
3400  * Pass the caller the next AIF in their queue
3401  */
3402 static int
3403 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3404 {
3405 	struct get_adapter_fib_ioctl agf;
3406 	struct aac_fib_context *ctx;
3407 	int error;
3408 
3409 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3410 
3411 	mtx_lock(&sc->aac_io_lock);
3412 #ifdef COMPAT_FREEBSD32
3413 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3414 		struct get_adapter_fib_ioctl32 agf32;
3415 		error = copyin(arg, &agf32, sizeof(agf32));
3416 		if (error == 0) {
3417 			agf.AdapterFibContext = agf32.AdapterFibContext;
3418 			agf.Wait = agf32.Wait;
3419 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3420 		}
3421 	} else
3422 #endif
3423 		error = copyin(arg, &agf, sizeof(agf));
3424 	if (error == 0) {
3425 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3426 			if (agf.AdapterFibContext == ctx->unique)
3427 				break;
3428 		}
3429 		if (!ctx) {
3430 			mtx_unlock(&sc->aac_io_lock);
3431 			return (EFAULT);
3432 		}
3433 
3434 		error = aac_return_aif(sc, ctx, agf.AifFib);
3435 		if (error == EAGAIN && agf.Wait) {
3436 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3437 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3438 			while (error == EAGAIN) {
3439 				mtx_unlock(&sc->aac_io_lock);
3440 				error = tsleep(sc->aac_aifq, PRIBIO |
3441 					       PCATCH, "aacaif", 0);
3442 				mtx_lock(&sc->aac_io_lock);
3443 				if (error == 0)
3444 					error = aac_return_aif(sc, ctx, agf.AifFib);
3445 			}
3446 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3447 		}
3448 	}
3449 	mtx_unlock(&sc->aac_io_lock);
3450 	return(error);
3451 }
3452 
3453 /*
3454  * Hand the next AIF off the top of the queue out to userspace.
3455  */
3456 static int
3457 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3458 {
3459 	int current, error;
3460 
3461 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3462 
3463 	current = ctx->ctx_idx;
3464 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3465 		/* empty */
3466 		return (EAGAIN);
3467 	}
3468 	error =
3469 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3470 	if (error)
3471 		device_printf(sc->aac_dev,
3472 		    "aac_return_aif: copyout returned %d\n", error);
3473 	else {
3474 		ctx->ctx_wrap = 0;
3475 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3476 	}
3477 	return(error);
3478 }
3479 
3480 static int
3481 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3482 {
3483 	struct aac_pci_info {
3484 		u_int32_t bus;
3485 		u_int32_t slot;
3486 	} pciinf;
3487 	int error;
3488 
3489 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3490 
3491 	pciinf.bus = pci_get_bus(sc->aac_dev);
3492 	pciinf.slot = pci_get_slot(sc->aac_dev);
3493 
3494 	error = copyout((caddr_t)&pciinf, uptr,
3495 			sizeof(struct aac_pci_info));
3496 
3497 	return (error);
3498 }
3499 
3500 static int
3501 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3502 {
3503 	struct aac_features f;
3504 	int error;
3505 
3506 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3507 
3508 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3509 		return (error);
3510 
3511 	/*
3512 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3513 	 * ALL zero in the featuresState, the driver will return the current
3514 	 * state of all the supported features; the data field will not be
3515 	 * valid.
3516 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3517 	 * a specific bit set in the featuresState, the driver will return the
3518 	 * current state of that specific feature and any data associated
3519 	 * with it in the data field, or perform whatever action the data
3520 	 * field indicates.
3521 	 */
3522 	 if (f.feat.fValue == 0) {
3523 		f.feat.fBits.largeLBA =
3524 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3525 		f.feat.fBits.JBODSupport = 1;
3526 		/* TODO: In the future, add other features state here as well */
3527 	} else {
3528 		if (f.feat.fBits.largeLBA)
3529 			f.feat.fBits.largeLBA =
3530 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3531 		/* TODO: Add other features state and data in the future */
3532 	}
3533 
3534 	error = copyout(&f, uptr, sizeof (f));
3535 	return (error);
3536 }
3537 
3538 /*
3539  * Give the userland some information about the container.  The AAC arch
3540  * expects the driver to be a SCSI passthrough type driver, so it expects
3541  * the containers to have b:t:l numbers.  Fake it.
3542  */
3543 static int
3544 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3545 {
3546 	struct aac_query_disk query_disk;
3547 	struct aac_container *co;
3548 	int error, id;
3549 
3550 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3551 
3552 	mtx_lock(&sc->aac_io_lock);
3553 	error = copyin(uptr, (caddr_t)&query_disk,
3554 		       sizeof(struct aac_query_disk));
3555 	if (error) {
3556 		mtx_unlock(&sc->aac_io_lock);
3557 		return (error);
3558 	}
3559 
3560 	id = query_disk.ContainerNumber;
3561 	if (id == -1) {
3562 		mtx_unlock(&sc->aac_io_lock);
3563 		return (EINVAL);
3564 	}
3565 
3566 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3567 		if (co->co_mntobj.ObjectId == id)
3568 			break;
3569 	}
3570 
3571 	if (co == NULL) {
3572 		query_disk.Valid = 0;
3573 		query_disk.Locked = 0;
3574 		query_disk.Deleted = 1;		/* XXX is this right? */
3575 	} else {
3576 		query_disk.Valid = 1;
3577 		query_disk.Locked = 1;
3578 		query_disk.Deleted = 0;
3579 		query_disk.Bus = device_get_unit(sc->aac_dev);
3580 		query_disk.Target = 0;
3581 		query_disk.Lun = 0;
3582 		query_disk.UnMapped = 0;
3583 	}
3584 
3585 	error = copyout((caddr_t)&query_disk, uptr,
3586 			sizeof(struct aac_query_disk));
3587 
3588 	mtx_unlock(&sc->aac_io_lock);
3589 	return (error);
3590 }
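
/*
 * Illustrative userland sketch (guarded out, not part of the driver): how a
 * management tool would read back the faked b:t:l mapping built above.  The
 * FSACTL_QUERY_DISK ioctl name, device node path and argument marshalling
 * are assumptions made for the example only.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/aac_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
example_query_disk(const char *devpath, int container_id)
{
	struct aac_query_disk qd;
	int fd;

	if ((fd = open(devpath, O_RDWR)) < 0)
		return (-1);
	memset(&qd, 0, sizeof(qd));
	qd.ContainerNumber = container_id;
	if (ioctl(fd, FSACTL_QUERY_DISK, &qd) < 0) {
		close(fd);
		return (-1);
	}
	if (qd.Valid)
		printf("container %d -> b:t:l %d:%d:%d\n", container_id,
		    (int)qd.Bus, (int)qd.Target, (int)qd.Lun);
	else
		printf("container %d not present\n", container_id);
	close(fd);
	return (0);
}
#endif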
3591 
3592 static void
3593 aac_container_bus(struct aac_softc *sc)
3594 {
3595 	struct aac_sim *sim;
3596 	device_t child;
3597 
3598 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3599 	    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3600 	if (sim == NULL) {
3601 		device_printf(sc->aac_dev,
3602 		    "No memory to add container bus\n");
3603 		panic("Out of memory?!");
3604 	}
3605 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3606 	if (child == NULL) {
3607 		device_printf(sc->aac_dev,
3608 		    "device_add_child failed for container bus\n");
3609 		free(sim, M_AACRAIDBUF);
3610 		panic("Out of memory?!");
3611 	}
3612 
3613 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3614 	sim->BusNumber = 0;
3615 	sim->BusType = CONTAINER_BUS;
3616 	sim->InitiatorBusId = -1;
3617 	sim->aac_sc = sc;
3618 	sim->sim_dev = child;
3619 	sim->aac_cam = NULL;
3620 
3621 	device_set_ivars(child, sim);
3622 	device_set_desc(child, "Container Bus");
3623 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3624 	/*
3625 	device_set_desc(child, aac_describe_code(aac_container_types,
3626 			mir->MntTable[0].VolType));
3627 	*/
3628 	bus_generic_attach(sc->aac_dev);
3629 }
3630 
3631 static void
3632 aac_get_bus_info(struct aac_softc *sc)
3633 {
3634 	struct aac_fib *fib;
3635 	struct aac_ctcfg *c_cmd;
3636 	struct aac_ctcfg_resp *c_resp;
3637 	struct aac_vmioctl *vmi;
3638 	struct aac_vmi_businf_resp *vmi_resp;
3639 	struct aac_getbusinf businfo;
3640 	struct aac_sim *caminf;
3641 	device_t child;
3642 	int i, error;
3643 
3644 	mtx_lock(&sc->aac_io_lock);
3645 	aac_alloc_sync_fib(sc, &fib);
3646 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3647 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3648 
3649 	c_cmd->Command = VM_ContainerConfig;
3650 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3651 	c_cmd->param = 0;
3652 
3653 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3654 	    sizeof(struct aac_ctcfg));
3655 	if (error) {
3656 		device_printf(sc->aac_dev, "Error %d sending "
3657 		    "VM_ContainerConfig command\n", error);
3658 		aac_release_sync_fib(sc);
3659 		mtx_unlock(&sc->aac_io_lock);
3660 		return;
3661 	}
3662 
3663 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3664 	if (c_resp->Status != ST_OK) {
3665 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3666 		    c_resp->Status);
3667 		aac_release_sync_fib(sc);
3668 		mtx_unlock(&sc->aac_io_lock);
3669 		return;
3670 	}
3671 
3672 	sc->scsi_method_id = c_resp->param;
3673 
3674 	vmi = (struct aac_vmioctl *)&fib->data[0];
3675 	bzero(vmi, sizeof(struct aac_vmioctl));
3676 
3677 	vmi->Command = VM_Ioctl;
3678 	vmi->ObjType = FT_DRIVE;
3679 	vmi->MethId = sc->scsi_method_id;
3680 	vmi->ObjId = 0;
3681 	vmi->IoctlCmd = GetBusInfo;
3682 
3683 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3684 	    sizeof(struct aac_vmi_businf_resp));
3685 	if (error) {
3686 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3687 		    error);
3688 		aac_release_sync_fib(sc);
3689 		mtx_unlock(&sc->aac_io_lock);
3690 		return;
3691 	}
3692 
3693 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3694 	if (vmi_resp->Status != ST_OK) {
3695 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3696 		    vmi_resp->Status);
3697 		aac_release_sync_fib(sc);
3698 		mtx_unlock(&sc->aac_io_lock);
3699 		return;
3700 	}
3701 
3702 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3703 	aac_release_sync_fib(sc);
3704 	mtx_unlock(&sc->aac_io_lock);
3705 
3706 	for (i = 0; i < businfo.BusCount; i++) {
3707 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3708 			continue;
3709 
3710 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3711 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3712 		if (caminf == NULL) {
3713 			device_printf(sc->aac_dev,
3714 			    "No memory to add passthrough bus %d\n", i);
3715 			break;
3716 		}
3717 
3718 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3719 		if (child == NULL) {
3720 			device_printf(sc->aac_dev,
3721 			    "device_add_child failed for passthrough bus %d\n",
3722 			    i);
3723 			free(caminf, M_AACRAIDBUF);
3724 			break;
3725 		}
3726 
3727 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3728 		caminf->BusNumber = i+1;
3729 		caminf->BusType = PASSTHROUGH_BUS;
3730 		caminf->InitiatorBusId = -1;
3731 		caminf->aac_sc = sc;
3732 		caminf->sim_dev = child;
3733 		caminf->aac_cam = NULL;
3734 
3735 		device_set_ivars(child, caminf);
3736 		device_set_desc(child, "SCSI Passthrough Bus");
3737 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3738 	}
3739 }
3740 
3741 /*
3742  * Check whether the adapter firmware is up and running.  Return 0 if so;
3743  * otherwise return the status, storing the BlinkLED code via *bled on panic.
3744  */
3745 static u_int32_t
3746 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3747 {
3748 	u_int32_t ret;
3749 
3750 	ret = AAC_GET_FWSTATUS(sc);
3751 
3752 	if (ret & AAC_UP_AND_RUNNING)
3753 		ret = 0;
3754 	else if (ret & AAC_KERNEL_PANIC && bled)
3755 		*bled = (ret >> 16) & 0xff;
3756 
3757 	return (ret);
3758 }
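
/*
 * A minimal sketch (guarded out, illustrative only) of how a caller might
 * consume the health check above: a zero return means the firmware is up;
 * otherwise the raw status is returned and, on a firmware panic, the
 * BlinkLED code is available through *bled.  The helper name is
 * hypothetical; the driver's real recovery decisions are made elsewhere.
 */
#if 0
static void
aac_example_health_poll(struct aac_softc *sc)
{
	u_int8_t bled = 0;

	if (aac_check_adapter_health(sc, &bled) != 0) {
		device_printf(sc->aac_dev,
		    "firmware not up and running (BlinkLED code 0x%x)\n", bled);
		/* a recovery path would arrange for aac_reset_adapter() here */
	}
}
#endif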
3759 
3760 /*
3761  * After an IOP reset the card must be re-initialized as if it were coming
3762  * up from a cold boot, and the driver is responsible for any I/O that was
3763  * outstanding to the adapter at the time of the reset.  The init code is
3764  * therefore kept modular so that it can be called from multiple places.
3765  * (See the illustrative recovery sketch after this function.)
3766  */
3767 static int
3768 aac_reset_adapter(struct aac_softc *sc)
3769 {
3770 	struct aac_command *cm;
3771 	struct aac_fib *fib;
3772 	struct aac_pause_command *pc;
3773 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3774 	int ret, msi_enabled_orig;
3775 
3776 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3777 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3778 
3779 	if (sc->aac_state & AAC_STATE_RESET) {
3780 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3781 		return (EINVAL);
3782 	}
3783 	sc->aac_state |= AAC_STATE_RESET;
3784 
3785 	/* disable interrupt */
3786 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3787 
3788 	/*
3789 	 * Abort all pending commands:
3790 	 * a) on the controller
3791 	 */
3792 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3793 		cm->cm_flags |= AAC_CMD_RESET;
3794 
3795 		/* is there a completion handler? */
3796 		if (cm->cm_complete != NULL) {
3797 			cm->cm_complete(cm);
3798 		} else {
3799 			/* assume that someone is sleeping on this
3800 			 * command
3801 			 */
3802 			wakeup(cm);
3803 		}
3804 	}
3805 
3806 	/* b) in the waiting queues */
3807 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3808 		cm->cm_flags |= AAC_CMD_RESET;
3809 
3810 		/* is there a completion handler? */
3811 		if (cm->cm_complete != NULL) {
3812 			cm->cm_complete(cm);
3813 		} else {
3814 			/* assume that someone is sleeping on this
3815 			 * command
3816 			 */
3817 			wakeup(cm);
3818 		}
3819 	}
3820 
3821 	/* flush drives */
3822 	if (aac_check_adapter_health(sc, NULL) == 0) {
3823 		mtx_unlock(&sc->aac_io_lock);
3824 		(void) aacraid_shutdown(sc->aac_dev);
3825 		mtx_lock(&sc->aac_io_lock);
3826 	}
3827 
3828 	/* execute IOP reset */
3829 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3830 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3831 
3832 		/* Wait 5 seconds before accessing the MU again:
3833 		 * 10000 * 100us = 1,000,000us = 1s, so 5 * 10000 loops = 5s.
3834 		 */
3835 		waitCount = 5 * 10000;
3836 		while (waitCount) {
3837 			DELAY(100);			/* delay 100 microseconds */
3838 			waitCount--;
3839 		}
3840 	} else {
3841 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3842 			0, 0, 0, 0, &status, &reset_mask);
3843 		if (ret && !sc->doorbell_mask) {
3844 			/* call IOP_RESET for older firmware */
3845 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3846 			    &status, NULL)) != 0) {
3847 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3848 					device_printf(sc->aac_dev,
3849 					    "IOP_RESET not supported\n");
3850 				} else {
3851 					/* probably timeout */
3852 					device_printf(sc->aac_dev,
3853 					    "IOP_RESET failed\n");
3854 				}
3855 
3856 				/* unwind aac_shutdown() */
3857 				aac_alloc_sync_fib(sc, &fib);
3858 				pc = (struct aac_pause_command *)&fib->data[0];
3859 				pc->Command = VM_ContainerConfig;
3860 				pc->Type = CT_PAUSE_IO;
3861 				pc->Timeout = 1;
3862 				pc->Min = 1;
3863 				pc->NoRescan = 1;
3864 
3865 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3866 				    fib, sizeof (struct aac_pause_command));
3867 				aac_release_sync_fib(sc);
3868 
3869 				goto finish;
3870 			}
3871 		} else if (sc->doorbell_mask) {
3872 			ret = 0;
3873 			reset_mask = sc->doorbell_mask;
3874 		}
3875 		if (!ret &&
3876 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3877 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3878 			/*
3879 			 * Wait 5 seconds before accessing the doorbell again:
3880 			 * 10000 * 100us = 1,000,000us = 1s, so
3881 			 * 5 * 10000 loops = 5s.
3882 			 */
3883 			waitCount = 5 * 10000;
3884 			while (waitCount) {
3885 				DELAY(100);	/* delay 100 microseconds */
3886 				waitCount--;
3887 			}
3888 		}
3889 	}
3890 
3891 	/*
3892 	 * Initialize the adapter.
3893 	 */
3894 	max_msix_orig = sc->aac_max_msix;
3895 	msi_enabled_orig = sc->msi_enabled;
3896 	sc->msi_enabled = FALSE;
3897 	if (aac_check_firmware(sc) != 0)
3898 		goto finish;
3899 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3900 		sc->aac_max_msix = max_msix_orig;
3901 		if (msi_enabled_orig) {
3902 			sc->msi_enabled = msi_enabled_orig;
3903 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3904 		}
3905 		mtx_unlock(&sc->aac_io_lock);
3906 		aac_init(sc);
3907 		mtx_lock(&sc->aac_io_lock);
3908 	}
3909 
3910 finish:
3911 	sc->aac_state &= ~AAC_STATE_RESET;
3912 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3913 	aacraid_startio(sc);
3914 	return (0);
3915 }
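
/*
 * A minimal sketch (guarded out, illustrative only) of the calling
 * convention for the reset path above: aac_io_lock must be held, and the
 * reset is typically attempted once the health check reports a firmware
 * panic.  The helper name is hypothetical.
 */
#if 0
static void
aac_example_recover(struct aac_softc *sc)
{
	u_int8_t bled = 0;

	mtx_lock(&sc->aac_io_lock);
	if (aac_check_adapter_health(sc, &bled) != 0)
		(void) aac_reset_adapter(sc);	/* re-inits and restarts I/O */
	mtx_unlock(&sc->aac_io_lock);
}
#endif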
3916