xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 2d4e511ca269f1908d27f4e5779c53475527391d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6, 7, 8, ... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 static void		aac_cdevpriv_dtor(void *arg);
192 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
193 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
194 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
195 static void	aac_request_aif(struct aac_softc *sc);
196 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
197 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_return_aif(struct aac_softc *sc,
201 			       struct aac_fib_context *ctx, caddr_t uptr);
202 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
205 static void	aac_ioctl_event(struct aac_softc *sc,
206 				struct aac_event *event, void *arg);
207 static int	aac_reset_adapter(struct aac_softc *sc);
208 static int	aac_get_container_info(struct aac_softc *sc,
209 				       struct aac_fib *fib, int cid,
210 				       struct aac_mntinforesp *mir,
211 				       u_int32_t *uid);
212 static u_int32_t
213 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
214 
215 static struct cdevsw aacraid_cdevsw = {
216 	.d_version =	D_VERSION,
217 	.d_flags =	0,
218 	.d_open =	aac_open,
219 	.d_ioctl =	aac_ioctl,
220 	.d_poll =	aac_poll,
221 	.d_name =	"aacraid",
222 };
223 
224 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
225 
226 /* sysctl node */
227 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
228     "AACRAID driver parameters");
229 
230 /*
231  * Device Interface
232  */
233 
234 /*
235  * Initialize the controller and softc
236  */
237 int
238 aacraid_attach(struct aac_softc *sc)
239 {
240 	int error, unit;
241 	struct aac_fib *fib;
242 	struct aac_mntinforesp mir;
243 	int count = 0, i = 0;
244 	u_int32_t uid;
245 
246 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
247 	sc->hint_flags = device_get_flags(sc->aac_dev);
248 	/*
249 	 * Initialize per-controller queues.
250 	 */
251 	aac_initq_free(sc);
252 	aac_initq_ready(sc);
253 	aac_initq_busy(sc);
254 
255 	/* mark controller as suspended until we get ourselves organised */
256 	sc->aac_state |= AAC_STATE_SUSPEND;
257 
258 	/*
259 	 * Check that the firmware on the card is supported.
260 	 */
261 	sc->msi_enabled = sc->msi_tupelo = FALSE;
262 	if ((error = aac_check_firmware(sc)) != 0)
263 		return(error);
264 
265 	/*
266 	 * Initialize locks
267 	 */
268 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
269 	TAILQ_INIT(&sc->aac_container_tqh);
270 	TAILQ_INIT(&sc->aac_ev_cmfree);
271 
272 	/* Initialize the clock daemon callout. */
273 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
274 
275 	/*
276 	 * Initialize the adapter.
277 	 */
278 	if ((error = aac_alloc(sc)) != 0)
279 		return(error);
280 	aac_define_int_mode(sc);
281 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
282 		if ((error = aac_init(sc)) != 0)
283 			return(error);
284 	}
285 
286 	/*
287 	 * Allocate and connect our interrupt.
288 	 */
289 	if ((error = aac_setup_intr(sc)) != 0)
290 		return(error);
291 
292 	/*
293 	 * Print a little information about the controller.
294 	 */
295 	aac_describe_controller(sc);
296 
297 	/*
298 	 * Make the control device.
299 	 */
300 	unit = device_get_unit(sc->aac_dev);
301 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
302 				 0640, "aacraid%d", unit);
303 	sc->aac_dev_t->si_drv1 = sc;
304 
305 	/* Create the AIF thread */
306 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
307 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
308 		panic("Could not create AIF thread");
309 
310 	/* Register the shutdown method to only be called post-dump */
311 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
312 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
313 		device_printf(sc->aac_dev,
314 			      "shutdown event registration failed\n");
315 
316 	/* Find containers */
317 	mtx_lock(&sc->aac_io_lock);
318 	aac_alloc_sync_fib(sc, &fib);
319 	/* loop over possible containers */
320 	do {
321 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
322 			continue;
323 		if (i == 0)
324 			count = mir.MntRespCount;
325 		aac_add_container(sc, &mir, 0, uid);
326 		i++;
327 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
328 	aac_release_sync_fib(sc);
329 	mtx_unlock(&sc->aac_io_lock);
330 
331 	/* Register with CAM for the containers */
332 	TAILQ_INIT(&sc->aac_sim_tqh);
333 	aac_container_bus(sc);
334 	/* Register with CAM for the non-DASD devices */
335 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
336 		aac_get_bus_info(sc);
337 
338 	/* poke the bus to actually attach the child devices */
339 	bus_generic_attach(sc->aac_dev);
340 
341 	/* mark the controller up */
342 	sc->aac_state &= ~AAC_STATE_SUSPEND;
343 
344 	/* enable interrupts now */
345 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
346 
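	/* Start the periodic clock daemon; its first run is one minute from now. */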
347 	mtx_lock(&sc->aac_io_lock);
348 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
349 	mtx_unlock(&sc->aac_io_lock);
350 
351 	return(0);
352 }
353 
354 static void
355 aac_daemon(void *arg)
356 {
357 	struct aac_softc *sc;
358 	struct timeval tv;
359 	struct aac_command *cm;
360 	struct aac_fib *fib;
361 
362 	sc = arg;
363 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
364 
365 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
366 	if (callout_pending(&sc->aac_daemontime) ||
367 	    callout_active(&sc->aac_daemontime) == 0)
368 		return;
369 	getmicrotime(&tv);
370 
371 	if (!aacraid_alloc_command(sc, &cm)) {
372 		fib = cm->cm_fib;
373 		cm->cm_timestamp = time_uptime;
374 		cm->cm_datalen = 0;
375 		cm->cm_flags |= AAC_CMD_WAIT;
376 
377 		fib->Header.Size =
378 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
379 		fib->Header.XferState =
380 			AAC_FIBSTATE_HOSTOWNED   |
381 			AAC_FIBSTATE_INITIALISED |
382 			AAC_FIBSTATE_EMPTY	 |
383 			AAC_FIBSTATE_FROMHOST	 |
384 			AAC_FIBSTATE_REXPECTED   |
385 			AAC_FIBSTATE_NORM	 |
386 			AAC_FIBSTATE_ASYNC	 |
387 			AAC_FIBSTATE_FAST_RESPONSE;
388 		fib->Header.Command = SendHostTime;
389 		*(uint32_t *)fib->data = tv.tv_sec;
390 
391 		aacraid_map_command_sg(cm, NULL, 0, 0);
392 		aacraid_release_command(cm);
393 	}
394 
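	/* Run again, and resend the host time, in 30 minutes. */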
395 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
396 }
397 
398 void
399 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
400 {
401 
402 	switch (event->ev_type & AAC_EVENT_MASK) {
403 	case AAC_EVENT_CMFREE:
404 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
405 		break;
406 	default:
407 		device_printf(sc->aac_dev, "aacraid_add_event: unknown event %d\n",
408 		    event->ev_type);
409 		break;
410 	}
411 
412 	return;
413 }
414 
415 /*
416  * Request information of container #cid
417  */
418 static int
419 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
420 		       struct aac_mntinforesp *mir, u_int32_t *uid)
421 {
422 	struct aac_command *cm;
423 	struct aac_fib *fib;
424 	struct aac_mntinfo *mi;
425 	struct aac_cnt_config *ccfg;
426 	int rval;
427 
428 	if (sync_fib == NULL) {
429 		if (aacraid_alloc_command(sc, &cm)) {
430 			device_printf(sc->aac_dev,
431 				"Warning, no free command available\n");
432 			return (-1);
433 		}
434 		fib = cm->cm_fib;
435 	} else {
436 		fib = sync_fib;
437 	}
438 
439 	mi = (struct aac_mntinfo *)&fib->data[0];
440 	/* 4KB support? 64-bit LBA? */
441 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
442 		mi->Command = VM_NameServeAllBlk;
443 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
444 		mi->Command = VM_NameServe64;
445 	else
446 		mi->Command = VM_NameServe;
447 	mi->MntType = FT_FILESYS;
448 	mi->MntCount = cid;
449 
450 	if (sync_fib) {
451 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
452 			 sizeof(struct aac_mntinfo))) {
453 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
454 			return (-1);
455 		}
456 	} else {
457 		cm->cm_timestamp = time_uptime;
458 		cm->cm_datalen = 0;
459 
460 		fib->Header.Size =
461 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
462 		fib->Header.XferState =
463 			AAC_FIBSTATE_HOSTOWNED   |
464 			AAC_FIBSTATE_INITIALISED |
465 			AAC_FIBSTATE_EMPTY	 |
466 			AAC_FIBSTATE_FROMHOST	 |
467 			AAC_FIBSTATE_REXPECTED   |
468 			AAC_FIBSTATE_NORM	 |
469 			AAC_FIBSTATE_ASYNC	 |
470 			AAC_FIBSTATE_FAST_RESPONSE;
471 		fib->Header.Command = ContainerCommand;
472 		if (aacraid_wait_command(cm) != 0) {
473 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
474 			aacraid_release_command(cm);
475 			return (-1);
476 		}
477 	}
478 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
479 
480 	/* UID */
481 	*uid = cid;
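	/* If the container is usable, ask the firmware for its 32-bit UID below. */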
482 	if (mir->MntTable[0].VolType != CT_NONE &&
483 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
484 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
485 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
486 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
487 		}
488 		ccfg = (struct aac_cnt_config *)&fib->data[0];
489 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
490 		ccfg->Command = VM_ContainerConfig;
491 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
492 		ccfg->CTCommand.param[0] = cid;
493 
494 		if (sync_fib) {
495 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
496 				sizeof(struct aac_cnt_config));
497 			if (rval == 0 && ccfg->Command == ST_OK &&
498 				ccfg->CTCommand.param[0] == CT_OK &&
499 				mir->MntTable[0].VolType != CT_PASSTHRU)
500 				*uid = ccfg->CTCommand.param[1];
501 		} else {
502 			fib->Header.Size =
503 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
504 			fib->Header.XferState =
505 				AAC_FIBSTATE_HOSTOWNED   |
506 				AAC_FIBSTATE_INITIALISED |
507 				AAC_FIBSTATE_EMPTY	 |
508 				AAC_FIBSTATE_FROMHOST	 |
509 				AAC_FIBSTATE_REXPECTED   |
510 				AAC_FIBSTATE_NORM	 |
511 				AAC_FIBSTATE_ASYNC	 |
512 				AAC_FIBSTATE_FAST_RESPONSE;
513 			fib->Header.Command = ContainerCommand;
514 			rval = aacraid_wait_command(cm);
515 			if (rval == 0 && ccfg->Command == ST_OK &&
516 				ccfg->CTCommand.param[0] == CT_OK &&
517 				mir->MntTable[0].VolType != CT_PASSTHRU)
518 				*uid = ccfg->CTCommand.param[1];
519 			aacraid_release_command(cm);
520 		}
521 	}
522 
523 	return (0);
524 }
525 
526 /*
527  * Create a device to represent a new container
528  */
529 static void
530 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
531 		  u_int32_t uid)
532 {
533 	struct aac_container *co;
534 
535 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
536 
537 	/*
538 	 * Check container volume type for validity.  Note that many of
539 	 * the possible types may never show up.
540 	 */
541 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
542 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
543 		       M_NOWAIT | M_ZERO);
544 		if (co == NULL) {
545 			panic("Out of memory?!");
546 		}
547 
548 		co->co_found = f;
549 		bcopy(&mir->MntTable[0], &co->co_mntobj,
550 		      sizeof(struct aac_mntobj));
551 		co->co_uid = uid;
552 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
553 	}
554 }
555 
556 /*
557  * Allocate resources associated with (sc)
558  */
559 static int
560 aac_alloc(struct aac_softc *sc)
561 {
562 	bus_size_t maxsize;
563 
564 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
565 
566 	/*
567 	 * Create DMA tag for mapping buffers into controller-addressable space.
568 	 */
569 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
570 			       1, 0, 			/* algnmnt, boundary */
571 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
572 			       BUS_SPACE_MAXADDR :
573 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
574 			       BUS_SPACE_MAXADDR, 	/* highaddr */
575 			       NULL, NULL, 		/* filter, filterarg */
576 			       sc->aac_max_sectors << 9, /* maxsize */
577 			       sc->aac_sg_tablesize,	/* nsegments */
578 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
579 			       BUS_DMA_ALLOCNOW,	/* flags */
580 			       busdma_lock_mutex,	/* lockfunc */
581 			       &sc->aac_io_lock,	/* lockfuncarg */
582 			       &sc->aac_buffer_dmat)) {
583 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
584 		return (ENOMEM);
585 	}
586 
587 	/*
588 	 * Create DMA tag for mapping FIBs into controller-addressable space..
589  * Create DMA tag for mapping FIBs into controller-addressable space.
590 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
591 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
592 			sizeof(struct aac_fib_xporthdr) + 31);
593 	else
594 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
595 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
596 			       1, 0, 			/* algnmnt, boundary */
597 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
598 			       BUS_SPACE_MAXADDR_32BIT :
599 			       0x7fffffff,		/* lowaddr */
600 			       BUS_SPACE_MAXADDR, 	/* highaddr */
601 			       NULL, NULL, 		/* filter, filterarg */
602 			       maxsize,  		/* maxsize */
603 			       1,			/* nsegments */
604 			       maxsize,			/* maxsegsize */
605 			       0,			/* flags */
606 			       NULL, NULL,		/* No locking needed */
607 			       &sc->aac_fib_dmat)) {
608 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
609 		return (ENOMEM);
610 	}
611 
612 	/*
613 	 * Create DMA tag for the common structure and allocate it.
614 	 */
615 	maxsize = sizeof(struct aac_common);
616 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
617 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
618 			       1, 0,			/* algnmnt, boundary */
619 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
620 			       BUS_SPACE_MAXADDR_32BIT :
621 			       0x7fffffff,		/* lowaddr */
622 			       BUS_SPACE_MAXADDR, 	/* highaddr */
623 			       NULL, NULL, 		/* filter, filterarg */
624 			       maxsize, 		/* maxsize */
625 			       1,			/* nsegments */
626 			       maxsize,			/* maxsegsize */
627 			       0,			/* flags */
628 			       NULL, NULL,		/* No locking needed */
629 			       &sc->aac_common_dmat)) {
630 		device_printf(sc->aac_dev,
631 			      "can't allocate common structure DMA tag\n");
632 		return (ENOMEM);
633 	}
634 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
635 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
636 		device_printf(sc->aac_dev, "can't allocate common structure\n");
637 		return (ENOMEM);
638 	}
639 
640 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
641 			sc->aac_common, maxsize,
642 			aac_common_map, sc, 0);
643 	bzero(sc->aac_common, maxsize);
644 
645 	/* Allocate some FIBs and associated command structs */
646 	TAILQ_INIT(&sc->aac_fibmap_tqh);
647 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
648 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
649 	mtx_lock(&sc->aac_io_lock);
650 	while (sc->total_fibs < sc->aac_max_fibs) {
651 		if (aac_alloc_commands(sc) != 0)
652 			break;
653 	}
654 	mtx_unlock(&sc->aac_io_lock);
655 	if (sc->total_fibs == 0)
656 		return (ENOMEM);
657 
658 	return (0);
659 }
660 
661 /*
662  * Free all of the resources associated with (sc)
663  *
664  * Should not be called if the controller is active.
665  */
666 void
667 aacraid_free(struct aac_softc *sc)
668 {
669 	int i;
670 
671 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
672 
673 	/* remove the control device */
674 	if (sc->aac_dev_t != NULL)
675 		destroy_dev(sc->aac_dev_t);
676 
677 	/* throw away any FIB buffers, discard the FIB DMA tag */
678 	aac_free_commands(sc);
679 	if (sc->aac_fib_dmat)
680 		bus_dma_tag_destroy(sc->aac_fib_dmat);
681 
682 	free(sc->aac_commands, M_AACRAIDBUF);
683 
684 	/* destroy the common area */
685 	if (sc->aac_common) {
686 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
687 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
688 				sc->aac_common_dmamap);
689 	}
690 	if (sc->aac_common_dmat)
691 		bus_dma_tag_destroy(sc->aac_common_dmat);
692 
693 	/* disconnect the interrupt handler */
694 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
695 		if (sc->aac_intr[i])
696 			bus_teardown_intr(sc->aac_dev,
697 				sc->aac_irq[i], sc->aac_intr[i]);
698 		if (sc->aac_irq[i])
699 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
700 				sc->aac_irq_rid[i], sc->aac_irq[i]);
701 		else
702 			break;
703 	}
704 	if (sc->msi_enabled || sc->msi_tupelo)
705 		pci_release_msi(sc->aac_dev);
706 
707 	/* destroy data-transfer DMA tag */
708 	if (sc->aac_buffer_dmat)
709 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
710 
711 	/* destroy the parent DMA tag */
712 	if (sc->aac_parent_dmat)
713 		bus_dma_tag_destroy(sc->aac_parent_dmat);
714 
715 	/* release the register window mapping */
716 	if (sc->aac_regs_res0 != NULL)
717 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
718 				     sc->aac_regs_rid0, sc->aac_regs_res0);
719 	if (sc->aac_regs_res1 != NULL)
720 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
721 				     sc->aac_regs_rid1, sc->aac_regs_res1);
722 }
723 
724 /*
725  * Disconnect from the controller completely, in preparation for unload.
726  */
727 int
728 aacraid_detach(device_t dev)
729 {
730 	struct aac_softc *sc;
731 	struct aac_container *co;
732 	struct aac_sim	*sim;
733 	int error;
734 
735 	sc = device_get_softc(dev);
736 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
737 
738 	callout_drain(&sc->aac_daemontime);
739 	/* Remove the child containers */
740 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
741 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
742 		free(co, M_AACRAIDBUF);
743 	}
744 
745 	/* Remove the CAM SIMs */
746 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
747 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
748 		error = device_delete_child(dev, sim->sim_dev);
749 		if (error)
750 			return (error);
751 		free(sim, M_AACRAIDBUF);
752 	}
753 
754 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
755 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
756 		wakeup(sc->aifthread);
757 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
758 	}
759 
760 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
761 		panic("Cannot shutdown AIF thread");
762 
763 	if ((error = aacraid_shutdown(dev)))
764 		return(error);
765 
766 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
767 
768 	aacraid_free(sc);
769 
770 	mtx_destroy(&sc->aac_io_lock);
771 
772 	return(0);
773 }
774 
775 /*
776  * Bring the controller down to a dormant state and detach all child devices.
777  *
778  * This function is called before detach or system shutdown.
779  *
780  * Note that we can assume that the bioq on the controller is empty, as we won't
781  * allow shutdown if any device is open.
782  */
783 int
784 aacraid_shutdown(device_t dev)
785 {
786 	struct aac_softc *sc;
787 	struct aac_fib *fib;
788 	struct aac_close_command *cc;
789 
790 	sc = device_get_softc(dev);
791 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
792 
793 	sc->aac_state |= AAC_STATE_SUSPEND;
794 
795 	/*
796 	 * Send a Container shutdown followed by a HostShutdown FIB to the
797 	 * controller to convince it that we don't want to talk to it anymore.
798 	 * We've been closed and all I/O has already completed.
799 	 */
800 	device_printf(sc->aac_dev, "shutting down controller...");
801 
802 	mtx_lock(&sc->aac_io_lock);
803 	aac_alloc_sync_fib(sc, &fib);
804 	cc = (struct aac_close_command *)&fib->data[0];
805 
806 	bzero(cc, sizeof(struct aac_close_command));
807 	cc->Command = VM_CloseAll;
808 	cc->ContainerId = 0xfffffffe;
809 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
810 	    sizeof(struct aac_close_command)))
811 		printf("FAILED.\n");
812 	else
813 		printf("done\n");
814 
815 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
816 	aac_release_sync_fib(sc);
817 	mtx_unlock(&sc->aac_io_lock);
818 
819 	return(0);
820 }
821 
822 /*
823  * Bring the controller to a quiescent state, ready for system suspend.
824  */
825 int
826 aacraid_suspend(device_t dev)
827 {
828 	struct aac_softc *sc;
829 
830 	sc = device_get_softc(dev);
831 
832 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
833 	sc->aac_state |= AAC_STATE_SUSPEND;
834 
835 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
836 	return(0);
837 }
838 
839 /*
840  * Bring the controller back to a state ready for operation.
841  */
842 int
843 aacraid_resume(device_t dev)
844 {
845 	struct aac_softc *sc;
846 
847 	sc = device_get_softc(dev);
848 
849 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
850 	sc->aac_state &= ~AAC_STATE_SUSPEND;
851 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
852 	return(0);
853 }
854 
855 /*
856  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
857  */
858 void
859 aacraid_new_intr_type1(void *arg)
860 {
861 	struct aac_msix_ctx *ctx;
862 	struct aac_softc *sc;
863 	int vector_no;
864 	struct aac_command *cm;
865 	struct aac_fib *fib;
866 	u_int32_t bellbits, bellbits_shifted, index, handle;
867 	int isFastResponse, isAif, noMoreAif, mode;
868 
869 	ctx = (struct aac_msix_ctx *)arg;
870 	sc = ctx->sc;
871 	vector_no = ctx->vector_no;
872 
873 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
874 	mtx_lock(&sc->aac_io_lock);
875 
876 	if (sc->msi_enabled) {
877 		mode = AAC_INT_MODE_MSI;
878 		if (vector_no == 0) {
879 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
880 			if (bellbits & 0x40000)
881 				mode |= AAC_INT_MODE_AIF;
882 			else if (bellbits & 0x1000)
883 				mode |= AAC_INT_MODE_SYNC;
884 		}
885 	} else {
886 		mode = AAC_INT_MODE_INTX;
887 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
888 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
889 			bellbits = AAC_DB_RESPONSE_SENT_NS;
890 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
891 		} else {
892 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
893 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
894 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
895 				mode |= AAC_INT_MODE_AIF;
896 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
897 				mode |= AAC_INT_MODE_SYNC;
898 		}
899 		/* ODR readback, Prep #238630 */
900 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
901 	}
902 
903 	if (mode & AAC_INT_MODE_SYNC) {
904 		if (sc->aac_sync_cm) {
905 			cm = sc->aac_sync_cm;
906 			aac_unmap_command(cm);
907 			cm->cm_flags |= AAC_CMD_COMPLETED;
908 			/* is there a completion handler? */
909 			if (cm->cm_complete != NULL) {
910 				cm->cm_complete(cm);
911 			} else {
912 				/* assume that someone is sleeping on this command */
913 				wakeup(cm);
914 			}
915 			sc->flags &= ~AAC_QUEUE_FRZN;
916 			sc->aac_sync_cm = NULL;
917 		}
918 		mode = 0;
919 	}
920 
921 	if (mode & AAC_INT_MODE_AIF) {
922 		if (mode & AAC_INT_MODE_INTX) {
923 			aac_request_aif(sc);
924 			mode = 0;
925 		}
926 	}
927 
928 	if (mode) {
929 		/* handle async. status */
930 		index = sc->aac_host_rrq_idx[vector_no];
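		/*
		 * Each host RRQ entry encodes one completion: bit 31 is the
		 * toggle bit, bit 30 marks a fast response, bit 23 marks an
		 * AIF, and the low 16 bits hold the command index + 1.  A
		 * zero entry means there is nothing left to process.
		 */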
931 		for (;;) {
932 			isFastResponse = isAif = noMoreAif = 0;
933 			/* remove toggle bit (31) */
934 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
935 			/* check fast response bit (30) */
936 			if (handle & 0x40000000)
937 				isFastResponse = 1;
938 			/* check AIF bit (23) */
939 			else if (handle & 0x00800000)
940 				isAif = TRUE;
941 			handle &= 0x0000ffff;
942 			if (handle == 0)
943 				break;
944 
945 			cm = sc->aac_commands + (handle - 1);
946 			fib = cm->cm_fib;
947 			sc->aac_rrq_outstanding[vector_no]--;
948 			if (isAif) {
949 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
950 				if (!noMoreAif)
951 					aac_handle_aif(sc, fib);
952 				aac_remove_busy(cm);
953 				aacraid_release_command(cm);
954 			} else {
955 				if (isFastResponse) {
956 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
957 					*((u_int32_t *)(fib->data)) = ST_OK;
958 					cm->cm_flags |= AAC_CMD_FASTRESP;
959 				}
960 				aac_remove_busy(cm);
961 				aac_unmap_command(cm);
962 				cm->cm_flags |= AAC_CMD_COMPLETED;
963 
964 				/* is there a completion handler? */
965 				if (cm->cm_complete != NULL) {
966 					cm->cm_complete(cm);
967 				} else {
968 					/* assume that someone is sleeping on this command */
969 					wakeup(cm);
970 				}
971 				sc->flags &= ~AAC_QUEUE_FRZN;
972 			}
973 
974 			sc->aac_common->ac_host_rrq[index++] = 0;
975 			if (index == (vector_no + 1) * sc->aac_vector_cap)
976 				index = vector_no * sc->aac_vector_cap;
977 			sc->aac_host_rrq_idx[vector_no] = index;
978 
979 			if ((isAif && !noMoreAif) || sc->aif_pending)
980 				aac_request_aif(sc);
981 		}
982 	}
983 
984 	if (mode & AAC_INT_MODE_AIF) {
985 		aac_request_aif(sc);
986 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
987 		mode = 0;
988 	}
989 
990 	/* see if we can start some more I/O */
991 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
992 		aacraid_startio(sc);
993 	mtx_unlock(&sc->aac_io_lock);
994 }
995 
996 /*
997  * Handle notification of one or more FIBs coming from the controller.
998  */
999 static void
1000 aac_command_thread(struct aac_softc *sc)
1001 {
1002 	int retval;
1003 
1004 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1005 
1006 	mtx_lock(&sc->aac_io_lock);
1007 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1008 
1009 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1010 
1011 		retval = 0;
1012 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1013 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1014 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1015 
1016 		/*
1017 		 * First see if any FIBs need to be allocated.
1018 		 */
1019 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1020 			aac_alloc_commands(sc);
1021 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1022 			aacraid_startio(sc);
1023 		}
1024 
1025 		/*
1026 		 * While we're here, check to see if any commands are stuck.
1027 		 * This is pretty low-priority, so it's ok if it doesn't
1028 		 * always fire.
1029 		 */
1030 		if (retval == EWOULDBLOCK)
1031 			aac_timeout(sc);
1032 
1033 		/* Check the hardware printf message buffer */
1034 		if (sc->aac_common->ac_printf[0] != 0)
1035 			aac_print_printf(sc);
1036 	}
1037 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1038 	mtx_unlock(&sc->aac_io_lock);
1039 	wakeup(sc->aac_dev);
1040 
1041 	aac_kthread_exit(0);
1042 }
1043 
1044 /*
1045  * Submit a command to the controller, return when it completes.
1046  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1047  *     be stuck here forever.  At the same time, signals are not caught
1048  *     because there is a risk that a signal could wakeup the sleep before
1049  *     the card has a chance to complete the command.  Since there is no way
1050  *     to cancel a command that is in progress, we can't protect against the
1051  *     card completing a command late and spamming the command and data
1052  *     memory.  So, we are held hostage until the command completes.
1053  */
1054 int
1055 aacraid_wait_command(struct aac_command *cm)
1056 {
1057 	struct aac_softc *sc;
1058 	int error;
1059 
1060 	sc = cm->cm_sc;
1061 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1062 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1063 
1064 	/* Put the command on the ready queue and get things going */
1065 	aac_enqueue_ready(cm);
1066 	aacraid_startio(sc);
1067 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1068 	return(error);
1069 }
1070 
1071 /*
1072  * Command Buffer Management
1073  */
1074 
1075 /*
1076  * Allocate a command.
1077  */
1078 int
1079 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1080 {
1081 	struct aac_command *cm;
1082 
1083 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1084 
1085 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1086 		if (sc->total_fibs < sc->aac_max_fibs) {
1087 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1088 			wakeup(sc->aifthread);
1089 		}
1090 		return (EBUSY);
1091 	}
1092 
1093 	*cmp = cm;
1094 	return(0);
1095 }
1096 
1097 /*
1098  * Release a command back to the freelist.
1099  */
1100 void
1101 aacraid_release_command(struct aac_command *cm)
1102 {
1103 	struct aac_event *event;
1104 	struct aac_softc *sc;
1105 
1106 	sc = cm->cm_sc;
1107 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1108 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1109 
1110 	/* (re)initialize the command/FIB */
1111 	cm->cm_sgtable = NULL;
1112 	cm->cm_flags = 0;
1113 	cm->cm_complete = NULL;
1114 	cm->cm_ccb = NULL;
1115 	cm->cm_passthr_dmat = 0;
1116 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1117 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1118 	cm->cm_fib->Header.Unused = 0;
1119 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1120 
1121 	/*
1122 	 * These are duplicated in aac_start to cover the case where an
1123 	 * intermediate stage may have destroyed them.  They're left
1124 	 * initialized here for debugging purposes only.
1125 	 */
1126 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1127 	cm->cm_fib->Header.Handle = 0;
1128 
1129 	aac_enqueue_free(cm);
1130 
1131 	/*
1132 	 * Dequeue all events so that there's no risk of events getting
1133 	 * stranded.
1134 	 */
1135 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1136 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1137 		event->ev_callback(sc, event, event->ev_arg);
1138 	}
1139 }
1140 
1141 /*
1142  * Map helper for command/FIB allocation.
1143  */
1144 static void
1145 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1146 {
1147 	uint64_t	*fibphys;
1148 
1149 	fibphys = (uint64_t *)arg;
1150 
1151 	*fibphys = segs[0].ds_addr;
1152 }
1153 
1154 /*
1155  * Allocate and initialize commands/FIBs for this adapter.
1156  */
1157 static int
1158 aac_alloc_commands(struct aac_softc *sc)
1159 {
1160 	struct aac_command *cm;
1161 	struct aac_fibmap *fm;
1162 	uint64_t fibphys;
1163 	int i, error;
1164 	u_int32_t maxsize;
1165 
1166 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1167 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1168 
1169 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1170 		return (ENOMEM);
1171 
1172 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1173 	if (fm == NULL)
1174 		return (ENOMEM);
1175 
1176 	mtx_unlock(&sc->aac_io_lock);
1177 	/* allocate the FIBs in DMAable memory and load them */
1178 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1179 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1180 		device_printf(sc->aac_dev,
1181 			      "Not enough contiguous memory available.\n");
1182 		free(fm, M_AACRAIDBUF);
1183 		mtx_lock(&sc->aac_io_lock);
1184 		return (ENOMEM);
1185 	}
1186 
1187 	maxsize = sc->aac_max_fib_size + 31;
1188 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1189 		maxsize += sizeof(struct aac_fib_xporthdr);
1190 	/* Ignore errors since this doesn't bounce */
1191 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1192 			      sc->aac_max_fibs_alloc * maxsize,
1193 			      aac_map_command_helper, &fibphys, 0);
1194 	mtx_lock(&sc->aac_io_lock);
1195 
1196 	/* initialize constant fields in the command structure */
1197 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1198 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1199 		cm = sc->aac_commands + sc->total_fibs;
1200 		fm->aac_commands = cm;
1201 		cm->cm_sc = sc;
1202 		cm->cm_fib = (struct aac_fib *)
1203 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1204 		cm->cm_fibphys = fibphys + i * maxsize;
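		/*
		 * Round the FIB address up to a 32-byte boundary; on
		 * NEW_COMM_TYPE1 controllers, additionally leave room for the
		 * transport header that precedes the FIB itself.
		 */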
1205 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1206 			u_int64_t fibphys_aligned;
1207 			fibphys_aligned =
1208 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1209 			cm->cm_fib = (struct aac_fib *)
1210 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1211 			cm->cm_fibphys = fibphys_aligned;
1212 		} else {
1213 			u_int64_t fibphys_aligned;
1214 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1215 			cm->cm_fib = (struct aac_fib *)
1216 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1217 			cm->cm_fibphys = fibphys_aligned;
1218 		}
1219 		cm->cm_index = sc->total_fibs;
1220 
1221 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1222 					       &cm->cm_datamap)) != 0)
1223 			break;
1224 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1225 			aacraid_release_command(cm);
1226 		sc->total_fibs++;
1227 	}
1228 
1229 	if (i > 0) {
1230 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1231 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1232 		return (0);
1233 	}
1234 
1235 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1236 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1237 	free(fm, M_AACRAIDBUF);
1238 	return (ENOMEM);
1239 }
1240 
1241 /*
1242  * Free FIBs owned by this adapter.
1243  */
1244 static void
1245 aac_free_commands(struct aac_softc *sc)
1246 {
1247 	struct aac_fibmap *fm;
1248 	struct aac_command *cm;
1249 	int i;
1250 
1251 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1252 
1253 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1254 
1255 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1256 		/*
1257 		 * We check against total_fibs to handle partially
1258 		 * allocated blocks.
1259 		 */
1260 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1261 			cm = fm->aac_commands + i;
1262 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1263 		}
1264 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1265 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1266 		free(fm, M_AACRAIDBUF);
1267 	}
1268 }
1269 
1270 /*
1271  * Command-mapping helper function - populate this command's s/g table.
1272  */
1273 void
1274 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1275 {
1276 	struct aac_softc *sc;
1277 	struct aac_command *cm;
1278 	struct aac_fib *fib;
1279 	int i;
1280 
1281 	cm = (struct aac_command *)arg;
1282 	sc = cm->cm_sc;
1283 	fib = cm->cm_fib;
1284 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1285 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1286 
1287 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1288 		return;
1289 
1290 	/* copy into the FIB */
1291 	if (cm->cm_sgtable != NULL) {
1292 		if (fib->Header.Command == RawIo2) {
1293 			struct aac_raw_io2 *raw;
1294 			struct aac_sge_ieee1212 *sg;
1295 			u_int32_t min_size = PAGE_SIZE, cur_size;
1296 			int conformable = TRUE;
1297 
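			/*
			 * A RIO2 SG list is "conformant" when every element
			 * except the first and last has the same nominal size.
			 * Track the smallest middle element so the list can be
			 * rebuilt into a conformant one below if necessary.
			 */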
1298 			raw = (struct aac_raw_io2 *)&fib->data[0];
1299 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1300 			raw->sgeCnt = nseg;
1301 
1302 			for (i = 0; i < nseg; i++) {
1303 				cur_size = segs[i].ds_len;
1304 				sg[i].addrHigh = 0;
1305 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1306 				sg[i].length = cur_size;
1307 				sg[i].flags = 0;
1308 				if (i == 0) {
1309 					raw->sgeFirstSize = cur_size;
1310 				} else if (i == 1) {
1311 					raw->sgeNominalSize = cur_size;
1312 					min_size = cur_size;
1313 				} else if ((i+1) < nseg &&
1314 					cur_size != raw->sgeNominalSize) {
1315 					conformable = FALSE;
1316 					if (cur_size < min_size)
1317 						min_size = cur_size;
1318 				}
1319 			}
1320 
1321 			/* not conformable: evaluate required sg elements */
1322 			if (!conformable) {
1323 				int j, err_found, nseg_new = nseg;
1324 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1325 					err_found = FALSE;
1326 					nseg_new = 2;
1327 					for (j = 1; j < nseg - 1; ++j) {
1328 						if (sg[j].length % (i*PAGE_SIZE)) {
1329 							err_found = TRUE;
1330 							break;
1331 						}
1332 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1333 					}
1334 					if (!err_found)
1335 						break;
1336 				}
1337 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1338 					!(sc->hint_flags & 4))
1339 					nseg = aac_convert_sgraw2(sc,
1340 						raw, i, nseg, nseg_new);
1341 			} else {
1342 				raw->flags |= RIO2_SGL_CONFORMANT;
1343 			}
1344 
1345 			/* update the FIB size for the s/g count */
1346 			fib->Header.Size += nseg *
1347 				sizeof(struct aac_sge_ieee1212);
1348 
1349 		} else if (fib->Header.Command == RawIo) {
1350 			struct aac_sg_tableraw *sg;
1351 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1352 			sg->SgCount = nseg;
1353 			for (i = 0; i < nseg; i++) {
1354 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1355 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1356 				sg->SgEntryRaw[i].Next = 0;
1357 				sg->SgEntryRaw[i].Prev = 0;
1358 				sg->SgEntryRaw[i].Flags = 0;
1359 			}
1360 			/* update the FIB size for the s/g count */
1361 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1362 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1363 			struct aac_sg_table *sg;
1364 			sg = cm->cm_sgtable;
1365 			sg->SgCount = nseg;
1366 			for (i = 0; i < nseg; i++) {
1367 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1368 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1369 			}
1370 			/* update the FIB size for the s/g count */
1371 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1372 		} else {
1373 			struct aac_sg_table64 *sg;
1374 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1375 			sg->SgCount = nseg;
1376 			for (i = 0; i < nseg; i++) {
1377 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1378 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1379 			}
1380 			/* update the FIB size for the s/g count */
1381 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1382 		}
1383 	}
1384 
1385 	/* Fix up the address values in the FIB.  Use the command array index
1386 	 * instead of a pointer since these fields are only 32 bits.  Shift
1387 	 * the SenderFibAddress over to make room for the fast response bit
1388 	 * and for the AIF bit
1389 	 */
1390 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1391 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1392 
1393 	/* save a pointer to the command for speedy reverse-lookup */
1394 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
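	/* The +1 keeps handle 0 free to mean "queue empty" in the host RRQ. */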
1395 
1396 	if (cm->cm_passthr_dmat == 0) {
1397 		if (cm->cm_flags & AAC_CMD_DATAIN)
1398 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1399 							BUS_DMASYNC_PREREAD);
1400 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1401 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1402 							BUS_DMASYNC_PREWRITE);
1403 	}
1404 
1405 	cm->cm_flags |= AAC_CMD_MAPPED;
1406 
1407 	if (cm->cm_flags & AAC_CMD_WAIT) {
1408 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1409 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1410 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1411 		u_int32_t wait = 0;
1412 		sc->aac_sync_cm = cm;
1413 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1414 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1415 	} else {
1416 		int count = 10000000L;
1417 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1418 			if (--count == 0) {
1419 				aac_unmap_command(cm);
1420 				sc->flags |= AAC_QUEUE_FRZN;
1421 				aac_requeue_ready(cm);
1422 			}
1423 			DELAY(5);			/* wait 5 usec. */
1424 		}
1425 	}
1426 }
1427 
1428 
1429 static int
1430 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1431 				   int pages, int nseg, int nseg_new)
1432 {
1433 	struct aac_sge_ieee1212 *sge;
1434 	int i, j, pos;
1435 	u_int32_t addr_low;
1436 
1437 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1438 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1439 	if (sge == NULL)
1440 		return nseg;
1441 
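	/*
	 * Split every middle element of the original SG list into chunks of
	 * pages * PAGE_SIZE bytes, keeping the first and last elements as
	 * they are, so that the resulting nseg_new-element list is RIO2
	 * conformant.
	 */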
1442 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1443 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1444 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1445 			sge[pos].addrLow = addr_low;
1446 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1447 			if (addr_low < raw->sge[i].addrLow)
1448 				sge[pos].addrHigh++;
1449 			sge[pos].length = pages * PAGE_SIZE;
1450 			sge[pos].flags = 0;
1451 			pos++;
1452 		}
1453 	}
1454 	sge[pos] = raw->sge[nseg-1];
1455 	for (i = 1; i < nseg_new; ++i)
1456 		raw->sge[i] = sge[i];
1457 
1458 	free(sge, M_AACRAIDBUF);
1459 	raw->sgeCnt = nseg_new;
1460 	raw->flags |= RIO2_SGL_CONFORMANT;
1461 	raw->sgeNominalSize = pages * PAGE_SIZE;
1462 	return nseg_new;
1463 }
1464 
1465 
1466 /*
1467  * Unmap a command from controller-visible space.
1468  */
1469 static void
1470 aac_unmap_command(struct aac_command *cm)
1471 {
1472 	struct aac_softc *sc;
1473 
1474 	sc = cm->cm_sc;
1475 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1476 
1477 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1478 		return;
1479 
1480 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1481 		if (cm->cm_flags & AAC_CMD_DATAIN)
1482 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1483 					BUS_DMASYNC_POSTREAD);
1484 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1485 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1486 					BUS_DMASYNC_POSTWRITE);
1487 
1488 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1489 	}
1490 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1491 }
1492 
1493 /*
1494  * Hardware Interface
1495  */
1496 
1497 /*
1498  * Initialize the adapter.
1499  */
1500 static void
1501 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1502 {
1503 	struct aac_softc *sc;
1504 
1505 	sc = (struct aac_softc *)arg;
1506 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1507 
1508 	sc->aac_common_busaddr = segs[0].ds_addr;
1509 }
1510 
1511 static int
1512 aac_check_firmware(struct aac_softc *sc)
1513 {
1514 	u_int32_t code, major, minor, maxsize;
1515 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1516 	time_t then;
1517 
1518 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1519 
1520 	/* check if flash update is running */
1521 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1522 		then = time_uptime;
1523 		do {
1524 			code = AAC_GET_FWSTATUS(sc);
1525 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1526 				device_printf(sc->aac_dev,
1527 						  "FATAL: controller not coming ready, "
1528 						   "status %x\n", code);
1529 				return(ENXIO);
1530 			}
1531 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1532 		/*
1533 		 * Delay 10 seconds.  The firmware is doing a soft reset right now,
1534 		 * so do not read the scratch pad register during this time.
1535 		 */
1536 		waitCount = 10 * 10000;
1537 		while (waitCount) {
1538 			DELAY(100);		/* delay 100 microseconds */
1539 			waitCount--;
1540 		}
1541 	}
1542 
1543 	/*
1544 	 * Wait for the adapter to come ready.
1545 	 */
1546 	then = time_uptime;
1547 	do {
1548 		code = AAC_GET_FWSTATUS(sc);
1549 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1550 			device_printf(sc->aac_dev,
1551 				      "FATAL: controller not coming ready, "
1552 					   "status %x\n", code);
1553 			return(ENXIO);
1554 		}
1555 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1556 
1557 	/*
1558 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1559 	 * firmware version 1.x are not compatible with this driver.
1560 	 */
1561 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1562 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1563 				     NULL, NULL)) {
1564 			device_printf(sc->aac_dev,
1565 				      "Error reading firmware version\n");
1566 			return (EIO);
1567 		}
1568 
1569 		/* These numbers are stored as ASCII! */
1570 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1571 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1572 		if (major == 1) {
1573 			device_printf(sc->aac_dev,
1574 			    "Firmware version %d.%d is not supported.\n",
1575 			    major, minor);
1576 			return (EINVAL);
1577 		}
1578 	}
1579 	/*
1580 	 * Retrieve the capabilities/supported options word so we know what
1581 	 * work-arounds to enable.  Some firmware revs don't support this
1582 	 * command.
1583 	 */
1584 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1585 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1586 			device_printf(sc->aac_dev,
1587 			     "RequestAdapterInfo failed\n");
1588 			return (EIO);
1589 		}
1590 	} else {
1591 		options = AAC_GET_MAILBOX(sc, 1);
1592 		atu_size = AAC_GET_MAILBOX(sc, 2);
1593 		sc->supported_options = options;
1594 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1595 
1596 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1597 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1598 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1599 		if (options & AAC_SUPPORTED_NONDASD)
1600 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1601 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1602 			&& (sizeof(bus_addr_t) > 4)
1603 			&& (sc->hint_flags & 0x1)) {
1604 			device_printf(sc->aac_dev,
1605 			    "Enabling 64-bit address support\n");
1606 			sc->flags |= AAC_FLAGS_SG_64BIT;
1607 		}
1608 		if (sc->aac_if.aif_send_command) {
1609 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1610 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1611 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1612 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1613 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1614 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1615 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1616 		}
1617 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1618 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1619 	}
1620 
1621 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1622 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1623 		return (ENXIO);
1624 	}
1625 
1626 	if (sc->hint_flags & 2) {
1627 		device_printf(sc->aac_dev,
1628 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1629 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1630 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1631 		device_printf(sc->aac_dev,
1632 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1633 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1634 	}
1635 
1636 	/* Check for broken hardware that only supports a lower number of commands */
1637 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1638 
1639 	/* Remap mem. resource, if required */
1640 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1641 		bus_release_resource(
1642 			sc->aac_dev, SYS_RES_MEMORY,
1643 			sc->aac_regs_rid0, sc->aac_regs_res0);
1644 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1645 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1646 			atu_size, RF_ACTIVE);
1647 		if (sc->aac_regs_res0 == NULL) {
1648 			sc->aac_regs_res0 = bus_alloc_resource_any(
1649 				sc->aac_dev, SYS_RES_MEMORY,
1650 				&sc->aac_regs_rid0, RF_ACTIVE);
1651 			if (sc->aac_regs_res0 == NULL) {
1652 				device_printf(sc->aac_dev,
1653 					"couldn't allocate register window\n");
1654 				return (ENXIO);
1655 			}
1656 		}
1657 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1658 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1659 	}
1660 
1661 	/* Read preferred settings */
1662 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1663 	sc->aac_max_sectors = 128;				/* 64KB */
1664 	sc->aac_max_aif = 1;
1665 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1666 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1667 		 - sizeof(struct aac_blockwrite64))
1668 		 / sizeof(struct aac_sg_entry64);
1669 	else
1670 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1671 		 - sizeof(struct aac_blockwrite))
1672 		 / sizeof(struct aac_sg_entry);
1673 
1674 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1675 		options = AAC_GET_MAILBOX(sc, 1);
1676 		sc->aac_max_fib_size = (options & 0xFFFF);
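		/*
		 * The upper 16 bits appear to report the limit in 1KB units;
		 * doubling gives 512-byte sectors.
		 */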
1677 		sc->aac_max_sectors = (options >> 16) << 1;
1678 		options = AAC_GET_MAILBOX(sc, 2);
1679 		sc->aac_sg_tablesize = (options >> 16);
1680 		options = AAC_GET_MAILBOX(sc, 3);
1681 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1682 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1683 			sc->aac_max_fibs = (options & 0xFFFF);
1684 		options = AAC_GET_MAILBOX(sc, 4);
1685 		sc->aac_max_aif = (options & 0xFFFF);
1686 		options = AAC_GET_MAILBOX(sc, 5);
1687 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1688 	}
1689 
1690 	maxsize = sc->aac_max_fib_size + 31;
1691 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1692 		maxsize += sizeof(struct aac_fib_xporthdr);
1693 	if (maxsize > PAGE_SIZE) {
1694 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1695 		maxsize = PAGE_SIZE;
1696 	}
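	/* Number of FIBs that fit into one page-sized allocation chunk. */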
1697 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1698 
1699 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1700 		sc->flags |= AAC_FLAGS_RAW_IO;
1701 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1702 	}
1703 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1704 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1705 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1706 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1707 	}
1708 
1709 #ifdef AACRAID_DEBUG
1710 	aacraid_get_fw_debug_buffer(sc);
1711 #endif
1712 	return (0);
1713 }
1714 
1715 static int
1716 aac_init(struct aac_softc *sc)
1717 {
1718 	struct aac_adapter_init	*ip;
1719 	int i, error;
1720 
1721 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1722 
1723 	/* reset rrq index */
1724 	sc->aac_fibs_pushed_no = 0;
1725 	for (i = 0; i < sc->aac_max_msix; i++)
1726 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1727 
1728 	/*
1729 	 * Fill in the init structure.  This tells the adapter about the
1730 	 * physical location of various important shared data structures.
1731 	 */
1732 	ip = &sc->aac_common->ac_init;
1733 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1734 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1735 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1736 		sc->flags |= AAC_FLAGS_RAW_IO;
1737 	}
1738 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1739 
1740 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1741 					 offsetof(struct aac_common, ac_fibs);
1742 	ip->AdapterFibsVirtualAddress = 0;
1743 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1744 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1745 
1746 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1747 				  offsetof(struct aac_common, ac_printf);
1748 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1749 
1750 	/*
1751 	 * The adapter assumes that pages are 4K in size, except on some
1752 	 * broken firmware versions that do the page->byte conversion twice
1753 	 * and therefore treat this value as being in 16MB units (2^24).
1754 	 * Round up, since the granularity is so coarse.
1755 	 */
1756 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1757 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1758 		ip->HostPhysMemPages =
1759 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1760 	}
1761 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1762 
1763 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1764 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1765 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1766 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1767 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1768 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1769 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1770 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1771 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1772 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1773 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1774 	}
1775 	ip->MaxNumAif = sc->aac_max_aif;
1776 	ip->HostRRQ_AddrLow =
1777 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1778 	/* always 32-bit address */
1779 	ip->HostRRQ_AddrHigh = 0;
1780 
1781 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1782 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1783 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1784 		device_printf(sc->aac_dev, "Power Management enabled\n");
1785 	}
1786 
1787 	ip->MaxIoCommands = sc->aac_max_fibs;
1788 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1789 	ip->MaxFibSize = sc->aac_max_fib_size;
1790 
1791 	/*
1792 	 * Do controller-type-specific initialisation
1793 	 */
1794 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1795 
1796 	/*
1797 	 * Give the init structure to the controller.
1798 	 */
1799 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1800 			     sc->aac_common_busaddr +
1801 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1802 			     NULL, NULL)) {
1803 		device_printf(sc->aac_dev,
1804 			      "error establishing init structure\n");
1805 		error = EIO;
1806 		goto out;
1807 	}
1808 
1809 	/*
1810 	 * Check configuration issues
1811 	 */
1812 	if ((error = aac_check_config(sc)) != 0)
1813 		goto out;
1814 
1815 	error = 0;
1816 out:
1817 	return(error);
1818 }
1819 
1820 static void
1821 aac_define_int_mode(struct aac_softc *sc)
1822 {
1823 	device_t dev;
1824 	int cap, msi_count, error = 0;
1825 	uint32_t val;
1826 
1827 	dev = sc->aac_dev;
1828 
1829 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1830 		device_printf(dev, "using line interrupts\n");
1831 		sc->aac_max_msix = 1;
1832 		sc->aac_vector_cap = sc->aac_max_fibs;
1833 		return;
1834 	}
1835 
1836 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1837 	if (sc->aac_max_msix == 0) {
1838 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1839 			msi_count = 1;
1840 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1841 				device_printf(dev, "alloc msi failed - err=%d; "
1842 				    "will use INTx\n", error);
1843 				pci_release_msi(dev);
1844 			} else {
1845 				sc->msi_tupelo = TRUE;
1846 			}
1847 		}
1848 		if (sc->msi_tupelo)
1849 			device_printf(dev, "using MSI interrupts\n");
1850 		else
1851 			device_printf(dev, "using line interrupts\n");
1852 
1853 		sc->aac_max_msix = 1;
1854 		sc->aac_vector_cap = sc->aac_max_fibs;
1855 		return;
1856 	}
1857 
1858 	/* OS capability */
1859 	msi_count = pci_msix_count(dev);
1860 	if (msi_count > AAC_MAX_MSIX)
1861 		msi_count = AAC_MAX_MSIX;
1862 	if (msi_count > sc->aac_max_msix)
1863 		msi_count = sc->aac_max_msix;
1864 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1865 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1866 				   "will try MSI\n", msi_count, error);
1867 		pci_release_msi(dev);
1868 	} else {
1869 		sc->msi_enabled = TRUE;
1870 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1871 			msi_count);
1872 	}
1873 
1874 	if (!sc->msi_enabled) {
1875 		msi_count = 1;
1876 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1877 			device_printf(dev, "alloc msi failed - err=%d; "
1878 				           "will use INTx\n", error);
1879 			pci_release_msi(dev);
1880 		} else {
1881 			sc->msi_enabled = TRUE;
1882 			device_printf(dev, "using MSI interrupts\n");
1883 		}
1884 	}
1885 
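	/*
	 * Even though the OS granted us vectors, verify that the MSI-X
	 * enable bit is actually set in the controller's PCI configuration
	 * space; if it is not, release the vectors and fall back to legacy
	 * interrupts below.
	 */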
1886 	if (sc->msi_enabled) {
1887 		/* now read controller capability from PCI config. space */
1888 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1889 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1890 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1891 			pci_release_msi(dev);
1892 			sc->msi_enabled = FALSE;
1893 		}
1894 	}
1895 
1896 	if (!sc->msi_enabled) {
1897 		device_printf(dev, "using legacy interrupts\n");
1898 		sc->aac_max_msix = 1;
1899 	} else {
1900 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1901 		if (sc->aac_max_msix > msi_count)
1902 			sc->aac_max_msix = msi_count;
1903 	}
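	/*
	 * Give each interrupt vector an equal share of the FIB pool.  The
	 * send path (aac_src_send_command) uses this per-vector budget to
	 * spread outstanding commands across the MSI-X vectors.
	 */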
1904 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1905 
1906 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1907 		sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1908 }
1909 
1910 static int
1911 aac_find_pci_capability(struct aac_softc *sc, int cap)
1912 {
1913 	device_t dev;
1914 	uint32_t status;
1915 	uint8_t ptr;
1916 
1917 	dev = sc->aac_dev;
1918 
1919 	status = pci_read_config(dev, PCIR_STATUS, 2);
1920 	if (!(status & PCIM_STATUS_CAPPRESENT))
1921 		return (0);
1922 
1923 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1924 	switch (status & PCIM_HDRTYPE) {
1925 	case 0:
1926 	case 1:
1927 		ptr = PCIR_CAP_PTR;
1928 		break;
1929 	case 2:
1930 		ptr = PCIR_CAP_PTR_2;
1931 		break;
1932 	default:
1933 		return (0);
1934 		break;
1935 	}
1936 	ptr = pci_read_config(dev, ptr, 1);
1937 
1938 	while (ptr != 0) {
1939 		int next, val;
1940 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1941 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1942 		if (val == cap)
1943 			return (ptr);
1944 		ptr = next;
1945 	}
1946 
1947 	return (0);
1948 }
1949 
1950 static int
1951 aac_setup_intr(struct aac_softc *sc)
1952 {
1953 	int i, msi_count, rid;
1954 	struct resource *res;
1955 	void *tag;
1956 
1957 	msi_count = sc->aac_max_msix;
1958 	rid = ((sc->msi_enabled || sc->msi_tupelo) ? 1 : 0);
1959 
1960 	for (i = 0; i < msi_count; i++, rid++) {
1961 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1962 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1963 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1964 			return (EINVAL);
1965 		}
1966 		sc->aac_irq_rid[i] = rid;
1967 		sc->aac_irq[i] = res;
1968 		if (aac_bus_setup_intr(sc->aac_dev, res,
1969 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1970 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1971 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1972 			return (EINVAL);
1973 		}
1974 		sc->aac_msix[i].vector_no = i;
1975 		sc->aac_msix[i].sc = sc;
1976 		sc->aac_intr[i] = tag;
1977 	}
1978 
1979 	return (0);
1980 }
1981 
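/*
 * Ask the adapter for its configuration status and, if the adapter reports
 * that the configuration is safe to commit automatically, commit it.  The
 * negative return values distinguish the possible failure cases.
 */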
1982 static int
1983 aac_check_config(struct aac_softc *sc)
1984 {
1985 	struct aac_fib *fib;
1986 	struct aac_cnt_config *ccfg;
1987 	struct aac_cf_status_hdr *cf_shdr;
1988 	int rval;
1989 
1990 	mtx_lock(&sc->aac_io_lock);
1991 	aac_alloc_sync_fib(sc, &fib);
1992 
1993 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1994 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1995 	ccfg->Command = VM_ContainerConfig;
1996 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1997 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1998 
1999 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2000 		sizeof (struct aac_cnt_config));
2001 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2002 	if (rval == 0 && ccfg->Command == ST_OK &&
2003 		ccfg->CTCommand.param[0] == CT_OK) {
2004 		if (cf_shdr->action <= CFACT_PAUSE) {
2005 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2006 			ccfg->Command = VM_ContainerConfig;
2007 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2008 
2009 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2010 				sizeof (struct aac_cnt_config));
2011 			if (rval == 0 && ccfg->Command == ST_OK &&
2012 				ccfg->CTCommand.param[0] == CT_OK) {
2013 				/* successful completion */
2014 				rval = 0;
2015 			} else {
2016 				/* auto commit aborted due to error(s) */
2017 				rval = -2;
2018 			}
2019 		} else {
2020 			/* auto commit aborted due to adapter indicating
2021 			   config. issues too dangerous to auto commit  */
2022 			rval = -3;
2023 		}
2024 	} else {
2025 		/* error */
2026 		rval = -1;
2027 	}
2028 
2029 	aac_release_sync_fib(sc);
2030 	mtx_unlock(&sc->aac_io_lock);
2031 	return(rval);
2032 }
2033 
2034 /*
2035  * Send a synchronous command to the controller and wait for a result.
2036  * Indicate if the controller completed the command with an error status.
2037  */
2038 int
2039 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2040 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2041 		 u_int32_t *sp, u_int32_t *r1)
2042 {
2043 	time_t then;
2044 	u_int32_t status;
2045 
2046 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2047 
2048 	/* populate the mailbox */
2049 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2050 
2051 	/* ensure the sync command doorbell flag is cleared */
2052 	if (!sc->msi_enabled)
2053 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2054 
2055 	/* then set it to signal the adapter */
2056 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2057 
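	/*
	 * Spin for completion unless this is a SYNCFIB that the caller asked
	 * to fire and forget (sp supplied and *sp preset to 0).
	 */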
2058 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2059 		/* spin waiting for the command to complete */
2060 		then = time_uptime;
2061 		do {
2062 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2063 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2064 				return(EIO);
2065 			}
2066 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2067 
2068 		/* clear the completion flag */
2069 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2070 
2071 		/* get the command status */
2072 		status = AAC_GET_MAILBOX(sc, 0);
2073 		if (sp != NULL)
2074 			*sp = status;
2075 
2076 		/* return parameter */
2077 		if (r1 != NULL)
2078 			*r1 = AAC_GET_MAILBOX(sc, 1);
2079 
2080 		if (status != AAC_SRB_STS_SUCCESS)
2081 			return (-1);
2082 	}
2083 	return(0);
2084 }
2085 
2086 static int
2087 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2088 		 struct aac_fib *fib, u_int16_t datasize)
2089 {
2090 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2091 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2092 
2093 	if (datasize > AAC_FIB_DATASIZE)
2094 		return(EINVAL);
2095 
2096 	/*
2097 	 * Set up the sync FIB
2098 	 */
2099 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2100 				AAC_FIBSTATE_INITIALISED |
2101 				AAC_FIBSTATE_EMPTY;
2102 	fib->Header.XferState |= xferstate;
2103 	fib->Header.Command = command;
2104 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2105 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2106 	fib->Header.SenderSize = sizeof(struct aac_fib);
2107 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2108 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2109 		offsetof(struct aac_common, ac_sync_fib);
2110 
2111 	/*
2112 	 * Give the FIB to the controller, wait for a response.
2113 	 */
2114 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2115 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2116 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2117 		return(EIO);
2118 	}
2119 
2120 	return (0);
2121 }
2122 
2123 /*
2124  * Check for commands that have been outstanding for a suspiciously long time,
2125  * and complain about them.
2126  */
2127 static void
2128 aac_timeout(struct aac_softc *sc)
2129 {
2130 	struct aac_command *cm;
2131 	time_t deadline;
2132 	int timedout;
2133 
2134 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2135 	/*
2136 	 * Traverse the busy command list and complain about commands that
2137 	 * have been outstanding for too long.
2138 	 */
2139 	timedout = 0;
2140 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2141 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2142 		if (cm->cm_timestamp < deadline) {
2143 			device_printf(sc->aac_dev,
2144 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2145 				      cm, (int)(time_uptime-cm->cm_timestamp));
2146 			AAC_PRINT_FIB(sc, cm->cm_fib);
2147 			timedout++;
2148 		}
2149 	}
2150 
2151 	if (timedout)
2152 		aac_reset_adapter(sc);
2153 	aacraid_print_queues(sc);
2154 }
2155 
2156 /*
2157  * Interface Function Vectors
2158  */
2159 
2160 /*
2161  * Read the current firmware status word.
2162  */
2163 static int
2164 aac_src_get_fwstatus(struct aac_softc *sc)
2165 {
2166 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2167 
2168 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2169 }
2170 
2171 /*
2172  * Notify the controller of a change in a given queue
2173  */
2174 static void
2175 aac_src_qnotify(struct aac_softc *sc, int qbit)
2176 {
2177 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2178 
2179 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2180 }
2181 
2182 /*
2183  * Get the interrupt reason bits
2184  */
2185 static int
2186 aac_src_get_istatus(struct aac_softc *sc)
2187 {
2188 	int val;
2189 
2190 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2191 
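	/*
	 * In MSI(-X) mode the dedicated doorbell register only reports
	 * whether a sync command completed; in INTx mode the full outbound
	 * doorbell register is read and shifted into the generic status
	 * bits.
	 */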
2192 	if (sc->msi_enabled) {
2193 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2194 		if (val & AAC_MSI_SYNC_STATUS)
2195 			val = AAC_DB_SYNC_COMMAND;
2196 		else
2197 			val = 0;
2198 	} else {
2199 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2200 	}
2201 	return(val);
2202 }
2203 
2204 /*
2205  * Clear some interrupt reason bits
2206  */
2207 static void
2208 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2209 {
2210 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2211 
2212 	if (sc->msi_enabled) {
2213 		if (mask == AAC_DB_SYNC_COMMAND)
2214 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2215 	} else {
2216 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2217 	}
2218 }
2219 
2220 /*
2221  * Populate the mailbox and set the command word
2222  */
2223 static void
2224 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2225 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2226 {
2227 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2228 
2229 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2232 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2233 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2234 }
2235 
2236 static void
2237 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2238 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2239 {
2240 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2241 
2242 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2245 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2246 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2247 }
2248 
2249 /*
2250  * Fetch the immediate command status word
2251  */
2252 static int
2253 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2254 {
2255 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2256 
2257 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2258 }
2259 
2260 static int
2261 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2262 {
2263 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2264 
2265 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2266 }
2267 
2268 /*
2269  * Set/clear interrupt masks
2270  */
2271 static void
2272 aac_src_access_devreg(struct aac_softc *sc, int mode)
2273 {
2274 	u_int32_t val;
2275 
2276 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2277 
2278 	switch (mode) {
2279 	case AAC_ENABLE_INTERRUPT:
2280 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2281 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2282 				           AAC_INT_ENABLE_TYPE1_INTX));
2283 		break;
2284 
2285 	case AAC_DISABLE_INTERRUPT:
2286 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2287 		break;
2288 
2289 	case AAC_ENABLE_MSIX:
2290 		/* set bit 6 */
2291 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2292 		val |= 0x40;
2293 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2294 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2295 		/* unmask int. */
2296 		val = PMC_ALL_INTERRUPT_BITS;
2297 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2298 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2299 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2300 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2301 		break;
2302 
2303 	case AAC_DISABLE_MSIX:
2304 		/* reset bit 6 */
2305 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2306 		val &= ~0x40;
2307 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2308 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2309 		break;
2310 
2311 	case AAC_CLEAR_AIF_BIT:
2312 		/* set bit 5 */
2313 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2314 		val |= 0x20;
2315 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2316 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2317 		break;
2318 
2319 	case AAC_CLEAR_SYNC_BIT:
2320 		/* set bit 4 */
2321 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2322 		val |= 0x10;
2323 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2324 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2325 		break;
2326 
2327 	case AAC_ENABLE_INTX:
2328 		/* set bit 7 */
2329 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2330 		val |= 0x80;
2331 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2332 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2333 		/* unmask int. */
2334 		val = PMC_ALL_INTERRUPT_BITS;
2335 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2336 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2337 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2338 			val & (~(PMC_GLOBAL_INT_BIT2)));
2339 		break;
2340 
2341 	default:
2342 		break;
2343 	}
2344 }
2345 
2346 /*
2347  * New comm. interface: Send command functions
2348  */
2349 static int
2350 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2351 {
2352 	struct aac_fib_xporthdr *pFibX;
2353 	u_int32_t fibsize, high_addr;
2354 	u_int64_t address;
2355 
2356 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2357 
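	/*
	 * When multiple MSI-X vectors are in use, pick a response vector for
	 * this FIB (AIF requests keep the default).  The starting point
	 * rotates with the number of FIBs pushed so far, the loop only
	 * considers vectors 1..aac_max_msix-1, and the first vector with
	 * room under its per-vector budget (aac_vector_cap) wins; if every
	 * vector is saturated, vector 0 is used as a fallback.  The chosen
	 * vector is encoded in the upper 16 bits of the FIB handle.
	 */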
2358 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2359 		sc->aac_max_msix > 1) {
2360 		u_int16_t vector_no, first_choice = 0xffff;
2361 
2362 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2363 		do {
2364 			vector_no += 1;
2365 			if (vector_no == sc->aac_max_msix)
2366 				vector_no = 1;
2367 			if (sc->aac_rrq_outstanding[vector_no] <
2368 				sc->aac_vector_cap)
2369 				break;
2370 			if (0xffff == first_choice)
2371 				first_choice = vector_no;
2372 			else if (vector_no == first_choice)
2373 				break;
2374 		} while (1);
2375 		if (vector_no == first_choice)
2376 			vector_no = 0;
2377 		sc->aac_rrq_outstanding[vector_no]++;
2378 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2379 			sc->aac_fibs_pushed_no = 0;
2380 		else
2381 			sc->aac_fibs_pushed_no++;
2382 
2383 		cm->cm_fib->Header.Handle += (vector_no << 16);
2384 	}
2385 
2386 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2387 		/* Calculate the fibsize field: FIB size in 128-byte units, minus one */
2388 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2389 		/* Fill new FIB header */
2390 		address = cm->cm_fibphys;
2391 		high_addr = (u_int32_t)(address >> 32);
2392 		if (high_addr == 0L) {
2393 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2394 			cm->cm_fib->Header.u.TimeStamp = 0L;
2395 		} else {
2396 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2397 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2398 		}
2399 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2400 	} else {
2401 		/* Calculate the fibsize field, including the transport header */
2402 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2403 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2404 		/* Fill XPORT header */
2405 		pFibX = (struct aac_fib_xporthdr *)
2406 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2407 		pFibX->Handle = cm->cm_fib->Header.Handle;
2408 		pFibX->HostAddress = cm->cm_fibphys;
2409 		pFibX->Size = cm->cm_fib->Header.Size;
2410 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2411 		high_addr = (u_int32_t)(address >> 32);
2412 	}
2413 
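	/*
	 * The FIB size is encoded in the low bits of the address written to
	 * the inbound queue register, expressed in 128-byte units minus one
	 * and capped at 31.  Controllers using the 64-bit queue registers
	 * get the upper half of the address written separately.
	 */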
2414 	if (fibsize > 31)
2415 		fibsize = 31;
2416 	aac_enqueue_busy(cm);
2417 	if (high_addr) {
2418 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2419 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2420 	} else {
2421 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2422 	}
2423 	return 0;
2424 }
2425 
2426 /*
2427  * New comm. interface: get, set outbound queue index
2428  */
2429 static int
2430 aac_src_get_outb_queue(struct aac_softc *sc)
2431 {
2432 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2433 
2434 	return(-1);
2435 }
2436 
2437 static void
2438 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2439 {
2440 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2441 }
2442 
2443 /*
2444  * Debugging and Diagnostics
2445  */
2446 
2447 /*
2448  * Print some information about the controller.
2449  */
2450 static void
2451 aac_describe_controller(struct aac_softc *sc)
2452 {
2453 	struct aac_fib *fib;
2454 	struct aac_adapter_info	*info;
2455 	char *adapter_type = "Adaptec RAID controller";
2456 
2457 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2458 
2459 	mtx_lock(&sc->aac_io_lock);
2460 	aac_alloc_sync_fib(sc, &fib);
2461 
2462 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2463 		fib->data[0] = 0;
2464 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2465 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2466 		else {
2467 			struct aac_supplement_adapter_info *supp_info;
2468 
2469 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2470 			adapter_type = (char *)supp_info->AdapterTypeText;
2471 			sc->aac_feature_bits = supp_info->FeatureBits;
2472 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2473 		}
2474 	}
2475 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2476 		adapter_type,
2477 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2478 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2479 
2480 	fib->data[0] = 0;
2481 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2482 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2483 		aac_release_sync_fib(sc);
2484 		mtx_unlock(&sc->aac_io_lock);
2485 		return;
2486 	}
2487 
2488 	/* save the kernel revision structure for later use */
2489 	info = (struct aac_adapter_info *)&fib->data[0];
2490 	sc->aac_revision = info->KernelRevision;
2491 
2492 	if (bootverbose) {
2493 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2494 		    "(%dMB cache, %dMB execution), %s\n",
2495 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2496 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2497 		    info->BufferMem / (1024 * 1024),
2498 		    info->ExecutionMem / (1024 * 1024),
2499 		    aac_describe_code(aac_battery_platform,
2500 		    info->batteryPlatform));
2501 
2502 		device_printf(sc->aac_dev,
2503 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2504 		    info->KernelRevision.external.comp.major,
2505 		    info->KernelRevision.external.comp.minor,
2506 		    info->KernelRevision.external.comp.dash,
2507 		    info->KernelRevision.buildNumber,
2508 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2509 
2510 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2511 			      sc->supported_options,
2512 			      "\20"
2513 			      "\1SNAPSHOT"
2514 			      "\2CLUSTERS"
2515 			      "\3WCACHE"
2516 			      "\4DATA64"
2517 			      "\5HOSTTIME"
2518 			      "\6RAID50"
2519 			      "\7WINDOW4GB"
2520 			      "\10SCSIUPGD"
2521 			      "\11SOFTERR"
2522 			      "\12NORECOND"
2523 			      "\13SGMAP64"
2524 			      "\14ALARM"
2525 			      "\15NONDASD"
2526 			      "\16SCSIMGT"
2527 			      "\17RAIDSCSI"
2528 			      "\21ADPTINFO"
2529 			      "\22NEWCOMM"
2530 			      "\23ARRAY64BIT"
2531 			      "\24HEATSENSOR");
2532 	}
2533 
2534 	aac_release_sync_fib(sc);
2535 	mtx_unlock(&sc->aac_io_lock);
2536 }
2537 
2538 /*
2539  * Look up a text description of a numeric error code and return a pointer to
2540  * same.
2541  */
2542 static char *
2543 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2544 {
2545 	int i;
2546 
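	/*
	 * The lookup tables passed in here are expected to end with an entry
	 * whose string is NULL, followed by a catch-all entry that is
	 * returned when no code matches.
	 */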
2547 	for (i = 0; table[i].string != NULL; i++)
2548 		if (table[i].code == code)
2549 			return(table[i].string);
2550 	return(table[i + 1].string);
2551 }
2552 
2553 /*
2554  * Management Interface
2555  */
2556 
2557 static int
2558 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2559 {
2560 	struct aac_softc *sc;
2561 
2562 	sc = dev->si_drv1;
2563 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2564 	device_busy(sc->aac_dev);
2565 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2566 	return 0;
2567 }
2568 
2569 static int
2570 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2571 {
2572 	union aac_statrequest *as;
2573 	struct aac_softc *sc;
2574 	int error = 0;
2575 
2576 	as = (union aac_statrequest *)arg;
2577 	sc = dev->si_drv1;
2578 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2579 
2580 	switch (cmd) {
2581 	case AACIO_STATS:
2582 		switch (as->as_item) {
2583 		case AACQ_FREE:
2584 		case AACQ_READY:
2585 		case AACQ_BUSY:
2586 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2587 			      sizeof(struct aac_qstat));
2588 			break;
2589 		default:
2590 			error = ENOENT;
2591 			break;
2592 		}
2593 		break;
2594 
2595 	case FSACTL_SENDFIB:
2596 	case FSACTL_SEND_LARGE_FIB:
2597 		arg = *(caddr_t*)arg;
2598 	case FSACTL_LNX_SENDFIB:
2599 	case FSACTL_LNX_SEND_LARGE_FIB:
2600 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2601 		error = aac_ioctl_sendfib(sc, arg);
2602 		break;
2603 	case FSACTL_SEND_RAW_SRB:
2604 		arg = *(caddr_t*)arg;
2605 	case FSACTL_LNX_SEND_RAW_SRB:
2606 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2607 		error = aac_ioctl_send_raw_srb(sc, arg);
2608 		break;
2609 	case FSACTL_AIF_THREAD:
2610 	case FSACTL_LNX_AIF_THREAD:
2611 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2612 		error = EINVAL;
2613 		break;
2614 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2615 		arg = *(caddr_t*)arg;
2616 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2617 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2618 		error = aac_open_aif(sc, arg);
2619 		break;
2620 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2621 		arg = *(caddr_t*)arg;
2622 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2623 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2624 		error = aac_getnext_aif(sc, arg);
2625 		break;
2626 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2627 		arg = *(caddr_t*)arg;
2628 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2629 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2630 		error = aac_close_aif(sc, arg);
2631 		break;
2632 	case FSACTL_MINIPORT_REV_CHECK:
2633 		arg = *(caddr_t*)arg;
2634 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2635 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2636 		error = aac_rev_check(sc, arg);
2637 		break;
2638 	case FSACTL_QUERY_DISK:
2639 		arg = *(caddr_t*)arg;
2640 	case FSACTL_LNX_QUERY_DISK:
2641 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2642 		error = aac_query_disk(sc, arg);
2643 		break;
2644 	case FSACTL_DELETE_DISK:
2645 	case FSACTL_LNX_DELETE_DISK:
2646 		/*
2647 		 * We don't trust userland to tell us when to delete a
2648 		 * container; rather, we rely on an AIF coming from the
2649 		 * controller.
2650 		 */
2651 		error = 0;
2652 		break;
2653 	case FSACTL_GET_PCI_INFO:
2654 		arg = *(caddr_t*)arg;
2655 	case FSACTL_LNX_GET_PCI_INFO:
2656 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2657 		error = aac_get_pci_info(sc, arg);
2658 		break;
2659 	case FSACTL_GET_FEATURES:
2660 		arg = *(caddr_t*)arg;
2661 	case FSACTL_LNX_GET_FEATURES:
2662 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2663 		error = aac_supported_features(sc, arg);
2664 		break;
2665 	default:
2666 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2667 		error = EINVAL;
2668 		break;
2669 	}
2670 	return(error);
2671 }
2672 
2673 static int
2674 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2675 {
2676 	struct aac_softc *sc;
2677 	struct aac_fib_context *ctx;
2678 	int revents;
2679 
2680 	sc = dev->si_drv1;
2681 	revents = 0;
2682 
2683 	mtx_lock(&sc->aac_io_lock);
2684 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2685 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2686 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2687 				revents |= poll_events & (POLLIN | POLLRDNORM);
2688 				break;
2689 			}
2690 		}
2691 	}
2692 	mtx_unlock(&sc->aac_io_lock);
2693 
2694 	if (revents == 0) {
2695 		if (poll_events & (POLLIN | POLLRDNORM))
2696 			selrecord(td, &sc->rcv_select);
2697 	}
2698 
2699 	return (revents);
2700 }
2701 
2702 static void
2703 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2704 {
2705 
2706 	switch (event->ev_type) {
2707 	case AAC_EVENT_CMFREE:
2708 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2709 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2710 			aacraid_add_event(sc, event);
2711 			return;
2712 		}
2713 		free(event, M_AACRAIDBUF);
2714 		wakeup(arg);
2715 		break;
2716 	default:
2717 		break;
2718 	}
2719 }
2720 
2721 /*
2722  * Send a FIB supplied from userspace
2723  */
2724 static int
2725 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2726 {
2727 	struct aac_command *cm;
2728 	int size, error;
2729 
2730 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2731 
2732 	cm = NULL;
2733 
2734 	/*
2735 	 * Get a command
2736 	 */
2737 	mtx_lock(&sc->aac_io_lock);
2738 	if (aacraid_alloc_command(sc, &cm)) {
2739 		struct aac_event *event;
2740 
2741 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2742 		    M_NOWAIT | M_ZERO);
2743 		if (event == NULL) {
2744 			error = EBUSY;
2745 			mtx_unlock(&sc->aac_io_lock);
2746 			goto out;
2747 		}
2748 		event->ev_type = AAC_EVENT_CMFREE;
2749 		event->ev_callback = aac_ioctl_event;
2750 		event->ev_arg = &cm;
2751 		aacraid_add_event(sc, event);
2752 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2753 	}
2754 	mtx_unlock(&sc->aac_io_lock);
2755 
2756 	/*
2757 	 * Fetch the FIB header, then re-copy to get data as well.
2758 	 */
2759 	if ((error = copyin(ufib, cm->cm_fib,
2760 			    sizeof(struct aac_fib_header))) != 0)
2761 		goto out;
2762 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2763 	if (size > sc->aac_max_fib_size) {
2764 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2765 			      size, sc->aac_max_fib_size);
2766 		size = sc->aac_max_fib_size;
2767 	}
2768 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2769 		goto out;
2770 	cm->cm_fib->Header.Size = size;
2771 	cm->cm_timestamp = time_uptime;
2772 	cm->cm_datalen = 0;
2773 
2774 	/*
2775 	 * Pass the FIB to the controller, wait for it to complete.
2776 	 */
2777 	mtx_lock(&sc->aac_io_lock);
2778 	error = aacraid_wait_command(cm);
2779 	mtx_unlock(&sc->aac_io_lock);
2780 	if (error != 0) {
2781 		device_printf(sc->aac_dev,
2782 			      "aacraid_wait_command return %d\n", error);
2783 		goto out;
2784 	}
2785 
2786 	/*
2787 	 * Copy the FIB and data back out to the caller.
2788 	 */
2789 	size = cm->cm_fib->Header.Size;
2790 	if (size > sc->aac_max_fib_size) {
2791 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2792 			      size, sc->aac_max_fib_size);
2793 		size = sc->aac_max_fib_size;
2794 	}
2795 	error = copyout(cm->cm_fib, ufib, size);
2796 
2797 out:
2798 	if (cm != NULL) {
2799 		mtx_lock(&sc->aac_io_lock);
2800 		aacraid_release_command(cm);
2801 		mtx_unlock(&sc->aac_io_lock);
2802 	}
2803 	return(error);
2804 }
2805 
2806 /*
2807  * Send a passthrough FIB supplied from userspace
2808  */
2809 static int
2810 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2811 {
2812 	struct aac_command *cm;
2813 	struct aac_fib *fib;
2814 	struct aac_srb *srbcmd;
2815 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2816 	void *user_reply;
2817 	int error, transfer_data = 0;
2818 	bus_dmamap_t orig_map = 0;
2819 	u_int32_t fibsize = 0;
2820 	u_int64_t srb_sg_address;
2821 	u_int32_t srb_sg_bytecount;
2822 
2823 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2824 
2825 	cm = NULL;
2826 
2827 	mtx_lock(&sc->aac_io_lock);
2828 	if (aacraid_alloc_command(sc, &cm)) {
2829 		struct aac_event *event;
2830 
2831 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2832 		    M_NOWAIT | M_ZERO);
2833 		if (event == NULL) {
2834 			error = EBUSY;
2835 			mtx_unlock(&sc->aac_io_lock);
2836 			goto out;
2837 		}
2838 		event->ev_type = AAC_EVENT_CMFREE;
2839 		event->ev_callback = aac_ioctl_event;
2840 		event->ev_arg = &cm;
2841 		aacraid_add_event(sc, event);
2842 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2843 	}
2844 	mtx_unlock(&sc->aac_io_lock);
2845 
2846 	cm->cm_data = NULL;
2847 	/* save original dma map */
2848 	orig_map = cm->cm_datamap;
2849 
2850 	fib = cm->cm_fib;
2851 	srbcmd = (struct aac_srb *)fib->data;
2852 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2853 	    sizeof (u_int32_t))) != 0)
2854 		goto out;
2855 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2856 		error = EINVAL;
2857 		goto out;
2858 	}
2859 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2860 		goto out;
2861 
2862 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2863 	srbcmd->retry_limit = 0;	/* obsolete */
2864 
2865 	/* only one sg element from userspace supported */
2866 	if (srbcmd->sg_map.SgCount > 1) {
2867 		error = EINVAL;
2868 		goto out;
2869 	}
2870 	/* check fibsize: it tells us whether the S/G entry is 32-bit or 64-bit */
2871 	if (fibsize == (sizeof(struct aac_srb) +
2872 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2873 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2874 		struct aac_sg_entry sg;
2875 
2876 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2877 			goto out;
2878 
2879 		srb_sg_bytecount = sg.SgByteCount;
2880 		srb_sg_address = (u_int64_t)sg.SgAddress;
2881 	} else if (fibsize == (sizeof(struct aac_srb) +
2882 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2883 #ifdef __LP64__
2884 		struct aac_sg_entry64 *sgp =
2885 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2886 		struct aac_sg_entry64 sg;
2887 
2888 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2889 			goto out;
2890 
2891 		srb_sg_bytecount = sg.SgByteCount;
2892 		srb_sg_address = sg.SgAddress;
2893 #else
2894 		error = EINVAL;
2895 		goto out;
2896 #endif
2897 	} else {
2898 		error = EINVAL;
2899 		goto out;
2900 	}
2901 	user_reply = (char *)arg + fibsize;
2902 	srbcmd->data_len = srb_sg_bytecount;
2903 	if (srbcmd->sg_map.SgCount == 1)
2904 		transfer_data = 1;
2905 
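	/*
	 * If the request moves data, build a dedicated DMA tag and buffer
	 * sized to the single user S/G element for this command; the
	 * command's original DMA map was saved above and is restored on the
	 * way out.
	 */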
2906 	if (transfer_data) {
2907 		/*
2908 		 * Create DMA tag for the passthr. data buffer and allocate it.
2909 		 */
2910 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2911 			1, 0,			/* algnmnt, boundary */
2912 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2913 			BUS_SPACE_MAXADDR_32BIT :
2914 			0x7fffffff,		/* lowaddr */
2915 			BUS_SPACE_MAXADDR, 	/* highaddr */
2916 			NULL, NULL, 		/* filter, filterarg */
2917 			srb_sg_bytecount, 	/* size */
2918 			sc->aac_sg_tablesize,	/* nsegments */
2919 			srb_sg_bytecount, 	/* maxsegsize */
2920 			0,			/* flags */
2921 			NULL, NULL,		/* No locking needed */
2922 			&cm->cm_passthr_dmat)) {
2923 			error = ENOMEM;
2924 			goto out;
2925 		}
2926 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2927 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2928 			error = ENOMEM;
2929 			goto out;
2930 		}
2931 		/* fill some cm variables */
2932 		cm->cm_datalen = srb_sg_bytecount;
2933 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2934 			cm->cm_flags |= AAC_CMD_DATAIN;
2935 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2936 			cm->cm_flags |= AAC_CMD_DATAOUT;
2937 
2938 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2939 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2940 				cm->cm_data, cm->cm_datalen)) != 0)
2941 				goto out;
2942 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2943 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2944 				BUS_DMASYNC_PREWRITE);
2945 		}
2946 	}
2947 
2948 	/* build the FIB */
2949 	fib->Header.Size = sizeof(struct aac_fib_header) +
2950 		sizeof(struct aac_srb);
2951 	fib->Header.XferState =
2952 		AAC_FIBSTATE_HOSTOWNED   |
2953 		AAC_FIBSTATE_INITIALISED |
2954 		AAC_FIBSTATE_EMPTY	 |
2955 		AAC_FIBSTATE_FROMHOST	 |
2956 		AAC_FIBSTATE_REXPECTED   |
2957 		AAC_FIBSTATE_NORM	 |
2958 		AAC_FIBSTATE_ASYNC;
2959 
2960 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2961 		ScsiPortCommandU64 : ScsiPortCommand;
2962 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2963 
2964 	/* send command */
2965 	if (transfer_data) {
2966 		bus_dmamap_load(cm->cm_passthr_dmat,
2967 			cm->cm_datamap, cm->cm_data,
2968 			cm->cm_datalen,
2969 			aacraid_map_command_sg, cm, 0);
2970 	} else {
2971 		aacraid_map_command_sg(cm, NULL, 0, 0);
2972 	}
2973 
2974 	/* wait for completion */
2975 	mtx_lock(&sc->aac_io_lock);
2976 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2977 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2978 	mtx_unlock(&sc->aac_io_lock);
2979 
2980 	/* copy data */
2981 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2982 		if ((error = copyout(cm->cm_data,
2983 			(void *)(uintptr_t)srb_sg_address,
2984 			cm->cm_datalen)) != 0)
2985 			goto out;
2986 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2987 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2988 				BUS_DMASYNC_POSTREAD);
2989 	}
2990 
2991 	/* status */
2992 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2993 
2994 out:
2995 	if (cm && cm->cm_data) {
2996 		if (transfer_data)
2997 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2998 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2999 		cm->cm_datamap = orig_map;
3000 	}
3001 	if (cm && cm->cm_passthr_dmat)
3002 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3003 	if (cm) {
3004 		mtx_lock(&sc->aac_io_lock);
3005 		aacraid_release_command(cm);
3006 		mtx_unlock(&sc->aac_io_lock);
3007 	}
3008 	return(error);
3009 }
3010 
3011 /*
3012  * Request an AIF from the controller (new comm. type1)
3013  */
3014 static void
3015 aac_request_aif(struct aac_softc *sc)
3016 {
3017 	struct aac_command *cm;
3018 	struct aac_fib *fib;
3019 
3020 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3021 
3022 	if (aacraid_alloc_command(sc, &cm)) {
3023 		sc->aif_pending = 1;
3024 		return;
3025 	}
3026 	sc->aif_pending = 0;
3027 
3028 	/* build the FIB */
3029 	fib = cm->cm_fib;
3030 	fib->Header.Size = sizeof(struct aac_fib);
3031 	fib->Header.XferState =
3032 		AAC_FIBSTATE_HOSTOWNED   |
3033 		AAC_FIBSTATE_INITIALISED |
3034 		AAC_FIBSTATE_EMPTY	 |
3035 		AAC_FIBSTATE_FROMHOST	 |
3036 		AAC_FIBSTATE_REXPECTED   |
3037 		AAC_FIBSTATE_NORM	 |
3038 		AAC_FIBSTATE_ASYNC;
3039 	/* set AIF marker */
3040 	fib->Header.Handle = 0x00800000;
3041 	fib->Header.Command = AifRequest;
3042 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3043 
3044 	aacraid_map_command_sg(cm, NULL, 0, 0);
3045 }
3046 
3047 
3048 /*
3049  * cdevpriv interface private destructor.
3050  */
3051 static void
3052 aac_cdevpriv_dtor(void *arg)
3053 {
3054 	struct aac_softc *sc;
3055 
3056 	sc = arg;
3057 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3058 	device_unbusy(sc->aac_dev);
3059 }
3060 
3061 /*
3062  * Handle an AIF sent to us by the controller; queue it for later reference.
3063  * If the queue fills up, then drop the older entries.
3064  */
3065 static void
3066 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3067 {
3068 	struct aac_aif_command *aif;
3069 	struct aac_container *co, *co_next;
3070 	struct aac_fib_context *ctx;
3071 	struct aac_fib *sync_fib;
3072 	struct aac_mntinforesp mir;
3073 	int next, current, found;
3074 	int count = 0, changed = 0, i = 0;
3075 	u_int32_t channel, uid;
3076 
3077 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3078 
3079 	aif = (struct aac_aif_command*)&fib->data[0];
3080 	aacraid_print_aif(sc, aif);
3081 
3082 	/* Is it an event that we should care about? */
3083 	switch (aif->command) {
3084 	case AifCmdEventNotify:
3085 		switch (aif->data.EN.type) {
3086 		case AifEnAddContainer:
3087 		case AifEnDeleteContainer:
3088 			/*
3089 			 * A container was added or deleted, but the message
3090 			 * doesn't tell us anything else!  Re-enumerate the
3091 			 * containers and sort things out.
3092 			 */
3093 			aac_alloc_sync_fib(sc, &sync_fib);
3094 			do {
3095 				/*
3096 				 * Ask the controller for its containers one at
3097 				 * a time.
3098 				 * XXX What if the controller's list changes
3099 				 * midway through this enumeration?
3100 				 * XXX This should be done async.
3101 				 */
3102 				if (aac_get_container_info(sc, sync_fib, i,
3103 					&mir, &uid) != 0)
3104 					continue;
3105 				if (i == 0)
3106 					count = mir.MntRespCount;
3107 				/*
3108 				 * Check the container against our list.
3109 				 * co->co_found was already set to 0 in a
3110 				 * previous run.
3111 				 */
3112 				if ((mir.Status == ST_OK) &&
3113 				    (mir.MntTable[0].VolType != CT_NONE)) {
3114 					found = 0;
3115 					TAILQ_FOREACH(co,
3116 						      &sc->aac_container_tqh,
3117 						      co_link) {
3118 						if (co->co_mntobj.ObjectId ==
3119 						    mir.MntTable[0].ObjectId) {
3120 							co->co_found = 1;
3121 							found = 1;
3122 							break;
3123 						}
3124 					}
3125 					/*
3126 					 * If the container matched, continue
3127 					 * in the list.
3128 					 */
3129 					if (found) {
3130 						i++;
3131 						continue;
3132 					}
3133 
3134 					/*
3135 					 * This is a new container.  Do all the
3136 					 * appropriate things to set it up.
3137 					 */
3138 					aac_add_container(sc, &mir, 1, uid);
3139 					changed = 1;
3140 				}
3141 				i++;
3142 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3143 			aac_release_sync_fib(sc);
3144 
3145 			/*
3146 			 * Go through our list of containers and see which ones
3147 			 * were not marked 'found'.  Since the controller didn't
3148 			 * list them they must have been deleted.  Do the
3149 			 * appropriate steps to destroy the device.  Also reset
3150 			 * the co->co_found field.
3151 			 */
3152 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3153 			while (co != NULL) {
3154 				if (co->co_found == 0) {
3155 					co_next = TAILQ_NEXT(co, co_link);
3156 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3157 						     co_link);
3158 					free(co, M_AACRAIDBUF);
3159 					changed = 1;
3160 					co = co_next;
3161 				} else {
3162 					co->co_found = 0;
3163 					co = TAILQ_NEXT(co, co_link);
3164 				}
3165 			}
3166 
3167 			/* Attach the newly created containers */
3168 			if (changed) {
3169 				if (sc->cam_rescan_cb != NULL)
3170 					sc->cam_rescan_cb(sc, 0,
3171 				    	AAC_CAM_TARGET_WILDCARD);
3172 			}
3173 
3174 			break;
3175 
3176 		case AifEnEnclosureManagement:
3177 			switch (aif->data.EN.data.EEE.eventType) {
3178 			case AIF_EM_DRIVE_INSERTION:
3179 			case AIF_EM_DRIVE_REMOVAL:
3180 				channel = aif->data.EN.data.EEE.unitID;
3181 				if (sc->cam_rescan_cb != NULL)
3182 					sc->cam_rescan_cb(sc,
3183 					    ((channel>>24) & 0xF) + 1,
3184 					    (channel & 0xFFFF));
3185 				break;
3186 			}
3187 			break;
3188 
3189 		case AifEnAddJBOD:
3190 		case AifEnDeleteJBOD:
3191 		case AifRawDeviceRemove:
3192 			channel = aif->data.EN.data.ECE.container;
3193 			if (sc->cam_rescan_cb != NULL)
3194 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3195 				    AAC_CAM_TARGET_WILDCARD);
3196 			break;
3197 
3198 		default:
3199 			break;
3200 		}
3201 
3202 	default:
3203 		break;
3204 	}
3205 
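	/*
	 * The AIF queue is a ring; aifq_idx is the producer slot and each
	 * open FIB context keeps its own consumer index.  ctx_wrap marks a
	 * context that the producer has lapped, which is how aac_return_aif
	 * distinguishes a full queue from an empty one when the two indices
	 * are equal.
	 */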
3206 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3207 	current = sc->aifq_idx;
3208 	next = (current + 1) % AAC_AIFQ_LENGTH;
3209 	if (next == 0)
3210 		sc->aifq_filled = 1;
3211 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3212 	/* modify AIF contexts */
3213 	if (sc->aifq_filled) {
3214 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3215 			if (next == ctx->ctx_idx)
3216 				ctx->ctx_wrap = 1;
3217 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3218 				ctx->ctx_idx = next;
3219 		}
3220 	}
3221 	sc->aifq_idx = next;
3222 	/* On the off chance that someone is sleeping for an aif... */
3223 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3224 		wakeup(sc->aac_aifq);
3225 	/* Wakeup any poll()ers */
3226 	selwakeuppri(&sc->rcv_select, PRIBIO);
3227 
3228 	return;
3229 }
3230 
3231 /*
3232  * Return the Revision of the driver to userspace and check to see if the
3233  * userspace app is possibly compatible.  This is extremely bogus since
3234  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3235  * returning what the card reported.
3236  */
3237 static int
3238 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3239 {
3240 	struct aac_rev_check rev_check;
3241 	struct aac_rev_check_resp rev_check_resp;
3242 	int error = 0;
3243 
3244 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3245 
3246 	/*
3247 	 * Copyin the revision struct from userspace
3248 	 */
3249 	if ((error = copyin(udata, (caddr_t)&rev_check,
3250 			sizeof(struct aac_rev_check))) != 0) {
3251 		return error;
3252 	}
3253 
3254 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3255 	      rev_check.callingRevision.buildNumber);
3256 
3257 	/*
3258 	 * Doctor up the response struct.
3259 	 */
3260 	rev_check_resp.possiblyCompatible = 1;
3261 	rev_check_resp.adapterSWRevision.external.comp.major =
3262 	    AAC_DRIVER_MAJOR_VERSION;
3263 	rev_check_resp.adapterSWRevision.external.comp.minor =
3264 	    AAC_DRIVER_MINOR_VERSION;
3265 	rev_check_resp.adapterSWRevision.external.comp.type =
3266 	    AAC_DRIVER_TYPE;
3267 	rev_check_resp.adapterSWRevision.external.comp.dash =
3268 	    AAC_DRIVER_BUGFIX_LEVEL;
3269 	rev_check_resp.adapterSWRevision.buildNumber =
3270 	    AAC_DRIVER_BUILD;
3271 
3272 	return(copyout((caddr_t)&rev_check_resp, udata,
3273 			sizeof(struct aac_rev_check_resp)));
3274 }
3275 
3276 /*
3277  * Pass the fib context to the caller
3278  */
3279 static int
3280 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3281 {
3282 	struct aac_fib_context *fibctx, *ctx;
3283 	int error = 0;
3284 
3285 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3286 
3287 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3288 	if (fibctx == NULL)
3289 		return (ENOMEM);
3290 
3291 	mtx_lock(&sc->aac_io_lock);
3292 	/* all elements are already 0, add to queue */
3293 	if (sc->fibctx == NULL)
3294 		sc->fibctx = fibctx;
3295 	else {
3296 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3297 			;
3298 		ctx->next = fibctx;
3299 		fibctx->prev = ctx;
3300 	}
3301 
3302 	/* evaluate unique value */
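	/*
	 * The initial value is taken from the low 32 bits of the new
	 * context's pointer; on a collision with an existing context the
	 * value is bumped and the scan restarts from the head of the list
	 * until it is unique.
	 */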
3303 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3304 	ctx = sc->fibctx;
3305 	while (ctx != fibctx) {
3306 		if (ctx->unique == fibctx->unique) {
3307 			fibctx->unique++;
3308 			ctx = sc->fibctx;
3309 		} else {
3310 			ctx = ctx->next;
3311 		}
3312 	}
3313 
3314 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3315 	mtx_unlock(&sc->aac_io_lock);
3316 	if (error)
3317 		aac_close_aif(sc, (caddr_t)ctx);
3318 	return error;
3319 }
3320 
3321 /*
3322  * Close the caller's fib context
3323  */
3324 static int
3325 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3326 {
3327 	struct aac_fib_context *ctx;
3328 
3329 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3330 
3331 	mtx_lock(&sc->aac_io_lock);
3332 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3333 		if (ctx->unique == *(uint32_t *)&arg) {
3334 			if (ctx == sc->fibctx)
3335 				sc->fibctx = NULL;
3336 			else {
3337 				ctx->prev->next = ctx->next;
3338 				if (ctx->next)
3339 					ctx->next->prev = ctx->prev;
3340 			}
3341 			break;
3342 		}
3343 	}
3344 	if (ctx)
3345 		free(ctx, M_AACRAIDBUF);
3346 
3347 	mtx_unlock(&sc->aac_io_lock);
3348 	return 0;
3349 }
3350 
3351 /*
3352  * Pass the caller the next AIF in their queue
3353  */
3354 static int
3355 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3356 {
3357 	struct get_adapter_fib_ioctl agf;
3358 	struct aac_fib_context *ctx;
3359 	int error;
3360 
3361 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3362 
3363 	mtx_lock(&sc->aac_io_lock);
3364 #ifdef COMPAT_FREEBSD32
3365 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3366 		struct get_adapter_fib_ioctl32 agf32;
3367 		error = copyin(arg, &agf32, sizeof(agf32));
3368 		if (error == 0) {
3369 			agf.AdapterFibContext = agf32.AdapterFibContext;
3370 			agf.Wait = agf32.Wait;
3371 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3372 		}
3373 	} else
3374 #endif
3375 		error = copyin(arg, &agf, sizeof(agf));
3376 	if (error == 0) {
3377 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3378 			if (agf.AdapterFibContext == ctx->unique)
3379 				break;
3380 		}
3381 		if (!ctx) {
3382 			mtx_unlock(&sc->aac_io_lock);
3383 			return (EFAULT);
3384 		}
3385 
3386 		error = aac_return_aif(sc, ctx, agf.AifFib);
3387 		if (error == EAGAIN && agf.Wait) {
3388 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3389 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3390 			while (error == EAGAIN) {
3391 				mtx_unlock(&sc->aac_io_lock);
3392 				error = tsleep(sc->aac_aifq, PRIBIO |
3393 					       PCATCH, "aacaif", 0);
3394 				mtx_lock(&sc->aac_io_lock);
3395 				if (error == 0)
3396 					error = aac_return_aif(sc, ctx, agf.AifFib);
3397 			}
3398 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3399 		}
3400 	}
3401 	mtx_unlock(&sc->aac_io_lock);
3402 	return(error);
3403 }
3404 
3405 /*
3406  * Hand the next AIF off the top of the queue out to userspace.
3407  */
3408 static int
3409 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3410 {
3411 	int current, error;
3412 
3413 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3414 
3415 	current = ctx->ctx_idx;
3416 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3417 		/* empty */
3418 		return (EAGAIN);
3419 	}
3420 	error =
3421 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3422 	if (error)
3423 		device_printf(sc->aac_dev,
3424 		    "aac_return_aif: copyout returned %d\n", error);
3425 	else {
3426 		ctx->ctx_wrap = 0;
3427 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3428 	}
3429 	return(error);
3430 }
3431 
3432 static int
3433 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3434 {
3435 	struct aac_pci_info {
3436 		u_int32_t bus;
3437 		u_int32_t slot;
3438 	} pciinf;
3439 	int error;
3440 
3441 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3442 
3443 	pciinf.bus = pci_get_bus(sc->aac_dev);
3444 	pciinf.slot = pci_get_slot(sc->aac_dev);
3445 
3446 	error = copyout((caddr_t)&pciinf, uptr,
3447 			sizeof(struct aac_pci_info));
3448 
3449 	return (error);
3450 }
3451 
3452 static int
3453 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3454 {
3455 	struct aac_features f;
3456 	int error;
3457 
3458 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3459 
3460 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3461 		return (error);
3462 
3463 	/*
3464 	 * When the management driver receives the FSACTL_GET_FEATURES ioctl
3465 	 * with featuresState all zero, the driver returns the current state
3466 	 * of all the supported features; the data field is not valid in
3467 	 * that case.
3468 	 * When the management driver receives the FSACTL_GET_FEATURES ioctl
3469 	 * with a specific bit set in featuresState, the driver returns the
3470 	 * current state of that specific feature, along with any data
3471 	 * associated with the feature in the data field, or performs
3472 	 * whatever action the data field indicates.
3473 	 */
3474 	if (f.feat.fValue == 0) {
3475 		f.feat.fBits.largeLBA =
3476 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3477 		f.feat.fBits.JBODSupport = 1;
3478 		/* TODO: In the future, add other features state here as well */
3479 	} else {
3480 		if (f.feat.fBits.largeLBA)
3481 			f.feat.fBits.largeLBA =
3482 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3483 		/* TODO: Add other features state and data in the future */
3484 	}
3485 
3486 	error = copyout(&f, uptr, sizeof (f));
3487 	return (error);
3488 }
3489 
3490 /*
3491  * Give the userland some information about the container.  The AAC arch
3492  * expects the driver to be a SCSI passthrough type driver, so it expects
3493  * the containers to have b:t:l numbers.  Fake it.
3494  */
3495 static int
3496 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3497 {
3498 	struct aac_query_disk query_disk;
3499 	struct aac_container *co;
3500 	int error, id;
3501 
3502 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3503 
3504 	mtx_lock(&sc->aac_io_lock);
3505 	error = copyin(uptr, (caddr_t)&query_disk,
3506 		       sizeof(struct aac_query_disk));
3507 	if (error) {
3508 		mtx_unlock(&sc->aac_io_lock);
3509 		return (error);
3510 	}
3511 
3512 	id = query_disk.ContainerNumber;
3513 	if (id == -1) {
3514 		mtx_unlock(&sc->aac_io_lock);
3515 		return (EINVAL);
3516 	}
3517 
3518 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3519 		if (co->co_mntobj.ObjectId == id)
3520 			break;
3521 	}
3522 
3523 	if (co == NULL) {
3524 		query_disk.Valid = 0;
3525 		query_disk.Locked = 0;
3526 		query_disk.Deleted = 1;		/* XXX is this right? */
3527 	} else {
3528 		query_disk.Valid = 1;
3529 		query_disk.Locked = 1;
3530 		query_disk.Deleted = 0;
3531 		query_disk.Bus = device_get_unit(sc->aac_dev);
3532 		query_disk.Target = 0;
3533 		query_disk.Lun = 0;
3534 		query_disk.UnMapped = 0;
3535 	}
3536 
3537 	error = copyout((caddr_t)&query_disk, uptr,
3538 			sizeof(struct aac_query_disk));
3539 
3540 	mtx_unlock(&sc->aac_io_lock);
3541 	return (error);
3542 }
3543 
3544 static void
3545 aac_container_bus(struct aac_softc *sc)
3546 {
3547 	struct aac_sim *sim;
3548 	device_t child;
3549 
3550 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3551 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3552 	if (sim == NULL) {
3553 		device_printf(sc->aac_dev,
3554 	    	"No memory to add container bus\n");
3555 		panic("Out of memory?!");
3556 	}
3557 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3558 	if (child == NULL) {
3559 		device_printf(sc->aac_dev,
3560 	    	"device_add_child failed for container bus\n");
3561 		free(sim, M_AACRAIDBUF);
3562 		panic("Out of memory?!");
3563 	}
3564 
3565 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3566 	sim->BusNumber = 0;
3567 	sim->BusType = CONTAINER_BUS;
3568 	sim->InitiatorBusId = -1;
3569 	sim->aac_sc = sc;
3570 	sim->sim_dev = child;
3571 	sim->aac_cam = NULL;
3572 
3573 	device_set_ivars(child, sim);
3574 	device_set_desc(child, "Container Bus");
3575 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3576 	/*
3577 	device_set_desc(child, aac_describe_code(aac_container_types,
3578 			mir->MntTable[0].VolType));
3579 	*/
3580 	bus_generic_attach(sc->aac_dev);
3581 }
3582 
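/*
 * Discover the physical (passthrough) buses behind the controller.  This is
 * a two-step query: CT_GET_SCSI_METHOD returns the method id required for
 * subsequent VM_Ioctl requests, and GetBusInfo then reports how many buses
 * exist and which of them are valid.
 */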
3583 static void
3584 aac_get_bus_info(struct aac_softc *sc)
3585 {
3586 	struct aac_fib *fib;
3587 	struct aac_ctcfg *c_cmd;
3588 	struct aac_ctcfg_resp *c_resp;
3589 	struct aac_vmioctl *vmi;
3590 	struct aac_vmi_businf_resp *vmi_resp;
3591 	struct aac_getbusinf businfo;
3592 	struct aac_sim *caminf;
3593 	device_t child;
3594 	int i, error;
3595 
3596 	mtx_lock(&sc->aac_io_lock);
3597 	aac_alloc_sync_fib(sc, &fib);
3598 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3599 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3600 
3601 	c_cmd->Command = VM_ContainerConfig;
3602 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3603 	c_cmd->param = 0;
3604 
3605 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3606 	    sizeof(struct aac_ctcfg));
3607 	if (error) {
3608 		device_printf(sc->aac_dev, "Error %d sending "
3609 		    "VM_ContainerConfig command\n", error);
3610 		aac_release_sync_fib(sc);
3611 		mtx_unlock(&sc->aac_io_lock);
3612 		return;
3613 	}
3614 
3615 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3616 	if (c_resp->Status != ST_OK) {
3617 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3618 		    c_resp->Status);
3619 		aac_release_sync_fib(sc);
3620 		mtx_unlock(&sc->aac_io_lock);
3621 		return;
3622 	}
3623 
3624 	sc->scsi_method_id = c_resp->param;
3625 
3626 	vmi = (struct aac_vmioctl *)&fib->data[0];
3627 	bzero(vmi, sizeof(struct aac_vmioctl));
3628 
3629 	vmi->Command = VM_Ioctl;
3630 	vmi->ObjType = FT_DRIVE;
3631 	vmi->MethId = sc->scsi_method_id;
3632 	vmi->ObjId = 0;
3633 	vmi->IoctlCmd = GetBusInfo;
3634 
3635 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3636 	    sizeof(struct aac_vmi_businf_resp));
3637 	if (error) {
3638 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3639 		    error);
3640 		aac_release_sync_fib(sc);
3641 		mtx_unlock(&sc->aac_io_lock);
3642 		return;
3643 	}
3644 
3645 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3646 	if (vmi_resp->Status != ST_OK) {
3647 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3648 		    vmi_resp->Status);
3649 		aac_release_sync_fib(sc);
3650 		mtx_unlock(&sc->aac_io_lock);
3651 		return;
3652 	}
3653 
3654 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3655 	aac_release_sync_fib(sc);
3656 	mtx_unlock(&sc->aac_io_lock);
3657 
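	/*
	 * Add an aacraidp child for every valid physical bus.  These are
	 * numbered starting at 1 so that bus 0 remains the container bus.
	 */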
3658 	for (i = 0; i < businfo.BusCount; i++) {
3659 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3660 			continue;
3661 
3662 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3663 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3664 		if (caminf == NULL) {
3665 			device_printf(sc->aac_dev,
3666 			    "No memory to add passthrough bus %d\n", i);
3667 			break;
3668 		}
3669 
3670 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3671 		if (child == NULL) {
3672 			device_printf(sc->aac_dev,
3673 			    "device_add_child failed for passthrough bus %d\n",
3674 			    i);
3675 			free(caminf, M_AACRAIDBUF);
3676 			break;
3677 		}
3678 
3679 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3680 		caminf->BusNumber = i+1;
3681 		caminf->BusType = PASSTHROUGH_BUS;
3682 		caminf->InitiatorBusId = -1;
3683 		caminf->aac_sc = sc;
3684 		caminf->sim_dev = child;
3685 		caminf->aac_cam = NULL;
3686 
3687 		device_set_ivars(child, caminf);
3688 		device_set_desc(child, "SCSI Passthrough Bus");
3689 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3690 	}
3691 }
3692 
3693 /*
3694  * Check to see if the kernel is up and running. If we are in a
3695  * BlinkLED state, return the BlinkLED code.
3696  */
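/*
 * When the firmware has panicked, the BlinkLED code is carried in bits
 * 16..23 of the status word returned by AAC_GET_FWSTATUS().
 */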
3697 static u_int32_t
3698 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3699 {
3700 	u_int32_t ret;
3701 
3702 	ret = AAC_GET_FWSTATUS(sc);
3703 
3704 	if (ret & AAC_UP_AND_RUNNING)
3705 		ret = 0;
3706 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3707 		*bled = (ret >> 16) & 0xff;
3708 
3709 	return (ret);
3710 }
3711 
3712 /*
3713  * After an IOP reset the card must essentially be re-initialized as if
3714  * it were coming up from a cold boot, and the driver is responsible for
3715  * any I/O that was outstanding to the adapter at the time of the reset.
3716  * To prepare for this, the init code is kept modular so that it can be
3717  * called from multiple places.
3718  */
3719 static int
3720 aac_reset_adapter(struct aac_softc *sc)
3721 {
3722 	struct aac_command *cm;
3723 	struct aac_fib *fib;
3724 	struct aac_pause_command *pc;
3725 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3726 	int ret, msi_enabled_orig;
3727 
3728 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3729 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3730 
3731 	if (sc->aac_state & AAC_STATE_RESET) {
3732 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3733 		return (EINVAL);
3734 	}
3735 	sc->aac_state |= AAC_STATE_RESET;
3736 
3737 	/* disable interrupt */
3738 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3739 
3740 	/*
3741 	 * Abort all pending commands:
3742 	 * a) on the controller
3743 	 */
3744 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3745 		cm->cm_flags |= AAC_CMD_RESET;
3746 
3747 		/* is there a completion handler? */
3748 		if (cm->cm_complete != NULL) {
3749 			cm->cm_complete(cm);
3750 		} else {
3751 			/* assume that someone is sleeping on this
3752 			 * command
3753 			 */
3754 			wakeup(cm);
3755 		}
3756 	}
3757 
3758 	/* b) in the waiting queues */
3759 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3760 		cm->cm_flags |= AAC_CMD_RESET;
3761 
3762 		/* is there a completion handler? */
3763 		if (cm->cm_complete != NULL) {
3764 			cm->cm_complete(cm);
3765 		} else {
3766 			/* assume that someone is sleeping on this
3767 			 * command
3768 			 */
3769 			wakeup(cm);
3770 		}
3771 	}
3772 
3773 	/* flush drives */
3774 	if (aac_check_adapter_health(sc, NULL) == 0) {
3775 		mtx_unlock(&sc->aac_io_lock);
3776 		(void) aacraid_shutdown(sc->aac_dev);
3777 		mtx_lock(&sc->aac_io_lock);
3778 	}
3779 
3780 	/* execute IOP reset */
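	/*
	 * Controllers that advertise AAC_SUPPORTED_MU_RESET are reset by
	 * writing the core-reset bit to the IRCSR register; otherwise
	 * IOP_RESET_ALWAYS is used to obtain a doorbell reset mask, with the
	 * plain IOP_RESET command as a fallback for older firmware.
	 */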
3781 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3782 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3783 
3784 		/* We need to wait for 5 seconds before accessing the MU again:
3785 		 * 5 * 10000 iterations * 100us = 5,000,000us = 5s
3786 		 */
3787 		waitCount = 5 * 10000;
3788 		while (waitCount) {
3789 			DELAY(100);			/* delay 100 microseconds */
3790 			waitCount--;
3791 		}
3792 	} else {
3793 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3794 			0, 0, 0, 0, &status, &reset_mask);
3795 		if (ret && !sc->doorbell_mask) {
3796 			/* call IOP_RESET for older firmware */
3797 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3798 			    &status, NULL)) != 0) {
3799 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3800 					device_printf(sc->aac_dev,
3801 					    "IOP_RESET not supported\n");
3802 				} else {
3803 					/* probably timeout */
3804 					device_printf(sc->aac_dev,
3805 					    "IOP_RESET failed\n");
3806 				}
3807 
3808 				/* unwind aac_shutdown() */
3809 				aac_alloc_sync_fib(sc, &fib);
3810 				pc = (struct aac_pause_command *)&fib->data[0];
3811 				pc->Command = VM_ContainerConfig;
3812 				pc->Type = CT_PAUSE_IO;
3813 				pc->Timeout = 1;
3814 				pc->Min = 1;
3815 				pc->NoRescan = 1;
3816 
3817 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3818 				    fib, sizeof (struct aac_pause_command));
3819 				aac_release_sync_fib(sc);
3820 
3821 				goto finish;
3822 			}
3823 		} else if (sc->doorbell_mask) {
3824 			ret = 0;
3825 			reset_mask = sc->doorbell_mask;
3826 		}
3827 		if (!ret &&
3828 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3829 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3830 			/*
3831 			 * We need to wait for 5 seconds before accessing the
3832 			 * doorbell again:
3833 			 * 5 * 10000 iterations * 100us = 5,000,000us = 5s
3834 			 */
3835 			waitCount = 5 * 10000;
3836 			while (waitCount) {
3837 				DELAY(100);	/* delay 100 microseconds */
3838 				waitCount--;
3839 			}
3840 		}
3841 	}
3842 
3843 	/*
3844 	 * Initialize the adapter.
3845 	 */
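	/*
	 * MSI-X state is saved and MSI temporarily disabled while the
	 * firmware is re-checked; both are restored before aac_init() is
	 * called again (unless the controller runs in sync mode).
	 */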
3846 	max_msix_orig = sc->aac_max_msix;
3847 	msi_enabled_orig = sc->msi_enabled;
3848 	sc->msi_enabled = FALSE;
3849 	if (aac_check_firmware(sc) != 0)
3850 		goto finish;
3851 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3852 		sc->aac_max_msix = max_msix_orig;
3853 		if (msi_enabled_orig) {
3854 			sc->msi_enabled = msi_enabled_orig;
3855 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3856 		}
3857 		mtx_unlock(&sc->aac_io_lock);
3858 		aac_init(sc);
3859 		mtx_lock(&sc->aac_io_lock);
3860 	}
3861 
3862 finish:
3863 	sc->aac_state &= ~AAC_STATE_RESET;
3864 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3865 	aacraid_startio(sc);
3866 	return (0);
3867 }
3868