xref: /freebsd/sys/dev/aacraid/aacraid.c (revision a8197ad3aa952a03fc2aeebc2eafe9bb9de54550)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 static void		aac_cdevpriv_dtor(void *arg);
192 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
193 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
194 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
195 static void	aac_request_aif(struct aac_softc *sc);
196 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
197 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_return_aif(struct aac_softc *sc,
201 			       struct aac_fib_context *ctx, caddr_t uptr);
202 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
205 static void	aac_ioctl_event(struct aac_softc *sc,
206 				struct aac_event *event, void *arg);
207 static int	aac_reset_adapter(struct aac_softc *sc);
208 static int	aac_get_container_info(struct aac_softc *sc,
209 				       struct aac_fib *fib, int cid,
210 				       struct aac_mntinforesp *mir,
211 				       u_int32_t *uid);
212 static u_int32_t
213 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
214 
215 static struct cdevsw aacraid_cdevsw = {
216 	.d_version =	D_VERSION,
217 	.d_flags =	0,
218 	.d_open =	aac_open,
219 	.d_ioctl =	aac_ioctl,
220 	.d_poll =	aac_poll,
221 	.d_name =	"aacraid",
222 };
223 
224 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
225 
226 /* sysctl node */
227 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
228     "AACRAID driver parameters");
229 
230 /*
231  * Device Interface
232  */
233 
234 /*
235  * Initialize the controller and softc
236  */
237 int
238 aacraid_attach(struct aac_softc *sc)
239 {
240 	int error, unit;
241 	struct aac_fib *fib;
242 	struct aac_mntinforesp mir;
243 	int count = 0, i = 0;
244 	u_int32_t uid;
245 
246 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
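	/*
	 * Device hint flags, as used below: 0x1 enables 64-bit s/g
	 * addressing, 0x2 forces synchronous mode, 0x4 disables RawIo2
	 * s/g list repacking.
	 */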
247 	sc->hint_flags = device_get_flags(sc->aac_dev);
248 	/*
249 	 * Initialize per-controller queues.
250 	 */
251 	aac_initq_free(sc);
252 	aac_initq_ready(sc);
253 	aac_initq_busy(sc);
254 
255 	/* mark controller as suspended until we get ourselves organised */
256 	sc->aac_state |= AAC_STATE_SUSPEND;
257 
258 	/*
259 	 * Check that the firmware on the card is supported.
260 	 */
261 	sc->msi_enabled = sc->msi_tupelo = FALSE;
262 	if ((error = aac_check_firmware(sc)) != 0)
263 		return(error);
264 
265 	/*
266 	 * Initialize locks
267 	 */
268 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
269 	TAILQ_INIT(&sc->aac_container_tqh);
270 	TAILQ_INIT(&sc->aac_ev_cmfree);
271 
272 	/* Initialize the clock daemon callout. */
273 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
274 
275 	/*
276 	 * Initialize the adapter.
277 	 */
278 	if ((error = aac_alloc(sc)) != 0)
279 		return(error);
280 	aac_define_int_mode(sc);
281 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
282 		if ((error = aac_init(sc)) != 0)
283 			return(error);
284 	}
285 
286 	/*
287 	 * Allocate and connect our interrupt.
288 	 */
289 	if ((error = aac_setup_intr(sc)) != 0)
290 		return(error);
291 
292 	/*
293 	 * Print a little information about the controller.
294 	 */
295 	aac_describe_controller(sc);
296 
297 	/*
298 	 * Make the control device.
299 	 */
300 	unit = device_get_unit(sc->aac_dev);
301 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
302 				 0640, "aacraid%d", unit);
303 	sc->aac_dev_t->si_drv1 = sc;
304 
305 	/* Create the AIF thread */
306 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
307 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
308 		panic("Could not create AIF thread");
309 
310 	/* Register the shutdown method to only be called post-dump */
311 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
312 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
313 		device_printf(sc->aac_dev,
314 			      "shutdown event registration failed\n");
315 
316 	/* Find containers */
317 	mtx_lock(&sc->aac_io_lock);
318 	aac_alloc_sync_fib(sc, &fib);
319 	/* loop over possible containers */
320 	do {
321 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
322 			continue;
323 		if (i == 0)
324 			count = mir.MntRespCount;
325 		aac_add_container(sc, &mir, 0, uid);
326 		i++;
327 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
328 	aac_release_sync_fib(sc);
329 	mtx_unlock(&sc->aac_io_lock);
330 
331 	/* Register with CAM for the containers */
332 	TAILQ_INIT(&sc->aac_sim_tqh);
333 	aac_container_bus(sc);
334 	/* Register with CAM for the non-DASD devices */
335 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
336 		aac_get_bus_info(sc);
337 
338 	/* poke the bus to actually attach the child devices */
339 	bus_generic_attach(sc->aac_dev);
340 
341 	/* mark the controller up */
342 	sc->aac_state &= ~AAC_STATE_SUSPEND;
343 
344 	/* enable interrupts now */
345 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
346 
347 	mtx_lock(&sc->aac_io_lock);
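	/* start the periodic host-time update one minute from now */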
348 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
349 	mtx_unlock(&sc->aac_io_lock);
350 
351 	return(0);
352 }
353 
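/*
 * Periodic housekeeping callout: send the current host time to the
 * controller in a SendHostTime FIB, then reschedule (every 30 minutes).
 */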
354 static void
355 aac_daemon(void *arg)
356 {
357 	struct aac_softc *sc;
358 	struct timeval tv;
359 	struct aac_command *cm;
360 	struct aac_fib *fib;
361 
362 	sc = arg;
363 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
364 
365 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
366 	if (callout_pending(&sc->aac_daemontime) ||
367 	    callout_active(&sc->aac_daemontime) == 0)
368 		return;
369 	getmicrotime(&tv);
370 
371 	if (!aacraid_alloc_command(sc, &cm)) {
372 		fib = cm->cm_fib;
373 		cm->cm_timestamp = time_uptime;
374 		cm->cm_datalen = 0;
375 		cm->cm_flags |= AAC_CMD_WAIT;
376 
377 		fib->Header.Size =
378 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
379 		fib->Header.XferState =
380 			AAC_FIBSTATE_HOSTOWNED   |
381 			AAC_FIBSTATE_INITIALISED |
382 			AAC_FIBSTATE_EMPTY	 |
383 			AAC_FIBSTATE_FROMHOST	 |
384 			AAC_FIBSTATE_REXPECTED   |
385 			AAC_FIBSTATE_NORM	 |
386 			AAC_FIBSTATE_ASYNC	 |
387 			AAC_FIBSTATE_FAST_RESPONSE;
388 		fib->Header.Command = SendHostTime;
389 		*(uint32_t *)fib->data = tv.tv_sec;
390 
391 		aacraid_map_command_sg(cm, NULL, 0, 0);
392 		aacraid_release_command(cm);
393 	}
394 
395 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
396 }
397 
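/*
 * Queue an event notification; AAC_EVENT_CMFREE callbacks are fired from
 * aacraid_release_command() when a command is returned to the free list.
 */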
398 void
399 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
400 {
401 
402 	switch (event->ev_type & AAC_EVENT_MASK) {
403 	case AAC_EVENT_CMFREE:
404 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
405 		break;
406 	default:
407 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
408 		    event->ev_type);
409 		break;
410 	}
411 
412 	return;
413 }
414 
415 /*
416  * Request information of container #cid
417  */
418 static int
419 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
420 		       struct aac_mntinforesp *mir, u_int32_t *uid)
421 {
422 	struct aac_command *cm;
423 	struct aac_fib *fib;
424 	struct aac_mntinfo *mi;
425 	struct aac_cnt_config *ccfg;
426 	int rval;
427 
428 	if (sync_fib == NULL) {
429 		if (aacraid_alloc_command(sc, &cm)) {
430 			device_printf(sc->aac_dev,
431 				"Warning, no free command available\n");
432 			return (-1);
433 		}
434 		fib = cm->cm_fib;
435 	} else {
436 		fib = sync_fib;
437 	}
438 
439 	mi = (struct aac_mntinfo *)&fib->data[0];
440 	/* 4KB support? 64-bit LBA? */
441 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
442 		mi->Command = VM_NameServeAllBlk;
443 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
444 		mi->Command = VM_NameServe64;
445 	else
446 		mi->Command = VM_NameServe;
447 	mi->MntType = FT_FILESYS;
448 	mi->MntCount = cid;
449 
450 	if (sync_fib) {
451 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
452 			 sizeof(struct aac_mntinfo))) {
453 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
454 			return (-1);
455 		}
456 	} else {
457 		cm->cm_timestamp = time_uptime;
458 		cm->cm_datalen = 0;
459 
460 		fib->Header.Size =
461 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
462 		fib->Header.XferState =
463 			AAC_FIBSTATE_HOSTOWNED   |
464 			AAC_FIBSTATE_INITIALISED |
465 			AAC_FIBSTATE_EMPTY	 |
466 			AAC_FIBSTATE_FROMHOST	 |
467 			AAC_FIBSTATE_REXPECTED   |
468 			AAC_FIBSTATE_NORM	 |
469 			AAC_FIBSTATE_ASYNC	 |
470 			AAC_FIBSTATE_FAST_RESPONSE;
471 		fib->Header.Command = ContainerCommand;
472 		if (aacraid_wait_command(cm) != 0) {
473 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
474 			aacraid_release_command(cm);
475 			return (-1);
476 		}
477 	}
478 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
479 
480 	/* UID */
481 	/* determine the container's 32-bit UID (default is the cid itself) */
482 	if (mir->MntTable[0].VolType != CT_NONE &&
483 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
484 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
485 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
486 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
487 		}
488 		ccfg = (struct aac_cnt_config *)&fib->data[0];
489 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
490 		ccfg->Command = VM_ContainerConfig;
491 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
492 		ccfg->CTCommand.param[0] = cid;
493 
494 		if (sync_fib) {
495 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
496 				sizeof(struct aac_cnt_config));
497 			if (rval == 0 && ccfg->Command == ST_OK &&
498 				ccfg->CTCommand.param[0] == CT_OK &&
499 				mir->MntTable[0].VolType != CT_PASSTHRU)
500 				*uid = ccfg->CTCommand.param[1];
501 		} else {
502 			fib->Header.Size =
503 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
504 			fib->Header.XferState =
505 				AAC_FIBSTATE_HOSTOWNED   |
506 				AAC_FIBSTATE_INITIALISED |
507 				AAC_FIBSTATE_EMPTY	 |
508 				AAC_FIBSTATE_FROMHOST	 |
509 				AAC_FIBSTATE_REXPECTED   |
510 				AAC_FIBSTATE_NORM	 |
511 				AAC_FIBSTATE_ASYNC	 |
512 				AAC_FIBSTATE_FAST_RESPONSE;
513 			fib->Header.Command = ContainerCommand;
514 			rval = aacraid_wait_command(cm);
515 			if (rval == 0 && ccfg->Command == ST_OK &&
516 				ccfg->CTCommand.param[0] == CT_OK &&
517 				mir->MntTable[0].VolType != CT_PASSTHRU)
518 				*uid = ccfg->CTCommand.param[1];
519 			aacraid_release_command(cm);
520 		}
521 	}
522 
523 	return (0);
524 }
525 
526 /*
527  * Create a device to represent a new container
528  */
529 static void
530 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
531 		  u_int32_t uid)
532 {
533 	struct aac_container *co;
534 
535 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
536 
537 	/*
538 	 * Check container volume type for validity.  Note that many of
539 	 * the possible types may never show up.
540 	 */
541 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
542 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
543 		       M_NOWAIT | M_ZERO);
544 		if (co == NULL) {
545 			panic("Out of memory?!");
546 		}
547 
548 		co->co_found = f;
549 		bcopy(&mir->MntTable[0], &co->co_mntobj,
550 		      sizeof(struct aac_mntobj));
551 		co->co_uid = uid;
552 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
553 	}
554 }
555 
556 /*
557  * Allocate resources associated with (sc)
558  */
559 static int
560 aac_alloc(struct aac_softc *sc)
561 {
562 	bus_size_t maxsize;
563 
564 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
565 
566 	/*
567 	 * Create DMA tag for mapping buffers into controller-addressable space.
568 	 */
569 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
570 			       1, 0, 			/* algnmnt, boundary */
571 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
572 			       BUS_SPACE_MAXADDR :
573 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
574 			       BUS_SPACE_MAXADDR, 	/* highaddr */
575 			       NULL, NULL, 		/* filter, filterarg */
576 			       sc->aac_max_sectors << 9, /* maxsize */
577 			       sc->aac_sg_tablesize,	/* nsegments */
578 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
579 			       BUS_DMA_ALLOCNOW,	/* flags */
580 			       busdma_lock_mutex,	/* lockfunc */
581 			       &sc->aac_io_lock,	/* lockfuncarg */
582 			       &sc->aac_buffer_dmat)) {
583 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
584 		return (ENOMEM);
585 	}
586 
587 	/*
588  * Create DMA tag for mapping FIBs into controller-addressable space.
589 	 */
590 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
591 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
592 			sizeof(struct aac_fib_xporthdr) + 31);
593 	else
594 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
595 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
596 			       1, 0, 			/* algnmnt, boundary */
597 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
598 			       BUS_SPACE_MAXADDR_32BIT :
599 			       0x7fffffff,		/* lowaddr */
600 			       BUS_SPACE_MAXADDR, 	/* highaddr */
601 			       NULL, NULL, 		/* filter, filterarg */
602 			       maxsize,  		/* maxsize */
603 			       1,			/* nsegments */
604 			       maxsize,			/* maxsegsize */
605 			       0,			/* flags */
606 			       NULL, NULL,		/* No locking needed */
607 			       &sc->aac_fib_dmat)) {
608 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
609 		return (ENOMEM);
610 	}
611 
612 	/*
613 	 * Create DMA tag for the common structure and allocate it.
614 	 */
615 	maxsize = sizeof(struct aac_common);
616 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
617 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
618 			       1, 0,			/* algnmnt, boundary */
619 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
620 			       BUS_SPACE_MAXADDR_32BIT :
621 			       0x7fffffff,		/* lowaddr */
622 			       BUS_SPACE_MAXADDR, 	/* highaddr */
623 			       NULL, NULL, 		/* filter, filterarg */
624 			       maxsize, 		/* maxsize */
625 			       1,			/* nsegments */
626 			       maxsize,			/* maxsegsize */
627 			       0,			/* flags */
628 			       NULL, NULL,		/* No locking needed */
629 			       &sc->aac_common_dmat)) {
630 		device_printf(sc->aac_dev,
631 			      "can't allocate common structure DMA tag\n");
632 		return (ENOMEM);
633 	}
634 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
635 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
636 		device_printf(sc->aac_dev, "can't allocate common structure\n");
637 		return (ENOMEM);
638 	}
639 
640 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
641 			sc->aac_common, maxsize,
642 			aac_common_map, sc, 0);
643 	bzero(sc->aac_common, maxsize);
644 
645 	/* Allocate some FIBs and associated command structs */
646 	TAILQ_INIT(&sc->aac_fibmap_tqh);
647 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
648 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
649 	mtx_lock(&sc->aac_io_lock);
650 	while (sc->total_fibs < sc->aac_max_fibs) {
651 		if (aac_alloc_commands(sc) != 0)
652 			break;
653 	}
654 	mtx_unlock(&sc->aac_io_lock);
655 	if (sc->total_fibs == 0)
656 		return (ENOMEM);
657 
658 	return (0);
659 }
660 
661 /*
662  * Free all of the resources associated with (sc)
663  *
664  * Should not be called if the controller is active.
665  */
666 void
667 aacraid_free(struct aac_softc *sc)
668 {
669 	int i;
670 
671 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
672 
673 	/* remove the control device */
674 	if (sc->aac_dev_t != NULL)
675 		destroy_dev(sc->aac_dev_t);
676 
677 	/* throw away any FIB buffers, discard the FIB DMA tag */
678 	aac_free_commands(sc);
679 	if (sc->aac_fib_dmat)
680 		bus_dma_tag_destroy(sc->aac_fib_dmat);
681 
682 	free(sc->aac_commands, M_AACRAIDBUF);
683 
684 	/* destroy the common area */
685 	if (sc->aac_common) {
686 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
687 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
688 				sc->aac_common_dmamap);
689 	}
690 	if (sc->aac_common_dmat)
691 		bus_dma_tag_destroy(sc->aac_common_dmat);
692 
693 	/* disconnect the interrupt handler */
694 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
695 		if (sc->aac_intr[i])
696 			bus_teardown_intr(sc->aac_dev,
697 				sc->aac_irq[i], sc->aac_intr[i]);
698 		if (sc->aac_irq[i])
699 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
700 				sc->aac_irq_rid[i], sc->aac_irq[i]);
701 		else
702 			break;
703 	}
704 	if (sc->msi_enabled || sc->msi_tupelo)
705 		pci_release_msi(sc->aac_dev);
706 
707 	/* destroy data-transfer DMA tag */
708 	if (sc->aac_buffer_dmat)
709 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
710 
711 	/* destroy the parent DMA tag */
712 	if (sc->aac_parent_dmat)
713 		bus_dma_tag_destroy(sc->aac_parent_dmat);
714 
715 	/* release the register window mapping */
716 	if (sc->aac_regs_res0 != NULL)
717 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
718 				     sc->aac_regs_rid0, sc->aac_regs_res0);
719 	if (sc->aac_regs_res1 != NULL)
720 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
721 				     sc->aac_regs_rid1, sc->aac_regs_res1);
722 }
723 
724 /*
725  * Disconnect from the controller completely, in preparation for unload.
726  */
727 int
728 aacraid_detach(device_t dev)
729 {
730 	struct aac_softc *sc;
731 	struct aac_container *co;
732 	struct aac_sim	*sim;
733 	int error;
734 
735 	sc = device_get_softc(dev);
736 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
737 
738 	callout_drain(&sc->aac_daemontime);
739 	/* Remove the child containers */
740 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
741 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
742 		free(co, M_AACRAIDBUF);
743 	}
744 
745 	/* Remove the CAM SIMs */
746 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
747 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
748 		error = device_delete_child(dev, sim->sim_dev);
749 		if (error)
750 			return (error);
751 		free(sim, M_AACRAIDBUF);
752 	}
753 
754 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
755 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
756 		wakeup(sc->aifthread);
757 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
758 	}
759 
760 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
761 		panic("Cannot shutdown AIF thread");
762 
763 	if ((error = aacraid_shutdown(dev)))
764 		return(error);
765 
766 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
767 
768 	aacraid_free(sc);
769 
770 	mtx_destroy(&sc->aac_io_lock);
771 
772 	return(0);
773 }
774 
775 /*
776  * Bring the controller down to a dormant state and detach all child devices.
777  *
778  * This function is called before detach or system shutdown.
779  *
780  * Note that we can assume that the bioq on the controller is empty, as we won't
781  * allow shutdown if any device is open.
782  */
783 int
784 aacraid_shutdown(device_t dev)
785 {
786 	struct aac_softc *sc;
787 	struct aac_fib *fib;
788 	struct aac_close_command *cc;
789 
790 	sc = device_get_softc(dev);
791 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
792 
793 	sc->aac_state |= AAC_STATE_SUSPEND;
794 
795 	/*
796 	 * Send a Container shutdown (VM_CloseAll) to the controller to convince
797 	 * it that we don't want to talk to it anymore.  We've been closed and
798 	 * all I/O has completed already.
799 	 */
800 	device_printf(sc->aac_dev, "shutting down controller...");
801 
802 	mtx_lock(&sc->aac_io_lock);
803 	aac_alloc_sync_fib(sc, &fib);
804 	cc = (struct aac_close_command *)&fib->data[0];
805 
806 	bzero(cc, sizeof(struct aac_close_command));
807 	cc->Command = VM_CloseAll;
808 	cc->ContainerId = 0xfffffffe;
809 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
810 	    sizeof(struct aac_close_command)))
811 		printf("FAILED.\n");
812 	else
813 		printf("done\n");
814 
815 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
816 	aac_release_sync_fib(sc);
817 	mtx_unlock(&sc->aac_io_lock);
818 
819 	return(0);
820 }
821 
822 /*
823  * Bring the controller to a quiescent state, ready for system suspend.
824  */
825 int
826 aacraid_suspend(device_t dev)
827 {
828 	struct aac_softc *sc;
829 
830 	sc = device_get_softc(dev);
831 
832 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
833 	sc->aac_state |= AAC_STATE_SUSPEND;
834 
835 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
836 	return(0);
837 }
838 
839 /*
840  * Bring the controller back to a state ready for operation.
841  */
842 int
843 aacraid_resume(device_t dev)
844 {
845 	struct aac_softc *sc;
846 
847 	sc = device_get_softc(dev);
848 
849 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
850 	sc->aac_state &= ~AAC_STATE_SUSPEND;
851 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
852 	return(0);
853 }
854 
855 /*
856  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
857  */
858 void
859 aacraid_new_intr_type1(void *arg)
860 {
861 	struct aac_msix_ctx *ctx;
862 	struct aac_softc *sc;
863 	int vector_no;
864 	struct aac_command *cm;
865 	struct aac_fib *fib;
866 	u_int32_t bellbits, bellbits_shifted, index, handle;
867 	int isFastResponse, isAif, noMoreAif, mode;
868 
869 	ctx = (struct aac_msix_ctx *)arg;
870 	sc = ctx->sc;
871 	vector_no = ctx->vector_no;
872 
873 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
874 	mtx_lock(&sc->aac_io_lock);
875 
876 	if (sc->msi_enabled) {
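	/*
	 * Determine the interrupt mode.  With MSI, vector 0 also carries the
	 * AIF (0x40000) and sync-command (0x1000) doorbell indications; in
	 * INTx mode the outbound doorbell register is decoded instead.
	 */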
877 		mode = AAC_INT_MODE_MSI;
878 		if (vector_no == 0) {
879 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
880 			if (bellbits & 0x40000)
881 				mode |= AAC_INT_MODE_AIF;
882 			else if (bellbits & 0x1000)
883 				mode |= AAC_INT_MODE_SYNC;
884 		}
885 	} else {
886 		mode = AAC_INT_MODE_INTX;
887 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
888 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
889 			bellbits = AAC_DB_RESPONSE_SENT_NS;
890 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
891 		} else {
892 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
893 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
894 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
895 				mode |= AAC_INT_MODE_AIF;
896 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
897 				mode |= AAC_INT_MODE_SYNC;
898 		}
899 		/* ODR readback, Prep #238630 */
900 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
901 	}
902 
903 	if (mode & AAC_INT_MODE_SYNC) {
904 		if (sc->aac_sync_cm) {
905 			cm = sc->aac_sync_cm;
906 			cm->cm_flags |= AAC_CMD_COMPLETED;
907 			/* is there a completion handler? */
908 			if (cm->cm_complete != NULL) {
909 				cm->cm_complete(cm);
910 			} else {
911 				/* assume that someone is sleeping on this command */
912 				wakeup(cm);
913 			}
914 			sc->flags &= ~AAC_QUEUE_FRZN;
915 			sc->aac_sync_cm = NULL;
916 		}
917 		mode = 0;
918 	}
919 
920 	if (mode & AAC_INT_MODE_AIF) {
921 		if (mode & AAC_INT_MODE_INTX) {
922 			aac_request_aif(sc);
923 			mode = 0;
924 		}
925 	}
926 
927 	if (mode) {
928 		/* handle async. status */
929 		index = sc->aac_host_rrq_idx[vector_no];
930 		for (;;) {
931 			isFastResponse = isAif = noMoreAif = 0;
932 			/* remove toggle bit (31) */
933 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
934 			/* check fast response bit (30) */
935 			if (handle & 0x40000000)
936 				isFastResponse = 1;
937 			/* check AIF bit (23) */
938 			else if (handle & 0x00800000)
939 				isAif = TRUE;
940 			handle &= 0x0000ffff;
941 			if (handle == 0)
942 				break;
943 
944 			cm = sc->aac_commands + (handle - 1);
945 			fib = cm->cm_fib;
946 			sc->aac_rrq_outstanding[vector_no]--;
947 			if (isAif) {
948 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
949 				if (!noMoreAif)
950 					aac_handle_aif(sc, fib);
951 				aac_remove_busy(cm);
952 				aacraid_release_command(cm);
953 			} else {
954 				if (isFastResponse) {
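					/* no response FIB was written back; fake an ST_OK status */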
955 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
956 					*((u_int32_t *)(fib->data)) = ST_OK;
957 					cm->cm_flags |= AAC_CMD_FASTRESP;
958 				}
959 				aac_remove_busy(cm);
960 				aac_unmap_command(cm);
961 				cm->cm_flags |= AAC_CMD_COMPLETED;
962 
963 				/* is there a completion handler? */
964 				if (cm->cm_complete != NULL) {
965 					cm->cm_complete(cm);
966 				} else {
967 					/* assume that someone is sleeping on this command */
968 					wakeup(cm);
969 				}
970 				sc->flags &= ~AAC_QUEUE_FRZN;
971 			}
972 
973 			sc->aac_common->ac_host_rrq[index++] = 0;
974 			if (index == (vector_no + 1) * sc->aac_vector_cap)
975 				index = vector_no * sc->aac_vector_cap;
976 			sc->aac_host_rrq_idx[vector_no] = index;
977 
978 			if ((isAif && !noMoreAif) || sc->aif_pending)
979 				aac_request_aif(sc);
980 		}
981 	}
982 
983 	if (mode & AAC_INT_MODE_AIF) {
984 		aac_request_aif(sc);
985 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
986 		mode = 0;
987 	}
988 
989 	/* see if we can start some more I/O */
990 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
991 		aacraid_startio(sc);
992 	mtx_unlock(&sc->aac_io_lock);
993 }
994 
995 /*
996  * Handle notification of one or more FIBs coming from the controller.
997  */
998 static void
999 aac_command_thread(struct aac_softc *sc)
1000 {
1001 	int retval;
1002 
1003 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1004 
1005 	mtx_lock(&sc->aac_io_lock);
1006 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1007 
1008 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1009 
1010 		retval = 0;
1011 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1012 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1013 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1014 
1015 		/*
1016 		 * First see if any FIBs need to be allocated.
1017 		 */
1018 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1019 			aac_alloc_commands(sc);
1020 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1021 			aacraid_startio(sc);
1022 		}
1023 
1024 		/*
1025 		 * While we're here, check to see if any commands are stuck.
1026 		 * This is pretty low-priority, so it's ok if it doesn't
1027 		 * always fire.
1028 		 */
1029 		if (retval == EWOULDBLOCK)
1030 			aac_timeout(sc);
1031 
1032 		/* Check the hardware printf message buffer */
1033 		if (sc->aac_common->ac_printf[0] != 0)
1034 			aac_print_printf(sc);
1035 	}
1036 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1037 	mtx_unlock(&sc->aac_io_lock);
1038 	wakeup(sc->aac_dev);
1039 
1040 	aac_kthread_exit(0);
1041 }
1042 
1043 /*
1044  * Submit a command to the controller, return when it completes.
1045  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1046  *     be stuck here forever.  At the same time, signals are not caught
1047  *     because there is a risk that a signal could wakeup the sleep before
1048  *     the card has a chance to complete the command.  Since there is no way
1049  *     to cancel a command that is in progress, we can't protect against the
1050  *     card completing a command late and spamming the command and data
1051  *     memory.  So, we are held hostage until the command completes.
1052  */
1053 int
1054 aacraid_wait_command(struct aac_command *cm)
1055 {
1056 	struct aac_softc *sc;
1057 	int error;
1058 
1059 	sc = cm->cm_sc;
1060 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1061 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1062 
1063 	/* Put the command on the ready queue and get things going */
1064 	aac_enqueue_ready(cm);
1065 	aacraid_startio(sc);
1066 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1067 	return(error);
1068 }
1069 
1070 /*
1071  * Command Buffer Management
1072  */
1073 
1074 /*
1075  * Allocate a command.
1076  */
1077 int
1078 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1079 {
1080 	struct aac_command *cm;
1081 
1082 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1083 
1084 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1085 		if (sc->total_fibs < sc->aac_max_fibs) {
1086 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1087 			wakeup(sc->aifthread);
1088 		}
1089 		return (EBUSY);
1090 	}
1091 
1092 	*cmp = cm;
1093 	return(0);
1094 }
1095 
1096 /*
1097  * Release a command back to the freelist.
1098  */
1099 void
1100 aacraid_release_command(struct aac_command *cm)
1101 {
1102 	struct aac_event *event;
1103 	struct aac_softc *sc;
1104 
1105 	sc = cm->cm_sc;
1106 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1107 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1108 
1109 	/* (re)initialize the command/FIB */
1110 	cm->cm_sgtable = NULL;
1111 	cm->cm_flags = 0;
1112 	cm->cm_complete = NULL;
1113 	cm->cm_ccb = NULL;
1114 	cm->cm_passthr_dmat = 0;
1115 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1116 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1117 	cm->cm_fib->Header.Unused = 0;
1118 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1119 
1120 	/*
1121 	 * These are duplicated in aacraid_map_command_sg() to cover the case
1122 	 * where an intermediate stage may have destroyed them.  They're left
1123 	 * initialized here for debugging purposes only.
1124 	 */
1125 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1126 	cm->cm_fib->Header.Handle = 0;
1127 
1128 	aac_enqueue_free(cm);
1129 
1130 	/*
1131 	 * Dequeue all events so that there's no risk of events getting
1132 	 * stranded.
1133 	 */
1134 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1135 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1136 		event->ev_callback(sc, event, event->ev_arg);
1137 	}
1138 }
1139 
1140 /*
1141  * Map helper for command/FIB allocation.
1142  */
1143 static void
1144 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1145 {
1146 	uint64_t	*fibphys;
1147 
1148 	fibphys = (uint64_t *)arg;
1149 
1150 	*fibphys = segs[0].ds_addr;
1151 }
1152 
1153 /*
1154  * Allocate and initialize commands/FIBs for this adapter.
1155  */
1156 static int
1157 aac_alloc_commands(struct aac_softc *sc)
1158 {
1159 	struct aac_command *cm;
1160 	struct aac_fibmap *fm;
1161 	uint64_t fibphys;
1162 	int i, error;
1163 	u_int32_t maxsize;
1164 
1165 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1166 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1167 
1168 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1169 		return (ENOMEM);
1170 
1171 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1172 	if (fm == NULL)
1173 		return (ENOMEM);
1174 
1175 	mtx_unlock(&sc->aac_io_lock);
1176 	/* allocate the FIBs in DMAable memory and load them */
1177 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1178 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1179 		device_printf(sc->aac_dev,
1180 			      "Not enough contiguous memory available.\n");
1181 		free(fm, M_AACRAIDBUF);
1182 		mtx_lock(&sc->aac_io_lock);
1183 		return (ENOMEM);
1184 	}
1185 
1186 	maxsize = sc->aac_max_fib_size + 31;
1187 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1188 		maxsize += sizeof(struct aac_fib_xporthdr);
1189 	/* Ignore errors since this doesn't bounce */
1190 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1191 			      sc->aac_max_fibs_alloc * maxsize,
1192 			      aac_map_command_helper, &fibphys, 0);
1193 	mtx_lock(&sc->aac_io_lock);
1194 
1195 	/* initialize constant fields in the command structure */
1196 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1197 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1198 		cm = sc->aac_commands + sc->total_fibs;
1199 		fm->aac_commands = cm;
1200 		cm->cm_sc = sc;
1201 		cm->cm_fib = (struct aac_fib *)
1202 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1203 		cm->cm_fibphys = fibphys + i * maxsize;
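		/*
		 * Align each FIB on a 32-byte boundary, leaving room for the
		 * transport header on NEW_COMM_TYPE1 adapters.
		 */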
1204 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1205 			u_int64_t fibphys_aligned;
1206 			fibphys_aligned =
1207 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1208 			cm->cm_fib = (struct aac_fib *)
1209 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1210 			cm->cm_fibphys = fibphys_aligned;
1211 		} else {
1212 			u_int64_t fibphys_aligned;
1213 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1214 			cm->cm_fib = (struct aac_fib *)
1215 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1216 			cm->cm_fibphys = fibphys_aligned;
1217 		}
1218 		cm->cm_index = sc->total_fibs;
1219 
1220 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1221 					       &cm->cm_datamap)) != 0)
1222 			break;
1223 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1224 			aacraid_release_command(cm);
1225 		sc->total_fibs++;
1226 	}
1227 
1228 	if (i > 0) {
1229 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1230 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1231 		return (0);
1232 	}
1233 
1234 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1235 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1236 	free(fm, M_AACRAIDBUF);
1237 	return (ENOMEM);
1238 }
1239 
1240 /*
1241  * Free FIBs owned by this adapter.
1242  */
1243 static void
1244 aac_free_commands(struct aac_softc *sc)
1245 {
1246 	struct aac_fibmap *fm;
1247 	struct aac_command *cm;
1248 	int i;
1249 
1250 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1251 
1252 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1253 
1254 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1255 		/*
1256 		 * We check against total_fibs to handle partially
1257 		 * allocated blocks.
1258 		 */
1259 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1260 			cm = fm->aac_commands + i;
1261 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1262 		}
1263 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1264 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1265 		free(fm, M_AACRAIDBUF);
1266 	}
1267 }
1268 
1269 /*
1270  * Command-mapping helper function - populate this command's s/g table.
1271  */
1272 void
1273 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1274 {
1275 	struct aac_softc *sc;
1276 	struct aac_command *cm;
1277 	struct aac_fib *fib;
1278 	int i;
1279 
1280 	cm = (struct aac_command *)arg;
1281 	sc = cm->cm_sc;
1282 	fib = cm->cm_fib;
1283 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1284 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1285 
1286 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1287 		return;
1288 
1289 	/* copy into the FIB */
1290 	if (cm->cm_sgtable != NULL) {
1291 		if (fib->Header.Command == RawIo2) {
1292 			struct aac_raw_io2 *raw;
1293 			struct aac_sge_ieee1212 *sg;
1294 			u_int32_t min_size = PAGE_SIZE, cur_size;
1295 			int conformable = TRUE;
1296 
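			/*
			 * A RIO2 s/g list is "conformant" when all elements
			 * except the first and last have the same size.
			 */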
1297 			raw = (struct aac_raw_io2 *)&fib->data[0];
1298 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1299 			raw->sgeCnt = nseg;
1300 
1301 			for (i = 0; i < nseg; i++) {
1302 				cur_size = segs[i].ds_len;
1303 				sg[i].addrHigh = 0;
1304 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1305 				sg[i].length = cur_size;
1306 				sg[i].flags = 0;
1307 				if (i == 0) {
1308 					raw->sgeFirstSize = cur_size;
1309 				} else if (i == 1) {
1310 					raw->sgeNominalSize = cur_size;
1311 					min_size = cur_size;
1312 				} else if ((i+1) < nseg &&
1313 					cur_size != raw->sgeNominalSize) {
1314 					conformable = FALSE;
1315 					if (cur_size < min_size)
1316 						min_size = cur_size;
1317 				}
1318 			}
1319 
1320 			/* not conformable: evaluate required sg elements */
1321 			if (!conformable) {
1322 				int j, err_found, nseg_new = nseg;
1323 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1324 					err_found = FALSE;
1325 					nseg_new = 2;
1326 					for (j = 1; j < nseg - 1; ++j) {
1327 						if (sg[j].length % (i*PAGE_SIZE)) {
1328 							err_found = TRUE;
1329 							break;
1330 						}
1331 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1332 					}
1333 					if (!err_found)
1334 						break;
1335 				}
1336 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1337 					!(sc->hint_flags & 4))
1338 					nseg = aac_convert_sgraw2(sc,
1339 						raw, i, nseg, nseg_new);
1340 			} else {
1341 				raw->flags |= RIO2_SGL_CONFORMANT;
1342 			}
1343 
1344 			/* update the FIB size for the s/g count */
1345 			fib->Header.Size += nseg *
1346 				sizeof(struct aac_sge_ieee1212);
1347 
1348 		} else if (fib->Header.Command == RawIo) {
1349 			struct aac_sg_tableraw *sg;
1350 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1351 			sg->SgCount = nseg;
1352 			for (i = 0; i < nseg; i++) {
1353 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1354 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1355 				sg->SgEntryRaw[i].Next = 0;
1356 				sg->SgEntryRaw[i].Prev = 0;
1357 				sg->SgEntryRaw[i].Flags = 0;
1358 			}
1359 			/* update the FIB size for the s/g count */
1360 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1361 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1362 			struct aac_sg_table *sg;
1363 			sg = cm->cm_sgtable;
1364 			sg->SgCount = nseg;
1365 			for (i = 0; i < nseg; i++) {
1366 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1367 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1368 			}
1369 			/* update the FIB size for the s/g count */
1370 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1371 		} else {
1372 			struct aac_sg_table64 *sg;
1373 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1374 			sg->SgCount = nseg;
1375 			for (i = 0; i < nseg; i++) {
1376 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1377 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1378 			}
1379 			/* update the FIB size for the s/g count */
1380 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1381 		}
1382 	}
1383 
1384 	/* Fix up the address values in the FIB.  Use the command array index
1385 	 * instead of a pointer since these fields are only 32 bits.  Shift
1386 	 * the SenderFibAddress over to make room for the fast response bit
1387 	 * and for the AIF bit.
1388 	 */
1389 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1390 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1391 
1392 	/* save a pointer to the command for speedy reverse-lookup */
1393 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1394 
1395 	if (cm->cm_passthr_dmat == 0) {
1396 		if (cm->cm_flags & AAC_CMD_DATAIN)
1397 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1398 							BUS_DMASYNC_PREREAD);
1399 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1400 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1401 							BUS_DMASYNC_PREWRITE);
1402 	}
1403 
1404 	cm->cm_flags |= AAC_CMD_MAPPED;
1405 
1406 	if (cm->cm_flags & AAC_CMD_WAIT) {
1407 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1408 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1409 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1410 		u_int32_t wait = 0;
1411 		sc->aac_sync_cm = cm;
1412 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1413 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1414 	} else {
1415 		int count = 10000000L;
1416 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1417 			if (--count == 0) {
1418 				aac_unmap_command(cm);
1419 				sc->flags |= AAC_QUEUE_FRZN;
1420 				aac_requeue_ready(cm);
				break;		/* give up - the command was requeued */
1421 			}
1422 			DELAY(5);			/* wait 5 usec. */
1423 		}
1424 	}
1425 }
1426 
1427 
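/*
 * Re-pack a non-conformant RawIo2 s/g list: split each intermediate element
 * into chunks of (pages * PAGE_SIZE) so that all but the first and last
 * elements share the same size, then mark the list RIO2_SGL_CONFORMANT.
 */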
1428 static int
1429 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1430 				   int pages, int nseg, int nseg_new)
1431 {
1432 	struct aac_sge_ieee1212 *sge;
1433 	int i, j, pos;
1434 	u_int32_t addr_low;
1435 
1436 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1437 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1438 	if (sge == NULL)
1439 		return nseg;
1440 
1441 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1442 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1443 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1444 			sge[pos].addrLow = addr_low;
1445 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1446 			if (addr_low < raw->sge[i].addrLow)
1447 				sge[pos].addrHigh++;
1448 			sge[pos].length = pages * PAGE_SIZE;
1449 			sge[pos].flags = 0;
1450 			pos++;
1451 		}
1452 	}
1453 	sge[pos] = raw->sge[nseg-1];
1454 	for (i = 1; i < nseg_new; ++i)
1455 		raw->sge[i] = sge[i];
1456 
1457 	free(sge, M_AACRAIDBUF);
1458 	raw->sgeCnt = nseg_new;
1459 	raw->flags |= RIO2_SGL_CONFORMANT;
1460 	raw->sgeNominalSize = pages * PAGE_SIZE;
1461 	return nseg_new;
1462 }
1463 
1464 
1465 /*
1466  * Unmap a command from controller-visible space.
1467  */
1468 static void
1469 aac_unmap_command(struct aac_command *cm)
1470 {
1471 	struct aac_softc *sc;
1472 
1473 	sc = cm->cm_sc;
1474 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1475 
1476 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1477 		return;
1478 
1479 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1480 		if (cm->cm_flags & AAC_CMD_DATAIN)
1481 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1482 					BUS_DMASYNC_POSTREAD);
1483 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1484 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1485 					BUS_DMASYNC_POSTWRITE);
1486 
1487 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1488 	}
1489 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1490 }
1491 
1492 /*
1493  * Hardware Interface
1494  */
1495 
1496 /*
1497  * Map callback for the common area - record its bus address.
1498  */
1499 static void
1500 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1501 {
1502 	struct aac_softc *sc;
1503 
1504 	sc = (struct aac_softc *)arg;
1505 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1506 
1507 	sc->aac_common_busaddr = segs[0].ds_addr;
1508 }
1509 
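/*
 * Wait for the adapter to come ready, check that the firmware is a
 * supported revision and read back its capability words and preferred
 * settings (FIB size, max. I/O size, s/g table size, FIB count).
 */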
1510 static int
1511 aac_check_firmware(struct aac_softc *sc)
1512 {
1513 	u_int32_t code, major, minor, maxsize;
1514 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1515 	time_t then;
1516 
1517 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1518 
1519 	/* check if flash update is running */
1520 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1521 		then = time_uptime;
1522 		do {
1523 			code = AAC_GET_FWSTATUS(sc);
1524 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1525 				device_printf(sc->aac_dev,
1526 						  "FATAL: controller not coming ready, "
1527 						   "status %x\n", code);
1528 				return(ENXIO);
1529 			}
1530 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1531 		/*
1532 		 * Delay 10 seconds.  The firmware is performing a soft reset right
1533 		 * now, so do not read the scratch pad register during that time.
1534 		 */
1535 		waitCount = 10 * 10000;
1536 		while (waitCount) {
1537 			DELAY(100);		/* delay 100 microseconds */
1538 			waitCount--;
1539 		}
1540 	}
1541 
1542 	/*
1543 	 * Wait for the adapter to come ready.
1544 	 */
1545 	then = time_uptime;
1546 	do {
1547 		code = AAC_GET_FWSTATUS(sc);
1548 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1549 			device_printf(sc->aac_dev,
1550 				      "FATAL: controller not coming ready, "
1551 					   "status %x\n", code);
1552 			return(ENXIO);
1553 		}
1554 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1555 
1556 	/*
1557 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1558 	 * firmware version 1.x are not compatible with this driver.
1559 	 */
1560 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1561 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1562 				     NULL, NULL)) {
1563 			device_printf(sc->aac_dev,
1564 				      "Error reading firmware version\n");
1565 			return (EIO);
1566 		}
1567 
1568 		/* These numbers are stored as ASCII! */
1569 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1570 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1571 		if (major == 1) {
1572 			device_printf(sc->aac_dev,
1573 			    "Firmware version %d.%d is not supported.\n",
1574 			    major, minor);
1575 			return (EINVAL);
1576 		}
1577 	}
1578 	/*
1579 	 * Retrieve the capabilities/supported options word so we know what
1580 	 * work-arounds to enable.  Some firmware revs don't support this
1581 	 * command.
1582 	 */
1583 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1584 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1585 			device_printf(sc->aac_dev,
1586 			     "RequestAdapterInfo failed\n");
1587 			return (EIO);
1588 		}
1589 	} else {
1590 		options = AAC_GET_MAILBOX(sc, 1);
1591 		atu_size = AAC_GET_MAILBOX(sc, 2);
1592 		sc->supported_options = options;
1593 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1594 
1595 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1596 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1597 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1598 		if (options & AAC_SUPPORTED_NONDASD)
1599 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1600 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1601 			&& (sizeof(bus_addr_t) > 4)
1602 			&& (sc->hint_flags & 0x1)) {
1603 			device_printf(sc->aac_dev,
1604 			    "Enabling 64-bit address support\n");
1605 			sc->flags |= AAC_FLAGS_SG_64BIT;
1606 		}
1607 		if (sc->aac_if.aif_send_command) {
1608 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1609 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1610 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1611 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1612 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1613 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1614 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1615 		}
1616 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1617 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1618 	}
1619 
1620 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1621 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1622 		return (ENXIO);
1623 	}
1624 
1625 	if (sc->hint_flags & 2) {
1626 		device_printf(sc->aac_dev,
1627 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1628 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1629 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1630 		device_printf(sc->aac_dev,
1631 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1632 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1633 	}
1634 
1635 	/* Check for broken hardware that only supports a lower number of commands */
1636 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1637 
1638 	/* Remap mem. resource, if required */
1639 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1640 		bus_release_resource(
1641 			sc->aac_dev, SYS_RES_MEMORY,
1642 			sc->aac_regs_rid0, sc->aac_regs_res0);
1643 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1644 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1645 			atu_size, RF_ACTIVE);
1646 		if (sc->aac_regs_res0 == NULL) {
1647 			sc->aac_regs_res0 = bus_alloc_resource_any(
1648 				sc->aac_dev, SYS_RES_MEMORY,
1649 				&sc->aac_regs_rid0, RF_ACTIVE);
1650 			if (sc->aac_regs_res0 == NULL) {
1651 				device_printf(sc->aac_dev,
1652 					"couldn't allocate register window\n");
1653 				return (ENXIO);
1654 			}
1655 		}
1656 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1657 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1658 	}
1659 
1660 	/* Read preferred settings */
1661 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1662 	sc->aac_max_sectors = 128;				/* 64KB */
1663 	sc->aac_max_aif = 1;
1664 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1665 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1666 		 - sizeof(struct aac_blockwrite64))
1667 		 / sizeof(struct aac_sg_entry64);
1668 	else
1669 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1670 		 - sizeof(struct aac_blockwrite))
1671 		 / sizeof(struct aac_sg_entry);
1672 
1673 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1674 		options = AAC_GET_MAILBOX(sc, 1);
1675 		sc->aac_max_fib_size = (options & 0xFFFF);
1676 		sc->aac_max_sectors = (options >> 16) << 1;
1677 		options = AAC_GET_MAILBOX(sc, 2);
1678 		sc->aac_sg_tablesize = (options >> 16);
1679 		options = AAC_GET_MAILBOX(sc, 3);
1680 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1681 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1682 			sc->aac_max_fibs = (options & 0xFFFF);
1683 		options = AAC_GET_MAILBOX(sc, 4);
1684 		sc->aac_max_aif = (options & 0xFFFF);
1685 		options = AAC_GET_MAILBOX(sc, 5);
1686 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1687 	}
1688 
1689 	maxsize = sc->aac_max_fib_size + 31;
1690 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1691 		maxsize += sizeof(struct aac_fib_xporthdr);
1692 	if (maxsize > PAGE_SIZE) {
1693 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1694 		maxsize = PAGE_SIZE;
1695 	}
1696 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1697 
1698 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1699 		sc->flags |= AAC_FLAGS_RAW_IO;
1700 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1701 	}
1702 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1703 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1704 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1705 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1706 	}
1707 
1708 #ifdef AACRAID_DEBUG
1709 	aacraid_get_fw_debug_buffer(sc);
1710 #endif
1711 	return (0);
1712 }
1713 
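/*
 * Build the adapter init structure in the common area and hand it to the
 * firmware with the AAC_MONKER_INITSTRUCT synchronous command.
 */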
1714 static int
1715 aac_init(struct aac_softc *sc)
1716 {
1717 	struct aac_adapter_init	*ip;
1718 	int i, error;
1719 
1720 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1721 
1722 	/* reset rrq index */
1723 	sc->aac_fibs_pushed_no = 0;
1724 	for (i = 0; i < sc->aac_max_msix; i++)
1725 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1726 
1727 	/*
1728 	 * Fill in the init structure.  This tells the adapter about the
1729 	 * physical location of various important shared data structures.
1730 	 */
1731 	ip = &sc->aac_common->ac_init;
1732 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1733 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1734 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1735 		sc->flags |= AAC_FLAGS_RAW_IO;
1736 	}
1737 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1738 
1739 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1740 					 offsetof(struct aac_common, ac_fibs);
1741 	ip->AdapterFibsVirtualAddress = 0;
1742 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1743 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1744 
1745 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1746 				  offsetof(struct aac_common, ac_printf);
1747 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1748 
1749 	/*
1750 	 * The adapter assumes that pages are 4K in size, except on some
1751 	 * broken firmware versions that do the page->byte conversion twice,
1752 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1753 	 * Round up since the granularity is so high.
1754 	 */
1755 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1756 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1757 		ip->HostPhysMemPages =
1758 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1759 	}
1760 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1761 
1762 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1763 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1764 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1765 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1766 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1767 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1768 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1769 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1770 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1771 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1772 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1773 	}
1774 	ip->MaxNumAif = sc->aac_max_aif;
1775 	ip->HostRRQ_AddrLow =
1776 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1777 	/* always 32-bit address */
1778 	ip->HostRRQ_AddrHigh = 0;
1779 
1780 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1781 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1782 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1783 		device_printf(sc->aac_dev, "Power Management enabled\n");
1784 	}
1785 
1786 	ip->MaxIoCommands = sc->aac_max_fibs;
1787 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1788 	ip->MaxFibSize = sc->aac_max_fib_size;
1789 
1790 	/*
1791 	 * Do controller-type-specific initialisation
1792 	 */
1793 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1794 
1795 	/*
1796 	 * Give the init structure to the controller.
1797 	 */
1798 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1799 			     sc->aac_common_busaddr +
1800 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1801 			     NULL, NULL)) {
1802 		device_printf(sc->aac_dev,
1803 			      "error establishing init structure\n");
1804 		error = EIO;
1805 		goto out;
1806 	}
1807 
1808 	/*
1809 	 * Check configuration issues
1810 	 */
1811 	if ((error = aac_check_config(sc)) != 0)
1812 		goto out;
1813 
1814 	error = 0;
1815 out:
1816 	return(error);
1817 }
1818 
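/*
 * Select the interrupt mode (MSI-X, MSI or legacy INTx) based on what the
 * controller reported via GETCOMMPREF and what the OS can allocate, and
 * derive the per-vector command budget (aac_vector_cap).
 */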
1819 static void
1820 aac_define_int_mode(struct aac_softc *sc)
1821 {
1822 	device_t dev;
1823 	int cap, msi_count, error = 0;
1824 	uint32_t val;
1825 
1826 	dev = sc->aac_dev;
1827 
1828 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1829 		device_printf(dev, "using line interrupts\n");
1830 		sc->aac_max_msix = 1;
1831 		sc->aac_vector_cap = sc->aac_max_fibs;
1832 		return;
1833 	}
1834 
1835 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1836 	if (sc->aac_max_msix == 0) {
1837 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1838 			msi_count = 1;
1839 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1840 				device_printf(dev, "alloc msi failed - err=%d; "
1841 				    "will use INTx\n", error);
1842 				pci_release_msi(dev);
1843 			} else {
1844 				sc->msi_tupelo = TRUE;
1845 			}
1846 		}
1847 		if (sc->msi_tupelo)
1848 			device_printf(dev, "using MSI interrupts\n");
1849 		else
1850 			device_printf(dev, "using line interrupts\n");
1851 
1852 		sc->aac_max_msix = 1;
1853 		sc->aac_vector_cap = sc->aac_max_fibs;
1854 		return;
1855 	}
1856 
1857 	/* OS capability */
1858 	msi_count = pci_msix_count(dev);
1859 	if (msi_count > AAC_MAX_MSIX)
1860 		msi_count = AAC_MAX_MSIX;
1861 	if (msi_count > sc->aac_max_msix)
1862 		msi_count = sc->aac_max_msix;
1863 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1864 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1865 				   "will try MSI\n", msi_count, error);
1866 		pci_release_msi(dev);
1867 	} else {
1868 		sc->msi_enabled = TRUE;
1869 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1870 			msi_count);
1871 	}
1872 
1873 	if (!sc->msi_enabled) {
1874 		msi_count = 1;
1875 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1876 			device_printf(dev, "alloc msi failed - err=%d; "
1877 				           "will use INTx\n", error);
1878 			pci_release_msi(dev);
1879 		} else {
1880 			sc->msi_enabled = TRUE;
1881 			device_printf(dev, "using MSI interrupts\n");
1882 		}
1883 	}
1884 
1885 	if (sc->msi_enabled) {
1886 		/* now read controller capability from PCI config. space */
1887 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1888 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1889 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1890 			pci_release_msi(dev);
1891 			sc->msi_enabled = FALSE;
1892 		}
1893 	}
1894 
1895 	if (!sc->msi_enabled) {
1896 		device_printf(dev, "using legacy interrupts\n");
1897 		sc->aac_max_msix = 1;
1898 	} else {
1899 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1900 		if (sc->aac_max_msix > msi_count)
1901 			sc->aac_max_msix = msi_count;
1902 	}
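	/* Split the outstanding-FIB budget evenly across the active vectors */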
1903 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1904 
1905 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1906 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1907 }
1908 
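/*
 * Walk the PCI capability list and return the config-space offset of the
 * requested capability, or 0 if it is not present.
 */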
1909 static int
1910 aac_find_pci_capability(struct aac_softc *sc, int cap)
1911 {
1912 	device_t dev;
1913 	uint32_t status;
1914 	uint8_t ptr;
1915 
1916 	dev = sc->aac_dev;
1917 
1918 	status = pci_read_config(dev, PCIR_STATUS, 2);
1919 	if (!(status & PCIM_STATUS_CAPPRESENT))
1920 		return (0);
1921 
1922 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1923 	switch (status & PCIM_HDRTYPE) {
1924 	case 0:
1925 	case 1:
1926 		ptr = PCIR_CAP_PTR;
1927 		break;
1928 	case 2:
1929 		ptr = PCIR_CAP_PTR_2;
1930 		break;
1931 	default:
1932 		return (0);
1934 	}
1935 	ptr = pci_read_config(dev, ptr, 1);
1936 
1937 	while (ptr != 0) {
1938 		int next, val;
1939 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1940 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1941 		if (val == cap)
1942 			return (ptr);
1943 		ptr = next;
1944 	}
1945 
1946 	return (0);
1947 }
1948 
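/*
 * Allocate an IRQ resource and install an interrupt handler for every
 * vector in use; MSI/MSI-X vectors start at rid 1, legacy INTx uses rid 0.
 */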
1949 static int
1950 aac_setup_intr(struct aac_softc *sc)
1951 {
1952 	int i, msi_count, rid;
1953 	struct resource *res;
1954 	void *tag;
1955 
1956 	msi_count = sc->aac_max_msix;
1957 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1958 
1959 	for (i = 0; i < msi_count; i++, rid++) {
1960 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1961 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1962 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1963 			return (EINVAL);
1964 		}
1965 		sc->aac_irq_rid[i] = rid;
1966 		sc->aac_irq[i] = res;
1967 		if (aac_bus_setup_intr(sc->aac_dev, res,
1968 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1969 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1970 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1971 			return (EINVAL);
1972 		}
1973 		sc->aac_msix[i].vector_no = i;
1974 		sc->aac_msix[i].sc = sc;
1975 		sc->aac_intr[i] = tag;
1976 	}
1977 
1978 	return (0);
1979 }
1980 
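/*
 * Query the adapter's configuration status and, if the firmware reports the
 * pending configuration safe to apply (action <= CFACT_PAUSE), commit it.
 * Returns 0 on success and a negative value for the various failure cases.
 */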
1981 static int
1982 aac_check_config(struct aac_softc *sc)
1983 {
1984 	struct aac_fib *fib;
1985 	struct aac_cnt_config *ccfg;
1986 	struct aac_cf_status_hdr *cf_shdr;
1987 	int rval;
1988 
1989 	mtx_lock(&sc->aac_io_lock);
1990 	aac_alloc_sync_fib(sc, &fib);
1991 
1992 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1993 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1994 	ccfg->Command = VM_ContainerConfig;
1995 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1996 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1997 
1998 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
1999 		sizeof (struct aac_cnt_config));
2000 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2001 	if (rval == 0 && ccfg->Command == ST_OK &&
2002 		ccfg->CTCommand.param[0] == CT_OK) {
2003 		if (cf_shdr->action <= CFACT_PAUSE) {
2004 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2005 			ccfg->Command = VM_ContainerConfig;
2006 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2007 
2008 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2009 				sizeof (struct aac_cnt_config));
2010 			if (rval == 0 && ccfg->Command == ST_OK &&
2011 				ccfg->CTCommand.param[0] == CT_OK) {
2012 				/* successful completion */
2013 				rval = 0;
2014 			} else {
2015 				/* auto commit aborted due to error(s) */
2016 				rval = -2;
2017 			}
2018 		} else {
2019 			/* auto commit aborted due to adapter indicating
2020 			   config. issues too dangerous to auto commit  */
2021 			rval = -3;
2022 		}
2023 	} else {
2024 		/* error */
2025 		rval = -1;
2026 	}
2027 
2028 	aac_release_sync_fib(sc);
2029 	mtx_unlock(&sc->aac_io_lock);
2030 	return(rval);
2031 }
2032 
2033 /*
2034  * Send a synchronous command to the controller and wait for a result.
2035  * Indicate if the controller completed the command with an error status.
2036  */
2037 int
2038 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2039 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2040 		 u_int32_t *sp, u_int32_t *r1)
2041 {
2042 	time_t then;
2043 	u_int32_t status;
2044 
2045 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2046 
2047 	/* populate the mailbox */
2048 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2049 
2050 	/* ensure the sync command doorbell flag is cleared */
2051 	if (!sc->msi_enabled)
2052 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2053 
2054 	/* then set it to signal the adapter */
2055 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2056 
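	/*
	 * For AAC_MONKER_SYNCFIB with a caller-provided status of 0 the
	 * command is fire-and-forget; for everything else, spin below until
	 * the adapter raises the sync-command doorbell.
	 */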
2057 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2058 		/* spin waiting for the command to complete */
2059 		then = time_uptime;
2060 		do {
2061 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2062 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2063 				return(EIO);
2064 			}
2065 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2066 
2067 		/* clear the completion flag */
2068 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2069 
2070 		/* get the command status */
2071 		status = AAC_GET_MAILBOX(sc, 0);
2072 		if (sp != NULL)
2073 			*sp = status;
2074 
2075 		/* return parameter */
2076 		if (r1 != NULL)
2077 			*r1 = AAC_GET_MAILBOX(sc, 1);
2078 
2079 		if (status != AAC_SRB_STS_SUCCESS)
2080 			return (-1);
2081 	}
2082 	return(0);
2083 }
2084 
2085 static int
2086 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2087 		 struct aac_fib *fib, u_int16_t datasize)
2088 {
2089 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2090 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2091 
2092 	if (datasize > AAC_FIB_DATASIZE)
2093 		return(EINVAL);
2094 
2095 	/*
2096 	 * Set up the sync FIB
2097 	 */
2098 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2099 				AAC_FIBSTATE_INITIALISED |
2100 				AAC_FIBSTATE_EMPTY;
2101 	fib->Header.XferState |= xferstate;
2102 	fib->Header.Command = command;
2103 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2104 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2105 	fib->Header.SenderSize = sizeof(struct aac_fib);
2106 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2107 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2108 		offsetof(struct aac_common, ac_sync_fib);
2109 
2110 	/*
2111 	 * Give the FIB to the controller, wait for a response.
2112 	 */
2113 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2114 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2115 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2116 		return(EIO);
2117 	}
2118 
2119 	return (0);
2120 }
2121 
2122 /*
2123  * Check for commands that have been outstanding for a suspiciously long time,
2124  * and complain about them.
2125  */
2126 static void
2127 aac_timeout(struct aac_softc *sc)
2128 {
2129 	struct aac_command *cm;
2130 	time_t deadline;
2131 	int timedout;
2132 
2133 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2134 	/*
2135 	 * Traverse the busy command list, complain about late commands once
2136 	 * only.
2137 	 */
2138 	timedout = 0;
2139 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2140 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2141 		if (cm->cm_timestamp < deadline) {
2142 			device_printf(sc->aac_dev,
2143 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2144 				      cm, (int)(time_uptime-cm->cm_timestamp));
2145 			AAC_PRINT_FIB(sc, cm->cm_fib);
2146 			timedout++;
2147 		}
2148 	}
2149 
2150 	if (timedout)
2151 		aac_reset_adapter(sc);
2152 	aacraid_print_queues(sc);
2153 }
2154 
2155 /*
2156  * Interface Function Vectors
2157  */
2158 
2159 /*
2160  * Read the current firmware status word.
2161  */
2162 static int
2163 aac_src_get_fwstatus(struct aac_softc *sc)
2164 {
2165 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2166 
2167 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2168 }
2169 
2170 /*
2171  * Notify the controller of a change in a given queue
2172  */
2173 static void
2174 aac_src_qnotify(struct aac_softc *sc, int qbit)
2175 {
2176 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2177 
2178 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2179 }
2180 
2181 /*
2182  * Get the interrupt reason bits
2183  */
2184 static int
2185 aac_src_get_istatus(struct aac_softc *sc)
2186 {
2187 	int val;
2188 
2189 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2190 
2191 	if (sc->msi_enabled) {
2192 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2193 		if (val & AAC_MSI_SYNC_STATUS)
2194 			val = AAC_DB_SYNC_COMMAND;
2195 		else
2196 			val = 0;
2197 	} else {
2198 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2199 	}
2200 	return(val);
2201 }
2202 
2203 /*
2204  * Clear some interrupt reason bits
2205  */
2206 static void
2207 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2208 {
2209 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2210 
2211 	if (sc->msi_enabled) {
2212 		if (mask == AAC_DB_SYNC_COMMAND)
2213 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2214 	} else {
2215 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2216 	}
2217 }
2218 
2219 /*
2220  * Populate the mailbox and set the command word
2221  */
2222 static void
2223 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2224 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2225 {
2226 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2227 
2228 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2229 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2232 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2233 }
2234 
2235 static void
2236 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2237 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2238 {
2239 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2240 
2241 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2242 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2245 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2246 }
2247 
2248 /*
2249  * Fetch the immediate command status word
2250  */
2251 static int
2252 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2253 {
2254 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2255 
2256 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2257 }
2258 
2259 static int
2260 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2261 {
2262 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2263 
2264 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2265 }
2266 
2267 /*
2268  * Set/clear interrupt masks
2269  */
2270 static void
2271 aac_src_access_devreg(struct aac_softc *sc, int mode)
2272 {
2273 	u_int32_t val;
2274 
2275 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2276 
2277 	switch (mode) {
2278 	case AAC_ENABLE_INTERRUPT:
2279 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2280 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2281 				           AAC_INT_ENABLE_TYPE1_INTX));
2282 		break;
2283 
2284 	case AAC_DISABLE_INTERRUPT:
2285 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2286 		break;
2287 
2288 	case AAC_ENABLE_MSIX:
2289 		/* set bit 6 */
2290 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2291 		val |= 0x40;
2292 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2293 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2294 		/* unmask int. */
2295 		val = PMC_ALL_INTERRUPT_BITS;
2296 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2297 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2298 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2299 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2300 		break;
2301 
2302 	case AAC_DISABLE_MSIX:
2303 		/* reset bit 6 */
2304 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2305 		val &= ~0x40;
2306 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2307 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2308 		break;
2309 
2310 	case AAC_CLEAR_AIF_BIT:
2311 		/* set bit 5 */
2312 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2313 		val |= 0x20;
2314 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2315 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2316 		break;
2317 
2318 	case AAC_CLEAR_SYNC_BIT:
2319 		/* set bit 4 */
2320 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2321 		val |= 0x10;
2322 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2323 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2324 		break;
2325 
2326 	case AAC_ENABLE_INTX:
2327 		/* set bit 7 */
2328 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2329 		val |= 0x80;
2330 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2331 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2332 		/* unmask int. */
2333 		val = PMC_ALL_INTERRUPT_BITS;
2334 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2335 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2336 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2337 			val & (~(PMC_GLOBAL_INT_BIT2)));
2338 		break;
2339 
2340 	default:
2341 		break;
2342 	}
2343 }
2344 
2345 /*
2346  * New comm. interface: Send command functions
2347  */
2348 static int
2349 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2350 {
2351 	struct aac_fib_xporthdr *pFibX;
2352 	u_int32_t fibsize, high_addr;
2353 	u_int64_t address;
2354 
2355 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2356 
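	/*
	 * With MSI-X active, spread non-AIF FIBs across the response vectors:
	 * pick the next vector (1..n-1) whose outstanding count is still below
	 * aac_vector_cap, falling back to vector 0 when all are saturated, and
	 * encode the choice in the upper 16 bits of the FIB handle.
	 */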
2357 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2358 		sc->aac_max_msix > 1) {
2359 		u_int16_t vector_no, first_choice = 0xffff;
2360 
2361 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2362 		do {
2363 			vector_no += 1;
2364 			if (vector_no == sc->aac_max_msix)
2365 				vector_no = 1;
2366 			if (sc->aac_rrq_outstanding[vector_no] <
2367 				sc->aac_vector_cap)
2368 				break;
2369 			if (0xffff == first_choice)
2370 				first_choice = vector_no;
2371 			else if (vector_no == first_choice)
2372 				break;
2373 		} while (1);
2374 		if (vector_no == first_choice)
2375 			vector_no = 0;
2376 		sc->aac_rrq_outstanding[vector_no]++;
2377 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2378 			sc->aac_fibs_pushed_no = 0;
2379 		else
2380 			sc->aac_fibs_pushed_no++;
2381 
2382 		cm->cm_fib->Header.Handle += (vector_no << 16);
2383 	}
2384 
2385 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2386 		/* Encode the FIB size in 128-byte units, minus one */
2387 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2388 		/* Fill new FIB header */
2389 		address = cm->cm_fibphys;
2390 		high_addr = (u_int32_t)(address >> 32);
2391 		if (high_addr == 0L) {
2392 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2393 			cm->cm_fib->Header.u.TimeStamp = 0L;
2394 		} else {
2395 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2396 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2397 		}
2398 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2399 	} else {
2400 		/* Encode the XPORT header + FIB size in 128-byte units, minus one */
2401 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2402 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2403 		/* Fill XPORT header */
2404 		pFibX = (struct aac_fib_xporthdr *)
2405 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2406 		pFibX->Handle = cm->cm_fib->Header.Handle;
2407 		pFibX->HostAddress = cm->cm_fibphys;
2408 		pFibX->Size = cm->cm_fib->Header.Size;
2409 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2410 		high_addr = (u_int32_t)(address >> 32);
2411 	}
2412 
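	/*
	 * The encoded FIB size (128-byte units, capped at 31) is carried in
	 * the low bits of the address written to the inbound queue register.
	 */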
2413 	if (fibsize > 31)
2414 		fibsize = 31;
2415 	aac_enqueue_busy(cm);
2416 	if (high_addr) {
2417 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2418 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2419 	} else {
2420 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2421 	}
2422 	return 0;
2423 }
2424 
2425 /*
2426  * New comm. interface: get, set outbound queue index
2427  */
2428 static int
2429 aac_src_get_outb_queue(struct aac_softc *sc)
2430 {
2431 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2432 
2433 	return(-1);
2434 }
2435 
2436 static void
2437 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2438 {
2439 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2440 }
2441 
2442 /*
2443  * Debugging and Diagnostics
2444  */
2445 
2446 /*
2447  * Print some information about the controller.
2448  */
2449 static void
2450 aac_describe_controller(struct aac_softc *sc)
2451 {
2452 	struct aac_fib *fib;
2453 	struct aac_adapter_info	*info;
2454 	char *adapter_type = "Adaptec RAID controller";
2455 
2456 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2457 
2458 	mtx_lock(&sc->aac_io_lock);
2459 	aac_alloc_sync_fib(sc, &fib);
2460 
2461 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2462 		fib->data[0] = 0;
2463 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2464 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2465 		else {
2466 			struct aac_supplement_adapter_info *supp_info;
2467 
2468 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2469 			adapter_type = (char *)supp_info->AdapterTypeText;
2470 			sc->aac_feature_bits = supp_info->FeatureBits;
2471 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2472 		}
2473 	}
2474 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2475 		adapter_type,
2476 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2477 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2478 
2479 	fib->data[0] = 0;
2480 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2481 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2482 		aac_release_sync_fib(sc);
2483 		mtx_unlock(&sc->aac_io_lock);
2484 		return;
2485 	}
2486 
2487 	/* save the kernel revision structure for later use */
2488 	info = (struct aac_adapter_info *)&fib->data[0];
2489 	sc->aac_revision = info->KernelRevision;
2490 
2491 	if (bootverbose) {
2492 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2493 		    "(%dMB cache, %dMB execution), %s\n",
2494 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2495 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2496 		    info->BufferMem / (1024 * 1024),
2497 		    info->ExecutionMem / (1024 * 1024),
2498 		    aac_describe_code(aac_battery_platform,
2499 		    info->batteryPlatform));
2500 
2501 		device_printf(sc->aac_dev,
2502 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2503 		    info->KernelRevision.external.comp.major,
2504 		    info->KernelRevision.external.comp.minor,
2505 		    info->KernelRevision.external.comp.dash,
2506 		    info->KernelRevision.buildNumber,
2507 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2508 
2509 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2510 			      sc->supported_options,
2511 			      "\20"
2512 			      "\1SNAPSHOT"
2513 			      "\2CLUSTERS"
2514 			      "\3WCACHE"
2515 			      "\4DATA64"
2516 			      "\5HOSTTIME"
2517 			      "\6RAID50"
2518 			      "\7WINDOW4GB"
2519 			      "\10SCSIUPGD"
2520 			      "\11SOFTERR"
2521 			      "\12NORECOND"
2522 			      "\13SGMAP64"
2523 			      "\14ALARM"
2524 			      "\15NONDASD"
2525 			      "\16SCSIMGT"
2526 			      "\17RAIDSCSI"
2527 			      "\21ADPTINFO"
2528 			      "\22NEWCOMM"
2529 			      "\23ARRAY64BIT"
2530 			      "\24HEATSENSOR");
2531 	}
2532 
2533 	aac_release_sync_fib(sc);
2534 	mtx_unlock(&sc->aac_io_lock);
2535 }
2536 
2537 /*
2538  * Look up a text description of a numeric error code and return a pointer to
2539  * same.
2540  */
2541 static char *
2542 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2543 {
2544 	int i;
2545 
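	/*
	 * The table ends with a NULL-string sentinel followed by a default
	 * description, which is returned when no code matches.
	 */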
2546 	for (i = 0; table[i].string != NULL; i++)
2547 		if (table[i].code == code)
2548 			return(table[i].string);
2549 	return(table[i + 1].string);
2550 }
2551 
2552 /*
2553  * Management Interface
2554  */
2555 
2556 static int
2557 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2558 {
2559 	struct aac_softc *sc;
2560 
2561 	sc = dev->si_drv1;
2562 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2563 	device_busy(sc->aac_dev);
2564 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2565 	return 0;
2566 }
2567 
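/*
 * Management ioctl dispatcher.  Both the native FSACTL_* codes and their
 * Linux-compatible FSACTL_LNX_* counterparts are handled here.
 */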
2568 static int
2569 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2570 {
2571 	union aac_statrequest *as;
2572 	struct aac_softc *sc;
2573 	int error = 0;
2574 
2575 	as = (union aac_statrequest *)arg;
2576 	sc = dev->si_drv1;
2577 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2578 
2579 	switch (cmd) {
2580 	case AACIO_STATS:
2581 		switch (as->as_item) {
2582 		case AACQ_FREE:
2583 		case AACQ_READY:
2584 		case AACQ_BUSY:
2585 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2586 			      sizeof(struct aac_qstat));
2587 			break;
2588 		default:
2589 			error = ENOENT;
2590 			break;
2591 		}
2592 		break;
2593 
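	/*
	 * For the native ioctl codes the argument is a pointer to the user
	 * buffer pointer; dereference it and fall through to the shared
	 * (Linux-compatible) handling.
	 */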
2594 	case FSACTL_SENDFIB:
2595 	case FSACTL_SEND_LARGE_FIB:
2596 		arg = *(caddr_t*)arg;
2597 	case FSACTL_LNX_SENDFIB:
2598 	case FSACTL_LNX_SEND_LARGE_FIB:
2599 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2600 		error = aac_ioctl_sendfib(sc, arg);
2601 		break;
2602 	case FSACTL_SEND_RAW_SRB:
2603 		arg = *(caddr_t*)arg;
2604 	case FSACTL_LNX_SEND_RAW_SRB:
2605 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2606 		error = aac_ioctl_send_raw_srb(sc, arg);
2607 		break;
2608 	case FSACTL_AIF_THREAD:
2609 	case FSACTL_LNX_AIF_THREAD:
2610 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2611 		error = EINVAL;
2612 		break;
2613 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2614 		arg = *(caddr_t*)arg;
2615 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2616 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2617 		error = aac_open_aif(sc, arg);
2618 		break;
2619 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2620 		arg = *(caddr_t*)arg;
2621 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2622 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2623 		error = aac_getnext_aif(sc, arg);
2624 		break;
2625 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2626 		arg = *(caddr_t*)arg;
2627 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2628 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2629 		error = aac_close_aif(sc, arg);
2630 		break;
2631 	case FSACTL_MINIPORT_REV_CHECK:
2632 		arg = *(caddr_t*)arg;
2633 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2634 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2635 		error = aac_rev_check(sc, arg);
2636 		break;
2637 	case FSACTL_QUERY_DISK:
2638 		arg = *(caddr_t*)arg;
2639 	case FSACTL_LNX_QUERY_DISK:
2640 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2641 		error = aac_query_disk(sc, arg);
2642 		break;
2643 	case FSACTL_DELETE_DISK:
2644 	case FSACTL_LNX_DELETE_DISK:
2645 		/*
2646 		 * We don't trust userland to tell us when to delete a
2647 		 * container, rather we rely on an AIF coming from the
2648 		 * controller
2649 		 */
2650 		error = 0;
2651 		break;
2652 	case FSACTL_GET_PCI_INFO:
2653 		arg = *(caddr_t*)arg;
2654 	case FSACTL_LNX_GET_PCI_INFO:
2655 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2656 		error = aac_get_pci_info(sc, arg);
2657 		break;
2658 	case FSACTL_GET_FEATURES:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_GET_FEATURES:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2662 		error = aac_supported_features(sc, arg);
2663 		break;
2664 	default:
2665 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2666 		error = EINVAL;
2667 		break;
2668 	}
2669 	return(error);
2670 }
2671 
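/*
 * poll(2) handler: report the device readable as soon as any registered
 * AIF context still has unread entries in the AIF queue.
 */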
2672 static int
2673 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2674 {
2675 	struct aac_softc *sc;
2676 	struct aac_fib_context *ctx;
2677 	int revents;
2678 
2679 	sc = dev->si_drv1;
2680 	revents = 0;
2681 
2682 	mtx_lock(&sc->aac_io_lock);
2683 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2684 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2685 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2686 				revents |= poll_events & (POLLIN | POLLRDNORM);
2687 				break;
2688 			}
2689 		}
2690 	}
2691 	mtx_unlock(&sc->aac_io_lock);
2692 
2693 	if (revents == 0) {
2694 		if (poll_events & (POLLIN | POLLRDNORM))
2695 			selrecord(td, &sc->rcv_select);
2696 	}
2697 
2698 	return (revents);
2699 }
2700 
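/*
 * Event callback used by the ioctl paths: when a command slot becomes free,
 * try to allocate it on behalf of the waiting thread and wake it up; if the
 * allocation still fails, re-queue the event and try again later.
 */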
2701 static void
2702 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2703 {
2704 
2705 	switch (event->ev_type) {
2706 	case AAC_EVENT_CMFREE:
2707 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2708 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2709 			aacraid_add_event(sc, event);
2710 			return;
2711 		}
2712 		free(event, M_AACRAIDBUF);
2713 		wakeup(arg);
2714 		break;
2715 	default:
2716 		break;
2717 	}
2718 }
2719 
2720 /*
2721  * Send a FIB supplied from userspace
2722  */
2723 static int
2724 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2725 {
2726 	struct aac_command *cm;
2727 	int size, error;
2728 
2729 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2730 
2731 	cm = NULL;
2732 
2733 	/*
2734 	 * Get a command
2735 	 */
2736 	mtx_lock(&sc->aac_io_lock);
2737 	if (aacraid_alloc_command(sc, &cm)) {
2738 		struct aac_event *event;
2739 
2740 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2741 		    M_NOWAIT | M_ZERO);
2742 		if (event == NULL) {
2743 			error = EBUSY;
2744 			mtx_unlock(&sc->aac_io_lock);
2745 			goto out;
2746 		}
2747 		event->ev_type = AAC_EVENT_CMFREE;
2748 		event->ev_callback = aac_ioctl_event;
2749 		event->ev_arg = &cm;
2750 		aacraid_add_event(sc, event);
2751 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2752 	}
2753 	mtx_unlock(&sc->aac_io_lock);
2754 
2755 	/*
2756 	 * Fetch the FIB header, then re-copy to get data as well.
2757 	 */
2758 	if ((error = copyin(ufib, cm->cm_fib,
2759 			    sizeof(struct aac_fib_header))) != 0)
2760 		goto out;
2761 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2762 	if (size > sc->aac_max_fib_size) {
2763 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2764 			      size, sc->aac_max_fib_size);
2765 		size = sc->aac_max_fib_size;
2766 	}
2767 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2768 		goto out;
2769 	cm->cm_fib->Header.Size = size;
2770 	cm->cm_timestamp = time_uptime;
2771 	cm->cm_datalen = 0;
2772 
2773 	/*
2774 	 * Pass the FIB to the controller, wait for it to complete.
2775 	 */
2776 	mtx_lock(&sc->aac_io_lock);
2777 	error = aacraid_wait_command(cm);
2778 	mtx_unlock(&sc->aac_io_lock);
2779 	if (error != 0) {
2780 		device_printf(sc->aac_dev,
2781 			      "aacraid_wait_command return %d\n", error);
2782 		goto out;
2783 	}
2784 
2785 	/*
2786 	 * Copy the FIB and data back out to the caller.
2787 	 */
2788 	size = cm->cm_fib->Header.Size;
2789 	if (size > sc->aac_max_fib_size) {
2790 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2791 			      size, sc->aac_max_fib_size);
2792 		size = sc->aac_max_fib_size;
2793 	}
2794 	error = copyout(cm->cm_fib, ufib, size);
2795 
2796 out:
2797 	if (cm != NULL) {
2798 		mtx_lock(&sc->aac_io_lock);
2799 		aacraid_release_command(cm);
2800 		mtx_unlock(&sc->aac_io_lock);
2801 	}
2802 	return(error);
2803 }
2804 
2805 /*
2806  * Send a passthrough FIB supplied from userspace
2807  */
2808 static int
2809 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2810 {
2811 	struct aac_command *cm;
2812 	struct aac_fib *fib;
2813 	struct aac_srb *srbcmd;
2814 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2815 	void *user_reply;
2816 	int error, transfer_data = 0;
2817 	bus_dmamap_t orig_map = 0;
2818 	u_int32_t fibsize = 0;
2819 	u_int64_t srb_sg_address;
2820 	u_int32_t srb_sg_bytecount;
2821 
2822 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2823 
2824 	cm = NULL;
2825 
2826 	mtx_lock(&sc->aac_io_lock);
2827 	if (aacraid_alloc_command(sc, &cm)) {
2828 		struct aac_event *event;
2829 
2830 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2831 		    M_NOWAIT | M_ZERO);
2832 		if (event == NULL) {
2833 			error = EBUSY;
2834 			mtx_unlock(&sc->aac_io_lock);
2835 			goto out;
2836 		}
2837 		event->ev_type = AAC_EVENT_CMFREE;
2838 		event->ev_callback = aac_ioctl_event;
2839 		event->ev_arg = &cm;
2840 		aacraid_add_event(sc, event);
2841 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2842 	}
2843 	mtx_unlock(&sc->aac_io_lock);
2844 
2845 	cm->cm_data = NULL;
2846 	/* save original dma map */
2847 	orig_map = cm->cm_datamap;
2848 
2849 	fib = cm->cm_fib;
2850 	srbcmd = (struct aac_srb *)fib->data;
2851 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2852 	    sizeof (u_int32_t))) != 0)
2853 		goto out;
2854 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2855 		error = EINVAL;
2856 		goto out;
2857 	}
2858 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2859 		goto out;
2860 
2861 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2862 	srbcmd->retry_limit = 0;	/* obsolete */
2863 
2864 	/* only one sg element from userspace supported */
2865 	if (srbcmd->sg_map.SgCount > 1) {
2866 		error = EINVAL;
2867 		goto out;
2868 	}
2869 	/* check fibsize */
2870 	if (fibsize == (sizeof(struct aac_srb) +
2871 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2872 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2873 		struct aac_sg_entry sg;
2874 
2875 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2876 			goto out;
2877 
2878 		srb_sg_bytecount = sg.SgByteCount;
2879 		srb_sg_address = (u_int64_t)sg.SgAddress;
2880 	} else if (fibsize == (sizeof(struct aac_srb) +
2881 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2882 #ifdef __LP64__
2883 		struct aac_sg_entry64 *sgp =
2884 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2885 		struct aac_sg_entry64 sg;
2886 
2887 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2888 			goto out;
2889 
2890 		srb_sg_bytecount = sg.SgByteCount;
2891 		srb_sg_address = sg.SgAddress;
2892 #else
2893 		error = EINVAL;
2894 		goto out;
2895 #endif
2896 	} else {
2897 		error = EINVAL;
2898 		goto out;
2899 	}
2900 	user_reply = (char *)arg + fibsize;
2901 	srbcmd->data_len = srb_sg_bytecount;
2902 	if (srbcmd->sg_map.SgCount == 1)
2903 		transfer_data = 1;
2904 
2905 	if (transfer_data) {
2906 		/*
2907 		 * Create DMA tag for the passthr. data buffer and allocate it.
2908 		 */
2909 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2910 			1, 0,			/* algnmnt, boundary */
2911 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2912 			BUS_SPACE_MAXADDR_32BIT :
2913 			0x7fffffff,		/* lowaddr */
2914 			BUS_SPACE_MAXADDR, 	/* highaddr */
2915 			NULL, NULL, 		/* filter, filterarg */
2916 			srb_sg_bytecount, 	/* size */
2917 			sc->aac_sg_tablesize,	/* nsegments */
2918 			srb_sg_bytecount, 	/* maxsegsize */
2919 			0,			/* flags */
2920 			NULL, NULL,		/* No locking needed */
2921 			&cm->cm_passthr_dmat)) {
2922 			error = ENOMEM;
2923 			goto out;
2924 		}
2925 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2926 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2927 			error = ENOMEM;
2928 			goto out;
2929 		}
2930 		/* fill some cm variables */
2931 		cm->cm_datalen = srb_sg_bytecount;
2932 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2933 			cm->cm_flags |= AAC_CMD_DATAIN;
2934 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2935 			cm->cm_flags |= AAC_CMD_DATAOUT;
2936 
2937 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2938 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2939 				cm->cm_data, cm->cm_datalen)) != 0)
2940 				goto out;
2941 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2942 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2943 				BUS_DMASYNC_PREWRITE);
2944 		}
2945 	}
2946 
2947 	/* build the FIB */
2948 	fib->Header.Size = sizeof(struct aac_fib_header) +
2949 		sizeof(struct aac_srb);
2950 	fib->Header.XferState =
2951 		AAC_FIBSTATE_HOSTOWNED   |
2952 		AAC_FIBSTATE_INITIALISED |
2953 		AAC_FIBSTATE_EMPTY	 |
2954 		AAC_FIBSTATE_FROMHOST	 |
2955 		AAC_FIBSTATE_REXPECTED   |
2956 		AAC_FIBSTATE_NORM	 |
2957 		AAC_FIBSTATE_ASYNC;
2958 
2959 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2960 		ScsiPortCommandU64 : ScsiPortCommand;
2961 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2962 
2963 	/* send command */
2964 	if (transfer_data) {
2965 		bus_dmamap_load(cm->cm_passthr_dmat,
2966 			cm->cm_datamap, cm->cm_data,
2967 			cm->cm_datalen,
2968 			aacraid_map_command_sg, cm, 0);
2969 	} else {
2970 		aacraid_map_command_sg(cm, NULL, 0, 0);
2971 	}
2972 
2973 	/* wait for completion */
2974 	mtx_lock(&sc->aac_io_lock);
2975 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2976 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2977 	mtx_unlock(&sc->aac_io_lock);
2978 
2979 	/* copy data */
2980 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2981 		if ((error = copyout(cm->cm_data,
2982 			(void *)(uintptr_t)srb_sg_address,
2983 			cm->cm_datalen)) != 0)
2984 			goto out;
2985 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2986 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2987 				BUS_DMASYNC_POSTREAD);
2988 	}
2989 
2990 	/* status */
2991 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2992 
2993 out:
2994 	if (cm && cm->cm_data) {
2995 		if (transfer_data)
2996 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2997 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2998 		cm->cm_datamap = orig_map;
2999 	}
3000 	if (cm && cm->cm_passthr_dmat)
3001 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3002 	if (cm) {
3003 		mtx_lock(&sc->aac_io_lock);
3004 		aacraid_release_command(cm);
3005 		mtx_unlock(&sc->aac_io_lock);
3006 	}
3007 	return(error);
3008 }
3009 
3010 /*
3011  * Request an AIF from the controller (new comm. type1)
3012  */
3013 static void
3014 aac_request_aif(struct aac_softc *sc)
3015 {
3016 	struct aac_command *cm;
3017 	struct aac_fib *fib;
3018 
3019 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3020 
3021 	if (aacraid_alloc_command(sc, &cm)) {
3022 		sc->aif_pending = 1;
3023 		return;
3024 	}
3025 	sc->aif_pending = 0;
3026 
3027 	/* build the FIB */
3028 	fib = cm->cm_fib;
3029 	fib->Header.Size = sizeof(struct aac_fib);
3030 	fib->Header.XferState =
3031 		AAC_FIBSTATE_HOSTOWNED   |
3032 		AAC_FIBSTATE_INITIALISED |
3033 		AAC_FIBSTATE_EMPTY	 |
3034 		AAC_FIBSTATE_FROMHOST	 |
3035 		AAC_FIBSTATE_REXPECTED   |
3036 		AAC_FIBSTATE_NORM	 |
3037 		AAC_FIBSTATE_ASYNC;
3038 	/* set AIF marker */
3039 	fib->Header.Handle = 0x00800000;
3040 	fib->Header.Command = AifRequest;
3041 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3042 
3043 	aacraid_map_command_sg(cm, NULL, 0, 0);
3044 }
3045 
3046 
3047 /*
3048  * cdevpriv interface private destructor.
3049  */
3050 static void
3051 aac_cdevpriv_dtor(void *arg)
3052 {
3053 	struct aac_softc *sc;
3054 
3055 	sc = arg;
3056 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3057 	device_unbusy(sc->aac_dev);
3058 }
3059 
3060 /*
3061  * Handle an AIF sent to us by the controller; queue it for later reference.
3062  * If the queue fills up, then drop the older entries.
3063  */
3064 static void
3065 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3066 {
3067 	struct aac_aif_command *aif;
3068 	struct aac_container *co, *co_next;
3069 	struct aac_fib_context *ctx;
3070 	struct aac_fib *sync_fib;
3071 	struct aac_mntinforesp mir;
3072 	int next, current, found;
3073 	int count = 0, changed = 0, i = 0;
3074 	u_int32_t channel, uid;
3075 
3076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3077 
3078 	aif = (struct aac_aif_command*)&fib->data[0];
3079 	aacraid_print_aif(sc, aif);
3080 
3081 	/* Is it an event that we should care about? */
3082 	switch (aif->command) {
3083 	case AifCmdEventNotify:
3084 		switch (aif->data.EN.type) {
3085 		case AifEnAddContainer:
3086 		case AifEnDeleteContainer:
3087 			/*
3088 			 * A container was added or deleted, but the message
3089 			 * doesn't tell us anything else!  Re-enumerate the
3090 			 * containers and sort things out.
3091 			 */
3092 			aac_alloc_sync_fib(sc, &sync_fib);
3093 			do {
3094 				/*
3095 				 * Ask the controller for its containers one at
3096 				 * a time.
3097 				 * XXX What if the controller's list changes
3098 				 * midway through this enumeration?
3099 				 * XXX This should be done async.
3100 				 */
3101 				if (aac_get_container_info(sc, sync_fib, i,
3102 					&mir, &uid) != 0)
3103 					continue;
3104 				if (i == 0)
3105 					count = mir.MntRespCount;
3106 				/*
3107 				 * Check the container against our list.
3108 				 * co->co_found was already set to 0 in a
3109 				 * previous run.
3110 				 */
3111 				if ((mir.Status == ST_OK) &&
3112 				    (mir.MntTable[0].VolType != CT_NONE)) {
3113 					found = 0;
3114 					TAILQ_FOREACH(co,
3115 						      &sc->aac_container_tqh,
3116 						      co_link) {
3117 						if (co->co_mntobj.ObjectId ==
3118 						    mir.MntTable[0].ObjectId) {
3119 							co->co_found = 1;
3120 							found = 1;
3121 							break;
3122 						}
3123 					}
3124 					/*
3125 					 * If the container matched, continue
3126 					 * in the list.
3127 					 */
3128 					if (found) {
3129 						i++;
3130 						continue;
3131 					}
3132 
3133 					/*
3134 					 * This is a new container.  Do all the
3135 					 * appropriate things to set it up.
3136 					 */
3137 					aac_add_container(sc, &mir, 1, uid);
3138 					changed = 1;
3139 				}
3140 				i++;
3141 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3142 			aac_release_sync_fib(sc);
3143 
3144 			/*
3145 			 * Go through our list of containers and see which ones
3146 			 * were not marked 'found'.  Since the controller didn't
3147 			 * list them they must have been deleted.  Do the
3148 			 * appropriate steps to destroy the device.  Also reset
3149 			 * the co->co_found field.
3150 			 */
3151 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3152 			while (co != NULL) {
3153 				if (co->co_found == 0) {
3154 					co_next = TAILQ_NEXT(co, co_link);
3155 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3156 						     co_link);
3157 					free(co, M_AACRAIDBUF);
3158 					changed = 1;
3159 					co = co_next;
3160 				} else {
3161 					co->co_found = 0;
3162 					co = TAILQ_NEXT(co, co_link);
3163 				}
3164 			}
3165 
3166 			/* Attach the newly created containers */
3167 			if (changed) {
3168 				if (sc->cam_rescan_cb != NULL)
3169 					sc->cam_rescan_cb(sc, 0,
3170 					    AAC_CAM_TARGET_WILDCARD);
3171 			}
3172 
3173 			break;
3174 
3175 		case AifEnEnclosureManagement:
3176 			switch (aif->data.EN.data.EEE.eventType) {
3177 			case AIF_EM_DRIVE_INSERTION:
3178 			case AIF_EM_DRIVE_REMOVAL:
3179 				channel = aif->data.EN.data.EEE.unitID;
3180 				if (sc->cam_rescan_cb != NULL)
3181 					sc->cam_rescan_cb(sc,
3182 					    ((channel>>24) & 0xF) + 1,
3183 					    (channel & 0xFFFF));
3184 				break;
3185 			}
3186 			break;
3187 
3188 		case AifEnAddJBOD:
3189 		case AifEnDeleteJBOD:
3190 		case AifRawDeviceRemove:
3191 			channel = aif->data.EN.data.ECE.container;
3192 			if (sc->cam_rescan_cb != NULL)
3193 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3194 				    AAC_CAM_TARGET_WILDCARD);
3195 			break;
3196 
3197 		default:
3198 			break;
3199 		}
3200 
3201 	default:
3202 		break;
3203 	}
3204 
3205 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3206 	current = sc->aifq_idx;
3207 	next = (current + 1) % AAC_AIFQ_LENGTH;
3208 	if (next == 0)
3209 		sc->aifq_filled = 1;
3210 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3211 	/* modify AIF contexts */
3212 	if (sc->aifq_filled) {
3213 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3214 			if (next == ctx->ctx_idx)
3215 				ctx->ctx_wrap = 1;
3216 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3217 				ctx->ctx_idx = next;
3218 		}
3219 	}
3220 	sc->aifq_idx = next;
3221 	/* On the off chance that someone is sleeping for an aif... */
3222 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3223 		wakeup(sc->aac_aifq);
3224 	/* Wakeup any poll()ers */
3225 	selwakeuppri(&sc->rcv_select, PRIBIO);
3226 
3227 	return;
3228 }
3229 
3230 /*
3231  * Return the Revision of the driver to userspace and check to see if the
3232  * userspace app is possibly compatible.  This is extremely bogus since
3233  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3234  * returning what the card reported.
3235  */
3236 static int
3237 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3238 {
3239 	struct aac_rev_check rev_check;
3240 	struct aac_rev_check_resp rev_check_resp;
3241 	int error = 0;
3242 
3243 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3244 
3245 	/*
3246 	 * Copyin the revision struct from userspace
3247 	 */
3248 	if ((error = copyin(udata, (caddr_t)&rev_check,
3249 			sizeof(struct aac_rev_check))) != 0) {
3250 		return error;
3251 	}
3252 
3253 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3254 	      rev_check.callingRevision.buildNumber);
3255 
3256 	/*
3257 	 * Doctor up the response struct.
3258 	 */
3259 	rev_check_resp.possiblyCompatible = 1;
3260 	rev_check_resp.adapterSWRevision.external.comp.major =
3261 	    AAC_DRIVER_MAJOR_VERSION;
3262 	rev_check_resp.adapterSWRevision.external.comp.minor =
3263 	    AAC_DRIVER_MINOR_VERSION;
3264 	rev_check_resp.adapterSWRevision.external.comp.type =
3265 	    AAC_DRIVER_TYPE;
3266 	rev_check_resp.adapterSWRevision.external.comp.dash =
3267 	    AAC_DRIVER_BUGFIX_LEVEL;
3268 	rev_check_resp.adapterSWRevision.buildNumber =
3269 	    AAC_DRIVER_BUILD;
3270 
3271 	return(copyout((caddr_t)&rev_check_resp, udata,
3272 			sizeof(struct aac_rev_check_resp)));
3273 }
3274 
3275 /*
3276  * Pass the fib context to the caller
3277  */
3278 static int
3279 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3280 {
3281 	struct aac_fib_context *fibctx, *ctx;
3282 	int error = 0;
3283 
3284 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3285 
3286 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3287 	if (fibctx == NULL)
3288 		return (ENOMEM);
3289 
3290 	mtx_lock(&sc->aac_io_lock);
3291 	/* all elements are already 0, add to queue */
3292 	if (sc->fibctx == NULL)
3293 		sc->fibctx = fibctx;
3294 	else {
3295 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3296 			;
3297 		ctx->next = fibctx;
3298 		fibctx->prev = ctx;
3299 	}
3300 
3301 	/* evaluate unique value */
3302 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3303 	ctx = sc->fibctx;
3304 	while (ctx != fibctx) {
3305 		if (ctx->unique == fibctx->unique) {
3306 			fibctx->unique++;
3307 			ctx = sc->fibctx;
3308 		} else {
3309 			ctx = ctx->next;
3310 		}
3311 	}
3312 
3313 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3314 	mtx_unlock(&sc->aac_io_lock);
3315 	if (error)
3316 		aac_close_aif(sc, (caddr_t)ctx);
3317 	return error;
3318 }
3319 
3320 /*
3321  * Close the caller's fib context
3322  */
3323 static int
3324 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3325 {
3326 	struct aac_fib_context *ctx;
3327 
3328 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3329 
3330 	mtx_lock(&sc->aac_io_lock);
3331 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3332 		if (ctx->unique == *(uint32_t *)&arg) {
3333 			if (ctx == sc->fibctx)
3334 				sc->fibctx = NULL;
3335 			else {
3336 				ctx->prev->next = ctx->next;
3337 				if (ctx->next)
3338 					ctx->next->prev = ctx->prev;
3339 			}
3340 			break;
3341 		}
3342 	}
3343 	if (ctx)
3344 		free(ctx, M_AACRAIDBUF);
3345 
3346 	mtx_unlock(&sc->aac_io_lock);
3347 	return 0;
3348 }
3349 
3350 /*
3351  * Pass the caller the next AIF in their queue
3352  */
3353 static int
3354 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3355 {
3356 	struct get_adapter_fib_ioctl agf;
3357 	struct aac_fib_context *ctx;
3358 	int error;
3359 
3360 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3361 
3362 	mtx_lock(&sc->aac_io_lock);
3363 #ifdef COMPAT_FREEBSD32
3364 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3365 		struct get_adapter_fib_ioctl32 agf32;
3366 		error = copyin(arg, &agf32, sizeof(agf32));
3367 		if (error == 0) {
3368 			agf.AdapterFibContext = agf32.AdapterFibContext;
3369 			agf.Wait = agf32.Wait;
3370 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3371 		}
3372 	} else
3373 #endif
3374 		error = copyin(arg, &agf, sizeof(agf));
3375 	if (error == 0) {
3376 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3377 			if (agf.AdapterFibContext == ctx->unique)
3378 				break;
3379 		}
3380 		if (!ctx) {
3381 			mtx_unlock(&sc->aac_io_lock);
3382 			return (EFAULT);
3383 		}
3384 
3385 		error = aac_return_aif(sc, ctx, agf.AifFib);
3386 		if (error == EAGAIN && agf.Wait) {
3387 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3388 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3389 			while (error == EAGAIN) {
3390 				mtx_unlock(&sc->aac_io_lock);
3391 				error = tsleep(sc->aac_aifq, PRIBIO |
3392 					       PCATCH, "aacaif", 0);
3393 				mtx_lock(&sc->aac_io_lock);
3394 				if (error == 0)
3395 					error = aac_return_aif(sc, ctx, agf.AifFib);
3396 			}
3397 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3398 		}
3399 	}
3400 	mtx_unlock(&sc->aac_io_lock);
3401 	return(error);
3402 }
3403 
3404 /*
3405  * Hand the next AIF off the top of the queue out to userspace.
3406  */
3407 static int
3408 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3409 {
3410 	int current, error;
3411 
3412 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3413 
3414 	current = ctx->ctx_idx;
3415 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3416 		/* empty */
3417 		return (EAGAIN);
3418 	}
3419 	error =
3420 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3421 	if (error)
3422 		device_printf(sc->aac_dev,
3423 		    "aac_return_aif: copyout returned %d\n", error);
3424 	else {
3425 		ctx->ctx_wrap = 0;
3426 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3427 	}
3428 	return(error);
3429 }
3430 
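/*
 * Report the controller's PCI bus and slot number to userspace.
 */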
3431 static int
3432 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3433 {
3434 	struct aac_pci_info {
3435 		u_int32_t bus;
3436 		u_int32_t slot;
3437 	} pciinf;
3438 	int error;
3439 
3440 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3441 
3442 	pciinf.bus = pci_get_bus(sc->aac_dev);
3443 	pciinf.slot = pci_get_slot(sc->aac_dev);
3444 
3445 	error = copyout((caddr_t)&pciinf, uptr,
3446 			sizeof(struct aac_pci_info));
3447 
3448 	return (error);
3449 }
3450 
3451 static int
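/*
 * Report the driver's supported feature bits (large LBA, JBOD) for the
 * FSACTL_GET_FEATURES ioctl.
 */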
3452 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3453 {
3454 	struct aac_features f;
3455 	int error;
3456 
3457 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3458 
3459 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3460 		return (error);
3461 
3462 	/*
3463 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3464 	 * ALL zero in the featuresState, the driver will return the current
3465 	 * state of all the supported features; the data field will not be
3466 	 * valid.
3467 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3468 	 * a specific bit set in the featuresState, the driver will return the
3469 	 * current state of this specific feature and whatever data is
3470 	 * associated with the feature in the data field, or perform whatever
3471 	 * action is indicated in the data field.
3472 	 */
3473 	if (f.feat.fValue == 0) {
3474 		f.feat.fBits.largeLBA =
3475 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3476 		f.feat.fBits.JBODSupport = 1;
3477 		/* TODO: In the future, add other features state here as well */
3478 	} else {
3479 		if (f.feat.fBits.largeLBA)
3480 			f.feat.fBits.largeLBA =
3481 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3482 		/* TODO: Add other features state and data in the future */
3483 	}
3484 
3485 	error = copyout(&f, uptr, sizeof (f));
3486 	return (error);
3487 }
3488 
3489 /*
3490  * Give the userland some information about the container.  The AAC arch
3491  * expects the driver to be a SCSI passthrough type driver, so it expects
3492  * the containers to have b:t:l numbers.  Fake it.
3493  */
3494 static int
3495 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3496 {
3497 	struct aac_query_disk query_disk;
3498 	struct aac_container *co;
3499 	int error, id;
3500 
3501 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3502 
3503 	mtx_lock(&sc->aac_io_lock);
3504 	error = copyin(uptr, (caddr_t)&query_disk,
3505 		       sizeof(struct aac_query_disk));
3506 	if (error) {
3507 		mtx_unlock(&sc->aac_io_lock);
3508 		return (error);
3509 	}
3510 
3511 	id = query_disk.ContainerNumber;
3512 	if (id == -1) {
3513 		mtx_unlock(&sc->aac_io_lock);
3514 		return (EINVAL);
3515 	}
3516 
3517 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3518 		if (co->co_mntobj.ObjectId == id)
3519 			break;
3520 	}
3521 
3522 	if (co == NULL) {
3523 		query_disk.Valid = 0;
3524 		query_disk.Locked = 0;
3525 		query_disk.Deleted = 1;		/* XXX is this right? */
3526 	} else {
3527 		query_disk.Valid = 1;
3528 		query_disk.Locked = 1;
3529 		query_disk.Deleted = 0;
3530 		query_disk.Bus = device_get_unit(sc->aac_dev);
3531 		query_disk.Target = 0;
3532 		query_disk.Lun = 0;
3533 		query_disk.UnMapped = 0;
3534 	}
3535 
3536 	error = copyout((caddr_t)&query_disk, uptr,
3537 			sizeof(struct aac_query_disk));
3538 
3539 	mtx_unlock(&sc->aac_io_lock);
3540 	return (error);
3541 }
3542 
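/*
 * Create the pseudo "container bus" child device through which the logical
 * volumes (containers) are exposed to CAM.
 */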
3543 static void
3544 aac_container_bus(struct aac_softc *sc)
3545 {
3546 	struct aac_sim *sim;
3547 	device_t child;
3548 
3549 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3550 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3551 	if (sim == NULL) {
3552 		device_printf(sc->aac_dev,
3553 	    	"No memory to add container bus\n");
3554 		panic("Out of memory?!");
3555 	}
3556 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3557 	if (child == NULL) {
3558 		device_printf(sc->aac_dev,
3559 	    	"device_add_child failed for container bus\n");
3560 		free(sim, M_AACRAIDBUF);
3561 		panic("Out of memory?!");
3562 	}
3563 
3564 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3565 	sim->BusNumber = 0;
3566 	sim->BusType = CONTAINER_BUS;
3567 	sim->InitiatorBusId = -1;
3568 	sim->aac_sc = sc;
3569 	sim->sim_dev = child;
3570 	sim->aac_cam = NULL;
3571 
3572 	device_set_ivars(child, sim);
3573 	device_set_desc(child, "Container Bus");
3574 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3575 	/*
3576 	device_set_desc(child, aac_describe_code(aac_container_types,
3577 			mir->MntTable[0].VolType));
3578 	*/
3579 	bus_generic_attach(sc->aac_dev);
3580 }
3581 
3582 static void
3583 aac_get_bus_info(struct aac_softc *sc)
3584 {
3585 	struct aac_fib *fib;
3586 	struct aac_ctcfg *c_cmd;
3587 	struct aac_ctcfg_resp *c_resp;
3588 	struct aac_vmioctl *vmi;
3589 	struct aac_vmi_businf_resp *vmi_resp;
3590 	struct aac_getbusinf businfo;
3591 	struct aac_sim *caminf;
3592 	device_t child;
3593 	int i, error;
3594 
3595 	mtx_lock(&sc->aac_io_lock);
3596 	aac_alloc_sync_fib(sc, &fib);
3597 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3598 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3599 
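	/* Ask the firmware for the SCSI method ID used by later VM_Ioctl requests. */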
3600 	c_cmd->Command = VM_ContainerConfig;
3601 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3602 	c_cmd->param = 0;
3603 
3604 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3605 	    sizeof(struct aac_ctcfg));
3606 	if (error) {
3607 		device_printf(sc->aac_dev, "Error %d sending "
3608 		    "VM_ContainerConfig command\n", error);
3609 		aac_release_sync_fib(sc);
3610 		mtx_unlock(&sc->aac_io_lock);
3611 		return;
3612 	}
3613 
3614 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3615 	if (c_resp->Status != ST_OK) {
3616 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3617 		    c_resp->Status);
3618 		aac_release_sync_fib(sc);
3619 		mtx_unlock(&sc->aac_io_lock);
3620 		return;
3621 	}
3622 
3623 	sc->scsi_method_id = c_resp->param;
3624 
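	/* Query the physical bus configuration with a VM_Ioctl/GetBusInfo request. */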
3625 	vmi = (struct aac_vmioctl *)&fib->data[0];
3626 	bzero(vmi, sizeof(struct aac_vmioctl));
3627 
3628 	vmi->Command = VM_Ioctl;
3629 	vmi->ObjType = FT_DRIVE;
3630 	vmi->MethId = sc->scsi_method_id;
3631 	vmi->ObjId = 0;
3632 	vmi->IoctlCmd = GetBusInfo;
3633 
3634 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3635 	    sizeof(struct aac_vmi_businf_resp));
3636 	if (error) {
3637 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3638 		    error);
3639 		aac_release_sync_fib(sc);
3640 		mtx_unlock(&sc->aac_io_lock);
3641 		return;
3642 	}
3643 
3644 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3645 	if (vmi_resp->Status != ST_OK) {
3646 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3647 		    vmi_resp->Status);
3648 		aac_release_sync_fib(sc);
3649 		mtx_unlock(&sc->aac_io_lock);
3650 		return;
3651 	}
3652 
3653 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3654 	aac_release_sync_fib(sc);
3655 	mtx_unlock(&sc->aac_io_lock);
3656 
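	/* Add an "aacraidp" passthrough child bus for each valid physical bus. */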
3657 	for (i = 0; i < businfo.BusCount; i++) {
3658 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3659 			continue;
3660 
3661 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3662 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3663 		if (caminf == NULL) {
3664 			device_printf(sc->aac_dev,
3665 			    "No memory to add passthrough bus %d\n", i);
3666 			break;
3667 		}
3668 
3669 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3670 		if (child == NULL) {
3671 			device_printf(sc->aac_dev,
3672 			    "device_add_child failed for passthrough bus %d\n",
3673 			    i);
3674 			free(caminf, M_AACRAIDBUF);
3675 			break;
3676 		}
3677 
3678 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3679 		caminf->BusNumber = i + 1;
3680 		caminf->BusType = PASSTHROUGH_BUS;
3681 		caminf->InitiatorBusId = -1;
3682 		caminf->aac_sc = sc;
3683 		caminf->sim_dev = child;
3684 		caminf->aac_cam = NULL;
3685 
3686 		device_set_ivars(child, caminf);
3687 		device_set_desc(child, "SCSI Passthrough Bus");
3688 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3689 	}
3690 }
3691 
3692 /*
3693  * Check to see if the kernel is up and running. If we are in a
3694  * BlinkLED state, return the BlinkLED code.
3695  */
3696 static u_int32_t
3697 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3698 {
3699 	u_int32_t ret;
3700 
3701 	ret = AAC_GET_FWSTATUS(sc);
3702 
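	/*
	 * AAC_UP_AND_RUNNING indicates a healthy firmware kernel; on a
	 * firmware kernel panic the BlinkLED code is carried in bits 16-23
	 * of the status register.
	 */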
3703 	if (ret & AAC_UP_AND_RUNNING)
3704 		ret = 0;
3705 	else if (ret & AAC_KERNEL_PANIC && bled)
3706 		*bled = (ret >> 16) & 0xff;
3707 
3708 	return (ret);
3709 }
3710 
3711 /*
3712  * After an IOP reset the card basically has to be re-initialized as if
3713  * it were coming up from a cold boot, and the driver is responsible for
3714  * any I/O that was outstanding to the adapter at the time of the IOP
3715  * reset.  The init code is kept modular so that it can be called from
3716  * multiple places, which prepares the driver for an IOP reset.
3717  */
3718 static int
3719 aac_reset_adapter(struct aac_softc *sc)
3720 {
3721 	struct aac_command *cm;
3722 	struct aac_fib *fib;
3723 	struct aac_pause_command *pc;
3724 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3725 	int ret, msi_enabled_orig;
3726 
3727 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3728 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3729 
3730 	if (sc->aac_state & AAC_STATE_RESET) {
3731 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3732 		return (EINVAL);
3733 	}
3734 	sc->aac_state |= AAC_STATE_RESET;
3735 
3736 	/* disable interrupt */
3737 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3738 
3739 	/*
3740 	 * Abort all pending commands:
3741 	 * a) on the controller
3742 	 */
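	/*
	 * Each aborted command is flagged AAC_CMD_RESET and then either
	 * completed through its handler or woken up if a thread is
	 * sleeping on it.
	 */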
3743 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3744 		cm->cm_flags |= AAC_CMD_RESET;
3745 
3746 		/* is there a completion handler? */
3747 		if (cm->cm_complete != NULL) {
3748 			cm->cm_complete(cm);
3749 		} else {
3750 			/* assume that someone is sleeping on this
3751 			 * command
3752 			 */
3753 			wakeup(cm);
3754 		}
3755 	}
3756 
3757 	/* b) in the waiting queues */
3758 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3759 		cm->cm_flags |= AAC_CMD_RESET;
3760 
3761 		/* is there a completion handler? */
3762 		if (cm->cm_complete != NULL) {
3763 			cm->cm_complete(cm);
3764 		} else {
3765 			/* assume that someone is sleeping on this
3766 			 * command
3767 			 */
3768 			wakeup(cm);
3769 		}
3770 	}
3771 
3772 	/* flush drives */
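	/*
	 * If the adapter is still healthy, perform a clean shutdown so
	 * outstanding data is flushed; the I/O lock is dropped around the
	 * aacraid_shutdown() call.
	 */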
3773 	if (aac_check_adapter_health(sc, NULL) == 0) {
3774 		mtx_unlock(&sc->aac_io_lock);
3775 		(void) aacraid_shutdown(sc->aac_dev);
3776 		mtx_lock(&sc->aac_io_lock);
3777 	}
3778 
3779 	/* execute IOP reset */
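	/*
	 * Prefer the MU (core) reset when AAC_SUPPORTED_MU_RESET is set;
	 * otherwise fall back to the IOP_RESET_ALWAYS / IOP_RESET sync
	 * commands, optionally followed by a doorbell reset.
	 */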
3780 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3781 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3782 
3783 		/* We need to wait for 5 seconds before accessing the MU again:
3784 		 * 5 * 10000 iterations * 100us = 5,000,000us = 5s
3785 		 */
3786 		waitCount = 5 * 10000;
3787 		while (waitCount) {
3788 			DELAY(100);			/* delay 100 microseconds */
3789 			waitCount--;
3790 		}
3791 	} else {
3792 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3793 			0, 0, 0, 0, &status, &reset_mask);
3794 		if (ret && !sc->doorbell_mask) {
3795 			/* call IOP_RESET for older firmware */
3796 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3797 			    &status, NULL)) != 0) {
3798 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3799 					device_printf(sc->aac_dev,
3800 					    "IOP_RESET not supported\n");
3801 				} else {
3802 					/* probably timeout */
3803 					device_printf(sc->aac_dev,
3804 					    "IOP_RESET failed\n");
3805 				}
3806 
3807 				/* unwind aac_shutdown() */
3808 				aac_alloc_sync_fib(sc, &fib);
3809 				pc = (struct aac_pause_command *)&fib->data[0];
3810 				pc->Command = VM_ContainerConfig;
3811 				pc->Type = CT_PAUSE_IO;
3812 				pc->Timeout = 1;
3813 				pc->Min = 1;
3814 				pc->NoRescan = 1;
3815 
3816 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3817 				    fib, sizeof (struct aac_pause_command));
3818 				aac_release_sync_fib(sc);
3819 
3820 				goto finish;
3821 			}
3822 		} else if (sc->doorbell_mask) {
3823 			ret = 0;
3824 			reset_mask = sc->doorbell_mask;
3825 		}
3826 		if (!ret &&
3827 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3828 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3829 			/*
3830 			 * We need to wait for 5 seconds before accessing the
3831 			 * doorbell again;
3832 			 * 5 * 10000 iterations * 100us = 5,000,000us = 5s
3833 			 */
3834 			waitCount = 5 * 10000;
3835 			while (waitCount) {
3836 				DELAY(100);	/* delay 100 microseconds */
3837 				waitCount--;
3838 			}
3839 		}
3840 	}
3841 
3842 	/*
3843 	 * Initialize the adapter.
3844 	 */
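	/*
	 * Re-run firmware detection with MSI temporarily disabled, then
	 * restore the original MSI-X settings and re-initialize the
	 * adapter (skipped when the controller runs in sync mode).
	 */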
3845 	max_msix_orig = sc->aac_max_msix;
3846 	msi_enabled_orig = sc->msi_enabled;
3847 	sc->msi_enabled = FALSE;
3848 	if (aac_check_firmware(sc) != 0)
3849 		goto finish;
3850 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3851 		sc->aac_max_msix = max_msix_orig;
3852 		if (msi_enabled_orig) {
3853 			sc->msi_enabled = msi_enabled_orig;
3854 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3855 		}
3856 		mtx_unlock(&sc->aac_io_lock);
3857 		aac_init(sc);
3858 		mtx_lock(&sc->aac_io_lock);
3859 	}
3860 
3861 finish:
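	/* Leave reset state, re-enable interrupts and restart queued I/O. */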
3862 	sc->aac_state &= ~AAC_STATE_RESET;
3863 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3864 	aacraid_startio(sc);
3865 	return (0);
3866 }
3867