xref: /freebsd/sys/dev/aacraid/aacraid.c (revision eb24e1491f9900e922c78e53af588f22a3e9535f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
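/*
 * Lookup tables for aac_describe_code(): each table ends with a NULL
 * sentinel, and the entry following the sentinel provides the string
 * returned for codes that are not found.
 */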
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 static void		aac_cdevpriv_dtor(void *arg);
192 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
193 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
194 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
195 static void	aac_request_aif(struct aac_softc *sc);
196 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
197 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_return_aif(struct aac_softc *sc,
201 			       struct aac_fib_context *ctx, caddr_t uptr);
202 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
205 static void	aac_ioctl_event(struct aac_softc *sc,
206 				struct aac_event *event, void *arg);
207 static int	aac_reset_adapter(struct aac_softc *sc);
208 static int	aac_get_container_info(struct aac_softc *sc,
209 				       struct aac_fib *fib, int cid,
210 				       struct aac_mntinforesp *mir,
211 				       u_int32_t *uid);
212 static u_int32_t
213 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
214 
215 static struct cdevsw aacraid_cdevsw = {
216 	.d_version =	D_VERSION,
217 	.d_flags =	0,
218 	.d_open =	aac_open,
219 	.d_ioctl =	aac_ioctl,
220 	.d_poll =	aac_poll,
221 	.d_name =	"aacraid",
222 };
223 
224 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
225 
226 /* sysctl node */
227 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
228 
229 /*
230  * Device Interface
231  */
232 
233 /*
234  * Initialize the controller and softc
235  */
236 int
237 aacraid_attach(struct aac_softc *sc)
238 {
239 	int error, unit;
240 	struct aac_fib *fib;
241 	struct aac_mntinforesp mir;
242 	int count = 0, i = 0;
243 	u_int32_t uid;
244 
245 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
246 	sc->hint_flags = device_get_flags(sc->aac_dev);
247 	/*
248 	 * Initialize per-controller queues.
249 	 */
250 	aac_initq_free(sc);
251 	aac_initq_ready(sc);
252 	aac_initq_busy(sc);
253 
254 	/* mark controller as suspended until we get ourselves organised */
255 	sc->aac_state |= AAC_STATE_SUSPEND;
256 
257 	/*
258 	 * Check that the firmware on the card is supported.
259 	 */
260 	sc->msi_enabled = sc->msi_tupelo = FALSE;
261 	if ((error = aac_check_firmware(sc)) != 0)
262 		return(error);
263 
264 	/*
265 	 * Initialize locks
266 	 */
267 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
268 	TAILQ_INIT(&sc->aac_container_tqh);
269 	TAILQ_INIT(&sc->aac_ev_cmfree);
270 
271 	/* Initialize the clock daemon callout. */
272 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
273 
274 	/*
275 	 * Initialize the adapter.
276 	 */
277 	if ((error = aac_alloc(sc)) != 0)
278 		return(error);
279 	aac_define_int_mode(sc);
280 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
281 		if ((error = aac_init(sc)) != 0)
282 			return(error);
283 	}
284 
285 	/*
286 	 * Allocate and connect our interrupt.
287 	 */
288 	if ((error = aac_setup_intr(sc)) != 0)
289 		return(error);
290 
291 	/*
292 	 * Print a little information about the controller.
293 	 */
294 	aac_describe_controller(sc);
295 
296 	/*
297 	 * Make the control device.
298 	 */
299 	unit = device_get_unit(sc->aac_dev);
300 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
301 				 0640, "aacraid%d", unit);
302 	sc->aac_dev_t->si_drv1 = sc;
303 
304 	/* Create the AIF thread */
305 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
306 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
307 		panic("Could not create AIF thread");
308 
309 	/* Register the shutdown method to only be called post-dump */
310 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
311 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
312 		device_printf(sc->aac_dev,
313 			      "shutdown event registration failed\n");
314 
315 	/* Find containers */
316 	mtx_lock(&sc->aac_io_lock);
317 	aac_alloc_sync_fib(sc, &fib);
318 	/* loop over possible containers */
319 	do {
320 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
321 			continue;
322 		if (i == 0)
323 			count = mir.MntRespCount;
324 		aac_add_container(sc, &mir, 0, uid);
325 		i++;
326 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
327 	aac_release_sync_fib(sc);
328 	mtx_unlock(&sc->aac_io_lock);
329 
330 	/* Register with CAM for the containers */
331 	TAILQ_INIT(&sc->aac_sim_tqh);
332 	aac_container_bus(sc);
333 	/* Register with CAM for the non-DASD devices */
334 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
335 		aac_get_bus_info(sc);
336 
337 	/* poke the bus to actually attach the child devices */
338 	bus_generic_attach(sc->aac_dev);
339 
340 	/* mark the controller up */
341 	sc->aac_state &= ~AAC_STATE_SUSPEND;
342 
343 	/* enable interrupts now */
344 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
345 
346 	mtx_lock(&sc->aac_io_lock);
347 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
348 	mtx_unlock(&sc->aac_io_lock);
349 
350 	return(0);
351 }
352 
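/*
 * Periodic clock daemon: pushes the current host time to the firmware with a
 * SendHostTime FIB and reschedules itself to run again in 30 minutes.
 */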
353 static void
354 aac_daemon(void *arg)
355 {
356 	struct aac_softc *sc;
357 	struct timeval tv;
358 	struct aac_command *cm;
359 	struct aac_fib *fib;
360 
361 	sc = arg;
362 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
363 
364 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
365 	if (callout_pending(&sc->aac_daemontime) ||
366 	    callout_active(&sc->aac_daemontime) == 0)
367 		return;
368 	getmicrotime(&tv);
369 
370 	if (!aacraid_alloc_command(sc, &cm)) {
371 		fib = cm->cm_fib;
372 		cm->cm_timestamp = time_uptime;
373 		cm->cm_datalen = 0;
374 		cm->cm_flags |= AAC_CMD_WAIT;
375 
376 		fib->Header.Size =
377 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
378 		fib->Header.XferState =
379 			AAC_FIBSTATE_HOSTOWNED   |
380 			AAC_FIBSTATE_INITIALISED |
381 			AAC_FIBSTATE_EMPTY	 |
382 			AAC_FIBSTATE_FROMHOST	 |
383 			AAC_FIBSTATE_REXPECTED   |
384 			AAC_FIBSTATE_NORM	 |
385 			AAC_FIBSTATE_ASYNC	 |
386 			AAC_FIBSTATE_FAST_RESPONSE;
387 		fib->Header.Command = SendHostTime;
388 		*(uint32_t *)fib->data = tv.tv_sec;
389 
390 		aacraid_map_command_sg(cm, NULL, 0, 0);
391 		aacraid_release_command(cm);
392 	}
393 
394 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
395 }
396 
397 void
398 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
399 {
400 
401 	switch (event->ev_type & AAC_EVENT_MASK) {
402 	case AAC_EVENT_CMFREE:
403 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
404 		break;
405 	default:
406 		device_printf(sc->aac_dev, "aacraid_add_event: unknown event %d\n",
407 		    event->ev_type);
408 		break;
409 	}
410 
411 	return;
412 }
413 
414 /*
415  * Request information of container #cid
416  */
417 static int
418 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
419 		       struct aac_mntinforesp *mir, u_int32_t *uid)
420 {
421 	struct aac_command *cm;
422 	struct aac_fib *fib;
423 	struct aac_mntinfo *mi;
424 	struct aac_cnt_config *ccfg;
425 	int rval;
426 
427 	if (sync_fib == NULL) {
428 		if (aacraid_alloc_command(sc, &cm)) {
429 			device_printf(sc->aac_dev,
430 				"Warning, no free command available\n");
431 			return (-1);
432 		}
433 		fib = cm->cm_fib;
434 	} else {
435 		fib = sync_fib;
436 	}
437 
438 	mi = (struct aac_mntinfo *)&fib->data[0];
439 	/* 4KB sector (variable block size) support? 64-bit LBA? */
440 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
441 		mi->Command = VM_NameServeAllBlk;
442 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
443 		mi->Command = VM_NameServe64;
444 	else
445 		mi->Command = VM_NameServe;
446 	mi->MntType = FT_FILESYS;
447 	mi->MntCount = cid;
448 
449 	if (sync_fib) {
450 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
451 			 sizeof(struct aac_mntinfo))) {
452 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
453 			return (-1);
454 		}
455 	} else {
456 		cm->cm_timestamp = time_uptime;
457 		cm->cm_datalen = 0;
458 
459 		fib->Header.Size =
460 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
461 		fib->Header.XferState =
462 			AAC_FIBSTATE_HOSTOWNED   |
463 			AAC_FIBSTATE_INITIALISED |
464 			AAC_FIBSTATE_EMPTY	 |
465 			AAC_FIBSTATE_FROMHOST	 |
466 			AAC_FIBSTATE_REXPECTED   |
467 			AAC_FIBSTATE_NORM	 |
468 			AAC_FIBSTATE_ASYNC	 |
469 			AAC_FIBSTATE_FAST_RESPONSE;
470 		fib->Header.Command = ContainerCommand;
471 		if (aacraid_wait_command(cm) != 0) {
472 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
473 			aacraid_release_command(cm);
474 			return (-1);
475 		}
476 	}
477 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
478 
479 	/* Determine a 32-bit UID for the container; default to the container ID */
480 	*uid = cid;
481 	if (mir->MntTable[0].VolType != CT_NONE &&
482 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
483 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
484 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
485 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
486 		}
487 		ccfg = (struct aac_cnt_config *)&fib->data[0];
488 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
489 		ccfg->Command = VM_ContainerConfig;
490 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
491 		ccfg->CTCommand.param[0] = cid;
492 
493 		if (sync_fib) {
494 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
495 				sizeof(struct aac_cnt_config));
496 			if (rval == 0 && ccfg->Command == ST_OK &&
497 				ccfg->CTCommand.param[0] == CT_OK &&
498 				mir->MntTable[0].VolType != CT_PASSTHRU)
499 				*uid = ccfg->CTCommand.param[1];
500 		} else {
501 			fib->Header.Size =
502 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
503 			fib->Header.XferState =
504 				AAC_FIBSTATE_HOSTOWNED   |
505 				AAC_FIBSTATE_INITIALISED |
506 				AAC_FIBSTATE_EMPTY	 |
507 				AAC_FIBSTATE_FROMHOST	 |
508 				AAC_FIBSTATE_REXPECTED   |
509 				AAC_FIBSTATE_NORM	 |
510 				AAC_FIBSTATE_ASYNC	 |
511 				AAC_FIBSTATE_FAST_RESPONSE;
512 			fib->Header.Command = ContainerCommand;
513 			rval = aacraid_wait_command(cm);
514 			if (rval == 0 && ccfg->Command == ST_OK &&
515 				ccfg->CTCommand.param[0] == CT_OK &&
516 				mir->MntTable[0].VolType != CT_PASSTHRU)
517 				*uid = ccfg->CTCommand.param[1];
518 			aacraid_release_command(cm);
519 		}
520 	}
521 
522 	return (0);
523 }
524 
525 /*
526  * Create a device to represent a new container
527  */
528 static void
529 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
530 		  u_int32_t uid)
531 {
532 	struct aac_container *co;
533 
534 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
535 
536 	/*
537 	 * Check container volume type for validity.  Note that many of
538 	 * the possible types may never show up.
539 	 */
540 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
541 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
542 		       M_NOWAIT | M_ZERO);
543 		if (co == NULL) {
544 			panic("Out of memory?!");
545 		}
546 
547 		co->co_found = f;
548 		bcopy(&mir->MntTable[0], &co->co_mntobj,
549 		      sizeof(struct aac_mntobj));
550 		co->co_uid = uid;
551 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
552 	}
553 }
554 
555 /*
556  * Allocate resources associated with (sc)
557  */
558 static int
559 aac_alloc(struct aac_softc *sc)
560 {
561 	bus_size_t maxsize;
562 
563 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
564 
565 	/*
566 	 * Create DMA tag for mapping buffers into controller-addressable space.
567 	 */
568 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
569 			       1, 0, 			/* algnmnt, boundary */
570 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
571 			       BUS_SPACE_MAXADDR :
572 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
573 			       BUS_SPACE_MAXADDR, 	/* highaddr */
574 			       NULL, NULL, 		/* filter, filterarg */
575 			       sc->aac_max_sectors << 9, /* maxsize */
576 			       sc->aac_sg_tablesize,	/* nsegments */
577 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
578 			       BUS_DMA_ALLOCNOW,	/* flags */
579 			       busdma_lock_mutex,	/* lockfunc */
580 			       &sc->aac_io_lock,	/* lockfuncarg */
581 			       &sc->aac_buffer_dmat)) {
582 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
583 		return (ENOMEM);
584 	}
585 
586 	/*
587  * Create DMA tag for mapping FIBs into controller-addressable space.
588 	 */
589 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
590 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
591 			sizeof(struct aac_fib_xporthdr) + 31);
592 	else
593 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
594 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
595 			       1, 0, 			/* algnmnt, boundary */
596 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
597 			       BUS_SPACE_MAXADDR_32BIT :
598 			       0x7fffffff,		/* lowaddr */
599 			       BUS_SPACE_MAXADDR, 	/* highaddr */
600 			       NULL, NULL, 		/* filter, filterarg */
601 			       maxsize,  		/* maxsize */
602 			       1,			/* nsegments */
603 			       maxsize,			/* maxsegsize */
604 			       0,			/* flags */
605 			       NULL, NULL,		/* No locking needed */
606 			       &sc->aac_fib_dmat)) {
607 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
608 		return (ENOMEM);
609 	}
610 
611 	/*
612 	 * Create DMA tag for the common structure and allocate it.
613 	 */
614 	maxsize = sizeof(struct aac_common);
615 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
616 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
617 			       1, 0,			/* algnmnt, boundary */
618 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
619 			       BUS_SPACE_MAXADDR_32BIT :
620 			       0x7fffffff,		/* lowaddr */
621 			       BUS_SPACE_MAXADDR, 	/* highaddr */
622 			       NULL, NULL, 		/* filter, filterarg */
623 			       maxsize, 		/* maxsize */
624 			       1,			/* nsegments */
625 			       maxsize,			/* maxsegsize */
626 			       0,			/* flags */
627 			       NULL, NULL,		/* No locking needed */
628 			       &sc->aac_common_dmat)) {
629 		device_printf(sc->aac_dev,
630 			      "can't allocate common structure DMA tag\n");
631 		return (ENOMEM);
632 	}
633 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
634 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
635 		device_printf(sc->aac_dev, "can't allocate common structure\n");
636 		return (ENOMEM);
637 	}
638 
639 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
640 			sc->aac_common, maxsize,
641 			aac_common_map, sc, 0);
642 	bzero(sc->aac_common, maxsize);
643 
644 	/* Allocate some FIBs and associated command structs */
645 	TAILQ_INIT(&sc->aac_fibmap_tqh);
646 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
647 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
648 	mtx_lock(&sc->aac_io_lock);
649 	while (sc->total_fibs < sc->aac_max_fibs) {
650 		if (aac_alloc_commands(sc) != 0)
651 			break;
652 	}
653 	mtx_unlock(&sc->aac_io_lock);
654 	if (sc->total_fibs == 0)
655 		return (ENOMEM);
656 
657 	return (0);
658 }
659 
660 /*
661  * Free all of the resources associated with (sc)
662  *
663  * Should not be called if the controller is active.
664  */
665 void
666 aacraid_free(struct aac_softc *sc)
667 {
668 	int i;
669 
670 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
671 
672 	/* remove the control device */
673 	if (sc->aac_dev_t != NULL)
674 		destroy_dev(sc->aac_dev_t);
675 
676 	/* throw away any FIB buffers, discard the FIB DMA tag */
677 	aac_free_commands(sc);
678 	if (sc->aac_fib_dmat)
679 		bus_dma_tag_destroy(sc->aac_fib_dmat);
680 
681 	free(sc->aac_commands, M_AACRAIDBUF);
682 
683 	/* destroy the common area */
684 	if (sc->aac_common) {
685 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
686 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
687 				sc->aac_common_dmamap);
688 	}
689 	if (sc->aac_common_dmat)
690 		bus_dma_tag_destroy(sc->aac_common_dmat);
691 
692 	/* disconnect the interrupt handler */
693 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
694 		if (sc->aac_intr[i])
695 			bus_teardown_intr(sc->aac_dev,
696 				sc->aac_irq[i], sc->aac_intr[i]);
697 		if (sc->aac_irq[i])
698 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
699 				sc->aac_irq_rid[i], sc->aac_irq[i]);
700 		else
701 			break;
702 	}
703 	if (sc->msi_enabled || sc->msi_tupelo)
704 		pci_release_msi(sc->aac_dev);
705 
706 	/* destroy data-transfer DMA tag */
707 	if (sc->aac_buffer_dmat)
708 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
709 
710 	/* destroy the parent DMA tag */
711 	if (sc->aac_parent_dmat)
712 		bus_dma_tag_destroy(sc->aac_parent_dmat);
713 
714 	/* release the register window mapping */
715 	if (sc->aac_regs_res0 != NULL)
716 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
717 				     sc->aac_regs_rid0, sc->aac_regs_res0);
718 	if (sc->aac_regs_res1 != NULL)
719 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
720 				     sc->aac_regs_rid1, sc->aac_regs_res1);
721 }
722 
723 /*
724  * Disconnect from the controller completely, in preparation for unload.
725  */
726 int
727 aacraid_detach(device_t dev)
728 {
729 	struct aac_softc *sc;
730 	struct aac_container *co;
731 	struct aac_sim	*sim;
732 	int error;
733 
734 	sc = device_get_softc(dev);
735 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
736 
737 	callout_drain(&sc->aac_daemontime);
738 	/* Remove the child containers */
739 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
740 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
741 		free(co, M_AACRAIDBUF);
742 	}
743 
744 	/* Remove the CAM SIMs */
745 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
746 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
747 		error = device_delete_child(dev, sim->sim_dev);
748 		if (error)
749 			return (error);
750 		free(sim, M_AACRAIDBUF);
751 	}
752 
753 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
754 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
755 		wakeup(sc->aifthread);
756 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
757 	}
758 
759 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
760 		panic("Cannot shutdown AIF thread");
761 
762 	if ((error = aacraid_shutdown(dev)))
763 		return(error);
764 
765 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
766 
767 	aacraid_free(sc);
768 
769 	mtx_destroy(&sc->aac_io_lock);
770 
771 	return(0);
772 }
773 
774 /*
775  * Bring the controller down to a dormant state and detach all child devices.
776  *
777  * This function is called before detach or system shutdown.
778  *
779  * Note that we can assume that the bioq on the controller is empty, as we won't
780  * allow shutdown if any device is open.
781  */
782 int
783 aacraid_shutdown(device_t dev)
784 {
785 	struct aac_softc *sc;
786 	struct aac_fib *fib;
787 	struct aac_close_command *cc;
788 
789 	sc = device_get_softc(dev);
790 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
791 
792 	sc->aac_state |= AAC_STATE_SUSPEND;
793 
794 	/*
795 	 * Send a VM_CloseAll container command to the controller to convince
796 	 * it that we don't want to talk to it anymore.  We've already been
797 	 * closed and all I/O has completed.
798 	 */
799 	device_printf(sc->aac_dev, "shutting down controller...");
800 
801 	mtx_lock(&sc->aac_io_lock);
802 	aac_alloc_sync_fib(sc, &fib);
803 	cc = (struct aac_close_command *)&fib->data[0];
804 
805 	bzero(cc, sizeof(struct aac_close_command));
806 	cc->Command = VM_CloseAll;
807 	cc->ContainerId = 0xfffffffe;
808 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
809 	    sizeof(struct aac_close_command)))
810 		printf("FAILED.\n");
811 	else
812 		printf("done\n");
813 
814 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
815 	aac_release_sync_fib(sc);
816 	mtx_unlock(&sc->aac_io_lock);
817 
818 	return(0);
819 }
820 
821 /*
822  * Bring the controller to a quiescent state, ready for system suspend.
823  */
824 int
825 aacraid_suspend(device_t dev)
826 {
827 	struct aac_softc *sc;
828 
829 	sc = device_get_softc(dev);
830 
831 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
832 	sc->aac_state |= AAC_STATE_SUSPEND;
833 
834 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
835 	return(0);
836 }
837 
838 /*
839  * Bring the controller back to a state ready for operation.
840  */
841 int
842 aacraid_resume(device_t dev)
843 {
844 	struct aac_softc *sc;
845 
846 	sc = device_get_softc(dev);
847 
848 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
849 	sc->aac_state &= ~AAC_STATE_SUSPEND;
850 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
851 	return(0);
852 }
853 
854 /*
855  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
856  */
857 void
858 aacraid_new_intr_type1(void *arg)
859 {
860 	struct aac_msix_ctx *ctx;
861 	struct aac_softc *sc;
862 	int vector_no;
863 	struct aac_command *cm;
864 	struct aac_fib *fib;
865 	u_int32_t bellbits, bellbits_shifted, index, handle;
866 	int isFastResponse, isAif, noMoreAif, mode;
867 
868 	ctx = (struct aac_msix_ctx *)arg;
869 	sc = ctx->sc;
870 	vector_no = ctx->vector_no;
871 
872 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
873 	mtx_lock(&sc->aac_io_lock);
874 
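	/*
	 * Determine what this interrupt signals (an AIF, a synchronous
	 * command completion, or regular I/O completions on the host
	 * response ring) from the doorbell register or the MSI vector.
	 */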
875 	if (sc->msi_enabled) {
876 		mode = AAC_INT_MODE_MSI;
877 		if (vector_no == 0) {
878 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
879 			if (bellbits & 0x40000)
880 				mode |= AAC_INT_MODE_AIF;
881 			else if (bellbits & 0x1000)
882 				mode |= AAC_INT_MODE_SYNC;
883 		}
884 	} else {
885 		mode = AAC_INT_MODE_INTX;
886 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
887 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
888 			bellbits = AAC_DB_RESPONSE_SENT_NS;
889 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
890 		} else {
891 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
892 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
893 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
894 				mode |= AAC_INT_MODE_AIF;
895 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
896 				mode |= AAC_INT_MODE_SYNC;
897 		}
898 		/* ODR readback, Prep #238630 */
899 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
900 	}
901 
902 	if (mode & AAC_INT_MODE_SYNC) {
903 		if (sc->aac_sync_cm) {
904 			cm = sc->aac_sync_cm;
905 			cm->cm_flags |= AAC_CMD_COMPLETED;
906 			/* is there a completion handler? */
907 			if (cm->cm_complete != NULL) {
908 				cm->cm_complete(cm);
909 			} else {
910 				/* assume that someone is sleeping on this command */
911 				wakeup(cm);
912 			}
913 			sc->flags &= ~AAC_QUEUE_FRZN;
914 			sc->aac_sync_cm = NULL;
915 		}
916 		mode = 0;
917 	}
918 
919 	if (mode & AAC_INT_MODE_AIF) {
920 		if (mode & AAC_INT_MODE_INTX) {
921 			aac_request_aif(sc);
922 			mode = 0;
923 		}
924 	}
925 
926 	if (mode) {
927 		/* handle async. status */
928 		index = sc->aac_host_rrq_idx[vector_no];
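		/*
		 * Walk this vector's slice of the host response ring until an
		 * empty (zero) handle is found; each non-zero handle encodes
		 * the index of the completed command plus status bits.
		 */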
929 		for (;;) {
930 			isFastResponse = isAif = noMoreAif = 0;
931 			/* remove toggle bit (31) */
932 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
933 			/* check fast response bit (30) */
934 			if (handle & 0x40000000)
935 				isFastResponse = 1;
936 			/* check AIF bit (23) */
937 			else if (handle & 0x00800000)
938 				isAif = TRUE;
939 			handle &= 0x0000ffff;
940 			if (handle == 0)
941 				break;
942 
943 			cm = sc->aac_commands + (handle - 1);
944 			fib = cm->cm_fib;
945 			sc->aac_rrq_outstanding[vector_no]--;
946 			if (isAif) {
947 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
948 				if (!noMoreAif)
949 					aac_handle_aif(sc, fib);
950 				aac_remove_busy(cm);
951 				aacraid_release_command(cm);
952 			} else {
953 				if (isFastResponse) {
954 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
955 					*((u_int32_t *)(fib->data)) = ST_OK;
956 					cm->cm_flags |= AAC_CMD_FASTRESP;
957 				}
958 				aac_remove_busy(cm);
959 				aac_unmap_command(cm);
960 				cm->cm_flags |= AAC_CMD_COMPLETED;
961 
962 				/* is there a completion handler? */
963 				if (cm->cm_complete != NULL) {
964 					cm->cm_complete(cm);
965 				} else {
966 					/* assume that someone is sleeping on this command */
967 					wakeup(cm);
968 				}
969 				sc->flags &= ~AAC_QUEUE_FRZN;
970 			}
971 
972 			sc->aac_common->ac_host_rrq[index++] = 0;
973 			if (index == (vector_no + 1) * sc->aac_vector_cap)
974 				index = vector_no * sc->aac_vector_cap;
975 			sc->aac_host_rrq_idx[vector_no] = index;
976 
977 			if ((isAif && !noMoreAif) || sc->aif_pending)
978 				aac_request_aif(sc);
979 		}
980 	}
981 
982 	if (mode & AAC_INT_MODE_AIF) {
983 		aac_request_aif(sc);
984 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
985 		mode = 0;
986 	}
987 
988 	/* see if we can start some more I/O */
989 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
990 		aacraid_startio(sc);
991 	mtx_unlock(&sc->aac_io_lock);
992 }
993 
994 /*
995  * Handle notification of one or more FIBs coming from the controller.
996  */
997 static void
998 aac_command_thread(struct aac_softc *sc)
999 {
1000 	int retval;
1001 
1002 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1003 
1004 	mtx_lock(&sc->aac_io_lock);
1005 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1006 
1007 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1008 
1009 		retval = 0;
1010 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1011 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1012 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1013 
1014 		/*
1015 		 * First see if any FIBs need to be allocated.
1016 		 */
1017 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1018 			aac_alloc_commands(sc);
1019 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1020 			aacraid_startio(sc);
1021 		}
1022 
1023 		/*
1024 		 * While we're here, check to see if any commands are stuck.
1025 		 * This is pretty low-priority, so it's ok if it doesn't
1026 		 * always fire.
1027 		 */
1028 		if (retval == EWOULDBLOCK)
1029 			aac_timeout(sc);
1030 
1031 		/* Check the hardware printf message buffer */
1032 		if (sc->aac_common->ac_printf[0] != 0)
1033 			aac_print_printf(sc);
1034 	}
1035 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1036 	mtx_unlock(&sc->aac_io_lock);
1037 	wakeup(sc->aac_dev);
1038 
1039 	aac_kthread_exit(0);
1040 }
1041 
1042 /*
1043  * Submit a command to the controller, return when it completes.
1044  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1045  *     be stuck here forever.  At the same time, signals are not caught
1046  *     because there is a risk that a signal could wakeup the sleep before
1047  *     the card has a chance to complete the command.  Since there is no way
1048  *     to cancel a command that is in progress, we can't protect against the
1049  *     card completing a command late and spamming the command and data
1050  *     memory.  So, we are held hostage until the command completes.
1051  */
1052 int
1053 aacraid_wait_command(struct aac_command *cm)
1054 {
1055 	struct aac_softc *sc;
1056 	int error;
1057 
1058 	sc = cm->cm_sc;
1059 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1060 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1061 
1062 	/* Put the command on the ready queue and get things going */
1063 	aac_enqueue_ready(cm);
1064 	aacraid_startio(sc);
1065 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1066 	return(error);
1067 }
1068 
1069 /*
1070  * Command Buffer Management
1071  */
1072 
1073 /*
1074  * Allocate a command.
1075  */
1076 int
1077 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1078 {
1079 	struct aac_command *cm;
1080 
1081 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1082 
1083 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1084 		if (sc->total_fibs < sc->aac_max_fibs) {
1085 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1086 			wakeup(sc->aifthread);
1087 		}
1088 		return (EBUSY);
1089 	}
1090 
1091 	*cmp = cm;
1092 	return(0);
1093 }
1094 
1095 /*
1096  * Release a command back to the freelist.
1097  */
1098 void
1099 aacraid_release_command(struct aac_command *cm)
1100 {
1101 	struct aac_event *event;
1102 	struct aac_softc *sc;
1103 
1104 	sc = cm->cm_sc;
1105 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1106 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1107 
1108 	/* (re)initialize the command/FIB */
1109 	cm->cm_sgtable = NULL;
1110 	cm->cm_flags = 0;
1111 	cm->cm_complete = NULL;
1112 	cm->cm_ccb = NULL;
1113 	cm->cm_passthr_dmat = 0;
1114 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1115 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1116 	cm->cm_fib->Header.Unused = 0;
1117 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1118 
1119 	/*
1120 	 * These are duplicated in aac_start to cover the case where an
1121 	 * intermediate stage may have destroyed them.  They're left
1122 	 * initialized here for debugging purposes only.
1123 	 */
1124 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1125 	cm->cm_fib->Header.Handle = 0;
1126 
1127 	aac_enqueue_free(cm);
1128 
1129 	/*
1130 	 * Dequeue all events so that there's no risk of events getting
1131 	 * stranded.
1132 	 */
1133 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1134 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1135 		event->ev_callback(sc, event, event->ev_arg);
1136 	}
1137 }
1138 
1139 /*
1140  * Map helper for command/FIB allocation.
1141  */
1142 static void
1143 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1144 {
1145 	uint64_t	*fibphys;
1146 
1147 	fibphys = (uint64_t *)arg;
1148 
1149 	*fibphys = segs[0].ds_addr;
1150 }
1151 
1152 /*
1153  * Allocate and initialize commands/FIBs for this adapter.
1154  */
1155 static int
1156 aac_alloc_commands(struct aac_softc *sc)
1157 {
1158 	struct aac_command *cm;
1159 	struct aac_fibmap *fm;
1160 	uint64_t fibphys;
1161 	int i, error;
1162 	u_int32_t maxsize;
1163 
1164 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1165 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1166 
1167 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1168 		return (ENOMEM);
1169 
1170 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1171 	if (fm == NULL)
1172 		return (ENOMEM);
1173 
1174 	mtx_unlock(&sc->aac_io_lock);
1175 	/* allocate the FIBs in DMAable memory and load them */
1176 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1177 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1178 		device_printf(sc->aac_dev,
1179 			      "Not enough contiguous memory available.\n");
1180 		free(fm, M_AACRAIDBUF);
1181 		mtx_lock(&sc->aac_io_lock);
1182 		return (ENOMEM);
1183 	}
1184 
1185 	maxsize = sc->aac_max_fib_size + 31;
1186 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1187 		maxsize += sizeof(struct aac_fib_xporthdr);
1188 	/* Ignore errors since this doesn't bounce */
1189 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1190 			      sc->aac_max_fibs_alloc * maxsize,
1191 			      aac_map_command_helper, &fibphys, 0);
1192 	mtx_lock(&sc->aac_io_lock);
1193 
1194 	/* initialize constant fields in the command structure */
1195 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1196 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1197 		cm = sc->aac_commands + sc->total_fibs;
1198 		fm->aac_commands = cm;
1199 		cm->cm_sc = sc;
1200 		cm->cm_fib = (struct aac_fib *)
1201 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1202 		cm->cm_fibphys = fibphys + i * maxsize;
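		/*
		 * Align each FIB on a 32-byte boundary; with the TYPE1 comm.
		 * interface, additionally leave room for the transport header
		 * that precedes the FIB in memory.
		 */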
1203 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1204 			u_int64_t fibphys_aligned;
1205 			fibphys_aligned =
1206 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1207 			cm->cm_fib = (struct aac_fib *)
1208 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1209 			cm->cm_fibphys = fibphys_aligned;
1210 		} else {
1211 			u_int64_t fibphys_aligned;
1212 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1213 			cm->cm_fib = (struct aac_fib *)
1214 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1215 			cm->cm_fibphys = fibphys_aligned;
1216 		}
1217 		cm->cm_index = sc->total_fibs;
1218 
1219 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1220 					       &cm->cm_datamap)) != 0)
1221 			break;
1222 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1223 			aacraid_release_command(cm);
1224 		sc->total_fibs++;
1225 	}
1226 
1227 	if (i > 0) {
1228 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1229 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1230 		return (0);
1231 	}
1232 
1233 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1234 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1235 	free(fm, M_AACRAIDBUF);
1236 	return (ENOMEM);
1237 }
1238 
1239 /*
1240  * Free FIBs owned by this adapter.
1241  */
1242 static void
1243 aac_free_commands(struct aac_softc *sc)
1244 {
1245 	struct aac_fibmap *fm;
1246 	struct aac_command *cm;
1247 	int i;
1248 
1249 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1250 
1251 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1252 
1253 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1254 		/*
1255 		 * We check against total_fibs to handle partially
1256 		 * allocated blocks.
1257 		 */
1258 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1259 			cm = fm->aac_commands + i;
1260 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1261 		}
1262 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1263 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1264 		free(fm, M_AACRAIDBUF);
1265 	}
1266 }
1267 
1268 /*
1269  * Command-mapping helper function - populate this command's s/g table.
1270  */
1271 void
1272 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1273 {
1274 	struct aac_softc *sc;
1275 	struct aac_command *cm;
1276 	struct aac_fib *fib;
1277 	int i;
1278 
1279 	cm = (struct aac_command *)arg;
1280 	sc = cm->cm_sc;
1281 	fib = cm->cm_fib;
1282 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1283 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1284 
1285 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1286 		return;
1287 
1288 	/* copy into the FIB */
1289 	if (cm->cm_sgtable != NULL) {
1290 		if (fib->Header.Command == RawIo2) {
1291 			struct aac_raw_io2 *raw;
1292 			struct aac_sge_ieee1212 *sg;
1293 			u_int32_t min_size = PAGE_SIZE, cur_size;
1294 			int conformable = TRUE;
1295 
1296 			raw = (struct aac_raw_io2 *)&fib->data[0];
1297 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1298 			raw->sgeCnt = nseg;
1299 
1300 			for (i = 0; i < nseg; i++) {
1301 				cur_size = segs[i].ds_len;
1302 				sg[i].addrHigh = 0;
1303 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1304 				sg[i].length = cur_size;
1305 				sg[i].flags = 0;
1306 				if (i == 0) {
1307 					raw->sgeFirstSize = cur_size;
1308 				} else if (i == 1) {
1309 					raw->sgeNominalSize = cur_size;
1310 					min_size = cur_size;
1311 				} else if ((i+1) < nseg &&
1312 					cur_size != raw->sgeNominalSize) {
1313 					conformable = FALSE;
1314 					if (cur_size < min_size)
1315 						min_size = cur_size;
1316 				}
1317 			}
1318 
1319 			/* not conformable: evaluate required sg elements */
1320 			if (!conformable) {
1321 				int j, err_found, nseg_new = nseg;
1322 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1323 					err_found = FALSE;
1324 					nseg_new = 2;
1325 					for (j = 1; j < nseg - 1; ++j) {
1326 						if (sg[j].length % (i*PAGE_SIZE)) {
1327 							err_found = TRUE;
1328 							break;
1329 						}
1330 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1331 					}
1332 					if (!err_found)
1333 						break;
1334 				}
1335 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1336 					!(sc->hint_flags & 4))
1337 					nseg = aac_convert_sgraw2(sc,
1338 						raw, i, nseg, nseg_new);
1339 			} else {
1340 				raw->flags |= RIO2_SGL_CONFORMANT;
1341 			}
1342 
1343 			/* update the FIB size for the s/g count */
1344 			fib->Header.Size += nseg *
1345 				sizeof(struct aac_sge_ieee1212);
1346 
1347 		} else if (fib->Header.Command == RawIo) {
1348 			struct aac_sg_tableraw *sg;
1349 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1350 			sg->SgCount = nseg;
1351 			for (i = 0; i < nseg; i++) {
1352 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1353 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1354 				sg->SgEntryRaw[i].Next = 0;
1355 				sg->SgEntryRaw[i].Prev = 0;
1356 				sg->SgEntryRaw[i].Flags = 0;
1357 			}
1358 			/* update the FIB size for the s/g count */
1359 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1360 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1361 			struct aac_sg_table *sg;
1362 			sg = cm->cm_sgtable;
1363 			sg->SgCount = nseg;
1364 			for (i = 0; i < nseg; i++) {
1365 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1366 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1367 			}
1368 			/* update the FIB size for the s/g count */
1369 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1370 		} else {
1371 			struct aac_sg_table64 *sg;
1372 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1373 			sg->SgCount = nseg;
1374 			for (i = 0; i < nseg; i++) {
1375 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1376 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1377 			}
1378 			/* update the FIB size for the s/g count */
1379 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1380 		}
1381 	}
1382 
1383 	/* Fix up the address values in the FIB.  Use the command array index
1384 	 * instead of a pointer since these fields are only 32 bits.  Shift
1385 	 * the SenderFibAddress over to make room for the fast response bit
1386 	 * and for the AIF bit
1387 	 */
1388 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1389 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1390 
1391 	/* save a pointer to the command for speedy reverse-lookup */
1392 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1393 
1394 	if (cm->cm_passthr_dmat == 0) {
1395 		if (cm->cm_flags & AAC_CMD_DATAIN)
1396 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1397 							BUS_DMASYNC_PREREAD);
1398 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1399 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1400 							BUS_DMASYNC_PREWRITE);
1401 	}
1402 
1403 	cm->cm_flags |= AAC_CMD_MAPPED;
1404 
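	/*
	 * Deliver the command: AAC_CMD_WAIT commands and controllers running
	 * in sync-only mode use the synchronous SYNCFIB mailbox path, all
	 * others go through the asynchronous send routine, polling briefly
	 * if the adapter will not accept the FIB right away.
	 */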
1405 	if (cm->cm_flags & AAC_CMD_WAIT) {
1406 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1407 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1408 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1409 		u_int32_t wait = 0;
1410 		sc->aac_sync_cm = cm;
1411 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1412 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1413 	} else {
1414 		int count = 10000000L;
1415 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1416 			if (--count == 0) {
1417 				aac_unmap_command(cm);
1418 				sc->flags |= AAC_QUEUE_FRZN;
1419 				aac_requeue_ready(cm);
				/* give up: the command has been requeued */
				break;
1420 			}
1421 			DELAY(5);			/* wait 5 usec. */
1422 		}
1423 	}
1424 }
1425 
1426 
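/*
 * Re-pack a non-conformant RawIo2 scatter/gather list: every element except
 * the first and the last is split into equal chunks of (pages * PAGE_SIZE)
 * bytes so that all middle elements share the same nominal size, allowing
 * RIO2_SGL_CONFORMANT to be set.
 */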
1427 static int
1428 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1429 				   int pages, int nseg, int nseg_new)
1430 {
1431 	struct aac_sge_ieee1212 *sge;
1432 	int i, j, pos;
1433 	u_int32_t addr_low;
1434 
1435 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1436 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1437 	if (sge == NULL)
1438 		return nseg;
1439 
1440 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1441 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1442 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1443 			sge[pos].addrLow = addr_low;
1444 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1445 			if (addr_low < raw->sge[i].addrLow)
1446 				sge[pos].addrHigh++;
1447 			sge[pos].length = pages * PAGE_SIZE;
1448 			sge[pos].flags = 0;
1449 			pos++;
1450 		}
1451 	}
1452 	sge[pos] = raw->sge[nseg-1];
1453 	for (i = 1; i < nseg_new; ++i)
1454 		raw->sge[i] = sge[i];
1455 
1456 	free(sge, M_AACRAIDBUF);
1457 	raw->sgeCnt = nseg_new;
1458 	raw->flags |= RIO2_SGL_CONFORMANT;
1459 	raw->sgeNominalSize = pages * PAGE_SIZE;
1460 	return nseg_new;
1461 }
1462 
1463 
1464 /*
1465  * Unmap a command from controller-visible space.
1466  */
1467 static void
1468 aac_unmap_command(struct aac_command *cm)
1469 {
1470 	struct aac_softc *sc;
1471 
1472 	sc = cm->cm_sc;
1473 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1474 
1475 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1476 		return;
1477 
1478 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1479 		if (cm->cm_flags & AAC_CMD_DATAIN)
1480 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1481 					BUS_DMASYNC_POSTREAD);
1482 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1483 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1484 					BUS_DMASYNC_POSTWRITE);
1485 
1486 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1487 	}
1488 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1489 }
1490 
1491 /*
1492  * Hardware Interface
1493  */
1494 
1495 /*
1496  * Initialize the adapter.
1497  */
1498 static void
1499 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1500 {
1501 	struct aac_softc *sc;
1502 
1503 	sc = (struct aac_softc *)arg;
1504 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1505 
1506 	sc->aac_common_busaddr = segs[0].ds_addr;
1507 }
1508 
1509 static int
1510 aac_check_firmware(struct aac_softc *sc)
1511 {
1512 	u_int32_t code, major, minor, maxsize;
1513 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1514 	time_t then;
1515 
1516 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1517 
1518 	/* check if flash update is running */
1519 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1520 		then = time_uptime;
1521 		do {
1522 			code = AAC_GET_FWSTATUS(sc);
1523 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1524 				device_printf(sc->aac_dev,
1525 						  "FATAL: controller not coming ready, "
1526 						   "status %x\n", code);
1527 				return(ENXIO);
1528 			}
1529 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1530 		/*
1531 		 * Delay 10 seconds: the firmware is performing a soft reset
1532 		 * right now, so do not read the scratch pad register yet.
1533 		 */
1534 		waitCount = 10 * 10000;
1535 		while (waitCount) {
1536 			DELAY(100);		/* delay 100 microseconds */
1537 			waitCount--;
1538 		}
1539 	}
1540 
1541 	/*
1542 	 * Wait for the adapter to come ready.
1543 	 */
1544 	then = time_uptime;
1545 	do {
1546 		code = AAC_GET_FWSTATUS(sc);
1547 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1548 			device_printf(sc->aac_dev,
1549 				      "FATAL: controller not coming ready, "
1550 					   "status %x\n", code);
1551 			return(ENXIO);
1552 		}
1553 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1554 
1555 	/*
1556 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1557 	 * firmware version 1.x are not compatible with this driver.
1558 	 */
1559 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1560 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1561 				     NULL, NULL)) {
1562 			device_printf(sc->aac_dev,
1563 				      "Error reading firmware version\n");
1564 			return (EIO);
1565 		}
1566 
1567 		/* These numbers are stored as ASCII! */
1568 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1569 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1570 		if (major == 1) {
1571 			device_printf(sc->aac_dev,
1572 			    "Firmware version %d.%d is not supported.\n",
1573 			    major, minor);
1574 			return (EINVAL);
1575 		}
1576 	}
1577 	/*
1578 	 * Retrieve the capabilities/supported options word so we know what
1579 	 * work-arounds to enable.  Some firmware revs don't support this
1580 	 * command.
1581 	 */
1582 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1583 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1584 			device_printf(sc->aac_dev,
1585 			     "RequestAdapterInfo failed\n");
1586 			return (EIO);
1587 		}
1588 	} else {
1589 		options = AAC_GET_MAILBOX(sc, 1);
1590 		atu_size = AAC_GET_MAILBOX(sc, 2);
1591 		sc->supported_options = options;
1592 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1593 
1594 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1595 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1596 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1597 		if (options & AAC_SUPPORTED_NONDASD)
1598 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1599 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1600 			&& (sizeof(bus_addr_t) > 4)
1601 			&& (sc->hint_flags & 0x1)) {
1602 			device_printf(sc->aac_dev,
1603 			    "Enabling 64-bit address support\n");
1604 			sc->flags |= AAC_FLAGS_SG_64BIT;
1605 		}
1606 		if (sc->aac_if.aif_send_command) {
1607 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1608 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1609 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1610 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1611 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1612 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1613 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1614 		}
1615 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1616 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1617 	}
1618 
1619 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1620 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1621 		return (ENXIO);
1622 	}
1623 
1624 	if (sc->hint_flags & 2) {
1625 		device_printf(sc->aac_dev,
1626 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1627 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1628 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1629 		device_printf(sc->aac_dev,
1630 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1631 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1632 	}
1633 
1634 	/* Check for broken hardware that does a lower number of commands */
1635 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1636 
1637 	/* Remap mem. resource, if required */
1638 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1639 		bus_release_resource(
1640 			sc->aac_dev, SYS_RES_MEMORY,
1641 			sc->aac_regs_rid0, sc->aac_regs_res0);
1642 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1643 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1644 			atu_size, RF_ACTIVE);
1645 		if (sc->aac_regs_res0 == NULL) {
1646 			sc->aac_regs_res0 = bus_alloc_resource_any(
1647 				sc->aac_dev, SYS_RES_MEMORY,
1648 				&sc->aac_regs_rid0, RF_ACTIVE);
1649 			if (sc->aac_regs_res0 == NULL) {
1650 				device_printf(sc->aac_dev,
1651 					"couldn't allocate register window\n");
1652 				return (ENXIO);
1653 			}
1654 		}
1655 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1656 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1657 	}
1658 
1659 	/* Read preferred settings */
1660 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1661 	sc->aac_max_sectors = 128;				/* 64KB */
1662 	sc->aac_max_aif = 1;
1663 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1664 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1665 		 - sizeof(struct aac_blockwrite64))
1666 		 / sizeof(struct aac_sg_entry64);
1667 	else
1668 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1669 		 - sizeof(struct aac_blockwrite))
1670 		 / sizeof(struct aac_sg_entry);
1671 
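	/*
	 * Ask the firmware for its preferred settings (maximum FIB size,
	 * sectors per I/O, s/g table size, FIB count, AIF count and MSI-X
	 * vector count) and override the defaults above where supported.
	 */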
1672 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1673 		options = AAC_GET_MAILBOX(sc, 1);
1674 		sc->aac_max_fib_size = (options & 0xFFFF);
1675 		sc->aac_max_sectors = (options >> 16) << 1;
1676 		options = AAC_GET_MAILBOX(sc, 2);
1677 		sc->aac_sg_tablesize = (options >> 16);
1678 		options = AAC_GET_MAILBOX(sc, 3);
1679 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1680 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1681 			sc->aac_max_fibs = (options & 0xFFFF);
1682 		options = AAC_GET_MAILBOX(sc, 4);
1683 		sc->aac_max_aif = (options & 0xFFFF);
1684 		options = AAC_GET_MAILBOX(sc, 5);
1685 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1686 	}
1687 
1688 	maxsize = sc->aac_max_fib_size + 31;
1689 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1690 		maxsize += sizeof(struct aac_fib_xporthdr);
1691 	if (maxsize > PAGE_SIZE) {
1692 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1693 		maxsize = PAGE_SIZE;
1694 	}
1695 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1696 
1697 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1698 		sc->flags |= AAC_FLAGS_RAW_IO;
1699 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1700 	}
1701 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1702 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1703 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1704 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1705 	}
1706 
1707 #ifdef AACRAID_DEBUG
1708 	aacraid_get_fw_debug_buffer(sc);
1709 #endif
1710 	return (0);
1711 }
1712 
1713 static int
1714 aac_init(struct aac_softc *sc)
1715 {
1716 	struct aac_adapter_init	*ip;
1717 	int i, error;
1718 
1719 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1720 
1721 	/* reset rrq index */
1722 	sc->aac_fibs_pushed_no = 0;
1723 	for (i = 0; i < sc->aac_max_msix; i++)
1724 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1725 
1726 	/*
1727 	 * Fill in the init structure.  This tells the adapter about the
1728 	 * physical location of various important shared data structures.
1729 	 */
1730 	ip = &sc->aac_common->ac_init;
1731 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1732 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1733 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1734 		sc->flags |= AAC_FLAGS_RAW_IO;
1735 	}
1736 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1737 
1738 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1739 					 offsetof(struct aac_common, ac_fibs);
1740 	ip->AdapterFibsVirtualAddress = 0;
1741 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1742 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1743 
1744 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1745 				  offsetof(struct aac_common, ac_printf);
1746 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1747 
1748 	/*
1749 	 * The adapter assumes that pages are 4K in size, except on some
1750  	 * broken firmware versions that do the page->byte conversion twice,
1751 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1752 	 * Round up since the granularity is so high.
1753 	 */
1754 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1755 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1756 		ip->HostPhysMemPages =
1757 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1758 	}
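	/*
	 * Illustrative arithmetic (not from the code), assuming
	 * AAC_PAGE_SIZE is the adapter's 4K page size: 4GB of host memory
	 * gives 1048576 pages; on BROKEN_MEMMAP firmware the second,
	 * rounded-up division reports 257, which the firmware then treats
	 * as 16MB units.
	 */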
1759 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1760 
1761 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1762 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1763 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1764 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1765 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1766 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1767 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1768 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1769 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1770 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1771 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1772 	}
1773 	ip->MaxNumAif = sc->aac_max_aif;
1774 	ip->HostRRQ_AddrLow =
1775 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1776 	/* always 32-bit address */
1777 	ip->HostRRQ_AddrHigh = 0;
1778 
1779 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1780 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1781 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1782 		device_printf(sc->aac_dev, "Power Management enabled\n");
1783 	}
1784 
1785 	ip->MaxIoCommands = sc->aac_max_fibs;
1786 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1787 	ip->MaxFibSize = sc->aac_max_fib_size;
1788 
1789 	/*
1790 	 * Do controller-type-specific initialisation
1791 	 */
1792 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1793 
1794 	/*
1795 	 * Give the init structure to the controller.
1796 	 */
1797 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1798 			     sc->aac_common_busaddr +
1799 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1800 			     NULL, NULL)) {
1801 		device_printf(sc->aac_dev,
1802 			      "error establishing init structure\n");
1803 		error = EIO;
1804 		goto out;
1805 	}
1806 
1807 	/*
1808 	 * Check configuration issues
1809 	 */
1810 	if ((error = aac_check_config(sc)) != 0)
1811 		goto out;
1812 
1813 	error = 0;
1814 out:
1815 	return(error);
1816 }
1817 
1818 static void
1819 aac_define_int_mode(struct aac_softc *sc)
1820 {
1821 	device_t dev;
1822 	int cap, msi_count, error = 0;
1823 	uint32_t val;
1824 
1825 	dev = sc->aac_dev;
1826 
1827 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1828 		device_printf(dev, "using line interrupts\n");
1829 		sc->aac_max_msix = 1;
1830 		sc->aac_vector_cap = sc->aac_max_fibs;
1831 		return;
1832 	}
1833 
1834 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1835 	if (sc->aac_max_msix == 0) {
1836 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1837 			msi_count = 1;
1838 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1839 				device_printf(dev, "alloc msi failed - err=%d; "
1840 				    "will use INTx\n", error);
1841 				pci_release_msi(dev);
1842 			} else {
1843 				sc->msi_tupelo = TRUE;
1844 			}
1845 		}
1846 		if (sc->msi_tupelo)
1847 			device_printf(dev, "using MSI interrupts\n");
1848 		else
1849 			device_printf(dev, "using line interrupts\n");
1850 
1851 		sc->aac_max_msix = 1;
1852 		sc->aac_vector_cap = sc->aac_max_fibs;
1853 		return;
1854 	}
1855 
1856 	/* OS capability */
1857 	msi_count = pci_msix_count(dev);
1858 	if (msi_count > AAC_MAX_MSIX)
1859 		msi_count = AAC_MAX_MSIX;
1860 	if (msi_count > sc->aac_max_msix)
1861 		msi_count = sc->aac_max_msix;
1862 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1863 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1864 				   "will try MSI\n", msi_count, error);
1865 		pci_release_msi(dev);
1866 	} else {
1867 		sc->msi_enabled = TRUE;
1868 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1869 			msi_count);
1870 	}
1871 
1872 	if (!sc->msi_enabled) {
1873 		msi_count = 1;
1874 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1875 			device_printf(dev, "alloc msi failed - err=%d; "
1876 				           "will use INTx\n", error);
1877 			pci_release_msi(dev);
1878 		} else {
1879 			sc->msi_enabled = TRUE;
1880 			device_printf(dev, "using MSI interrupts\n");
1881 		}
1882 	}
1883 
1884 	if (sc->msi_enabled) {
1885 		/* now read controller capability from PCI config. space */
1886 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1887 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1888 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1889 			pci_release_msi(dev);
1890 			sc->msi_enabled = FALSE;
1891 		}
1892 	}
1893 
1894 	if (!sc->msi_enabled) {
1895 		device_printf(dev, "using legacy interrupts\n");
1896 		sc->aac_max_msix = 1;
1897 	} else {
1898 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1899 		if (sc->aac_max_msix > msi_count)
1900 			sc->aac_max_msix = msi_count;
1901 	}
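	/*
	 * Split the FIB budget evenly across the enabled vectors; this
	 * per-vector cap is used by aac_src_send_command() when picking a
	 * response vector.
	 */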
1902 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1903 
1904 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1905 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1906 }
1907 
1908 static int
1909 aac_find_pci_capability(struct aac_softc *sc, int cap)
1910 {
1911 	device_t dev;
1912 	uint32_t status;
1913 	uint8_t ptr;
1914 
1915 	dev = sc->aac_dev;
1916 
1917 	status = pci_read_config(dev, PCIR_STATUS, 2);
1918 	if (!(status & PCIM_STATUS_CAPPRESENT))
1919 		return (0);
1920 
1921 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1922 	switch (status & PCIM_HDRTYPE) {
1923 	case 0:
1924 	case 1:
1925 		ptr = PCIR_CAP_PTR;
1926 		break;
1927 	case 2:
1928 		ptr = PCIR_CAP_PTR_2;
1929 		break;
1930 	default:
1931 		return (0);
1932 		break;
1933 	}
1934 	ptr = pci_read_config(dev, ptr, 1);
1935 
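	/*
	 * Walk the singly linked capability list; each entry holds a
	 * capability ID byte and a next-pointer byte.
	 */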
1936 	while (ptr != 0) {
1937 		int next, val;
1938 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1939 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1940 		if (val == cap)
1941 			return (ptr);
1942 		ptr = next;
1943 	}
1944 
1945 	return (0);
1946 }
1947 
1948 static int
1949 aac_setup_intr(struct aac_softc *sc)
1950 {
1951 	int i, msi_count, rid;
1952 	struct resource *res;
1953 	void *tag;
1954 
1955 	msi_count = sc->aac_max_msix;
1956 	rid = (sc->msi_enabled || sc->msi_tupelo) ? 1 : 0;
1957 
1958 	for (i = 0; i < msi_count; i++, rid++) {
1959 		if ((res = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &rid,
1960 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1961 			device_printf(sc->aac_dev, "can't allocate interrupt\n");
1962 			return (EINVAL);
1963 		}
1964 		sc->aac_irq_rid[i] = rid;
1965 		sc->aac_irq[i] = res;
1966 		if (aac_bus_setup_intr(sc->aac_dev, res,
1967 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1968 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1969 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1970 			return (EINVAL);
1971 		}
1972 		sc->aac_msix[i].vector_no = i;
1973 		sc->aac_msix[i].sc = sc;
1974 		sc->aac_intr[i] = tag;
1975 	}
1976 
1977 	return (0);
1978 }
1979 
1980 static int
1981 aac_check_config(struct aac_softc *sc)
1982 {
1983 	struct aac_fib *fib;
1984 	struct aac_cnt_config *ccfg;
1985 	struct aac_cf_status_hdr *cf_shdr;
1986 	int rval;
1987 
1988 	mtx_lock(&sc->aac_io_lock);
1989 	aac_alloc_sync_fib(sc, &fib);
1990 
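	/*
	 * Query the controller's configuration status; if it reports that
	 * pending changes are safe to apply automatically (action at or
	 * below CFACT_PAUSE), commit them with CT_COMMIT_CONFIG.
	 */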
1991 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1992 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1993 	ccfg->Command = VM_ContainerConfig;
1994 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1995 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1996 
1997 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
1998 		sizeof (struct aac_cnt_config));
1999 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2000 	if (rval == 0 && ccfg->Command == ST_OK &&
2001 		ccfg->CTCommand.param[0] == CT_OK) {
2002 		if (cf_shdr->action <= CFACT_PAUSE) {
2003 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2004 			ccfg->Command = VM_ContainerConfig;
2005 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2006 
2007 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2008 				sizeof (struct aac_cnt_config));
2009 			if (rval == 0 && ccfg->Command == ST_OK &&
2010 				ccfg->CTCommand.param[0] == CT_OK) {
2011 				/* successful completion */
2012 				rval = 0;
2013 			} else {
2014 				/* auto commit aborted due to error(s) */
2015 				rval = -2;
2016 			}
2017 		} else {
2018 			/* auto commit aborted due to adapter indicating
2019 			   config. issues too dangerous to auto commit  */
2020 			rval = -3;
2021 		}
2022 	} else {
2023 		/* error */
2024 		rval = -1;
2025 	}
2026 
2027 	aac_release_sync_fib(sc);
2028 	mtx_unlock(&sc->aac_io_lock);
2029 	return(rval);
2030 }
2031 
2032 /*
2033  * Send a synchronous command to the controller and wait for a result.
2034  * Indicate if the controller completed the command with an error status.
2035  */
2036 int
2037 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2038 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2039 		 u_int32_t *sp, u_int32_t *r1)
2040 {
2041 	time_t then;
2042 	u_int32_t status;
2043 
2044 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2045 
2046 	/* populate the mailbox */
2047 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2048 
2049 	/* ensure the sync command doorbell flag is cleared */
2050 	if (!sc->msi_enabled)
2051 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2052 
2053 	/* then set it to signal the adapter */
2054 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2055 
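	/*
	 * A SYNCFIB request whose caller passed a status word of zero is
	 * treated as fire-and-forget: skip the spin below and don't fetch
	 * the mailbox status.
	 */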
2056 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2057 		/* spin waiting for the command to complete */
2058 		then = time_uptime;
2059 		do {
2060 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2061 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2062 				return(EIO);
2063 			}
2064 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2065 
2066 		/* clear the completion flag */
2067 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2068 
2069 		/* get the command status */
2070 		status = AAC_GET_MAILBOX(sc, 0);
2071 		if (sp != NULL)
2072 			*sp = status;
2073 
2074 		/* return parameter */
2075 		if (r1 != NULL)
2076 			*r1 = AAC_GET_MAILBOX(sc, 1);
2077 
2078 		if (status != AAC_SRB_STS_SUCCESS)
2079 			return (-1);
2080 	}
2081 	return(0);
2082 }
2083 
2084 static int
2085 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2086 		 struct aac_fib *fib, u_int16_t datasize)
2087 {
2088 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2089 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2090 
2091 	if (datasize > AAC_FIB_DATASIZE)
2092 		return(EINVAL);
2093 
2094 	/*
2095 	 * Set up the sync FIB
2096 	 */
2097 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2098 				AAC_FIBSTATE_INITIALISED |
2099 				AAC_FIBSTATE_EMPTY;
2100 	fib->Header.XferState |= xferstate;
2101 	fib->Header.Command = command;
2102 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2103 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2104 	fib->Header.SenderSize = sizeof(struct aac_fib);
2105 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2106 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2107 		offsetof(struct aac_common, ac_sync_fib);
2108 
2109 	/*
2110 	 * Give the FIB to the controller, wait for a response.
2111 	 */
2112 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2113 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2114 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2115 		return(EIO);
2116 	}
2117 
2118 	return (0);
2119 }
2120 
2121 /*
2122  * Check for commands that have been outstanding for a suspiciously long time,
2123  * and complain about them.
2124  */
2125 static void
2126 aac_timeout(struct aac_softc *sc)
2127 {
2128 	struct aac_command *cm;
2129 	time_t deadline;
2130 	int timedout;
2131 
2132 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2133 	/*
2134 	 * Traverse the busy command list, bitch about late commands once
2135 	 * only.
2136 	 */
2137 	timedout = 0;
2138 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2139 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2140 		if (cm->cm_timestamp < deadline) {
2141 			device_printf(sc->aac_dev,
2142 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2143 				      cm, (int)(time_uptime-cm->cm_timestamp));
2144 			AAC_PRINT_FIB(sc, cm->cm_fib);
2145 			timedout++;
2146 		}
2147 	}
2148 
2149 	if (timedout)
2150 		aac_reset_adapter(sc);
2151 	aacraid_print_queues(sc);
2152 }
2153 
2154 /*
2155  * Interface Function Vectors
2156  */
2157 
2158 /*
2159  * Read the current firmware status word.
2160  */
2161 static int
2162 aac_src_get_fwstatus(struct aac_softc *sc)
2163 {
2164 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2165 
2166 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2167 }
2168 
2169 /*
2170  * Notify the controller of a change in a given queue
2171  */
2172 static void
2173 aac_src_qnotify(struct aac_softc *sc, int qbit)
2174 {
2175 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2176 
2177 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2178 }
2179 
2180 /*
2181  * Get the interrupt reason bits
2182  */
2183 static int
2184 aac_src_get_istatus(struct aac_softc *sc)
2185 {
2186 	int val;
2187 
2188 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2189 
2190 	if (sc->msi_enabled) {
2191 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2192 		if (val & AAC_MSI_SYNC_STATUS)
2193 			val = AAC_DB_SYNC_COMMAND;
2194 		else
2195 			val = 0;
2196 	} else {
2197 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2198 	}
2199 	return(val);
2200 }
2201 
2202 /*
2203  * Clear some interrupt reason bits
2204  */
2205 static void
2206 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2207 {
2208 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2209 
2210 	if (sc->msi_enabled) {
2211 		if (mask == AAC_DB_SYNC_COMMAND)
2212 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2213 	} else {
2214 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2215 	}
2216 }
2217 
2218 /*
2219  * Populate the mailbox and set the command word
2220  */
2221 static void
2222 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2223 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2224 {
2225 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2226 
2227 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2228 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2229 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2232 }
2233 
2234 static void
2235 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2236 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2237 {
2238 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2239 
2240 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2241 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2242 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2245 }
2246 
2247 /*
2248  * Fetch the immediate command status word
2249  */
2250 static int
2251 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2252 {
2253 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2254 
2255 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2256 }
2257 
2258 static int
2259 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2260 {
2261 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2262 
2263 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2264 }
2265 
2266 /*
2267  * Set/clear interrupt masks
2268  */
2269 static void
2270 aac_src_access_devreg(struct aac_softc *sc, int mode)
2271 {
2272 	u_int32_t val;
2273 
2274 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2275 
2276 	switch (mode) {
2277 	case AAC_ENABLE_INTERRUPT:
2278 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2279 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2280 				           AAC_INT_ENABLE_TYPE1_INTX));
2281 		break;
2282 
2283 	case AAC_DISABLE_INTERRUPT:
2284 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2285 		break;
2286 
2287 	case AAC_ENABLE_MSIX:
2288 		/* set bit 6 */
2289 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2290 		val |= 0x40;
2291 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2292 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2293 		/* unmask int. */
2294 		val = PMC_ALL_INTERRUPT_BITS;
2295 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2296 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2297 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2298 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2299 		break;
2300 
2301 	case AAC_DISABLE_MSIX:
2302 		/* reset bit 6 */
2303 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2304 		val &= ~0x40;
2305 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2306 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2307 		break;
2308 
2309 	case AAC_CLEAR_AIF_BIT:
2310 		/* set bit 5 */
2311 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2312 		val |= 0x20;
2313 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2314 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2315 		break;
2316 
2317 	case AAC_CLEAR_SYNC_BIT:
2318 		/* set bit 4 */
2319 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2320 		val |= 0x10;
2321 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2322 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2323 		break;
2324 
2325 	case AAC_ENABLE_INTX:
2326 		/* set bit 7 */
2327 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2328 		val |= 0x80;
2329 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2330 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 		/* unmask int. */
2332 		val = PMC_ALL_INTERRUPT_BITS;
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2334 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2335 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2336 			val & (~(PMC_GLOBAL_INT_BIT2)));
2337 		break;
2338 
2339 	default:
2340 		break;
2341 	}
2342 }
2343 
2344 /*
2345  * New comm. interface: Send command functions
2346  */
2347 static int
2348 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2349 {
2350 	struct aac_fib_xporthdr *pFibX;
2351 	u_int32_t fibsize, high_addr;
2352 	u_int64_t address;
2353 
2354 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2355 
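	/*
	 * With MSI-X enabled, spread I/O completions across vectors:
	 * start from a round-robin position, skip vectors that already
	 * have aac_vector_cap FIBs outstanding, and fall back to vector 0
	 * if every other vector is full.  The chosen vector is encoded in
	 * the upper 16 bits of the FIB handle.
	 */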
2356 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2357 		sc->aac_max_msix > 1) {
2358 		u_int16_t vector_no, first_choice = 0xffff;
2359 
2360 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2361 		do {
2362 			vector_no += 1;
2363 			if (vector_no == sc->aac_max_msix)
2364 				vector_no = 1;
2365 			if (sc->aac_rrq_outstanding[vector_no] <
2366 				sc->aac_vector_cap)
2367 				break;
2368 			if (0xffff == first_choice)
2369 				first_choice = vector_no;
2370 			else if (vector_no == first_choice)
2371 				break;
2372 		} while (1);
2373 		if (vector_no == first_choice)
2374 			vector_no = 0;
2375 		sc->aac_rrq_outstanding[vector_no]++;
2376 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2377 			sc->aac_fibs_pushed_no = 0;
2378 		else
2379 			sc->aac_fibs_pushed_no++;
2380 
2381 		cm->cm_fib->Header.Handle += (vector_no << 16);
2382 	}
2383 
2384 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2385 		/* Encode the FIB size as the number of 128-byte blocks, minus one */
2386 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2387 		/* Fill new FIB header */
2388 		address = cm->cm_fibphys;
2389 		high_addr = (u_int32_t)(address >> 32);
2390 		if (high_addr == 0L) {
2391 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2392 			cm->cm_fib->Header.u.TimeStamp = 0L;
2393 		} else {
2394 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2395 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2396 		}
2397 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2398 	} else {
2399 		/* Encode the FIB size (incl. transport header) in 128-byte blocks, minus one */
2400 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2401 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2402 		/* Fill XPORT header */
2403 		pFibX = (struct aac_fib_xporthdr *)
2404 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2405 		pFibX->Handle = cm->cm_fib->Header.Handle;
2406 		pFibX->HostAddress = cm->cm_fibphys;
2407 		pFibX->Size = cm->cm_fib->Header.Size;
2408 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2409 		high_addr = (u_int32_t)(address >> 32);
2410 	}
2411 
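	/*
	 * The encoded size is folded into the low bits of the FIB bus
	 * address written to the inbound queue register; it is clamped to
	 * 31, presumably the largest value the hardware field can hold.
	 */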
2412 	if (fibsize > 31)
2413 		fibsize = 31;
2414 	aac_enqueue_busy(cm);
2415 	if (high_addr) {
2416 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2417 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2418 	} else {
2419 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2420 	}
2421 	return 0;
2422 }
2423 
2424 /*
2425  * New comm. interface: get, set outbound queue index
2426  */
2427 static int
2428 aac_src_get_outb_queue(struct aac_softc *sc)
2429 {
2430 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2431 
2432 	return(-1);
2433 }
2434 
2435 static void
2436 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2437 {
2438 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2439 }
2440 
2441 /*
2442  * Debugging and Diagnostics
2443  */
2444 
2445 /*
2446  * Print some information about the controller.
2447  */
2448 static void
2449 aac_describe_controller(struct aac_softc *sc)
2450 {
2451 	struct aac_fib *fib;
2452 	struct aac_adapter_info	*info;
2453 	char *adapter_type = "Adaptec RAID controller";
2454 
2455 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2456 
2457 	mtx_lock(&sc->aac_io_lock);
2458 	aac_alloc_sync_fib(sc, &fib);
2459 
2460 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2461 		fib->data[0] = 0;
2462 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2463 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2464 		else {
2465 			struct aac_supplement_adapter_info *supp_info;
2466 
2467 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2468 			adapter_type = (char *)supp_info->AdapterTypeText;
2469 			sc->aac_feature_bits = supp_info->FeatureBits;
2470 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2471 		}
2472 	}
2473 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2474 		adapter_type,
2475 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2476 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2477 
2478 	fib->data[0] = 0;
2479 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2480 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2481 		aac_release_sync_fib(sc);
2482 		mtx_unlock(&sc->aac_io_lock);
2483 		return;
2484 	}
2485 
2486 	/* save the kernel revision structure for later use */
2487 	info = (struct aac_adapter_info *)&fib->data[0];
2488 	sc->aac_revision = info->KernelRevision;
2489 
2490 	if (bootverbose) {
2491 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2492 		    "(%dMB cache, %dMB execution), %s\n",
2493 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2494 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2495 		    info->BufferMem / (1024 * 1024),
2496 		    info->ExecutionMem / (1024 * 1024),
2497 		    aac_describe_code(aac_battery_platform,
2498 		    info->batteryPlatform));
2499 
2500 		device_printf(sc->aac_dev,
2501 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2502 		    info->KernelRevision.external.comp.major,
2503 		    info->KernelRevision.external.comp.minor,
2504 		    info->KernelRevision.external.comp.dash,
2505 		    info->KernelRevision.buildNumber,
2506 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2507 
2508 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2509 			      sc->supported_options,
2510 			      "\20"
2511 			      "\1SNAPSHOT"
2512 			      "\2CLUSTERS"
2513 			      "\3WCACHE"
2514 			      "\4DATA64"
2515 			      "\5HOSTTIME"
2516 			      "\6RAID50"
2517 			      "\7WINDOW4GB"
2518 			      "\10SCSIUPGD"
2519 			      "\11SOFTERR"
2520 			      "\12NORECOND"
2521 			      "\13SGMAP64"
2522 			      "\14ALARM"
2523 			      "\15NONDASD"
2524 			      "\16SCSIMGT"
2525 			      "\17RAIDSCSI"
2526 			      "\21ADPTINFO"
2527 			      "\22NEWCOMM"
2528 			      "\23ARRAY64BIT"
2529 			      "\24HEATSENSOR");
2530 	}
2531 
2532 	aac_release_sync_fib(sc);
2533 	mtx_unlock(&sc->aac_io_lock);
2534 }
2535 
2536 /*
2537  * Look up a text description of a numeric error code and return a pointer to
2538  * same.
2539  */
2540 static char *
2541 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2542 {
2543 	int i;
2544 
2545 	for (i = 0; table[i].string != NULL; i++)
2546 		if (table[i].code == code)
2547 			return(table[i].string);
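	/*
	 * No match: each lookup table is expected to end with a
	 * NULL-string sentinel followed by a default "unknown" entry,
	 * which is what gets returned here.
	 */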
2548 	return(table[i + 1].string);
2549 }
2550 
2551 /*
2552  * Management Interface
2553  */
2554 
2555 static int
2556 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2557 {
2558 	struct aac_softc *sc;
2559 
2560 	sc = dev->si_drv1;
2561 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2562 	device_busy(sc->aac_dev);
2563 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2564 	return 0;
2565 }
2566 
2567 static int
2568 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2569 {
2570 	union aac_statrequest *as;
2571 	struct aac_softc *sc;
2572 	int error = 0;
2573 
2574 	as = (union aac_statrequest *)arg;
2575 	sc = dev->si_drv1;
2576 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2577 
2578 	switch (cmd) {
2579 	case AACIO_STATS:
2580 		switch (as->as_item) {
2581 		case AACQ_FREE:
2582 		case AACQ_READY:
2583 		case AACQ_BUSY:
2584 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2585 			      sizeof(struct aac_qstat));
2586 			break;
2587 		default:
2588 			error = ENOENT;
2589 			break;
2590 		}
2591 		break;
2592 
2593 	case FSACTL_SENDFIB:
2594 	case FSACTL_SEND_LARGE_FIB:
2595 		arg = *(caddr_t*)arg;
2596 	case FSACTL_LNX_SENDFIB:
2597 	case FSACTL_LNX_SEND_LARGE_FIB:
2598 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2599 		error = aac_ioctl_sendfib(sc, arg);
2600 		break;
2601 	case FSACTL_SEND_RAW_SRB:
2602 		arg = *(caddr_t*)arg;
2603 	case FSACTL_LNX_SEND_RAW_SRB:
2604 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2605 		error = aac_ioctl_send_raw_srb(sc, arg);
2606 		break;
2607 	case FSACTL_AIF_THREAD:
2608 	case FSACTL_LNX_AIF_THREAD:
2609 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2610 		error = EINVAL;
2611 		break;
2612 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2613 		arg = *(caddr_t*)arg;
2614 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2615 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2616 		error = aac_open_aif(sc, arg);
2617 		break;
2618 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2619 		arg = *(caddr_t*)arg;
2620 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2621 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2622 		error = aac_getnext_aif(sc, arg);
2623 		break;
2624 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2625 		arg = *(caddr_t*)arg;
2626 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2627 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2628 		error = aac_close_aif(sc, arg);
2629 		break;
2630 	case FSACTL_MINIPORT_REV_CHECK:
2631 		arg = *(caddr_t*)arg;
2632 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2633 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2634 		error = aac_rev_check(sc, arg);
2635 		break;
2636 	case FSACTL_QUERY_DISK:
2637 		arg = *(caddr_t*)arg;
2638 	case FSACTL_LNX_QUERY_DISK:
2639 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2640 		error = aac_query_disk(sc, arg);
2641 		break;
2642 	case FSACTL_DELETE_DISK:
2643 	case FSACTL_LNX_DELETE_DISK:
2644 		/*
2645 		 * We don't trust userland to tell us when to delete a
2646 		 * container; instead we rely on an AIF coming from the
2647 		 * controller.
2648 		 */
2649 		error = 0;
2650 		break;
2651 	case FSACTL_GET_PCI_INFO:
2652 		arg = *(caddr_t*)arg;
2653 	case FSACTL_LNX_GET_PCI_INFO:
2654 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2655 		error = aac_get_pci_info(sc, arg);
2656 		break;
2657 	case FSACTL_GET_FEATURES:
2658 		arg = *(caddr_t*)arg;
2659 	case FSACTL_LNX_GET_FEATURES:
2660 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2661 		error = aac_supported_features(sc, arg);
2662 		break;
2663 	default:
2664 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2665 		error = EINVAL;
2666 		break;
2667 	}
2668 	return(error);
2669 }
2670 
2671 static int
2672 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2673 {
2674 	struct aac_softc *sc;
2675 	struct aac_fib_context *ctx;
2676 	int revents;
2677 
2678 	sc = dev->si_drv1;
2679 	revents = 0;
2680 
2681 	mtx_lock(&sc->aac_io_lock);
2682 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2683 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2684 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2685 				revents |= poll_events & (POLLIN | POLLRDNORM);
2686 				break;
2687 			}
2688 		}
2689 	}
2690 	mtx_unlock(&sc->aac_io_lock);
2691 
2692 	if (revents == 0) {
2693 		if (poll_events & (POLLIN | POLLRDNORM))
2694 			selrecord(td, &sc->rcv_select);
2695 	}
2696 
2697 	return (revents);
2698 }
2699 
2700 static void
2701 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2702 {
2703 
2704 	switch (event->ev_type) {
2705 	case AAC_EVENT_CMFREE:
2706 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2707 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2708 			aacraid_add_event(sc, event);
2709 			return;
2710 		}
2711 		free(event, M_AACRAIDBUF);
2712 		wakeup(arg);
2713 		break;
2714 	default:
2715 		break;
2716 	}
2717 }
2718 
2719 /*
2720  * Send a FIB supplied from userspace
2721  */
2722 static int
2723 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2724 {
2725 	struct aac_command *cm;
2726 	int size, error;
2727 
2728 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2729 
2730 	cm = NULL;
2731 
2732 	/*
2733 	 * Get a command
2734 	 */
2735 	mtx_lock(&sc->aac_io_lock);
2736 	if (aacraid_alloc_command(sc, &cm)) {
2737 		struct aac_event *event;
2738 
2739 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2740 		    M_NOWAIT | M_ZERO);
2741 		if (event == NULL) {
2742 			error = EBUSY;
2743 			mtx_unlock(&sc->aac_io_lock);
2744 			goto out;
2745 		}
2746 		event->ev_type = AAC_EVENT_CMFREE;
2747 		event->ev_callback = aac_ioctl_event;
2748 		event->ev_arg = &cm;
2749 		aacraid_add_event(sc, event);
2750 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2751 	}
2752 	mtx_unlock(&sc->aac_io_lock);
2753 
2754 	/*
2755 	 * Fetch the FIB header, then re-copy to get data as well.
2756 	 */
2757 	if ((error = copyin(ufib, cm->cm_fib,
2758 			    sizeof(struct aac_fib_header))) != 0)
2759 		goto out;
2760 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2761 	if (size > sc->aac_max_fib_size) {
2762 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2763 			      size, sc->aac_max_fib_size);
2764 		size = sc->aac_max_fib_size;
2765 	}
2766 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2767 		goto out;
2768 	cm->cm_fib->Header.Size = size;
2769 	cm->cm_timestamp = time_uptime;
2770 	cm->cm_datalen = 0;
2771 
2772 	/*
2773 	 * Pass the FIB to the controller, wait for it to complete.
2774 	 */
2775 	mtx_lock(&sc->aac_io_lock);
2776 	error = aacraid_wait_command(cm);
2777 	mtx_unlock(&sc->aac_io_lock);
2778 	if (error != 0) {
2779 		device_printf(sc->aac_dev,
2780 			      "aacraid_wait_command return %d\n", error);
2781 		goto out;
2782 	}
2783 
2784 	/*
2785 	 * Copy the FIB and data back out to the caller.
2786 	 */
2787 	size = cm->cm_fib->Header.Size;
2788 	if (size > sc->aac_max_fib_size) {
2789 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2790 			      size, sc->aac_max_fib_size);
2791 		size = sc->aac_max_fib_size;
2792 	}
2793 	error = copyout(cm->cm_fib, ufib, size);
2794 
2795 out:
2796 	if (cm != NULL) {
2797 		mtx_lock(&sc->aac_io_lock);
2798 		aacraid_release_command(cm);
2799 		mtx_unlock(&sc->aac_io_lock);
2800 	}
2801 	return(error);
2802 }
2803 
2804 /*
2805  * Send a passthrough FIB supplied from userspace
2806  */
2807 static int
2808 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2809 {
2810 	struct aac_command *cm;
2811 	struct aac_fib *fib;
2812 	struct aac_srb *srbcmd;
2813 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2814 	void *user_reply;
2815 	int error, transfer_data = 0;
2816 	bus_dmamap_t orig_map = 0;
2817 	u_int32_t fibsize = 0;
2818 	u_int64_t srb_sg_address;
2819 	u_int32_t srb_sg_bytecount;
2820 
2821 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2822 
2823 	cm = NULL;
2824 
2825 	mtx_lock(&sc->aac_io_lock);
2826 	if (aacraid_alloc_command(sc, &cm)) {
2827 		struct aac_event *event;
2828 
2829 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2830 		    M_NOWAIT | M_ZERO);
2831 		if (event == NULL) {
2832 			error = EBUSY;
2833 			mtx_unlock(&sc->aac_io_lock);
2834 			goto out;
2835 		}
2836 		event->ev_type = AAC_EVENT_CMFREE;
2837 		event->ev_callback = aac_ioctl_event;
2838 		event->ev_arg = &cm;
2839 		aacraid_add_event(sc, event);
2840 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2841 	}
2842 	mtx_unlock(&sc->aac_io_lock);
2843 
2844 	cm->cm_data = NULL;
2845 	/* save original dma map */
2846 	orig_map = cm->cm_datamap;
2847 
2848 	fib = cm->cm_fib;
2849 	srbcmd = (struct aac_srb *)fib->data;
2850 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2851 	    sizeof (u_int32_t))) != 0)
2852 		goto out;
2853 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2854 		error = EINVAL;
2855 		goto out;
2856 	}
2857 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2858 		goto out;
2859 
2860 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2861 	srbcmd->retry_limit = 0;	/* obsolete */
2862 
2863 	/* only one sg element from userspace supported */
2864 	if (srbcmd->sg_map.SgCount > 1) {
2865 		error = EINVAL;
2866 		goto out;
2867 	}
2868 	/* check fibsize */
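	/*
	 * The declared FIB size also tells us whether the single s/g entry
	 * was passed in 32-bit or 64-bit format; 64-bit entries are only
	 * accepted on LP64 kernels.
	 */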
2869 	if (fibsize == (sizeof(struct aac_srb) +
2870 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2871 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2872 		struct aac_sg_entry sg;
2873 
2874 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2875 			goto out;
2876 
2877 		srb_sg_bytecount = sg.SgByteCount;
2878 		srb_sg_address = (u_int64_t)sg.SgAddress;
2879 	} else if (fibsize == (sizeof(struct aac_srb) +
2880 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2881 #ifdef __LP64__
2882 		struct aac_sg_entry64 *sgp =
2883 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2884 		struct aac_sg_entry64 sg;
2885 
2886 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2887 			goto out;
2888 
2889 		srb_sg_bytecount = sg.SgByteCount;
2890 		srb_sg_address = sg.SgAddress;
2891 #else
2892 		error = EINVAL;
2893 		goto out;
2894 #endif
2895 	} else {
2896 		error = EINVAL;
2897 		goto out;
2898 	}
2899 	user_reply = (char *)arg + fibsize;
2900 	srbcmd->data_len = srb_sg_bytecount;
2901 	if (srbcmd->sg_map.SgCount == 1)
2902 		transfer_data = 1;
2903 
2904 	if (transfer_data) {
2905 		/*
2906 		 * Create DMA tag for the passthr. data buffer and allocate it.
2907 		 */
2908 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2909 			1, 0,			/* algnmnt, boundary */
2910 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2911 			BUS_SPACE_MAXADDR_32BIT :
2912 			0x7fffffff,		/* lowaddr */
2913 			BUS_SPACE_MAXADDR, 	/* highaddr */
2914 			NULL, NULL, 		/* filter, filterarg */
2915 			srb_sg_bytecount, 	/* size */
2916 			sc->aac_sg_tablesize,	/* nsegments */
2917 			srb_sg_bytecount, 	/* maxsegsize */
2918 			0,			/* flags */
2919 			NULL, NULL,		/* No locking needed */
2920 			&cm->cm_passthr_dmat)) {
2921 			error = ENOMEM;
2922 			goto out;
2923 		}
2924 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2925 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2926 			error = ENOMEM;
2927 			goto out;
2928 		}
2929 		/* fill some cm variables */
2930 		cm->cm_datalen = srb_sg_bytecount;
2931 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2932 			cm->cm_flags |= AAC_CMD_DATAIN;
2933 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2934 			cm->cm_flags |= AAC_CMD_DATAOUT;
2935 
2936 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2937 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2938 				cm->cm_data, cm->cm_datalen)) != 0)
2939 				goto out;
2940 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2941 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2942 				BUS_DMASYNC_PREWRITE);
2943 		}
2944 	}
2945 
2946 	/* build the FIB */
2947 	fib->Header.Size = sizeof(struct aac_fib_header) +
2948 		sizeof(struct aac_srb);
2949 	fib->Header.XferState =
2950 		AAC_FIBSTATE_HOSTOWNED   |
2951 		AAC_FIBSTATE_INITIALISED |
2952 		AAC_FIBSTATE_EMPTY	 |
2953 		AAC_FIBSTATE_FROMHOST	 |
2954 		AAC_FIBSTATE_REXPECTED   |
2955 		AAC_FIBSTATE_NORM	 |
2956 		AAC_FIBSTATE_ASYNC;
2957 
2958 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2959 		ScsiPortCommandU64 : ScsiPortCommand;
2960 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2961 
2962 	/* send command */
2963 	if (transfer_data) {
2964 		bus_dmamap_load(cm->cm_passthr_dmat,
2965 			cm->cm_datamap, cm->cm_data,
2966 			cm->cm_datalen,
2967 			aacraid_map_command_sg, cm, 0);
2968 	} else {
2969 		aacraid_map_command_sg(cm, NULL, 0, 0);
2970 	}
2971 
2972 	/* wait for completion */
2973 	mtx_lock(&sc->aac_io_lock);
2974 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2975 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2976 	mtx_unlock(&sc->aac_io_lock);
2977 
2978 	/* copy data */
2979 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2980 		if ((error = copyout(cm->cm_data,
2981 			(void *)(uintptr_t)srb_sg_address,
2982 			cm->cm_datalen)) != 0)
2983 			goto out;
2984 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2985 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2986 				BUS_DMASYNC_POSTREAD);
2987 	}
2988 
2989 	/* status */
2990 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2991 
2992 out:
2993 	if (cm && cm->cm_data) {
2994 		if (transfer_data)
2995 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2996 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2997 		cm->cm_datamap = orig_map;
2998 	}
2999 	if (cm && cm->cm_passthr_dmat)
3000 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3001 	if (cm) {
3002 		mtx_lock(&sc->aac_io_lock);
3003 		aacraid_release_command(cm);
3004 		mtx_unlock(&sc->aac_io_lock);
3005 	}
3006 	return(error);
3007 }
3008 
3009 /*
3010  * Request an AIF from the controller (new comm. type1)
3011  */
3012 static void
3013 aac_request_aif(struct aac_softc *sc)
3014 {
3015 	struct aac_command *cm;
3016 	struct aac_fib *fib;
3017 
3018 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3019 
3020 	if (aacraid_alloc_command(sc, &cm)) {
3021 		sc->aif_pending = 1;
3022 		return;
3023 	}
3024 	sc->aif_pending = 0;
3025 
3026 	/* build the FIB */
3027 	fib = cm->cm_fib;
3028 	fib->Header.Size = sizeof(struct aac_fib);
3029 	fib->Header.XferState =
3030 		AAC_FIBSTATE_HOSTOWNED   |
3031 		AAC_FIBSTATE_INITIALISED |
3032 		AAC_FIBSTATE_EMPTY	 |
3033 		AAC_FIBSTATE_FROMHOST	 |
3034 		AAC_FIBSTATE_REXPECTED   |
3035 		AAC_FIBSTATE_NORM	 |
3036 		AAC_FIBSTATE_ASYNC;
3037 	/* set AIF marker */
3038 	fib->Header.Handle = 0x00800000;
3039 	fib->Header.Command = AifRequest;
3040 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3041 
3042 	aacraid_map_command_sg(cm, NULL, 0, 0);
3043 }
3044 
3045 
3046 /*
3047  * cdevpriv interface private destructor.
3048  */
3049 static void
3050 aac_cdevpriv_dtor(void *arg)
3051 {
3052 	struct aac_softc *sc;
3053 
3054 	sc = arg;
3055 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3056 	device_unbusy(sc->aac_dev);
3057 }
3058 
3059 /*
3060  * Handle an AIF sent to us by the controller; queue it for later reference.
3061  * If the queue fills up, then drop the older entries.
3062  */
3063 static void
3064 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3065 {
3066 	struct aac_aif_command *aif;
3067 	struct aac_container *co, *co_next;
3068 	struct aac_fib_context *ctx;
3069 	struct aac_fib *sync_fib;
3070 	struct aac_mntinforesp mir;
3071 	int next, current, found;
3072 	int count = 0, changed = 0, i = 0;
3073 	u_int32_t channel, uid;
3074 
3075 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3076 
3077 	aif = (struct aac_aif_command*)&fib->data[0];
3078 	aacraid_print_aif(sc, aif);
3079 
3080 	/* Is it an event that we should care about? */
3081 	switch (aif->command) {
3082 	case AifCmdEventNotify:
3083 		switch (aif->data.EN.type) {
3084 		case AifEnAddContainer:
3085 		case AifEnDeleteContainer:
3086 			/*
3087 			 * A container was added or deleted, but the message
3088 			 * doesn't tell us anything else!  Re-enumerate the
3089 			 * containers and sort things out.
3090 			 */
3091 			aac_alloc_sync_fib(sc, &sync_fib);
3092 			do {
3093 				/*
3094 				 * Ask the controller for its containers one at
3095 				 * a time.
3096 				 * XXX What if the controller's list changes
3097 				 * midway through this enumeration?
3098 				 * XXX This should be done async.
3099 				 */
3100 				if (aac_get_container_info(sc, sync_fib, i,
3101 					&mir, &uid) != 0)
3102 					continue;
3103 				if (i == 0)
3104 					count = mir.MntRespCount;
3105 				/*
3106 				 * Check the container against our list.
3107 				 * co->co_found was already set to 0 in a
3108 				 * previous run.
3109 				 */
3110 				if ((mir.Status == ST_OK) &&
3111 				    (mir.MntTable[0].VolType != CT_NONE)) {
3112 					found = 0;
3113 					TAILQ_FOREACH(co,
3114 						      &sc->aac_container_tqh,
3115 						      co_link) {
3116 						if (co->co_mntobj.ObjectId ==
3117 						    mir.MntTable[0].ObjectId) {
3118 							co->co_found = 1;
3119 							found = 1;
3120 							break;
3121 						}
3122 					}
3123 					/*
3124 					 * If the container matched, continue
3125 					 * in the list.
3126 					 */
3127 					if (found) {
3128 						i++;
3129 						continue;
3130 					}
3131 
3132 					/*
3133 					 * This is a new container.  Do all the
3134 					 * appropriate things to set it up.
3135 					 */
3136 					aac_add_container(sc, &mir, 1, uid);
3137 					changed = 1;
3138 				}
3139 				i++;
3140 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3141 			aac_release_sync_fib(sc);
3142 
3143 			/*
3144 			 * Go through our list of containers and see which ones
3145 			 * were not marked 'found'.  Since the controller didn't
3146 			 * list them they must have been deleted.  Do the
3147 			 * appropriate steps to destroy the device.  Also reset
3148 			 * the co->co_found field.
3149 			 */
3150 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3151 			while (co != NULL) {
3152 				if (co->co_found == 0) {
3153 					co_next = TAILQ_NEXT(co, co_link);
3154 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3155 						     co_link);
3156 					free(co, M_AACRAIDBUF);
3157 					changed = 1;
3158 					co = co_next;
3159 				} else {
3160 					co->co_found = 0;
3161 					co = TAILQ_NEXT(co, co_link);
3162 				}
3163 			}
3164 
3165 			/* Attach the newly created containers */
3166 			if (changed) {
3167 				if (sc->cam_rescan_cb != NULL)
3168 					sc->cam_rescan_cb(sc, 0,
3169 				    	AAC_CAM_TARGET_WILDCARD);
3170 			}
3171 
3172 			break;
3173 
3174 		case AifEnEnclosureManagement:
3175 			switch (aif->data.EN.data.EEE.eventType) {
3176 			case AIF_EM_DRIVE_INSERTION:
3177 			case AIF_EM_DRIVE_REMOVAL:
3178 				channel = aif->data.EN.data.EEE.unitID;
3179 				if (sc->cam_rescan_cb != NULL)
3180 					sc->cam_rescan_cb(sc,
3181 					    ((channel>>24) & 0xF) + 1,
3182 					    (channel & 0xFFFF));
3183 				break;
3184 			}
3185 			break;
3186 
3187 		case AifEnAddJBOD:
3188 		case AifEnDeleteJBOD:
3189 		case AifRawDeviceRemove:
3190 			channel = aif->data.EN.data.ECE.container;
3191 			if (sc->cam_rescan_cb != NULL)
3192 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3193 				    AAC_CAM_TARGET_WILDCARD);
3194 			break;
3195 
3196 		default:
3197 			break;
3198 		}
		break;
3199 
3200 	default:
3201 		break;
3202 	}
3203 
3204 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3205 	current = sc->aifq_idx;
3206 	next = (current + 1) % AAC_AIFQ_LENGTH;
3207 	if (next == 0)
3208 		sc->aifq_filled = 1;
3209 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3210 	/* modify AIF contexts */
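	/*
	 * Once the queue has wrapped, a reader whose index the producer
	 * has just caught up with is marked wrapped, and a reader that is
	 * both wrapped and sitting on the slot just overwritten is pushed
	 * forward so it never reads stale data.
	 */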
3211 	if (sc->aifq_filled) {
3212 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3213 			if (next == ctx->ctx_idx)
3214 				ctx->ctx_wrap = 1;
3215 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3216 				ctx->ctx_idx = next;
3217 		}
3218 	}
3219 	sc->aifq_idx = next;
3220 	/* On the off chance that someone is sleeping for an aif... */
3221 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3222 		wakeup(sc->aac_aifq);
3223 	/* Wakeup any poll()ers */
3224 	selwakeuppri(&sc->rcv_select, PRIBIO);
3225 
3226 	return;
3227 }
3228 
3229 /*
3230  * Return the Revision of the driver to userspace and check to see if the
3231  * userspace app is possibly compatible.  This is extremely bogus since
3232  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3233  * returning the driver's own version constants and claiming compatibility.
3234  */
3235 static int
3236 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3237 {
3238 	struct aac_rev_check rev_check;
3239 	struct aac_rev_check_resp rev_check_resp;
3240 	int error = 0;
3241 
3242 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3243 
3244 	/*
3245 	 * Copyin the revision struct from userspace
3246 	 */
3247 	if ((error = copyin(udata, (caddr_t)&rev_check,
3248 			sizeof(struct aac_rev_check))) != 0) {
3249 		return error;
3250 	}
3251 
3252 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3253 	      rev_check.callingRevision.buildNumber);
3254 
3255 	/*
3256 	 * Doctor up the response struct.
3257 	 */
3258 	rev_check_resp.possiblyCompatible = 1;
3259 	rev_check_resp.adapterSWRevision.external.comp.major =
3260 	    AAC_DRIVER_MAJOR_VERSION;
3261 	rev_check_resp.adapterSWRevision.external.comp.minor =
3262 	    AAC_DRIVER_MINOR_VERSION;
3263 	rev_check_resp.adapterSWRevision.external.comp.type =
3264 	    AAC_DRIVER_TYPE;
3265 	rev_check_resp.adapterSWRevision.external.comp.dash =
3266 	    AAC_DRIVER_BUGFIX_LEVEL;
3267 	rev_check_resp.adapterSWRevision.buildNumber =
3268 	    AAC_DRIVER_BUILD;
3269 
3270 	return(copyout((caddr_t)&rev_check_resp, udata,
3271 			sizeof(struct aac_rev_check_resp)));
3272 }
3273 
3274 /*
3275  * Pass the fib context to the caller
3276  */
3277 static int
3278 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3279 {
3280 	struct aac_fib_context *fibctx, *ctx;
3281 	int error = 0;
3282 
3283 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3284 
3285 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3286 	if (fibctx == NULL)
3287 		return (ENOMEM);
3288 
3289 	mtx_lock(&sc->aac_io_lock);
3290 	/* all elements are already 0, add to queue */
3291 	if (sc->fibctx == NULL)
3292 		sc->fibctx = fibctx;
3293 	else {
3294 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3295 			;
3296 		ctx->next = fibctx;
3297 		fibctx->prev = ctx;
3298 	}
3299 
3300 	/* derive a unique id: seed from the context pointer, bump until no collision */
3301 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3302 	ctx = sc->fibctx;
3303 	while (ctx != fibctx) {
3304 		if (ctx->unique == fibctx->unique) {
3305 			fibctx->unique++;
3306 			ctx = sc->fibctx;
3307 		} else {
3308 			ctx = ctx->next;
3309 		}
3310 	}
3311 
3312 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3313 	mtx_unlock(&sc->aac_io_lock);
3314 	if (error)
3315 		aac_close_aif(sc, (caddr_t)ctx);
3316 	return error;
3317 }
3318 
3319 /*
3320  * Close the caller's fib context
3321  */
3322 static int
3323 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3324 {
3325 	struct aac_fib_context *ctx;
3326 
3327 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3328 
3329 	mtx_lock(&sc->aac_io_lock);
3330 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3331 		if (ctx->unique == *(uint32_t *)&arg) {
3332 			if (ctx == sc->fibctx)
3333 				sc->fibctx = NULL;
3334 			else {
3335 				ctx->prev->next = ctx->next;
3336 				if (ctx->next)
3337 					ctx->next->prev = ctx->prev;
3338 			}
3339 			break;
3340 		}
3341 	}
3342 	if (ctx)
3343 		free(ctx, M_AACRAIDBUF);
3344 
3345 	mtx_unlock(&sc->aac_io_lock);
3346 	return 0;
3347 }
3348 
3349 /*
3350  * Pass the caller the next AIF in their queue
3351  */
3352 static int
3353 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3354 {
3355 	struct get_adapter_fib_ioctl agf;
3356 	struct aac_fib_context *ctx;
3357 	int error;
3358 
3359 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3360 
3361 	mtx_lock(&sc->aac_io_lock);
3362 #ifdef COMPAT_FREEBSD32
3363 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3364 		struct get_adapter_fib_ioctl32 agf32;
3365 		error = copyin(arg, &agf32, sizeof(agf32));
3366 		if (error == 0) {
3367 			agf.AdapterFibContext = agf32.AdapterFibContext;
3368 			agf.Wait = agf32.Wait;
3369 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3370 		}
3371 	} else
3372 #endif
3373 		error = copyin(arg, &agf, sizeof(agf));
3374 	if (error == 0) {
3375 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3376 			if (agf.AdapterFibContext == ctx->unique)
3377 				break;
3378 		}
3379 		if (!ctx) {
3380 			mtx_unlock(&sc->aac_io_lock);
3381 			return (EFAULT);
3382 		}
3383 
3384 		error = aac_return_aif(sc, ctx, agf.AifFib);
3385 		if (error == EAGAIN && agf.Wait) {
3386 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3387 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3388 			while (error == EAGAIN) {
3389 				mtx_unlock(&sc->aac_io_lock);
3390 				error = tsleep(sc->aac_aifq, PRIBIO |
3391 					       PCATCH, "aacaif", 0);
3392 				mtx_lock(&sc->aac_io_lock);
3393 				if (error == 0)
3394 					error = aac_return_aif(sc, ctx, agf.AifFib);
3395 			}
3396 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3397 		}
3398 	}
3399 	mtx_unlock(&sc->aac_io_lock);
3400 	return(error);
3401 }
3402 
3403 /*
3404  * Hand the next AIF off the top of the queue out to userspace.
3405  */
3406 static int
3407 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3408 {
3409 	int current, error;
3410 
3411 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3412 
3413 	current = ctx->ctx_idx;
3414 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3415 		/* empty */
3416 		return (EAGAIN);
3417 	}
3418 	error =
3419 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3420 	if (error)
3421 		device_printf(sc->aac_dev,
3422 		    "aac_return_aif: copyout returned %d\n", error);
3423 	else {
3424 		ctx->ctx_wrap = 0;
3425 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3426 	}
3427 	return(error);
3428 }
3429 
3430 static int
3431 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3432 {
3433 	struct aac_pci_info {
3434 		u_int32_t bus;
3435 		u_int32_t slot;
3436 	} pciinf;
3437 	int error;
3438 
3439 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3440 
3441 	pciinf.bus = pci_get_bus(sc->aac_dev);
3442 	pciinf.slot = pci_get_slot(sc->aac_dev);
3443 
3444 	error = copyout((caddr_t)&pciinf, uptr,
3445 			sizeof(struct aac_pci_info));
3446 
3447 	return (error);
3448 }
3449 
3450 static int
3451 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3452 {
3453 	struct aac_features f;
3454 	int error;
3455 
3456 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3457 
3458 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3459 		return (error);
3460 
3461 	/*
3462 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3463 	 * ALL zero in the featuresState, the driver will return the current
3464 	 * state of all the supported features; the data field will not be
3465 	 * valid.
3466 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3467 	 * a specific bit set in the featuresState, the driver will return the
3468 	 * current state of that specific feature and any data associated with
3469 	 * it in the data field, or perform whatever action the data field
3470 	 * indicates.
3471 	 */
3472 	if (f.feat.fValue == 0) {
3473 		f.feat.fBits.largeLBA =
3474 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3475 		f.feat.fBits.JBODSupport = 1;
3476 		/* TODO: In the future, add other features state here as well */
3477 	} else {
3478 		if (f.feat.fBits.largeLBA)
3479 			f.feat.fBits.largeLBA =
3480 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3481 		/* TODO: Add other features state and data in the future */
3482 	}
3483 
3484 	error = copyout(&f, uptr, sizeof (f));
3485 	return (error);
3486 }
3487 
3488 /*
3489  * Give the userland some information about the container.  The AAC arch
3490  * expects the driver to be a SCSI passthrough type driver, so it expects
3491  * the containers to have b:t:l numbers.  Fake it.
3492  */
3493 static int
3494 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3495 {
3496 	struct aac_query_disk query_disk;
3497 	struct aac_container *co;
3498 	int error, id;
3499 
3500 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3501 
3502 	mtx_lock(&sc->aac_io_lock);
3503 	error = copyin(uptr, (caddr_t)&query_disk,
3504 		       sizeof(struct aac_query_disk));
3505 	if (error) {
3506 		mtx_unlock(&sc->aac_io_lock);
3507 		return (error);
3508 	}
3509 
3510 	id = query_disk.ContainerNumber;
3511 	if (id == -1) {
3512 		mtx_unlock(&sc->aac_io_lock);
3513 		return (EINVAL);
3514 	}
3515 
3516 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3517 		if (co->co_mntobj.ObjectId == id)
3518 			break;
3519 	}
3520 
3521 	if (co == NULL) {
3522 		query_disk.Valid = 0;
3523 		query_disk.Locked = 0;
3524 		query_disk.Deleted = 1;		/* XXX is this right? */
3525 	} else {
3526 		query_disk.Valid = 1;
3527 		query_disk.Locked = 1;
3528 		query_disk.Deleted = 0;
3529 		query_disk.Bus = device_get_unit(sc->aac_dev);
3530 		query_disk.Target = 0;
3531 		query_disk.Lun = 0;
3532 		query_disk.UnMapped = 0;
3533 	}
3534 
3535 	error = copyout((caddr_t)&query_disk, uptr,
3536 			sizeof(struct aac_query_disk));
3537 
3538 	mtx_unlock(&sc->aac_io_lock);
3539 	return (error);
3540 }
3541 
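/*
 * Register a pseudo "container bus" child device; the aacraidp CAM
 * attachment uses it to expose the RAID containers.
 */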
3542 static void
3543 aac_container_bus(struct aac_softc *sc)
3544 {
3545 	struct aac_sim *sim;
3546 	device_t child;
3547 
3548 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3549 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3550 	if (sim == NULL) {
3551 		device_printf(sc->aac_dev,
3552 	    	"No memory to add container bus\n");
3553 		panic("Out of memory?!");
3554 	}
3555 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3556 	if (child == NULL) {
3557 		device_printf(sc->aac_dev,
3558 	    	"device_add_child failed for container bus\n");
3559 		free(sim, M_AACRAIDBUF);
3560 		panic("Out of memory?!");
3561 	}
3562 
3563 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3564 	sim->BusNumber = 0;
3565 	sim->BusType = CONTAINER_BUS;
3566 	sim->InitiatorBusId = -1;
3567 	sim->aac_sc = sc;
3568 	sim->sim_dev = child;
3569 	sim->aac_cam = NULL;
3570 
3571 	device_set_ivars(child, sim);
3572 	device_set_desc(child, "Container Bus");
3573 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3574 	/*
3575 	device_set_desc(child, aac_describe_code(aac_container_types,
3576 			mir->MntTable[0].VolType));
3577 	*/
3578 	bus_generic_attach(sc->aac_dev);
3579 }
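
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * "aacraidp" child created above would typically recover the struct
 * aac_sim handed over via device_set_ivars() in its attach method, e.g.:
 *
 *	static int
 *	example_sim_attach(device_t dev)	// hypothetical name
 *	{
 *		struct aac_sim *sim = device_get_ivars(dev);
 *
 *		// sim->aac_sc, sim->BusNumber and sim->TargetsPerBus
 *		// describe the bus this instance should expose.
 *		...
 *	}
 */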
3580 
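/*
 * Ask the firmware which physical (pass-through) SCSI buses it exposes
 * and add an "aacraidp" child for each valid one, in addition to the
 * container bus registered by aac_container_bus().
 */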
3581 static void
3582 aac_get_bus_info(struct aac_softc *sc)
3583 {
3584 	struct aac_fib *fib;
3585 	struct aac_ctcfg *c_cmd;
3586 	struct aac_ctcfg_resp *c_resp;
3587 	struct aac_vmioctl *vmi;
3588 	struct aac_vmi_businf_resp *vmi_resp;
3589 	struct aac_getbusinf businfo;
3590 	struct aac_sim *caminf;
3591 	device_t child;
3592 	int i, error;
3593 
3594 	mtx_lock(&sc->aac_io_lock);
3595 	aac_alloc_sync_fib(sc, &fib);
3596 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3597 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3598 
3599 	c_cmd->Command = VM_ContainerConfig;
3600 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3601 	c_cmd->param = 0;
3602 
3603 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3604 	    sizeof(struct aac_ctcfg));
3605 	if (error) {
3606 		device_printf(sc->aac_dev, "Error %d sending "
3607 		    "VM_ContainerConfig command\n", error);
3608 		aac_release_sync_fib(sc);
3609 		mtx_unlock(&sc->aac_io_lock);
3610 		return;
3611 	}
3612 
3613 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3614 	if (c_resp->Status != ST_OK) {
3615 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3616 		    c_resp->Status);
3617 		aac_release_sync_fib(sc);
3618 		mtx_unlock(&sc->aac_io_lock);
3619 		return;
3620 	}
3621 
3622 	sc->scsi_method_id = c_resp->param;
3623 
3624 	vmi = (struct aac_vmioctl *)&fib->data[0];
3625 	bzero(vmi, sizeof(struct aac_vmioctl));
3626 
3627 	vmi->Command = VM_Ioctl;
3628 	vmi->ObjType = FT_DRIVE;
3629 	vmi->MethId = sc->scsi_method_id;
3630 	vmi->ObjId = 0;
3631 	vmi->IoctlCmd = GetBusInfo;
3632 
3633 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3634 	    sizeof(struct aac_vmi_businf_resp));
3635 	if (error) {
3636 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3637 		    error);
3638 		aac_release_sync_fib(sc);
3639 		mtx_unlock(&sc->aac_io_lock);
3640 		return;
3641 	}
3642 
3643 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3644 	if (vmi_resp->Status != ST_OK) {
3645 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3646 		    vmi_resp->Status);
3647 		aac_release_sync_fib(sc);
3648 		mtx_unlock(&sc->aac_io_lock);
3649 		return;
3650 	}
3651 
3652 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3653 	aac_release_sync_fib(sc);
3654 	mtx_unlock(&sc->aac_io_lock);
3655 
3656 	for (i = 0; i < businfo.BusCount; i++) {
3657 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3658 			continue;
3659 
3660 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3661 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3662 		if (caminf == NULL) {
3663 			device_printf(sc->aac_dev,
3664 			    "No memory to add passthrough bus %d\n", i);
3665 			break;
3666 		}
3667 
3668 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3669 		if (child == NULL) {
3670 			device_printf(sc->aac_dev,
3671 			    "device_add_child failed for passthrough bus %d\n",
3672 			    i);
3673 			free(caminf, M_AACRAIDBUF);
3674 			break;
3675 		}
3676 
3677 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3678 		caminf->BusNumber = i+1;
3679 		caminf->BusType = PASSTHROUGH_BUS;
3680 		caminf->InitiatorBusId = -1;
3681 		caminf->aac_sc = sc;
3682 		caminf->sim_dev = child;
3683 		caminf->aac_cam = NULL;
3684 
3685 		device_set_ivars(child, caminf);
3686 		device_set_desc(child, "SCSI Passthrough Bus");
3687 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3688 	}
3689 }
3690 
3691 /*
3692  * Check to see if the kernel is up and running. If we are in a
3693  * BlinkLED state, return the BlinkLED code.
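 * The BlinkLED code is carried in bits 23:16 of the firmware status.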
3694  */
3695 static u_int32_t
3696 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3697 {
3698 	u_int32_t ret;
3699 
3700 	ret = AAC_GET_FWSTATUS(sc);
3701 
3702 	if (ret & AAC_UP_AND_RUNNING)
3703 		ret = 0;
3704 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3705 		*bled = (ret >> 16) & 0xff;
3706 
3707 	return (ret);
3708 }
3709 
3710 /*
3711  * After an IOP reset the card has to be re-initialized as if it were
3712  * coming up from a cold boot, and the driver is responsible for any
3713  * I/O that was outstanding to the adapter at the time of the reset.
3714  * The init code is therefore kept modular so that it can be called
3715  * from several places to prepare the driver for an IOP reset.
3716  */
3717 static int
3718 aac_reset_adapter(struct aac_softc *sc)
3719 {
3720 	struct aac_command *cm;
3721 	struct aac_fib *fib;
3722 	struct aac_pause_command *pc;
3723 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3724 	int ret, msi_enabled_orig;
3725 
3726 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3727 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3728 
3729 	if (sc->aac_state & AAC_STATE_RESET) {
3730 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3731 		return (EINVAL);
3732 	}
3733 	sc->aac_state |= AAC_STATE_RESET;
3734 
3735 	/* disable interrupt */
3736 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3737 
3738 	/*
3739 	 * Abort all pending commands:
3740 	 * a) on the controller
3741 	 */
3742 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3743 		cm->cm_flags |= AAC_CMD_RESET;
3744 
3745 		/* is there a completion handler? */
3746 		if (cm->cm_complete != NULL) {
3747 			cm->cm_complete(cm);
3748 		} else {
3749 			/* assume that someone is sleeping on this
3750 			 * command
3751 			 */
3752 			wakeup(cm);
3753 		}
3754 	}
3755 
3756 	/* b) in the waiting queues */
3757 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3758 		cm->cm_flags |= AAC_CMD_RESET;
3759 
3760 		/* is there a completion handler? */
3761 		if (cm->cm_complete != NULL) {
3762 			cm->cm_complete(cm);
3763 		} else {
3764 			/* assume that someone is sleeping on this
3765 			 * command
3766 			 */
3767 			wakeup(cm);
3768 		}
3769 	}
3770 
3771 	/* flush drives */
3772 	if (aac_check_adapter_health(sc, NULL) == 0) {
3773 		mtx_unlock(&sc->aac_io_lock);
3774 		(void) aacraid_shutdown(sc->aac_dev);
3775 		mtx_lock(&sc->aac_io_lock);
3776 	}
3777 
3778 	/* execute IOP reset */
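	/*
	 * Use a direct MU (core) reset if the firmware supports it; otherwise
	 * obtain a doorbell reset mask via AAC_IOP_RESET_ALWAYS (or reuse the
	 * known doorbell_mask), falling back to the legacy AAC_IOP_RESET sync
	 * command for older firmware.
	 */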
3779 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3780 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3781 
3782 		/* We need to wait for 5 seconds before accessing the MU again;
3783 		 * 10000 * 100us = 1,000,000us = 1000ms = 1s
3784 		 */
3785 		waitCount = 5 * 10000;
3786 		while (waitCount) {
3787 			DELAY(100);			/* delay 100 microseconds */
3788 			waitCount--;
3789 		}
3790 	} else {
3791 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3792 			0, 0, 0, 0, &status, &reset_mask);
3793 		if (ret && !sc->doorbell_mask) {
3794 			/* call IOP_RESET for older firmware */
3795 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3796 			    &status, NULL)) != 0) {
3797 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3798 					device_printf(sc->aac_dev,
3799 					    "IOP_RESET not supported\n");
3800 				} else {
3801 					/* probably timeout */
3802 					device_printf(sc->aac_dev,
3803 					    "IOP_RESET failed\n");
3804 				}
3805 
3806 				/* unwind aac_shutdown() */
3807 				aac_alloc_sync_fib(sc, &fib);
3808 				pc = (struct aac_pause_command *)&fib->data[0];
3809 				pc->Command = VM_ContainerConfig;
3810 				pc->Type = CT_PAUSE_IO;
3811 				pc->Timeout = 1;
3812 				pc->Min = 1;
3813 				pc->NoRescan = 1;
3814 
3815 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3816 				    fib, sizeof (struct aac_pause_command));
3817 				aac_release_sync_fib(sc);
3818 
3819 				goto finish;
3820 			}
3821 		} else if (sc->doorbell_mask) {
3822 			ret = 0;
3823 			reset_mask = sc->doorbell_mask;
3824 		}
3825 		if (!ret &&
3826 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3827 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3828 			/*
3829 			 * We need to wait for 5 seconds before accessing the
3830 			 * doorbell again;
3831 			 * 10000 * 100us = 1,000,000us = 1000ms = 1s
3832 			 */
3833 			waitCount = 5 * 10000;
3834 			while (waitCount) {
3835 				DELAY(100);	/* delay 100 microseconds */
3836 				waitCount--;
3837 			}
3838 		}
3839 	}
3840 
3841 	/*
3842 	 * Initialize the adapter.
3843 	 */
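	/*
	 * The MSI state is temporarily cleared across aac_check_firmware();
	 * it and the original max_msix value are restored before aac_init()
	 * is re-run (both steps are skipped in sync mode).
	 */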
3844 	max_msix_orig = sc->aac_max_msix;
3845 	msi_enabled_orig = sc->msi_enabled;
3846 	sc->msi_enabled = FALSE;
3847 	if (aac_check_firmware(sc) != 0)
3848 		goto finish;
3849 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3850 		sc->aac_max_msix = max_msix_orig;
3851 		if (msi_enabled_orig) {
3852 			sc->msi_enabled = msi_enabled_orig;
3853 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3854 		}
3855 		mtx_unlock(&sc->aac_io_lock);
3856 		aac_init(sc);
3857 		mtx_lock(&sc->aac_io_lock);
3858 	}
3859 
3860 finish:
3861 	sc->aac_state &= ~AAC_STATE_RESET;
3862 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3863 	aacraid_startio(sc);
3864 	return (0);
3865 }
3866