xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 56e53cb8ef000c3ef72337a4095987a932cdedef)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/sysctl.h>
50 #include <sys/poll.h>
51 #include <sys/ioccom.h>
52 
53 #include <sys/bus.h>
54 #include <sys/conf.h>
55 #include <sys/signalvar.h>
56 #include <sys/time.h>
57 #include <sys/eventhandler.h>
58 #include <sys/rman.h>
59 
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 
66 #include <dev/aacraid/aacraid_reg.h>
67 #include <sys/aac_ioctl.h>
68 #include <dev/aacraid/aacraid_debug.h>
69 #include <dev/aacraid/aacraid_var.h>
70 
71 #ifndef FILTER_HANDLED
72 #define FILTER_HANDLED	0x02
73 #endif
74 
75 static void	aac_add_container(struct aac_softc *sc,
76 				  struct aac_mntinforesp *mir, int f,
77 				  u_int32_t uid);
78 static void	aac_get_bus_info(struct aac_softc *sc);
79 static void	aac_container_bus(struct aac_softc *sc);
80 static void	aac_daemon(void *arg);
81 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
82 							  int pages, int nseg, int nseg_new);
83 
84 /* Command Processing */
85 static void	aac_timeout(struct aac_softc *sc);
86 static void	aac_command_thread(struct aac_softc *sc);
87 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
88 				     u_int32_t xferstate, struct aac_fib *fib,
89 				     u_int16_t datasize);
90 /* Command Buffer Management */
91 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
92 				       int nseg, int error);
93 static int	aac_alloc_commands(struct aac_softc *sc);
94 static void	aac_free_commands(struct aac_softc *sc);
95 static void	aac_unmap_command(struct aac_command *cm);
96 
97 /* Hardware Interface */
98 static int	aac_alloc(struct aac_softc *sc);
99 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
100 			       int error);
101 static int	aac_check_firmware(struct aac_softc *sc);
102 static void	aac_define_int_mode(struct aac_softc *sc);
103 static int	aac_init(struct aac_softc *sc);
104 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
105 static int	aac_setup_intr(struct aac_softc *sc);
106 static int	aac_check_config(struct aac_softc *sc);
107 
108 /* PMC SRC interface */
109 static int	aac_src_get_fwstatus(struct aac_softc *sc);
110 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
111 static int	aac_src_get_istatus(struct aac_softc *sc);
112 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
113 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
114 				    u_int32_t arg0, u_int32_t arg1,
115 				    u_int32_t arg2, u_int32_t arg3);
116 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
117 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
118 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
119 static int aac_src_get_outb_queue(struct aac_softc *sc);
120 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
121 
122 struct aac_interface aacraid_src_interface = {
123 	aac_src_get_fwstatus,
124 	aac_src_qnotify,
125 	aac_src_get_istatus,
126 	aac_src_clear_istatus,
127 	aac_src_set_mailbox,
128 	aac_src_get_mailbox,
129 	aac_src_access_devreg,
130 	aac_src_send_command,
131 	aac_src_get_outb_queue,
132 	aac_src_set_outb_queue
133 };
134 
135 /* PMC SRCv interface */
136 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
137 				    u_int32_t arg0, u_int32_t arg1,
138 				    u_int32_t arg2, u_int32_t arg3);
139 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
140 
141 struct aac_interface aacraid_srcv_interface = {
142 	aac_src_get_fwstatus,
143 	aac_src_qnotify,
144 	aac_src_get_istatus,
145 	aac_src_clear_istatus,
146 	aac_srcv_set_mailbox,
147 	aac_srcv_get_mailbox,
148 	aac_src_access_devreg,
149 	aac_src_send_command,
150 	aac_src_get_outb_queue,
151 	aac_src_set_outb_queue
152 };
153 
154 /* Debugging and Diagnostics */
155 static struct aac_code_lookup aac_cpu_variant[] = {
156 	{"i960JX",		CPUI960_JX},
157 	{"i960CX",		CPUI960_CX},
158 	{"i960HX",		CPUI960_HX},
159 	{"i960RX",		CPUI960_RX},
160 	{"i960 80303",		CPUI960_80303},
161 	{"StrongARM SA110",	CPUARM_SA110},
162 	{"PPC603e",		CPUPPC_603e},
163 	{"XScale 80321",	CPU_XSCALE_80321},
164 	{"MIPS 4KC",		CPU_MIPS_4KC},
165 	{"MIPS 5KC",		CPU_MIPS_5KC},
166 	{"Unknown StrongARM",	CPUARM_xxx},
167 	{"Unknown PowerPC",	CPUPPC_xxx},
168 	{NULL, 0},
169 	{"Unknown processor",	0}
170 };
171 
172 static struct aac_code_lookup aac_battery_platform[] = {
173 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
174 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
175 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
176 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
177 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
178 	{NULL, 0},
179 	{"unknown battery platform",		0}
180 };
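/*
 * Note on the two lookup tables above: the trailing {NULL, 0} entry acts as
 * a sentinel for aac_describe_code(), and the entry that follows it supplies
 * the fallback description ("Unknown processor" / "unknown battery platform")
 * returned for codes not present in the table.
 */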
181 static void	aac_describe_controller(struct aac_softc *sc);
182 static char	*aac_describe_code(struct aac_code_lookup *table,
183 				   u_int32_t code);
184 
185 /* Management Interface */
186 static d_open_t		aac_open;
187 static d_ioctl_t	aac_ioctl;
188 static d_poll_t		aac_poll;
189 #if __FreeBSD_version >= 702000
190 static void		aac_cdevpriv_dtor(void *arg);
191 #else
192 static d_close_t	aac_close;
193 #endif
194 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
195 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
196 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
197 static void	aac_request_aif(struct aac_softc *sc);
198 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
199 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
201 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
202 static int	aac_return_aif(struct aac_softc *sc,
203 			       struct aac_fib_context *ctx, caddr_t uptr);
204 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
205 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
206 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
207 static void	aac_ioctl_event(struct aac_softc *sc,
208 				struct aac_event *event, void *arg);
209 static int	aac_reset_adapter(struct aac_softc *sc);
210 static int	aac_get_container_info(struct aac_softc *sc,
211 				       struct aac_fib *fib, int cid,
212 				       struct aac_mntinforesp *mir,
213 				       u_int32_t *uid);
214 static u_int32_t
215 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
216 
217 static struct cdevsw aacraid_cdevsw = {
218 	.d_version =	D_VERSION,
219 	.d_flags =	D_NEEDGIANT,
220 	.d_open =	aac_open,
221 #if __FreeBSD_version < 702000
222 	.d_close =	aac_close,
223 #endif
224 	.d_ioctl =	aac_ioctl,
225 	.d_poll =	aac_poll,
226 	.d_name =	"aacraid",
227 };
228 
229 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
230 
231 /* sysctl node */
232 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
233 
234 /*
235  * Device Interface
236  */
237 
238 /*
239  * Initialize the controller and softc
240  */
241 int
242 aacraid_attach(struct aac_softc *sc)
243 {
244 	int error, unit;
245 	struct aac_fib *fib;
246 	struct aac_mntinforesp mir;
247 	int count = 0, i = 0;
248 	u_int32_t uid;
249 
250 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
251 	sc->hint_flags = device_get_flags(sc->aac_dev);
252 	/*
253 	 * Initialize per-controller queues.
254 	 */
255 	aac_initq_free(sc);
256 	aac_initq_ready(sc);
257 	aac_initq_busy(sc);
258 
259 	/* mark controller as suspended until we get ourselves organised */
260 	sc->aac_state |= AAC_STATE_SUSPEND;
261 
262 	/*
263 	 * Check that the firmware on the card is supported.
264 	 */
265 	sc->msi_enabled = FALSE;
266 	if ((error = aac_check_firmware(sc)) != 0)
267 		return(error);
268 
269 	/*
270 	 * Initialize locks
271 	 */
272 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
273 	TAILQ_INIT(&sc->aac_container_tqh);
274 	TAILQ_INIT(&sc->aac_ev_cmfree);
275 
276 #if __FreeBSD_version >= 800000
277 	/* Initialize the clock daemon callout. */
278 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
279 #endif
280 	/*
281 	 * Initialize the adapter.
282 	 */
283 	if ((error = aac_alloc(sc)) != 0)
284 		return(error);
285 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
286 		aac_define_int_mode(sc);
287 		if ((error = aac_init(sc)) != 0)
288 			return(error);
289 	}
290 
291 	/*
292 	 * Allocate and connect our interrupt.
293 	 */
294 	if ((error = aac_setup_intr(sc)) != 0)
295 		return(error);
296 
297 	/*
298 	 * Print a little information about the controller.
299 	 */
300 	aac_describe_controller(sc);
301 
302 	/*
303 	 * Make the control device.
304 	 */
305 	unit = device_get_unit(sc->aac_dev);
306 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
307 				 0640, "aacraid%d", unit);
308 	sc->aac_dev_t->si_drv1 = sc;
309 
310 	/* Create the AIF thread */
311 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
312 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
313 		panic("Could not create AIF thread");
314 
315 	/* Register the shutdown method to only be called post-dump */
316 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
317 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
318 		device_printf(sc->aac_dev,
319 			      "shutdown event registration failed\n");
320 
321 	/* Find containers */
322 	mtx_lock(&sc->aac_io_lock);
323 	aac_alloc_sync_fib(sc, &fib);
324 	/* loop over possible containers */
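	/*
	 * The response for container 0 supplies MntRespCount, which (capped
	 * at AAC_MAX_CONTAINERS) bounds the scan; if probing container 0
	 * fails, count stays 0 and the loop ends after that first pass.
	 */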
325 	do {
326 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
327 			continue;
328 		if (i == 0)
329 			count = mir.MntRespCount;
330 		aac_add_container(sc, &mir, 0, uid);
331 		i++;
332 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
333 	aac_release_sync_fib(sc);
334 	mtx_unlock(&sc->aac_io_lock);
335 
336 	/* Register with CAM for the containers */
337 	TAILQ_INIT(&sc->aac_sim_tqh);
338 	aac_container_bus(sc);
339 	/* Register with CAM for the non-DASD devices */
340 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
341 		aac_get_bus_info(sc);
342 
343 	/* poke the bus to actually attach the child devices */
344 	bus_generic_attach(sc->aac_dev);
345 
346 	/* mark the controller up */
347 	sc->aac_state &= ~AAC_STATE_SUSPEND;
348 
349 	/* enable interrupts now */
350 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
351 
352 #if __FreeBSD_version >= 800000
353 	mtx_lock(&sc->aac_io_lock);
354 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
355 	mtx_unlock(&sc->aac_io_lock);
356 #else
357 	{
358 		struct timeval tv;
359 		tv.tv_sec = 60;
360 		tv.tv_usec = 0;
361 		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
362 	}
363 #endif
364 
365 	return(0);
366 }
367 
368 static void
369 aac_daemon(void *arg)
370 {
371 	struct aac_softc *sc;
372 	struct timeval tv;
373 	struct aac_command *cm;
374 	struct aac_fib *fib;
375 
376 	sc = arg;
377 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
378 
379 #if __FreeBSD_version >= 800000
380 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
381 	if (callout_pending(&sc->aac_daemontime) ||
382 	    callout_active(&sc->aac_daemontime) == 0)
383 		return;
384 #else
385 	mtx_lock(&sc->aac_io_lock);
386 #endif
387 	getmicrotime(&tv);
388 
389 	if (!aacraid_alloc_command(sc, &cm)) {
390 		fib = cm->cm_fib;
391 		cm->cm_timestamp = time_uptime;
392 		cm->cm_datalen = 0;
393 		cm->cm_flags |= AAC_CMD_WAIT;
394 
395 		fib->Header.Size =
396 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
397 		fib->Header.XferState =
398 			AAC_FIBSTATE_HOSTOWNED   |
399 			AAC_FIBSTATE_INITIALISED |
400 			AAC_FIBSTATE_EMPTY	 |
401 			AAC_FIBSTATE_FROMHOST	 |
402 			AAC_FIBSTATE_REXPECTED   |
403 			AAC_FIBSTATE_NORM	 |
404 			AAC_FIBSTATE_ASYNC	 |
405 			AAC_FIBSTATE_FAST_RESPONSE;
406 		fib->Header.Command = SendHostTime;
407 		*(uint32_t *)fib->data = tv.tv_sec;
408 
409 		aacraid_map_command_sg(cm, NULL, 0, 0);
410 		aacraid_release_command(cm);
411 	}
412 
413 #if __FreeBSD_version >= 800000
414 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
415 #else
416 	mtx_unlock(&sc->aac_io_lock);
417 	tv.tv_sec = 30 * 60;
418 	tv.tv_usec = 0;
419 	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
420 #endif
421 }
422 
423 void
424 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
425 {
426 
427 	switch (event->ev_type & AAC_EVENT_MASK) {
428 	case AAC_EVENT_CMFREE:
429 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
430 		break;
431 	default:
432 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
433 		    event->ev_type);
434 		break;
435 	}
436 
437 	return;
438 }
439 
440 /*
441  * Request information of container #cid
442  */
443 static int
444 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
445 		       struct aac_mntinforesp *mir, u_int32_t *uid)
446 {
447 	struct aac_command *cm;
448 	struct aac_fib *fib;
449 	struct aac_mntinfo *mi;
450 	struct aac_cnt_config *ccfg;
451 	int rval;
452 
453 	if (sync_fib == NULL) {
454 		if (aacraid_alloc_command(sc, &cm)) {
455 			device_printf(sc->aac_dev,
456 				"Warning, no free command available\n");
457 			return (-1);
458 		}
459 		fib = cm->cm_fib;
460 	} else {
461 		fib = sync_fib;
462 	}
463 
464 	mi = (struct aac_mntinfo *)&fib->data[0];
465 	/* 4KB sector support? 64-bit LBA? */
466 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
467 		mi->Command = VM_NameServeAllBlk;
468 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
469 		mi->Command = VM_NameServe64;
470 	else
471 		mi->Command = VM_NameServe;
472 	mi->MntType = FT_FILESYS;
473 	mi->MntCount = cid;
474 
475 	if (sync_fib) {
476 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
477 			 sizeof(struct aac_mntinfo))) {
478 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
479 			return (-1);
480 		}
481 	} else {
482 		cm->cm_timestamp = time_uptime;
483 		cm->cm_datalen = 0;
484 
485 		fib->Header.Size =
486 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
487 		fib->Header.XferState =
488 			AAC_FIBSTATE_HOSTOWNED   |
489 			AAC_FIBSTATE_INITIALISED |
490 			AAC_FIBSTATE_EMPTY	 |
491 			AAC_FIBSTATE_FROMHOST	 |
492 			AAC_FIBSTATE_REXPECTED   |
493 			AAC_FIBSTATE_NORM	 |
494 			AAC_FIBSTATE_ASYNC	 |
495 			AAC_FIBSTATE_FAST_RESPONSE;
496 		fib->Header.Command = ContainerCommand;
497 		if (aacraid_wait_command(cm) != 0) {
498 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
499 			aacraid_release_command(cm);
500 			return (-1);
501 		}
502 	}
503 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
504 
505 	/* UID */
506 	*uid = cid;
507 	if (mir->MntTable[0].VolType != CT_NONE &&
508 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
509 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
510 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
511 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
512 		}
513 		ccfg = (struct aac_cnt_config *)&fib->data[0];
514 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
515 		ccfg->Command = VM_ContainerConfig;
516 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
517 		ccfg->CTCommand.param[0] = cid;
518 
519 		if (sync_fib) {
520 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
521 				sizeof(struct aac_cnt_config));
522 			if (rval == 0 && ccfg->Command == ST_OK &&
523 				ccfg->CTCommand.param[0] == CT_OK &&
524 				mir->MntTable[0].VolType != CT_PASSTHRU)
525 				*uid = ccfg->CTCommand.param[1];
526 		} else {
527 			fib->Header.Size =
528 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
529 			fib->Header.XferState =
530 				AAC_FIBSTATE_HOSTOWNED   |
531 				AAC_FIBSTATE_INITIALISED |
532 				AAC_FIBSTATE_EMPTY	 |
533 				AAC_FIBSTATE_FROMHOST	 |
534 				AAC_FIBSTATE_REXPECTED   |
535 				AAC_FIBSTATE_NORM	 |
536 				AAC_FIBSTATE_ASYNC	 |
537 				AAC_FIBSTATE_FAST_RESPONSE;
538 			fib->Header.Command = ContainerCommand;
539 			rval = aacraid_wait_command(cm);
540 			if (rval == 0 && ccfg->Command == ST_OK &&
541 				ccfg->CTCommand.param[0] == CT_OK &&
542 				mir->MntTable[0].VolType != CT_PASSTHRU)
543 				*uid = ccfg->CTCommand.param[1];
544 			aacraid_release_command(cm);
545 		}
546 	}
547 
548 	return (0);
549 }
550 
551 /*
552  * Create a device to represent a new container
553  */
554 static void
555 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
556 		  u_int32_t uid)
557 {
558 	struct aac_container *co;
559 
560 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
561 
562 	/*
563 	 * Check container volume type for validity.  Note that many of
564 	 * the possible types may never show up.
565 	 */
566 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
567 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
568 		       M_NOWAIT | M_ZERO);
569 		if (co == NULL) {
570 			panic("Out of memory?!");
571 		}
572 
573 		co->co_found = f;
574 		bcopy(&mir->MntTable[0], &co->co_mntobj,
575 		      sizeof(struct aac_mntobj));
576 		co->co_uid = uid;
577 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
578 	}
579 }
580 
581 /*
582  * Allocate resources associated with (sc)
583  */
584 static int
585 aac_alloc(struct aac_softc *sc)
586 {
587 	bus_size_t maxsize;
588 
589 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
590 
591 	/*
592 	 * Create DMA tag for mapping buffers into controller-addressable space.
593 	 */
594 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
595 			       1, 0, 			/* algnmnt, boundary */
596 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
597 			       BUS_SPACE_MAXADDR :
598 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
599 			       BUS_SPACE_MAXADDR, 	/* highaddr */
600 			       NULL, NULL, 		/* filter, filterarg */
601 			       sc->aac_max_sectors << 9, /* maxsize */
602 			       sc->aac_sg_tablesize,	/* nsegments */
603 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
604 			       BUS_DMA_ALLOCNOW,	/* flags */
605 			       busdma_lock_mutex,	/* lockfunc */
606 			       &sc->aac_io_lock,	/* lockfuncarg */
607 			       &sc->aac_buffer_dmat)) {
608 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
609 		return (ENOMEM);
610 	}
611 
612 	/*
613 	 * Create DMA tag for mapping FIBs into controller-addressable space.
614 	 */
615 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
616 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
617 			sizeof(struct aac_fib_xporthdr) + 31);
618 	else
619 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
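	/*
	 * The extra 31 bytes per FIB leave room for the 32-byte alignment
	 * applied in aac_alloc_commands() ((fibphys + 31) & ~31); the TYPE1
	 * interface additionally reserves space for the transport header
	 * that precedes each aligned FIB.
	 */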
620 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
621 			       1, 0, 			/* algnmnt, boundary */
622 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
623 			       BUS_SPACE_MAXADDR_32BIT :
624 			       0x7fffffff,		/* lowaddr */
625 			       BUS_SPACE_MAXADDR, 	/* highaddr */
626 			       NULL, NULL, 		/* filter, filterarg */
627 			       maxsize,  		/* maxsize */
628 			       1,			/* nsegments */
629 			       maxsize,			/* maxsegsize */
630 			       0,			/* flags */
631 			       NULL, NULL,		/* No locking needed */
632 			       &sc->aac_fib_dmat)) {
633 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
634 		return (ENOMEM);
635 	}
636 
637 	/*
638 	 * Create DMA tag for the common structure and allocate it.
639 	 */
640 	maxsize = sizeof(struct aac_common);
641 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
642 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
643 			       1, 0,			/* algnmnt, boundary */
644 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
645 			       BUS_SPACE_MAXADDR_32BIT :
646 			       0x7fffffff,		/* lowaddr */
647 			       BUS_SPACE_MAXADDR, 	/* highaddr */
648 			       NULL, NULL, 		/* filter, filterarg */
649 			       maxsize, 		/* maxsize */
650 			       1,			/* nsegments */
651 			       maxsize,			/* maxsegsize */
652 			       0,			/* flags */
653 			       NULL, NULL,		/* No locking needed */
654 			       &sc->aac_common_dmat)) {
655 		device_printf(sc->aac_dev,
656 			      "can't allocate common structure DMA tag\n");
657 		return (ENOMEM);
658 	}
659 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
660 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
661 		device_printf(sc->aac_dev, "can't allocate common structure\n");
662 		return (ENOMEM);
663 	}
664 
665 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
666 			sc->aac_common, maxsize,
667 			aac_common_map, sc, 0);
668 	bzero(sc->aac_common, maxsize);
669 
670 	/* Allocate some FIBs and associated command structs */
671 	TAILQ_INIT(&sc->aac_fibmap_tqh);
672 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
673 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
674 	mtx_lock(&sc->aac_io_lock);
675 	while (sc->total_fibs < sc->aac_max_fibs) {
676 		if (aac_alloc_commands(sc) != 0)
677 			break;
678 	}
679 	mtx_unlock(&sc->aac_io_lock);
680 	if (sc->total_fibs == 0)
681 		return (ENOMEM);
682 
683 	return (0);
684 }
685 
686 /*
687  * Free all of the resources associated with (sc)
688  *
689  * Should not be called if the controller is active.
690  */
691 void
692 aacraid_free(struct aac_softc *sc)
693 {
694 	int i;
695 
696 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
697 
698 	/* remove the control device */
699 	if (sc->aac_dev_t != NULL)
700 		destroy_dev(sc->aac_dev_t);
701 
702 	/* throw away any FIB buffers, discard the FIB DMA tag */
703 	aac_free_commands(sc);
704 	if (sc->aac_fib_dmat)
705 		bus_dma_tag_destroy(sc->aac_fib_dmat);
706 
707 	free(sc->aac_commands, M_AACRAIDBUF);
708 
709 	/* destroy the common area */
710 	if (sc->aac_common) {
711 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
712 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
713 				sc->aac_common_dmamap);
714 	}
715 	if (sc->aac_common_dmat)
716 		bus_dma_tag_destroy(sc->aac_common_dmat);
717 
718 	/* disconnect the interrupt handler */
719 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
720 		if (sc->aac_intr[i])
721 			bus_teardown_intr(sc->aac_dev,
722 				sc->aac_irq[i], sc->aac_intr[i]);
723 		if (sc->aac_irq[i])
724 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
725 				sc->aac_irq_rid[i], sc->aac_irq[i]);
726 		else
727 			break;
728 	}
729 	if (sc->msi_enabled)
730 		pci_release_msi(sc->aac_dev);
731 
732 	/* destroy data-transfer DMA tag */
733 	if (sc->aac_buffer_dmat)
734 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
735 
736 	/* destroy the parent DMA tag */
737 	if (sc->aac_parent_dmat)
738 		bus_dma_tag_destroy(sc->aac_parent_dmat);
739 
740 	/* release the register window mapping */
741 	if (sc->aac_regs_res0 != NULL)
742 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
743 				     sc->aac_regs_rid0, sc->aac_regs_res0);
744 	if (sc->aac_regs_res1 != NULL)
745 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
746 				     sc->aac_regs_rid1, sc->aac_regs_res1);
747 }
748 
749 /*
750  * Disconnect from the controller completely, in preparation for unload.
751  */
752 int
753 aacraid_detach(device_t dev)
754 {
755 	struct aac_softc *sc;
756 	struct aac_container *co;
757 	struct aac_sim	*sim;
758 	int error;
759 
760 	sc = device_get_softc(dev);
761 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
762 
763 #if __FreeBSD_version >= 800000
764 	callout_drain(&sc->aac_daemontime);
765 #else
766 	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
767 #endif
768 	/* Remove the child containers */
769 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
770 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
771 		free(co, M_AACRAIDBUF);
772 	}
773 
774 	/* Remove the CAM SIMs */
775 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
776 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
777 		error = device_delete_child(dev, sim->sim_dev);
778 		if (error)
779 			return (error);
780 		free(sim, M_AACRAIDBUF);
781 	}
782 
783 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
784 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
785 		wakeup(sc->aifthread);
786 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
787 	}
788 
789 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
790 		panic("Cannot shutdown AIF thread");
791 
792 	if ((error = aacraid_shutdown(dev)))
793 		return(error);
794 
795 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
796 
797 	aacraid_free(sc);
798 
799 	mtx_destroy(&sc->aac_io_lock);
800 
801 	return(0);
802 }
803 
804 /*
805  * Bring the controller down to a dormant state and detach all child devices.
806  *
807  * This function is called before detach or system shutdown.
808  *
809  * Note that we can assume that the bioq on the controller is empty, as we won't
810  * allow shutdown if any device is open.
811  */
812 int
813 aacraid_shutdown(device_t dev)
814 {
815 	struct aac_softc *sc;
816 	struct aac_fib *fib;
817 	struct aac_close_command *cc;
818 
819 	sc = device_get_softc(dev);
820 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
821 
822 	sc->aac_state |= AAC_STATE_SUSPEND;
823 
824 	/*
825 	 * Send a Container shutdown followed by a HostShutdown FIB to the
826 	 * controller to convince it that we don't want to talk to it anymore.
827 	 * We've been closed and all I/O completed already
828 	 */
829 	device_printf(sc->aac_dev, "shutting down controller...");
830 
831 	mtx_lock(&sc->aac_io_lock);
832 	aac_alloc_sync_fib(sc, &fib);
833 	cc = (struct aac_close_command *)&fib->data[0];
834 
835 	bzero(cc, sizeof(struct aac_close_command));
836 	cc->Command = VM_CloseAll;
837 	cc->ContainerId = 0xfffffffe;
838 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
839 	    sizeof(struct aac_close_command)))
840 		printf("FAILED.\n");
841 	else
842 		printf("done\n");
843 
844 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
845 	aac_release_sync_fib(sc);
846 	mtx_unlock(&sc->aac_io_lock);
847 
848 	return(0);
849 }
850 
851 /*
852  * Bring the controller to a quiescent state, ready for system suspend.
853  */
854 int
855 aacraid_suspend(device_t dev)
856 {
857 	struct aac_softc *sc;
858 
859 	sc = device_get_softc(dev);
860 
861 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
862 	sc->aac_state |= AAC_STATE_SUSPEND;
863 
864 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
865 	return(0);
866 }
867 
868 /*
869  * Bring the controller back to a state ready for operation.
870  */
871 int
872 aacraid_resume(device_t dev)
873 {
874 	struct aac_softc *sc;
875 
876 	sc = device_get_softc(dev);
877 
878 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
879 	sc->aac_state &= ~AAC_STATE_SUSPEND;
880 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
881 	return(0);
882 }
883 
884 /*
885  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
886  */
887 void
888 aacraid_new_intr_type1(void *arg)
889 {
890 	struct aac_msix_ctx *ctx;
891 	struct aac_softc *sc;
892 	int vector_no;
893 	struct aac_command *cm;
894 	struct aac_fib *fib;
895 	u_int32_t bellbits, bellbits_shifted, index, handle;
896 	int isFastResponse, isAif, noMoreAif, mode;
897 
898 	ctx = (struct aac_msix_ctx *)arg;
899 	sc = ctx->sc;
900 	vector_no = ctx->vector_no;
901 
902 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
903 	mtx_lock(&sc->aac_io_lock);
904 
905 	if (sc->msi_enabled) {
906 		mode = AAC_INT_MODE_MSI;
907 		if (vector_no == 0) {
908 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
909 			if (bellbits & 0x40000)
910 				mode |= AAC_INT_MODE_AIF;
911 			else if (bellbits & 0x1000)
912 				mode |= AAC_INT_MODE_SYNC;
913 		}
914 	} else {
915 		mode = AAC_INT_MODE_INTX;
916 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
917 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
918 			bellbits = AAC_DB_RESPONSE_SENT_NS;
919 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
920 		} else {
921 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
922 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
923 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
924 				mode |= AAC_INT_MODE_AIF;
925 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
926 				mode |= AAC_INT_MODE_SYNC;
927 		}
928 		/* ODR readback, Prep #238630 */
929 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
930 	}
931 
932 	if (mode & AAC_INT_MODE_SYNC) {
933 		if (sc->aac_sync_cm) {
934 			cm = sc->aac_sync_cm;
935 			cm->cm_flags |= AAC_CMD_COMPLETED;
936 			/* is there a completion handler? */
937 			if (cm->cm_complete != NULL) {
938 				cm->cm_complete(cm);
939 			} else {
940 				/* assume that someone is sleeping on this command */
941 				wakeup(cm);
942 			}
943 			sc->flags &= ~AAC_QUEUE_FRZN;
944 			sc->aac_sync_cm = NULL;
945 		}
946 		mode = 0;
947 	}
948 
949 	if (mode & AAC_INT_MODE_AIF) {
950 		if (mode & AAC_INT_MODE_INTX) {
951 			aac_request_aif(sc);
952 			mode = 0;
953 		}
954 	}
955 
956 	if (mode) {
957 		/* handle async. status */
958 		index = sc->aac_host_rrq_idx[vector_no];
959 		for (;;) {
960 			isFastResponse = isAif = noMoreAif = 0;
961 			/* remove toggle bit (31) */
962 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
963 			/* check fast response bit (30) */
964 			if (handle & 0x40000000)
965 				isFastResponse = 1;
966 			/* check AIF bit (23) */
967 			else if (handle & 0x00800000)
968 				isAif = TRUE;
969 			handle &= 0x0000ffff;
970 			if (handle == 0)
971 				break;
972 
973 			cm = sc->aac_commands + (handle - 1);
974 			fib = cm->cm_fib;
975 			sc->aac_rrq_outstanding[vector_no]--;
976 			if (isAif) {
977 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
978 				if (!noMoreAif)
979 					aac_handle_aif(sc, fib);
980 				aac_remove_busy(cm);
981 				aacraid_release_command(cm);
982 			} else {
983 				if (isFastResponse) {
984 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
985 					*((u_int32_t *)(fib->data)) = ST_OK;
986 					cm->cm_flags |= AAC_CMD_FASTRESP;
987 				}
988 				aac_remove_busy(cm);
989 				aac_unmap_command(cm);
990 				cm->cm_flags |= AAC_CMD_COMPLETED;
991 
992 				/* is there a completion handler? */
993 				if (cm->cm_complete != NULL) {
994 					cm->cm_complete(cm);
995 				} else {
996 					/* assume that someone is sleeping on this command */
997 					wakeup(cm);
998 				}
999 				sc->flags &= ~AAC_QUEUE_FRZN;
1000 			}
1001 
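			/*
			 * Retire the RRQ slot and advance the per-vector index; each
			 * MSI-X vector owns a window of aac_vector_cap entries in
			 * ac_host_rrq and wraps back to the start of its own window.
			 */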
1002 			sc->aac_common->ac_host_rrq[index++] = 0;
1003 			if (index == (vector_no + 1) * sc->aac_vector_cap)
1004 				index = vector_no * sc->aac_vector_cap;
1005 			sc->aac_host_rrq_idx[vector_no] = index;
1006 
1007 			if ((isAif && !noMoreAif) || sc->aif_pending)
1008 				aac_request_aif(sc);
1009 		}
1010 	}
1011 
1012 	if (mode & AAC_INT_MODE_AIF) {
1013 		aac_request_aif(sc);
1014 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1015 		mode = 0;
1016 	}
1017 
1018 	/* see if we can start some more I/O */
1019 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1020 		aacraid_startio(sc);
1021 	mtx_unlock(&sc->aac_io_lock);
1022 }
1023 
1024 /*
1025  * Handle notification of one or more FIBs coming from the controller.
1026  */
1027 static void
1028 aac_command_thread(struct aac_softc *sc)
1029 {
1030 	int retval;
1031 
1032 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1033 
1034 	mtx_lock(&sc->aac_io_lock);
1035 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1036 
1037 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1038 
1039 		retval = 0;
1040 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1041 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1042 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1043 
1044 		/*
1045 		 * First see if any FIBs need to be allocated.  This needs
1046 		 * to be called without the driver lock because contigmalloc
1047 		 * will grab Giant, and would result in an LOR.
1048 		 */
1049 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1050 			aac_alloc_commands(sc);
1051 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1052 			aacraid_startio(sc);
1053 		}
1054 
1055 		/*
1056 		 * While we're here, check to see if any commands are stuck.
1057 		 * This is pretty low-priority, so it's ok if it doesn't
1058 		 * always fire.
1059 		 */
1060 		if (retval == EWOULDBLOCK)
1061 			aac_timeout(sc);
1062 
1063 		/* Check the hardware printf message buffer */
1064 		if (sc->aac_common->ac_printf[0] != 0)
1065 			aac_print_printf(sc);
1066 	}
1067 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1068 	mtx_unlock(&sc->aac_io_lock);
1069 	wakeup(sc->aac_dev);
1070 
1071 	aac_kthread_exit(0);
1072 }
1073 
1074 /*
1075  * Submit a command to the controller, return when it completes.
1076  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1077  *     be stuck here forever.  At the same time, signals are not caught
1078  *     because there is a risk that a signal could wakeup the sleep before
1079  *     the card has a chance to complete the command.  Since there is no way
1080  *     to cancel a command that is in progress, we can't protect against the
1081  *     card completing a command late and spamming the command and data
1082  *     memory.  So, we are held hostage until the command completes.
1083  */
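/*
 * Typical usage (a sketch, mirroring aac_get_container_info() above):
 *
 *	mtx_lock(&sc->aac_io_lock);
 *	if (aacraid_alloc_command(sc, &cm) == 0) {
 *		fib = cm->cm_fib;
 *		... fill in fib->Header (Size, XferState, Command) and payload ...
 *		if (aacraid_wait_command(cm) == 0)
 *			... read the response from fib->data ...
 *		aacraid_release_command(cm);
 *	}
 *	mtx_unlock(&sc->aac_io_lock);
 *
 * Completion arrives via the interrupt handler's wakeup(cm).
 */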
1084 int
1085 aacraid_wait_command(struct aac_command *cm)
1086 {
1087 	struct aac_softc *sc;
1088 	int error;
1089 
1090 	sc = cm->cm_sc;
1091 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1092 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1093 
1094 	/* Put the command on the ready queue and get things going */
1095 	aac_enqueue_ready(cm);
1096 	aacraid_startio(sc);
1097 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1098 	return(error);
1099 }
1100 
1101 /*
1102  * Command Buffer Management
1103  */
1104 
1105 /*
1106  * Allocate a command.
1107  */
1108 int
1109 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1110 {
1111 	struct aac_command *cm;
1112 
1113 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1114 
1115 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1116 		if (sc->total_fibs < sc->aac_max_fibs) {
1117 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1118 			wakeup(sc->aifthread);
1119 		}
1120 		return (EBUSY);
1121 	}
1122 
1123 	*cmp = cm;
1124 	return(0);
1125 }
1126 
1127 /*
1128  * Release a command back to the freelist.
1129  */
1130 void
1131 aacraid_release_command(struct aac_command *cm)
1132 {
1133 	struct aac_event *event;
1134 	struct aac_softc *sc;
1135 
1136 	sc = cm->cm_sc;
1137 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1138 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1139 
1140 	/* (re)initialize the command/FIB */
1141 	cm->cm_sgtable = NULL;
1142 	cm->cm_flags = 0;
1143 	cm->cm_complete = NULL;
1144 	cm->cm_ccb = NULL;
1145 	cm->cm_passthr_dmat = 0;
1146 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1147 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1148 	cm->cm_fib->Header.Unused = 0;
1149 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1150 
1151 	/*
1152 	 * These are duplicated in aac_start to cover the case where an
1153 	 * intermediate stage may have destroyed them.  They're left
1154 	 * initialized here for debugging purposes only.
1155 	 */
1156 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1157 	cm->cm_fib->Header.Handle = 0;
1158 
1159 	aac_enqueue_free(cm);
1160 
1161 	/*
1162 	 * Dequeue all events so that there's no risk of events getting
1163 	 * stranded.
1164 	 */
1165 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1166 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1167 		event->ev_callback(sc, event, event->ev_arg);
1168 	}
1169 }
1170 
1171 /*
1172  * Map helper for command/FIB allocation.
1173  */
1174 static void
1175 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1176 {
1177 	uint64_t	*fibphys;
1178 
1179 	fibphys = (uint64_t *)arg;
1180 
1181 	*fibphys = segs[0].ds_addr;
1182 }
1183 
1184 /*
1185  * Allocate and initialize commands/FIBs for this adapter.
1186  */
1187 static int
1188 aac_alloc_commands(struct aac_softc *sc)
1189 {
1190 	struct aac_command *cm;
1191 	struct aac_fibmap *fm;
1192 	uint64_t fibphys;
1193 	int i, error;
1194 	u_int32_t maxsize;
1195 
1196 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1197 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1198 
1199 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1200 		return (ENOMEM);
1201 
1202 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1203 	if (fm == NULL)
1204 		return (ENOMEM);
1205 
1206 	mtx_unlock(&sc->aac_io_lock);
1207 	/* allocate the FIBs in DMAable memory and load them */
1208 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1209 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1210 		device_printf(sc->aac_dev,
1211 			      "Not enough contiguous memory available.\n");
1212 		free(fm, M_AACRAIDBUF);
1213 		mtx_lock(&sc->aac_io_lock);
1214 		return (ENOMEM);
1215 	}
1216 
1217 	maxsize = sc->aac_max_fib_size + 31;
1218 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1219 		maxsize += sizeof(struct aac_fib_xporthdr);
1220 	/* Ignore errors since this doesn't bounce */
1221 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1222 			      sc->aac_max_fibs_alloc * maxsize,
1223 			      aac_map_command_helper, &fibphys, 0);
1224 	mtx_lock(&sc->aac_io_lock);
1225 
1226 	/* initialize constant fields in the command structure */
1227 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1228 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1229 		cm = sc->aac_commands + sc->total_fibs;
1230 		fm->aac_commands = cm;
1231 		cm->cm_sc = sc;
1232 		cm->cm_fib = (struct aac_fib *)
1233 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1234 		cm->cm_fibphys = fibphys + i * maxsize;
1235 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1236 			u_int64_t fibphys_aligned;
1237 			fibphys_aligned =
1238 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1239 			cm->cm_fib = (struct aac_fib *)
1240 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1241 			cm->cm_fibphys = fibphys_aligned;
1242 		} else {
1243 			u_int64_t fibphys_aligned;
1244 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1245 			cm->cm_fib = (struct aac_fib *)
1246 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1247 			cm->cm_fibphys = fibphys_aligned;
1248 		}
1249 		cm->cm_index = sc->total_fibs;
1250 
1251 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1252 					       &cm->cm_datamap)) != 0)
1253 			break;
1254 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1255 			aacraid_release_command(cm);
1256 		sc->total_fibs++;
1257 	}
1258 
1259 	if (i > 0) {
1260 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1261 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1262 		return (0);
1263 	}
1264 
1265 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1266 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1267 	free(fm, M_AACRAIDBUF);
1268 	return (ENOMEM);
1269 }
1270 
1271 /*
1272  * Free FIBs owned by this adapter.
1273  */
1274 static void
1275 aac_free_commands(struct aac_softc *sc)
1276 {
1277 	struct aac_fibmap *fm;
1278 	struct aac_command *cm;
1279 	int i;
1280 
1281 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1282 
1283 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1284 
1285 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1286 		/*
1287 		 * We check against total_fibs to handle partially
1288 		 * allocated blocks.
1289 		 */
1290 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1291 			cm = fm->aac_commands + i;
1292 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1293 		}
1294 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1295 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1296 		free(fm, M_AACRAIDBUF);
1297 	}
1298 }
1299 
1300 /*
1301  * Command-mapping helper function - populate this command's s/g table.
1302  */
1303 void
1304 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1305 {
1306 	struct aac_softc *sc;
1307 	struct aac_command *cm;
1308 	struct aac_fib *fib;
1309 	int i;
1310 
1311 	cm = (struct aac_command *)arg;
1312 	sc = cm->cm_sc;
1313 	fib = cm->cm_fib;
1314 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1315 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1316 
1317 	/* copy into the FIB */
1318 	if (cm->cm_sgtable != NULL) {
1319 		if (fib->Header.Command == RawIo2) {
1320 			struct aac_raw_io2 *raw;
1321 			struct aac_sge_ieee1212 *sg;
1322 			u_int32_t min_size = PAGE_SIZE, cur_size;
1323 			int conformable = TRUE;
1324 
1325 			raw = (struct aac_raw_io2 *)&fib->data[0];
1326 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1327 			raw->sgeCnt = nseg;
1328 
1329 			for (i = 0; i < nseg; i++) {
1330 				cur_size = segs[i].ds_len;
1331 				sg[i].addrHigh = 0;
1332 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1333 				sg[i].length = cur_size;
1334 				sg[i].flags = 0;
1335 				if (i == 0) {
1336 					raw->sgeFirstSize = cur_size;
1337 				} else if (i == 1) {
1338 					raw->sgeNominalSize = cur_size;
1339 					min_size = cur_size;
1340 				} else if ((i+1) < nseg &&
1341 					cur_size != raw->sgeNominalSize) {
1342 					conformable = FALSE;
1343 					if (cur_size < min_size)
1344 						min_size = cur_size;
1345 				}
1346 			}
1347 
1348 			/* not conformable: evaluate required sg elements */
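			/*
			 * Find the largest multiple of PAGE_SIZE that evenly divides
			 * every middle segment and count how many equal-sized elements
			 * a rebuilt list would need; if that fits in the s/g table (and
			 * conversion isn't disabled via the 0x4 hint flag), let
			 * aac_convert_sgraw2() coalesce the list into a conformant one.
			 */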
1349 			if (!conformable) {
1350 				int j, err_found, nseg_new = nseg;
1351 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1352 					err_found = FALSE;
1353 					nseg_new = 2;
1354 					for (j = 1; j < nseg - 1; ++j) {
1355 						if (sg[j].length % (i*PAGE_SIZE)) {
1356 							err_found = TRUE;
1357 							break;
1358 						}
1359 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1360 					}
1361 					if (!err_found)
1362 						break;
1363 				}
1364 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1365 					!(sc->hint_flags & 4))
1366 					nseg = aac_convert_sgraw2(sc,
1367 						raw, i, nseg, nseg_new);
1368 			} else {
1369 				raw->flags |= RIO2_SGL_CONFORMANT;
1370 			}
1371 
1372 			/* update the FIB size for the s/g count */
1373 			fib->Header.Size += nseg *
1374 				sizeof(struct aac_sge_ieee1212);
1375 
1376 		} else if (fib->Header.Command == RawIo) {
1377 			struct aac_sg_tableraw *sg;
1378 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1379 			sg->SgCount = nseg;
1380 			for (i = 0; i < nseg; i++) {
1381 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1382 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1383 				sg->SgEntryRaw[i].Next = 0;
1384 				sg->SgEntryRaw[i].Prev = 0;
1385 				sg->SgEntryRaw[i].Flags = 0;
1386 			}
1387 			/* update the FIB size for the s/g count */
1388 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1389 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1390 			struct aac_sg_table *sg;
1391 			sg = cm->cm_sgtable;
1392 			sg->SgCount = nseg;
1393 			for (i = 0; i < nseg; i++) {
1394 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1395 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1396 			}
1397 			/* update the FIB size for the s/g count */
1398 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1399 		} else {
1400 			struct aac_sg_table64 *sg;
1401 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1402 			sg->SgCount = nseg;
1403 			for (i = 0; i < nseg; i++) {
1404 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1405 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1406 			}
1407 			/* update the FIB size for the s/g count */
1408 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1409 		}
1410 	}
1411 
1412 	/* Fix up the address values in the FIB.  Use the command array index
1413 	 * instead of a pointer since these fields are only 32 bits.  Shift
1414 	 * the SenderFibAddress over to make room for the fast response bit
1415 	 * and for the AIF bit.
1416 	 */
1417 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1418 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1419 
1420 	/* save a pointer to the command for speedy reverse-lookup */
1421 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
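	/*
	 * The interrupt handler reverses this mapping: it masks the handle it
	 * reads from the host RRQ with 0x0000ffff and indexes
	 * sc->aac_commands + (handle - 1) to recover the command.
	 */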
1422 
1423 	if (cm->cm_passthr_dmat == 0) {
1424 		if (cm->cm_flags & AAC_CMD_DATAIN)
1425 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1426 							BUS_DMASYNC_PREREAD);
1427 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1428 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1429 							BUS_DMASYNC_PREWRITE);
1430 	}
1431 
1432 	cm->cm_flags |= AAC_CMD_MAPPED;
1433 
1434 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1435 		u_int32_t wait = 0;
1436 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1437 	} else if (cm->cm_flags & AAC_CMD_WAIT) {
1438 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1439 	} else {
1440 		int count = 10000000L;
1441 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1442 			if (--count == 0) {
1443 				aac_unmap_command(cm);
1444 				sc->flags |= AAC_QUEUE_FRZN;
1445 				aac_requeue_ready(cm);
1446 			}
1447 			DELAY(5);			/* wait 5 usec. */
1448 		}
1449 	}
1450 }
1451 
1452 
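/*
 * Convert a non-conformant RawIo2 s/g list into a conformant one: split each
 * middle element into chunks of (pages * PAGE_SIZE), keep the first and last
 * elements as-is, and mark the list RIO2_SGL_CONFORMANT with that nominal
 * element size.  Returns the new element count, or the old one if the
 * temporary table cannot be allocated.
 */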
1453 static int
1454 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1455 				   int pages, int nseg, int nseg_new)
1456 {
1457 	struct aac_sge_ieee1212 *sge;
1458 	int i, j, pos;
1459 	u_int32_t addr_low;
1460 
1461 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1462 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1463 	if (sge == NULL)
1464 		return nseg;
1465 
1466 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1467 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1468 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1469 			sge[pos].addrLow = addr_low;
1470 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1471 			if (addr_low < raw->sge[i].addrLow)
1472 				sge[pos].addrHigh++;
1473 			sge[pos].length = pages * PAGE_SIZE;
1474 			sge[pos].flags = 0;
1475 			pos++;
1476 		}
1477 	}
1478 	sge[pos] = raw->sge[nseg-1];
1479 	for (i = 1; i < nseg_new; ++i)
1480 		raw->sge[i] = sge[i];
1481 
1482 	free(sge, M_AACRAIDBUF);
1483 	raw->sgeCnt = nseg_new;
1484 	raw->flags |= RIO2_SGL_CONFORMANT;
1485 	raw->sgeNominalSize = pages * PAGE_SIZE;
1486 	return nseg_new;
1487 }
1488 
1489 
1490 /*
1491  * Unmap a command from controller-visible space.
1492  */
1493 static void
1494 aac_unmap_command(struct aac_command *cm)
1495 {
1496 	struct aac_softc *sc;
1497 
1498 	sc = cm->cm_sc;
1499 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1500 
1501 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1502 		return;
1503 
1504 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1505 		if (cm->cm_flags & AAC_CMD_DATAIN)
1506 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1507 					BUS_DMASYNC_POSTREAD);
1508 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1509 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1510 					BUS_DMASYNC_POSTWRITE);
1511 
1512 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1513 	}
1514 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1515 }
1516 
1517 /*
1518  * Hardware Interface
1519  */
1520 
1521 /*
1522  * Initialize the adapter.
1523  */
1524 static void
1525 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1526 {
1527 	struct aac_softc *sc;
1528 
1529 	sc = (struct aac_softc *)arg;
1530 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1531 
1532 	sc->aac_common_busaddr = segs[0].ds_addr;
1533 }
1534 
1535 static int
1536 aac_check_firmware(struct aac_softc *sc)
1537 {
1538 	u_int32_t code, major, minor, maxsize;
1539 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1540 	time_t then;
1541 
1542 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1543 
1544 	/* check if flash update is running */
1545 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1546 		then = time_uptime;
1547 		do {
1548 			code = AAC_GET_FWSTATUS(sc);
1549 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1550 				device_printf(sc->aac_dev,
1551 						  "FATAL: controller not coming ready, "
1552 						   "status %x\n", code);
1553 				return(ENXIO);
1554 			}
1555 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1556 		/*
1557 		 * Delay 10 seconds.  The firmware is performing a soft reset right
1558 		 * now, so do not read the scratch pad register during this time.
1559 		 */
1560 		waitCount = 10 * 10000;
1561 		while (waitCount) {
1562 			DELAY(100);		/* delay 100 microseconds */
1563 			waitCount--;
1564 		}
1565 	}
1566 
1567 	/*
1568 	 * Wait for the adapter to come ready.
1569 	 */
1570 	then = time_uptime;
1571 	do {
1572 		code = AAC_GET_FWSTATUS(sc);
1573 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1574 			device_printf(sc->aac_dev,
1575 				      "FATAL: controller not coming ready, "
1576 					   "status %x\n", code);
1577 			return(ENXIO);
1578 		}
1579 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1580 
1581 	/*
1582 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1583 	 * firmware version 1.x are not compatible with this driver.
1584 	 */
1585 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1586 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1587 				     NULL, NULL)) {
1588 			device_printf(sc->aac_dev,
1589 				      "Error reading firmware version\n");
1590 			return (EIO);
1591 		}
1592 
1593 		/* These numbers are stored as ASCII! */
1594 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1595 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1596 		if (major == 1) {
1597 			device_printf(sc->aac_dev,
1598 			    "Firmware version %d.%d is not supported.\n",
1599 			    major, minor);
1600 			return (EINVAL);
1601 		}
1602 	}
1603 	/*
1604 	 * Retrieve the capabilities/supported options word so we know what
1605 	 * work-arounds to enable.  Some firmware revs don't support this
1606 	 * command.
1607 	 */
1608 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1609 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1610 			device_printf(sc->aac_dev,
1611 			     "RequestAdapterInfo failed\n");
1612 			return (EIO);
1613 		}
1614 	} else {
1615 		options = AAC_GET_MAILBOX(sc, 1);
1616 		atu_size = AAC_GET_MAILBOX(sc, 2);
1617 		sc->supported_options = options;
1618 
1619 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1620 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1621 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1622 		if (options & AAC_SUPPORTED_NONDASD)
1623 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1624 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1625 			&& (sizeof(bus_addr_t) > 4)
1626 			&& (sc->hint_flags & 0x1)) {
1627 			device_printf(sc->aac_dev,
1628 			    "Enabling 64-bit address support\n");
1629 			sc->flags |= AAC_FLAGS_SG_64BIT;
1630 		}
1631 		if (sc->aac_if.aif_send_command) {
1632 			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1633 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1634 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1635 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1636 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1637 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1638 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1639 		}
1640 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1641 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1642 	}
1643 
1644 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1645 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1646 		return (ENXIO);
1647 	}
1648 
1649 	if (sc->hint_flags & 2) {
1650 		device_printf(sc->aac_dev,
1651 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1652 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1653 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1654 		device_printf(sc->aac_dev,
1655 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1656 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1657 	}
1658 
1659 	/* Check for broken hardware that supports a lower number of commands */
1660 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1661 
1662 	/* Remap mem. resource, if required */
1663 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1664 		bus_release_resource(
1665 			sc->aac_dev, SYS_RES_MEMORY,
1666 			sc->aac_regs_rid0, sc->aac_regs_res0);
1667 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1668 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1669 			atu_size, RF_ACTIVE);
1670 		if (sc->aac_regs_res0 == NULL) {
1671 			sc->aac_regs_res0 = bus_alloc_resource_any(
1672 				sc->aac_dev, SYS_RES_MEMORY,
1673 				&sc->aac_regs_rid0, RF_ACTIVE);
1674 			if (sc->aac_regs_res0 == NULL) {
1675 				device_printf(sc->aac_dev,
1676 					"couldn't allocate register window\n");
1677 				return (ENXIO);
1678 			}
1679 		}
1680 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1681 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1682 	}
1683 
1684 	/* Read preferred settings */
1685 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1686 	sc->aac_max_sectors = 128;				/* 64KB */
1687 	sc->aac_max_aif = 1;
1688 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1689 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1690 		 - sizeof(struct aac_blockwrite64))
1691 		 / sizeof(struct aac_sg_entry64);
1692 	else
1693 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1694 		 - sizeof(struct aac_blockwrite))
1695 		 / sizeof(struct aac_sg_entry);
1696 
1697 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1698 		options = AAC_GET_MAILBOX(sc, 1);
1699 		sc->aac_max_fib_size = (options & 0xFFFF);
1700 		sc->aac_max_sectors = (options >> 16) << 1;
1701 		options = AAC_GET_MAILBOX(sc, 2);
1702 		sc->aac_sg_tablesize = (options >> 16);
1703 		options = AAC_GET_MAILBOX(sc, 3);
1704 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1705 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1706 			sc->aac_max_fibs = (options & 0xFFFF);
1707 		options = AAC_GET_MAILBOX(sc, 4);
1708 		sc->aac_max_aif = (options & 0xFFFF);
1709 		options = AAC_GET_MAILBOX(sc, 5);
1710 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1711 	}
1712 
1713 	maxsize = sc->aac_max_fib_size + 31;
1714 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1715 		maxsize += sizeof(struct aac_fib_xporthdr);
1716 	if (maxsize > PAGE_SIZE) {
1717 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1718 		maxsize = PAGE_SIZE;
1719 	}
1720 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1721 
1722 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1723 		sc->flags |= AAC_FLAGS_RAW_IO;
1724 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1725 	}
1726 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1727 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1728 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1729 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1730 	}
1731 
1732 #ifdef AACRAID_DEBUG
1733 	aacraid_get_fw_debug_buffer(sc);
1734 #endif
1735 	return (0);
1736 }
1737 
1738 static int
1739 aac_init(struct aac_softc *sc)
1740 {
1741 	struct aac_adapter_init	*ip;
1742 	int i, error;
1743 
1744 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1745 
1746 	/* reset rrq index */
1747 	sc->aac_fibs_pushed_no = 0;
1748 	for (i = 0; i < sc->aac_max_msix; i++)
1749 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1750 
1751 	/*
1752 	 * Fill in the init structure.  This tells the adapter about the
1753 	 * physical location of various important shared data structures.
1754 	 */
1755 	ip = &sc->aac_common->ac_init;
1756 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1757 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1758 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1759 		sc->flags |= AAC_FLAGS_RAW_IO;
1760 	}
1761 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1762 
1763 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1764 					 offsetof(struct aac_common, ac_fibs);
1765 	ip->AdapterFibsVirtualAddress = 0;
1766 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1767 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1768 
1769 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1770 				  offsetof(struct aac_common, ac_printf);
1771 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1772 
1773 	/*
1774 	 * The adapter assumes that pages are 4K in size, except on some
1775 	 * broken firmware versions that do the page->byte conversion twice,
1776 	 * effectively treating this value as if it were in 16MB (2^24) units.
1777 	 * Round up, since the granularity is so coarse.
1778 	 */
1779 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1780 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1781 		ip->HostPhysMemPages =
1782 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1783 	}
1784 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1785 
1786 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1787 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1788 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1789 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1790 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1791 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1792 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1793 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1794 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1795 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1796 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1797 	}
1798 	ip->MaxNumAif = sc->aac_max_aif;
1799 	ip->HostRRQ_AddrLow =
1800 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1801 	/* always 32-bit address */
1802 	ip->HostRRQ_AddrHigh = 0;
1803 
1804 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1805 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1806 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1807 		device_printf(sc->aac_dev, "Power Management enabled\n");
1808 	}
1809 
1810 	ip->MaxIoCommands = sc->aac_max_fibs;
1811 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1812 	ip->MaxFibSize = sc->aac_max_fib_size;
1813 
1814 	/*
1815 	 * Do controller-type-specific initialisation
1816 	 */
1817 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1818 
1819 	/*
1820 	 * Give the init structure to the controller.
1821 	 */
1822 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1823 			     sc->aac_common_busaddr +
1824 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1825 			     NULL, NULL)) {
1826 		device_printf(sc->aac_dev,
1827 			      "error establishing init structure\n");
1828 		error = EIO;
1829 		goto out;
1830 	}
1831 
1832 	/*
1833 	 * Check configuration issues
1834 	 */
1835 	if ((error = aac_check_config(sc)) != 0)
1836 		goto out;
1837 
1838 	error = 0;
1839 out:
1840 	return(error);
1841 }
1842 
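/*
 * Select the interrupt mode (MSI-X, MSI or legacy INTx) and work out how
 * many vectors and how many outstanding FIBs per vector can be used.
 */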
1843 static void
1844 aac_define_int_mode(struct aac_softc *sc)
1845 {
1846 	device_t dev;
1847 	int cap, msi_count, error = 0;
1848 	uint32_t val;
1849 
1850 	dev = sc->aac_dev;
1851 
1852 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1853 	if (sc->aac_max_msix == 0) {
1854 		sc->aac_max_msix = 1;
1855 		sc->aac_vector_cap = sc->aac_max_fibs;
1856 		return;
1857 	}
1858 
1859 	/* OS capability */
1860 	msi_count = pci_msix_count(dev);
1861 	if (msi_count > AAC_MAX_MSIX)
1862 		msi_count = AAC_MAX_MSIX;
1863 	if (msi_count > sc->aac_max_msix)
1864 		msi_count = sc->aac_max_msix;
1865 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1866 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1867 				   "will try MSI\n", msi_count, error);
1868 		pci_release_msi(dev);
1869 	} else {
1870 		sc->msi_enabled = TRUE;
1871 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1872 			msi_count);
1873 	}
1874 
1875 	if (!sc->msi_enabled) {
1876 		msi_count = 1;
1877 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1878 			device_printf(dev, "alloc msi failed - err=%d; "
1879 				           "will use INTx\n", error);
1880 			pci_release_msi(dev);
1881 		} else {
1882 			sc->msi_enabled = TRUE;
1883 			device_printf(dev, "using MSI interrupts\n");
1884 		}
1885 	}
1886 
1887 	if (sc->msi_enabled) {
1888 		/* now read controller capability from PCI config. space */
1889 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1890 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1891 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1892 			pci_release_msi(dev);
1893 			sc->msi_enabled = FALSE;
1894 		}
1895 	}
1896 
1897 	if (!sc->msi_enabled) {
1898 		device_printf(dev, "using legacy interrupts\n");
1899 		sc->aac_max_msix = 1;
1900 	} else {
1901 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1902 		if (sc->aac_max_msix > msi_count)
1903 			sc->aac_max_msix = msi_count;
1904 	}
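	/* Distribute the outstanding FIB budget evenly across the vectors */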
1905 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1906 
1907 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1908 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1909 }
1910 
1911 static int
1912 aac_find_pci_capability(struct aac_softc *sc, int cap)
1913 {
1914 	device_t dev;
1915 	uint32_t status;
1916 	uint8_t ptr;
1917 
1918 	dev = sc->aac_dev;
1919 
1920 	status = pci_read_config(dev, PCIR_STATUS, 2);
1921 	if (!(status & PCIM_STATUS_CAPPRESENT))
1922 		return (0);
1923 
1924 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1925 	switch (status & PCIM_HDRTYPE) {
1926 	case 0:
1927 	case 1:
1928 		ptr = PCIR_CAP_PTR;
1929 		break;
1930 	case 2:
1931 		ptr = PCIR_CAP_PTR_2;
1932 		break;
1933 	default:
1934 		return (0);
1935 		break;
1936 	}
1937 	ptr = pci_read_config(dev, ptr, 1);
1938 
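	/* Walk the capability list until we find the requested capability ID */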
1939 	while (ptr != 0) {
1940 		int next, val;
1941 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1942 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1943 		if (val == cap)
1944 			return (ptr);
1945 		ptr = next;
1946 	}
1947 
1948 	return (0);
1949 }
1950 
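/*
 * Allocate and hook up one interrupt per MSI-X vector (or a single MSI/INTx
 * interrupt), binding each one to its per-vector context.
 */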
1951 static int
1952 aac_setup_intr(struct aac_softc *sc)
1953 {
1954 	int i, msi_count, rid;
1955 	struct resource *res;
1956 	void *tag;
1957 
1958 	msi_count = sc->aac_max_msix;
1959 	rid = (sc->msi_enabled ? 1:0);
1960 
1961 	for (i = 0; i < msi_count; i++, rid++) {
1962 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1963 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1964 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1965 			return (EINVAL);
1966 		}
1967 		sc->aac_irq_rid[i] = rid;
1968 		sc->aac_irq[i] = res;
1969 		if (aac_bus_setup_intr(sc->aac_dev, res,
1970 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1971 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1972 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1973 			return (EINVAL);
1974 		}
1975 		sc->aac_msix[i].vector_no = i;
1976 		sc->aac_msix[i].sc = sc;
1977 		sc->aac_intr[i] = tag;
1978 	}
1979 
1980 	return (0);
1981 }
1982 
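/*
 * Retrieve the controller's configuration status and, if the firmware reports
 * that an automatic commit is safe, commit the current configuration.
 */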
1983 static int
1984 aac_check_config(struct aac_softc *sc)
1985 {
1986 	struct aac_fib *fib;
1987 	struct aac_cnt_config *ccfg;
1988 	struct aac_cf_status_hdr *cf_shdr;
1989 	int rval;
1990 
1991 	mtx_lock(&sc->aac_io_lock);
1992 	aac_alloc_sync_fib(sc, &fib);
1993 
1994 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1995 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1996 	ccfg->Command = VM_ContainerConfig;
1997 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1998 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1999 
2000 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2001 		sizeof (struct aac_cnt_config));
2002 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2003 	if (rval == 0 && ccfg->Command == ST_OK &&
2004 		ccfg->CTCommand.param[0] == CT_OK) {
2005 		if (cf_shdr->action <= CFACT_PAUSE) {
2006 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2007 			ccfg->Command = VM_ContainerConfig;
2008 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2009 
2010 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2011 				sizeof (struct aac_cnt_config));
2012 			if (rval == 0 && ccfg->Command == ST_OK &&
2013 				ccfg->CTCommand.param[0] == CT_OK) {
2014 				/* successful completion */
2015 				rval = 0;
2016 			} else {
2017 				/* auto commit aborted due to error(s) */
2018 				rval = -2;
2019 			}
2020 		} else {
2021 			/* auto commit aborted due to adapter indicating
2022 			   config. issues too dangerous to auto commit  */
2023 			rval = -3;
2024 		}
2025 	} else {
2026 		/* error */
2027 		rval = -1;
2028 	}
2029 
2030 	aac_release_sync_fib(sc);
2031 	mtx_unlock(&sc->aac_io_lock);
2032 	return(rval);
2033 }
2034 
2035 /*
2036  * Send a synchronous command to the controller and wait for a result.
2037  * Indicate if the controller completed the command with an error status.
2038  */
2039 int
2040 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2041 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2042 		 u_int32_t *sp, u_int32_t *r1)
2043 {
2044 	time_t then;
2045 	u_int32_t status;
2046 
2047 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2048 
2049 	/* populate the mailbox */
2050 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2051 
2052 	/* ensure the sync command doorbell flag is cleared */
2053 	if (!sc->msi_enabled)
2054 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2055 
2056 	/* then set it to signal the adapter */
2057 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2058 
2059 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2060 		/* spin waiting for the command to complete */
2061 		then = time_uptime;
2062 		do {
2063 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2064 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2065 				return(EIO);
2066 			}
2067 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2068 
2069 		/* clear the completion flag */
2070 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2071 
2072 		/* get the command status */
2073 		status = AAC_GET_MAILBOX(sc, 0);
2074 		if (sp != NULL)
2075 			*sp = status;
2076 
2077 		/* return parameter */
2078 		if (r1 != NULL)
2079 			*r1 = AAC_GET_MAILBOX(sc, 1);
2080 
2081 		if (status != AAC_SRB_STS_SUCCESS)
2082 			return (-1);
2083 	}
2084 	return(0);
2085 }
2086 
2087 static int
2088 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2089 		 struct aac_fib *fib, u_int16_t datasize)
2090 {
2091 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2092 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2093 
2094 	if (datasize > AAC_FIB_DATASIZE)
2095 		return(EINVAL);
2096 
2097 	/*
2098 	 * Set up the sync FIB
2099 	 */
2100 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2101 				AAC_FIBSTATE_INITIALISED |
2102 				AAC_FIBSTATE_EMPTY;
2103 	fib->Header.XferState |= xferstate;
2104 	fib->Header.Command = command;
2105 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2106 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2107 	fib->Header.SenderSize = sizeof(struct aac_fib);
2108 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2109 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2110 		offsetof(struct aac_common, ac_sync_fib);
2111 
2112 	/*
2113 	 * Give the FIB to the controller, wait for a response.
2114 	 */
2115 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2116 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2117 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2118 		return(EIO);
2119 	}
2120 
2121 	return (0);
2122 }
2123 
2124 /*
2125  * Check for commands that have been outstanding for a suspiciously long time,
2126  * and complain about them.
2127  */
2128 static void
2129 aac_timeout(struct aac_softc *sc)
2130 {
2131 	struct aac_command *cm;
2132 	time_t deadline;
2133 	int timedout;
2134 
2135 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2136 	/*
2137 	 * Traverse the busy command list and complain about late commands,
2138 	 * once only.
2139 	 */
2140 	timedout = 0;
2141 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2142 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2143 		if (cm->cm_timestamp < deadline) {
2144 			device_printf(sc->aac_dev,
2145 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2146 				      cm, (int)(time_uptime-cm->cm_timestamp));
2147 			AAC_PRINT_FIB(sc, cm->cm_fib);
2148 			timedout++;
2149 		}
2150 	}
2151 
2152 	if (timedout)
2153 		aac_reset_adapter(sc);
2154 	aacraid_print_queues(sc);
2155 }
2156 
2157 /*
2158  * Interface Function Vectors
2159  */
2160 
2161 /*
2162  * Read the current firmware status word.
2163  */
2164 static int
2165 aac_src_get_fwstatus(struct aac_softc *sc)
2166 {
2167 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2168 
2169 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2170 }
2171 
2172 /*
2173  * Notify the controller of a change in a given queue
2174  */
2175 static void
2176 aac_src_qnotify(struct aac_softc *sc, int qbit)
2177 {
2178 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2179 
2180 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2181 }
2182 
2183 /*
2184  * Get the interrupt reason bits
2185  */
2186 static int
2187 aac_src_get_istatus(struct aac_softc *sc)
2188 {
2189 	int val;
2190 
2191 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2192 
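	/*
	 * In MSI/MSI-X mode the outbound doorbell is only checked for sync
	 * command completion.
	 */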
2193 	if (sc->msi_enabled) {
2194 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2195 		if (val & AAC_MSI_SYNC_STATUS)
2196 			val = AAC_DB_SYNC_COMMAND;
2197 		else
2198 			val = 0;
2199 	} else {
2200 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2201 	}
2202 	return(val);
2203 }
2204 
2205 /*
2206  * Clear some interrupt reason bits
2207  */
2208 static void
2209 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2210 {
2211 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2212 
2213 	if (sc->msi_enabled) {
2214 		if (mask == AAC_DB_SYNC_COMMAND)
2215 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2216 	} else {
2217 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2218 	}
2219 }
2220 
2221 /*
2222  * Populate the mailbox and set the command word
2223  */
2224 static void
2225 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2226 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2227 {
2228 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2229 
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2232 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2233 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2234 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2235 }
2236 
2237 static void
2238 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2239 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2240 {
2241 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2242 
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2245 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2246 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2247 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2248 }
2249 
2250 /*
2251  * Fetch the immediate command status word
2252  */
2253 static int
2254 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2255 {
2256 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2257 
2258 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2259 }
2260 
2261 static int
2262 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2263 {
2264 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2265 
2266 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2267 }
2268 
2269 /*
2270  * Set/clear interrupt masks
2271  */
2272 static void
2273 aac_src_access_devreg(struct aac_softc *sc, int mode)
2274 {
2275 	u_int32_t val;
2276 
2277 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2278 
2279 	switch (mode) {
2280 	case AAC_ENABLE_INTERRUPT:
2281 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2282 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2283 				           AAC_INT_ENABLE_TYPE1_INTX));
2284 		break;
2285 
2286 	case AAC_DISABLE_INTERRUPT:
2287 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2288 		break;
2289 
2290 	case AAC_ENABLE_MSIX:
2291 		/* set bit 6 */
2292 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2293 		val |= 0x40;
2294 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2295 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2296 		/* unmask int. */
2297 		val = PMC_ALL_INTERRUPT_BITS;
2298 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2299 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2300 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2301 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2302 		break;
2303 
2304 	case AAC_DISABLE_MSIX:
2305 		/* reset bit 6 */
2306 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2307 		val &= ~0x40;
2308 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2309 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2310 		break;
2311 
2312 	case AAC_CLEAR_AIF_BIT:
2313 		/* set bit 5 */
2314 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2315 		val |= 0x20;
2316 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2317 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2318 		break;
2319 
2320 	case AAC_CLEAR_SYNC_BIT:
2321 		/* set bit 4 */
2322 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2323 		val |= 0x10;
2324 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2325 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2326 		break;
2327 
2328 	case AAC_ENABLE_INTX:
2329 		/* set bit 7 */
2330 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2331 		val |= 0x80;
2332 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2333 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2334 		/* unmask int. */
2335 		val = PMC_ALL_INTERRUPT_BITS;
2336 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2337 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2338 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2339 			val & (~(PMC_GLOBAL_INT_BIT2)));
2340 		break;
2341 
2342 	default:
2343 		break;
2344 	}
2345 }
2346 
2347 /*
2348  * New comm. interface: Send command functions
2349  */
2350 static int
2351 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2352 {
2353 	struct aac_fib_xporthdr *pFibX;
2354 	u_int32_t fibsize, high_addr;
2355 	u_int64_t address;
2356 
2357 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2358 
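	/*
	 * With MSI-X enabled, spread regular (non-AIF) FIBs across the
	 * response vectors: starting after the last vector used, pick the
	 * next vector (skipping vector 0) that is still below its per-vector
	 * cap, fall back to vector 0 if every vector is full, and encode the
	 * chosen vector in the upper 16 bits of the FIB handle.
	 */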
2359 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2360 		sc->aac_max_msix > 1) {
2361 		u_int16_t vector_no, first_choice = 0xffff;
2362 
2363 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2364 		do {
2365 			vector_no += 1;
2366 			if (vector_no == sc->aac_max_msix)
2367 				vector_no = 1;
2368 			if (sc->aac_rrq_outstanding[vector_no] <
2369 				sc->aac_vector_cap)
2370 				break;
2371 			if (0xffff == first_choice)
2372 				first_choice = vector_no;
2373 			else if (vector_no == first_choice)
2374 				break;
2375 		} while (1);
2376 		if (vector_no == first_choice)
2377 			vector_no = 0;
2378 		sc->aac_rrq_outstanding[vector_no]++;
2379 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2380 			sc->aac_fibs_pushed_no = 0;
2381 		else
2382 			sc->aac_fibs_pushed_no++;
2383 
2384 		cm->cm_fib->Header.Handle += (vector_no << 16);
2385 	}
2386 
2387 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2388 		/* Encode the FIB size in 128-byte units, minus one */
2389 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2390 		/* Fill new FIB header */
2391 		address = cm->cm_fibphys;
2392 		high_addr = (u_int32_t)(address >> 32);
2393 		if (high_addr == 0L) {
2394 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2395 			cm->cm_fib->Header.u.TimeStamp = 0L;
2396 		} else {
2397 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2398 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2399 		}
2400 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2401 	} else {
2402 		/* Encode the FIB size (incl. transport header) in 128-byte units, minus one */
2403 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2404 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2405 		/* Fill XPORT header */
2406 		pFibX = (struct aac_fib_xporthdr *)
2407 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2408 		pFibX->Handle = cm->cm_fib->Header.Handle;
2409 		pFibX->HostAddress = cm->cm_fibphys;
2410 		pFibX->Size = cm->cm_fib->Header.Size;
2411 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2412 		high_addr = (u_int32_t)(address >> 32);
2413 	}
2414 
2415 	if (fibsize > 31)
2416 		fibsize = 31;
2417 	aac_enqueue_busy(cm);
2418 	if (high_addr) {
2419 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2420 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2421 	} else {
2422 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2423 	}
2424 	return 0;
2425 }
2426 
2427 /*
2428  * New comm. interface: get, set outbound queue index
2429  */
2430 static int
2431 aac_src_get_outb_queue(struct aac_softc *sc)
2432 {
2433 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2434 
2435 	return(-1);
2436 }
2437 
2438 static void
2439 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2440 {
2441 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2442 }
2443 
2444 /*
2445  * Debugging and Diagnostics
2446  */
2447 
2448 /*
2449  * Print some information about the controller.
2450  */
2451 static void
2452 aac_describe_controller(struct aac_softc *sc)
2453 {
2454 	struct aac_fib *fib;
2455 	struct aac_adapter_info	*info;
2456 	char *adapter_type = "Adaptec RAID controller";
2457 
2458 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2459 
2460 	mtx_lock(&sc->aac_io_lock);
2461 	aac_alloc_sync_fib(sc, &fib);
2462 
2463 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2464 		fib->data[0] = 0;
2465 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2466 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2467 		else {
2468 			struct aac_supplement_adapter_info *supp_info;
2469 
2470 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2471 			adapter_type = (char *)supp_info->AdapterTypeText;
2472 			sc->aac_feature_bits = supp_info->FeatureBits;
2473 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2474 		}
2475 	}
2476 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2477 		adapter_type,
2478 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2479 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2480 
2481 	fib->data[0] = 0;
2482 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2483 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2484 		aac_release_sync_fib(sc);
2485 		mtx_unlock(&sc->aac_io_lock);
2486 		return;
2487 	}
2488 
2489 	/* save the kernel revision structure for later use */
2490 	info = (struct aac_adapter_info *)&fib->data[0];
2491 	sc->aac_revision = info->KernelRevision;
2492 
2493 	if (bootverbose) {
2494 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2495 		    "(%dMB cache, %dMB execution), %s\n",
2496 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2497 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2498 		    info->BufferMem / (1024 * 1024),
2499 		    info->ExecutionMem / (1024 * 1024),
2500 		    aac_describe_code(aac_battery_platform,
2501 		    info->batteryPlatform));
2502 
2503 		device_printf(sc->aac_dev,
2504 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2505 		    info->KernelRevision.external.comp.major,
2506 		    info->KernelRevision.external.comp.minor,
2507 		    info->KernelRevision.external.comp.dash,
2508 		    info->KernelRevision.buildNumber,
2509 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2510 
2511 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2512 			      sc->supported_options,
2513 			      "\20"
2514 			      "\1SNAPSHOT"
2515 			      "\2CLUSTERS"
2516 			      "\3WCACHE"
2517 			      "\4DATA64"
2518 			      "\5HOSTTIME"
2519 			      "\6RAID50"
2520 			      "\7WINDOW4GB"
2521 			      "\10SCSIUPGD"
2522 			      "\11SOFTERR"
2523 			      "\12NORECOND"
2524 			      "\13SGMAP64"
2525 			      "\14ALARM"
2526 			      "\15NONDASD"
2527 			      "\16SCSIMGT"
2528 			      "\17RAIDSCSI"
2529 			      "\21ADPTINFO"
2530 			      "\22NEWCOMM"
2531 			      "\23ARRAY64BIT"
2532 			      "\24HEATSENSOR");
2533 	}
2534 
2535 	aac_release_sync_fib(sc);
2536 	mtx_unlock(&sc->aac_io_lock);
2537 }
2538 
2539 /*
2540  * Look up a text description of a numeric error code and return a pointer to
2541  * same.
2542  */
2543 static char *
2544 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2545 {
2546 	int i;
2547 
2548 	for (i = 0; table[i].string != NULL; i++)
2549 		if (table[i].code == code)
2550 			return(table[i].string);
2551 	return(table[i + 1].string);
2552 }
2553 
2554 /*
2555  * Management Interface
2556  */
2557 
2558 static int
2559 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2560 {
2561 	struct aac_softc *sc;
2562 
2563 	sc = dev->si_drv1;
2564 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2565 #if __FreeBSD_version >= 702000
2566 	device_busy(sc->aac_dev);
2567 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2568 #endif
2569 	return 0;
2570 }
2571 
2572 static int
2573 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2574 {
2575 	union aac_statrequest *as;
2576 	struct aac_softc *sc;
2577 	int error = 0;
2578 
2579 	as = (union aac_statrequest *)arg;
2580 	sc = dev->si_drv1;
2581 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2582 
2583 	switch (cmd) {
2584 	case AACIO_STATS:
2585 		switch (as->as_item) {
2586 		case AACQ_FREE:
2587 		case AACQ_READY:
2588 		case AACQ_BUSY:
2589 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2590 			      sizeof(struct aac_qstat));
2591 			break;
2592 		default:
2593 			error = ENOENT;
2594 			break;
2595 		}
2596 		break;
2597 
2598 	case FSACTL_SENDFIB:
2599 	case FSACTL_SEND_LARGE_FIB:
2600 		arg = *(caddr_t*)arg;
2601 	case FSACTL_LNX_SENDFIB:
2602 	case FSACTL_LNX_SEND_LARGE_FIB:
2603 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2604 		error = aac_ioctl_sendfib(sc, arg);
2605 		break;
2606 	case FSACTL_SEND_RAW_SRB:
2607 		arg = *(caddr_t*)arg;
2608 	case FSACTL_LNX_SEND_RAW_SRB:
2609 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2610 		error = aac_ioctl_send_raw_srb(sc, arg);
2611 		break;
2612 	case FSACTL_AIF_THREAD:
2613 	case FSACTL_LNX_AIF_THREAD:
2614 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2615 		error = EINVAL;
2616 		break;
2617 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2618 		arg = *(caddr_t*)arg;
2619 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2620 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2621 		error = aac_open_aif(sc, arg);
2622 		break;
2623 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2624 		arg = *(caddr_t*)arg;
2625 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2626 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2627 		error = aac_getnext_aif(sc, arg);
2628 		break;
2629 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2630 		arg = *(caddr_t*)arg;
2631 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2632 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2633 		error = aac_close_aif(sc, arg);
2634 		break;
2635 	case FSACTL_MINIPORT_REV_CHECK:
2636 		arg = *(caddr_t*)arg;
2637 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2638 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2639 		error = aac_rev_check(sc, arg);
2640 		break;
2641 	case FSACTL_QUERY_DISK:
2642 		arg = *(caddr_t*)arg;
2643 	case FSACTL_LNX_QUERY_DISK:
2644 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2645 		error = aac_query_disk(sc, arg);
2646 		break;
2647 	case FSACTL_DELETE_DISK:
2648 	case FSACTL_LNX_DELETE_DISK:
2649 		/*
2650 		 * We don't trust userland to tell us when to delete a
2651 		 * container; instead we rely on an AIF coming from the
2652 		 * controller.
2653 		 */
2654 		error = 0;
2655 		break;
2656 	case FSACTL_GET_PCI_INFO:
2657 		arg = *(caddr_t*)arg;
2658 	case FSACTL_LNX_GET_PCI_INFO:
2659 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2660 		error = aac_get_pci_info(sc, arg);
2661 		break;
2662 	case FSACTL_GET_FEATURES:
2663 		arg = *(caddr_t*)arg;
2664 	case FSACTL_LNX_GET_FEATURES:
2665 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2666 		error = aac_supported_features(sc, arg);
2667 		break;
2668 	default:
2669 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2670 		error = EINVAL;
2671 		break;
2672 	}
2673 	return(error);
2674 }
2675 
2676 static int
2677 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2678 {
2679 	struct aac_softc *sc;
2680 	struct aac_fib_context *ctx;
2681 	int revents;
2682 
2683 	sc = dev->si_drv1;
2684 	revents = 0;
2685 
2686 	mtx_lock(&sc->aac_io_lock);
2687 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2688 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2689 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2690 				revents |= poll_events & (POLLIN | POLLRDNORM);
2691 				break;
2692 			}
2693 		}
2694 	}
2695 	mtx_unlock(&sc->aac_io_lock);
2696 
2697 	if (revents == 0) {
2698 		if (poll_events & (POLLIN | POLLRDNORM))
2699 			selrecord(td, &sc->rcv_select);
2700 	}
2701 
2702 	return (revents);
2703 }
2704 
2705 static void
2706 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2707 {
2708 
2709 	switch (event->ev_type) {
2710 	case AAC_EVENT_CMFREE:
2711 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2712 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2713 			aacraid_add_event(sc, event);
2714 			return;
2715 		}
2716 		free(event, M_AACRAIDBUF);
2717 		wakeup(arg);
2718 		break;
2719 	default:
2720 		break;
2721 	}
2722 }
2723 
2724 /*
2725  * Send a FIB supplied from userspace
2726  */
2727 static int
2728 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2729 {
2730 	struct aac_command *cm;
2731 	int size, error;
2732 
2733 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2734 
2735 	cm = NULL;
2736 
2737 	/*
2738 	 * Get a command; if none are free, wait for one via the event mechanism
2739 	 */
2740 	mtx_lock(&sc->aac_io_lock);
2741 	if (aacraid_alloc_command(sc, &cm)) {
2742 		struct aac_event *event;
2743 
2744 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2745 		    M_NOWAIT | M_ZERO);
2746 		if (event == NULL) {
2747 			error = EBUSY;
2748 			mtx_unlock(&sc->aac_io_lock);
2749 			goto out;
2750 		}
2751 		event->ev_type = AAC_EVENT_CMFREE;
2752 		event->ev_callback = aac_ioctl_event;
2753 		event->ev_arg = &cm;
2754 		aacraid_add_event(sc, event);
2755 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2756 	}
2757 	mtx_unlock(&sc->aac_io_lock);
2758 
2759 	/*
2760 	 * Fetch the FIB header, then re-copy to get data as well.
2761 	 */
2762 	if ((error = copyin(ufib, cm->cm_fib,
2763 			    sizeof(struct aac_fib_header))) != 0)
2764 		goto out;
2765 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2766 	if (size > sc->aac_max_fib_size) {
2767 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2768 			      size, sc->aac_max_fib_size);
2769 		size = sc->aac_max_fib_size;
2770 	}
2771 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2772 		goto out;
2773 	cm->cm_fib->Header.Size = size;
2774 	cm->cm_timestamp = time_uptime;
2775 	cm->cm_datalen = 0;
2776 
2777 	/*
2778 	 * Pass the FIB to the controller, wait for it to complete.
2779 	 */
2780 	mtx_lock(&sc->aac_io_lock);
2781 	error = aacraid_wait_command(cm);
2782 	mtx_unlock(&sc->aac_io_lock);
2783 	if (error != 0) {
2784 		device_printf(sc->aac_dev,
2785 			      "aacraid_wait_command return %d\n", error);
2786 		goto out;
2787 	}
2788 
2789 	/*
2790 	 * Copy the FIB and data back out to the caller.
2791 	 */
2792 	size = cm->cm_fib->Header.Size;
2793 	if (size > sc->aac_max_fib_size) {
2794 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2795 			      size, sc->aac_max_fib_size);
2796 		size = sc->aac_max_fib_size;
2797 	}
2798 	error = copyout(cm->cm_fib, ufib, size);
2799 
2800 out:
2801 	if (cm != NULL) {
2802 		mtx_lock(&sc->aac_io_lock);
2803 		aacraid_release_command(cm);
2804 		mtx_unlock(&sc->aac_io_lock);
2805 	}
2806 	return(error);
2807 }
2808 
2809 /*
2810  * Send a passthrough FIB supplied from userspace
2811  */
2812 static int
2813 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2814 {
2815 	struct aac_command *cm;
2816 	struct aac_fib *fib;
2817 	struct aac_srb *srbcmd;
2818 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2819 	void *user_reply;
2820 	int error, transfer_data = 0;
2821 	bus_dmamap_t orig_map = 0;
2822 	u_int32_t fibsize = 0;
2823 	u_int64_t srb_sg_address;
2824 	u_int32_t srb_sg_bytecount;
2825 
2826 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2827 
2828 	cm = NULL;
2829 
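	/*
	 * Get a command; if none are free, register for a command-free event
	 * and sleep until one becomes available.
	 */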
2830 	mtx_lock(&sc->aac_io_lock);
2831 	if (aacraid_alloc_command(sc, &cm)) {
2832 		struct aac_event *event;
2833 
2834 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2835 		    M_NOWAIT | M_ZERO);
2836 		if (event == NULL) {
2837 			error = EBUSY;
2838 			mtx_unlock(&sc->aac_io_lock);
2839 			goto out;
2840 		}
2841 		event->ev_type = AAC_EVENT_CMFREE;
2842 		event->ev_callback = aac_ioctl_event;
2843 		event->ev_arg = &cm;
2844 		aacraid_add_event(sc, event);
2845 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2846 	}
2847 	mtx_unlock(&sc->aac_io_lock);
2848 
2849 	cm->cm_data = NULL;
2850 	/* save original dma map */
2851 	orig_map = cm->cm_datamap;
2852 
2853 	fib = cm->cm_fib;
2854 	srbcmd = (struct aac_srb *)fib->data;
2855 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2856 		sizeof (u_int32_t))) != 0)
2857 		goto out;
2858 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2859 		error = EINVAL;
2860 		goto out;
2861 	}
2862 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2863 		goto out;
2864 
2865 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2866 	srbcmd->retry_limit = 0;	/* obsolete */
2867 
2868 	/* only one sg element from userspace supported */
2869 	if (srbcmd->sg_map.SgCount > 1) {
2870 		error = EINVAL;
2871 		goto out;
2872 	}
2873 	/* check fibsize */
2874 	if (fibsize == (sizeof(struct aac_srb) +
2875 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2876 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2877 		struct aac_sg_entry sg;
2878 
2879 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2880 			goto out;
2881 
2882 		srb_sg_bytecount = sg.SgByteCount;
2883 		srb_sg_address = (u_int64_t)sg.SgAddress;
2884 	} else if (fibsize == (sizeof(struct aac_srb) +
2885 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2886 #ifdef __LP64__
2887 		struct aac_sg_entry64 *sgp =
2888 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2889 		struct aac_sg_entry64 sg;
2890 
2891 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2892 			goto out;
2893 
2894 		srb_sg_bytecount = sg.SgByteCount;
2895 		srb_sg_address = sg.SgAddress;
2896 		if (srb_sg_address > 0xffffffffull &&
2897 			!(sc->flags & AAC_FLAGS_SG_64BIT))
2898 #endif
2899 		{
2900 			error = EINVAL;
2901 			goto out;
2902 		}
2903 	} else {
2904 		error = EINVAL;
2905 		goto out;
2906 	}
2907 	user_reply = (char *)arg + fibsize;
2908 	srbcmd->data_len = srb_sg_bytecount;
2909 	if (srbcmd->sg_map.SgCount == 1)
2910 		transfer_data = 1;
2911 
2912 	if (transfer_data) {
2913 		/*
2914 		 * Create DMA tag for the passthr. data buffer and allocate it.
2915 		 */
2916 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2917 			1, 0,			/* algnmnt, boundary */
2918 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2919 			BUS_SPACE_MAXADDR_32BIT :
2920 			0x7fffffff,		/* lowaddr */
2921 			BUS_SPACE_MAXADDR, 	/* highaddr */
2922 			NULL, NULL, 		/* filter, filterarg */
2923 			srb_sg_bytecount, 	/* size */
2924 			sc->aac_sg_tablesize,	/* nsegments */
2925 			srb_sg_bytecount, 	/* maxsegsize */
2926 			0,			/* flags */
2927 			NULL, NULL,		/* No locking needed */
2928 			&cm->cm_passthr_dmat)) {
2929 			error = ENOMEM;
2930 			goto out;
2931 		}
2932 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2933 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2934 			error = ENOMEM;
2935 			goto out;
2936 		}
2937 		/* fill some cm variables */
2938 		cm->cm_datalen = srb_sg_bytecount;
2939 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2940 			cm->cm_flags |= AAC_CMD_DATAIN;
2941 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2942 			cm->cm_flags |= AAC_CMD_DATAOUT;
2943 
2944 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2945 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2946 				cm->cm_data, cm->cm_datalen)) != 0)
2947 				goto out;
2948 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2949 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2950 				BUS_DMASYNC_PREWRITE);
2951 		}
2952 	}
2953 
2954 	/* build the FIB */
2955 	fib->Header.Size = sizeof(struct aac_fib_header) +
2956 		sizeof(struct aac_srb);
2957 	fib->Header.XferState =
2958 		AAC_FIBSTATE_HOSTOWNED   |
2959 		AAC_FIBSTATE_INITIALISED |
2960 		AAC_FIBSTATE_EMPTY	 |
2961 		AAC_FIBSTATE_FROMHOST	 |
2962 		AAC_FIBSTATE_REXPECTED   |
2963 		AAC_FIBSTATE_NORM	 |
2964 		AAC_FIBSTATE_ASYNC;
2965 
2966 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2967 		ScsiPortCommandU64 : ScsiPortCommand;
2968 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2969 
2970 	/* send command */
2971 	if (transfer_data) {
2972 		bus_dmamap_load(cm->cm_passthr_dmat,
2973 			cm->cm_datamap, cm->cm_data,
2974 			cm->cm_datalen,
2975 			aacraid_map_command_sg, cm, 0);
2976 	} else {
2977 		aacraid_map_command_sg(cm, NULL, 0, 0);
2978 	}
2979 
2980 	/* wait for completion */
2981 	mtx_lock(&sc->aac_io_lock);
2982 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2983 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2984 	mtx_unlock(&sc->aac_io_lock);
2985 
2986 	/* copy data */
2987 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2988 		if ((error = copyout(cm->cm_data,
2989 			(void *)(uintptr_t)srb_sg_address,
2990 			cm->cm_datalen)) != 0)
2991 			goto out;
2992 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2993 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2994 				BUS_DMASYNC_POSTREAD);
2995 	}
2996 
2997 	/* status */
2998 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2999 
3000 out:
3001 	if (cm && cm->cm_data) {
3002 		if (transfer_data)
3003 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3004 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3005 		cm->cm_datamap = orig_map;
3006 	}
3007 	if (cm && cm->cm_passthr_dmat)
3008 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3009 	if (cm) {
3010 		mtx_lock(&sc->aac_io_lock);
3011 		aacraid_release_command(cm);
3012 		mtx_unlock(&sc->aac_io_lock);
3013 	}
3014 	return(error);
3015 }
3016 
3017 /*
3018  * Request an AIF from the controller (new comm. type1)
3019  */
3020 static void
3021 aac_request_aif(struct aac_softc *sc)
3022 {
3023 	struct aac_command *cm;
3024 	struct aac_fib *fib;
3025 
3026 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3027 
3028 	if (aacraid_alloc_command(sc, &cm)) {
3029 		sc->aif_pending = 1;
3030 		return;
3031 	}
3032 	sc->aif_pending = 0;
3033 
3034 	/* build the FIB */
3035 	fib = cm->cm_fib;
3036 	fib->Header.Size = sizeof(struct aac_fib);
3037 	fib->Header.XferState =
3038 		AAC_FIBSTATE_HOSTOWNED   |
3039 		AAC_FIBSTATE_INITIALISED |
3040 		AAC_FIBSTATE_EMPTY	 |
3041 		AAC_FIBSTATE_FROMHOST	 |
3042 		AAC_FIBSTATE_REXPECTED   |
3043 		AAC_FIBSTATE_NORM	 |
3044 		AAC_FIBSTATE_ASYNC;
3045 	/* set AIF marker */
3046 	fib->Header.Handle = 0x00800000;
3047 	fib->Header.Command = AifRequest;
3048 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3049 
3050 	aacraid_map_command_sg(cm, NULL, 0, 0);
3051 }
3052 
3053 
3054 #if __FreeBSD_version >= 702000
3055 /*
3056  * cdevpriv interface private destructor.
3057  */
3058 static void
3059 aac_cdevpriv_dtor(void *arg)
3060 {
3061 	struct aac_softc *sc;
3062 
3063 	sc = arg;
3064 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3065 	mtx_lock(&Giant);
3066 	device_unbusy(sc->aac_dev);
3067 	mtx_unlock(&Giant);
3068 }
3069 #else
3070 static int
3071 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3072 {
3073 	struct aac_softc *sc;
3074 
3075 	sc = dev->si_drv1;
3076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3077 	return 0;
3078 }
3079 #endif
3080 
3081 /*
3082  * Handle an AIF sent to us by the controller; queue it for later reference.
3083  * If the queue fills up, then drop the older entries.
3084  */
3085 static void
3086 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3087 {
3088 	struct aac_aif_command *aif;
3089 	struct aac_container *co, *co_next;
3090 	struct aac_fib_context *ctx;
3091 	struct aac_fib *sync_fib;
3092 	struct aac_mntinforesp mir;
3093 	int next, current, found;
3094 	int count = 0, changed = 0, i = 0;
3095 	u_int32_t channel, uid;
3096 
3097 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3098 
3099 	aif = (struct aac_aif_command*)&fib->data[0];
3100 	aacraid_print_aif(sc, aif);
3101 
3102 	/* Is it an event that we should care about? */
3103 	switch (aif->command) {
3104 	case AifCmdEventNotify:
3105 		switch (aif->data.EN.type) {
3106 		case AifEnAddContainer:
3107 		case AifEnDeleteContainer:
3108 			/*
3109 			 * A container was added or deleted, but the message
3110 			 * doesn't tell us anything else!  Re-enumerate the
3111 			 * containers and sort things out.
3112 			 */
3113 			aac_alloc_sync_fib(sc, &sync_fib);
3114 			do {
3115 				/*
3116 				 * Ask the controller for its containers one at
3117 				 * a time.
3118 				 * XXX What if the controller's list changes
3119 				 * midway through this enumeration?
3120 				 * XXX This should be done async.
3121 				 */
3122 				if (aac_get_container_info(sc, sync_fib, i,
3123 					&mir, &uid) != 0)
3124 					continue;
3125 				if (i == 0)
3126 					count = mir.MntRespCount;
3127 				/*
3128 				 * Check the container against our list.
3129 				 * co->co_found was already set to 0 in a
3130 				 * previous run.
3131 				 */
3132 				if ((mir.Status == ST_OK) &&
3133 				    (mir.MntTable[0].VolType != CT_NONE)) {
3134 					found = 0;
3135 					TAILQ_FOREACH(co,
3136 						      &sc->aac_container_tqh,
3137 						      co_link) {
3138 						if (co->co_mntobj.ObjectId ==
3139 						    mir.MntTable[0].ObjectId) {
3140 							co->co_found = 1;
3141 							found = 1;
3142 							break;
3143 						}
3144 					}
3145 					/*
3146 					 * If the container matched, continue
3147 					 * in the list.
3148 					 */
3149 					if (found) {
3150 						i++;
3151 						continue;
3152 					}
3153 
3154 					/*
3155 					 * This is a new container.  Do all the
3156 					 * appropriate things to set it up.
3157 					 */
3158 					aac_add_container(sc, &mir, 1, uid);
3159 					changed = 1;
3160 				}
3161 				i++;
3162 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3163 			aac_release_sync_fib(sc);
3164 
3165 			/*
3166 			 * Go through our list of containers and see which ones
3167 			 * were not marked 'found'.  Since the controller didn't
3168 			 * list them they must have been deleted.  Do the
3169 			 * appropriate steps to destroy the device.  Also reset
3170 			 * the co->co_found field.
3171 			 */
3172 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3173 			while (co != NULL) {
3174 				if (co->co_found == 0) {
3175 					co_next = TAILQ_NEXT(co, co_link);
3176 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3177 						     co_link);
3178 					free(co, M_AACRAIDBUF);
3179 					changed = 1;
3180 					co = co_next;
3181 				} else {
3182 					co->co_found = 0;
3183 					co = TAILQ_NEXT(co, co_link);
3184 				}
3185 			}
3186 
3187 			/* Attach the newly created containers */
3188 			if (changed) {
3189 				if (sc->cam_rescan_cb != NULL)
3190 					sc->cam_rescan_cb(sc, 0,
3191 				    	AAC_CAM_TARGET_WILDCARD);
3192 			}
3193 
3194 			break;
3195 
3196 		case AifEnEnclosureManagement:
3197 			switch (aif->data.EN.data.EEE.eventType) {
3198 			case AIF_EM_DRIVE_INSERTION:
3199 			case AIF_EM_DRIVE_REMOVAL:
3200 				channel = aif->data.EN.data.EEE.unitID;
3201 				if (sc->cam_rescan_cb != NULL)
3202 					sc->cam_rescan_cb(sc,
3203 					    ((channel>>24) & 0xF) + 1,
3204 					    (channel & 0xFFFF));
3205 				break;
3206 			}
3207 			break;
3208 
3209 		case AifEnAddJBOD:
3210 		case AifEnDeleteJBOD:
3211 		case AifRawDeviceRemove:
3212 			channel = aif->data.EN.data.ECE.container;
3213 			if (sc->cam_rescan_cb != NULL)
3214 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3215 				    AAC_CAM_TARGET_WILDCARD);
3216 			break;
3217 
3218 		default:
3219 			break;
3220 		}
3221 
3222 	default:
3223 		break;
3224 	}
3225 
3226 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3227 	current = sc->aifq_idx;
3228 	next = (current + 1) % AAC_AIFQ_LENGTH;
3229 	if (next == 0)
3230 		sc->aifq_filled = 1;
3231 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3232 	/* adjust AIF reader contexts that the writer has caught up with or lapped */
3233 	if (sc->aifq_filled) {
3234 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3235 			if (next == ctx->ctx_idx)
3236 				ctx->ctx_wrap = 1;
3237 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3238 				ctx->ctx_idx = next;
3239 		}
3240 	}
3241 	sc->aifq_idx = next;
3242 	/* On the off chance that someone is sleeping for an aif... */
3243 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3244 		wakeup(sc->aac_aifq);
3245 	/* Wakeup any poll()ers */
3246 	selwakeuppri(&sc->rcv_select, PRIBIO);
3247 
3248 	return;
3249 }
3250 
3251 /*
3252  * Return the Revision of the driver to userspace and check to see if the
3253  * userspace app is possibly compatible.  This is extremely bogus since
3254  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3255  * returning what the card reported.
3256  */
3257 static int
3258 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3259 {
3260 	struct aac_rev_check rev_check;
3261 	struct aac_rev_check_resp rev_check_resp;
3262 	int error = 0;
3263 
3264 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3265 
3266 	/*
3267 	 * Copyin the revision struct from userspace
3268 	 */
3269 	if ((error = copyin(udata, (caddr_t)&rev_check,
3270 			sizeof(struct aac_rev_check))) != 0) {
3271 		return error;
3272 	}
3273 
3274 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3275 	      rev_check.callingRevision.buildNumber);
3276 
3277 	/*
3278 	 * Doctor up the response struct.
3279 	 */
3280 	rev_check_resp.possiblyCompatible = 1;
3281 	rev_check_resp.adapterSWRevision.external.comp.major =
3282 	    AAC_DRIVER_MAJOR_VERSION;
3283 	rev_check_resp.adapterSWRevision.external.comp.minor =
3284 	    AAC_DRIVER_MINOR_VERSION;
3285 	rev_check_resp.adapterSWRevision.external.comp.type =
3286 	    AAC_DRIVER_TYPE;
3287 	rev_check_resp.adapterSWRevision.external.comp.dash =
3288 	    AAC_DRIVER_BUGFIX_LEVEL;
3289 	rev_check_resp.adapterSWRevision.buildNumber =
3290 	    AAC_DRIVER_BUILD;
3291 
3292 	return(copyout((caddr_t)&rev_check_resp, udata,
3293 			sizeof(struct aac_rev_check_resp)));
3294 }
3295 
3296 /*
3297  * Pass the fib context to the caller
3298  */
3299 static int
3300 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3301 {
3302 	struct aac_fib_context *fibctx, *ctx;
3303 	int error = 0;
3304 
3305 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3306 
3307 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3308 	if (fibctx == NULL)
3309 		return (ENOMEM);
3310 
3311 	mtx_lock(&sc->aac_io_lock);
3312 	/* all elements are already 0, add to queue */
3313 	if (sc->fibctx == NULL)
3314 		sc->fibctx = fibctx;
3315 	else {
3316 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3317 			;
3318 		ctx->next = fibctx;
3319 		fibctx->prev = ctx;
3320 	}
3321 
3322 	/* pick a unique value for this context, bumping it until no other context uses it */
3323 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3324 	ctx = sc->fibctx;
3325 	while (ctx != fibctx) {
3326 		if (ctx->unique == fibctx->unique) {
3327 			fibctx->unique++;
3328 			ctx = sc->fibctx;
3329 		} else {
3330 			ctx = ctx->next;
3331 		}
3332 	}
3333 
3334 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3335 	mtx_unlock(&sc->aac_io_lock);
3336 	if (error)
3337 		aac_close_aif(sc, (caddr_t)ctx);
3338 	return error;
3339 }
3340 
3341 /*
3342  * Close the caller's fib context
3343  */
3344 static int
3345 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3346 {
3347 	struct aac_fib_context *ctx;
3348 
3349 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3350 
3351 	mtx_lock(&sc->aac_io_lock);
3352 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3353 		if (ctx->unique == *(uint32_t *)&arg) {
3354 			if (ctx == sc->fibctx)
3355 				sc->fibctx = NULL;
3356 			else {
3357 				ctx->prev->next = ctx->next;
3358 				if (ctx->next)
3359 					ctx->next->prev = ctx->prev;
3360 			}
3361 			break;
3362 		}
3363 	}
3364 	if (ctx)
3365 		free(ctx, M_AACRAIDBUF);
3366 
3367 	mtx_unlock(&sc->aac_io_lock);
3368 	return 0;
3369 }
3370 
3371 /*
3372  * Pass the caller the next AIF in their queue
3373  */
3374 static int
3375 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3376 {
3377 	struct get_adapter_fib_ioctl agf;
3378 	struct aac_fib_context *ctx;
3379 	int error;
3380 
3381 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3382 
3383 	mtx_lock(&sc->aac_io_lock);
3384 	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3385 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3386 			if (agf.AdapterFibContext == ctx->unique)
3387 				break;
3388 		}
3389 		if (!ctx) {
3390 			mtx_unlock(&sc->aac_io_lock);
3391 			return (EFAULT);
3392 		}
3393 
3394 		error = aac_return_aif(sc, ctx, agf.AifFib);
3395 		if (error == EAGAIN && agf.Wait) {
3396 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3397 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3398 			while (error == EAGAIN) {
3399 				mtx_unlock(&sc->aac_io_lock);
3400 				error = tsleep(sc->aac_aifq, PRIBIO |
3401 					       PCATCH, "aacaif", 0);
3402 				mtx_lock(&sc->aac_io_lock);
3403 				if (error == 0)
3404 					error = aac_return_aif(sc, ctx, agf.AifFib);
3405 			}
3406 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3407 		}
3408 	}
3409 	mtx_unlock(&sc->aac_io_lock);
3410 	return(error);
3411 }
3412 
3413 /*
3414  * Hand the next AIF off the top of the queue out to userspace.
3415  */
3416 static int
3417 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3418 {
3419 	int current, error;
3420 
3421 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3422 
3423 	current = ctx->ctx_idx;
3424 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3425 		/* empty */
3426 		return (EAGAIN);
3427 	}
3428 	error =
3429 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3430 	if (error)
3431 		device_printf(sc->aac_dev,
3432 		    "aac_return_aif: copyout returned %d\n", error);
3433 	else {
3434 		ctx->ctx_wrap = 0;
3435 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3436 	}
3437 	return(error);
3438 }
3439 
3440 static int
3441 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3442 {
3443 	struct aac_pci_info {
3444 		u_int32_t bus;
3445 		u_int32_t slot;
3446 	} pciinf;
3447 	int error;
3448 
3449 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3450 
3451 	pciinf.bus = pci_get_bus(sc->aac_dev);
3452 	pciinf.slot = pci_get_slot(sc->aac_dev);
3453 
3454 	error = copyout((caddr_t)&pciinf, uptr,
3455 			sizeof(struct aac_pci_info));
3456 
3457 	return (error);
3458 }
3459 
3460 static int
3461 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3462 {
3463 	struct aac_features f;
3464 	int error;
3465 
3466 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3467 
3468 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3469 		return (error);
3470 
3471 	/*
3472 	 * When the management driver receives a FSACTL_GET_FEATURES ioctl
3473 	 * with featuresState all zero, the driver returns the current state
3474 	 * of all supported features; the data field is not valid in that
3475 	 * case.
3476 	 * When the ioctl arrives with a specific bit set in featuresState,
3477 	 * the driver returns the current state of that particular feature,
3478 	 * together with any data associated with it in the data field, or
3479 	 * performs whatever action the data field indicates for that
3480 	 * feature.
3481 	 */
3482 	 if (f.feat.fValue == 0) {
3483 		f.feat.fBits.largeLBA =
3484 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3485 		f.feat.fBits.JBODSupport = 1;
3486 		/* TODO: In the future, add other features state here as well */
3487 	} else {
3488 		if (f.feat.fBits.largeLBA)
3489 			f.feat.fBits.largeLBA =
3490 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3491 		/* TODO: Add other features state and data in the future */
3492 	}
3493 
3494 	error = copyout(&f, uptr, sizeof (f));
3495 	return (error);
3496 }
3497 
3498 /*
3499  * Give the userland some information about the container.  The AAC arch
3500  * expects the driver to be a SCSI passthrough type driver, so it expects
3501  * the containers to have b:t:l numbers.  Fake it.
3502  */
3503 static int
3504 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3505 {
3506 	struct aac_query_disk query_disk;
3507 	struct aac_container *co;
3508 	int error, id;
3509 
3510 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3511 
3512 	mtx_lock(&sc->aac_io_lock);
3513 	error = copyin(uptr, (caddr_t)&query_disk,
3514 		       sizeof(struct aac_query_disk));
3515 	if (error) {
3516 		mtx_unlock(&sc->aac_io_lock);
3517 		return (error);
3518 	}
3519 
3520 	id = query_disk.ContainerNumber;
3521 	if (id == -1) {
3522 		mtx_unlock(&sc->aac_io_lock);
3523 		return (EINVAL);
3524 	}
3525 
3526 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3527 		if (co->co_mntobj.ObjectId == id)
3528 			break;
3529 	}
3530 
3531 	if (co == NULL) {
3532 		query_disk.Valid = 0;
3533 		query_disk.Locked = 0;
3534 		query_disk.Deleted = 1;		/* XXX is this right? */
3535 	} else {
3536 		query_disk.Valid = 1;
3537 		query_disk.Locked = 1;
3538 		query_disk.Deleted = 0;
3539 		query_disk.Bus = device_get_unit(sc->aac_dev);
3540 		query_disk.Target = 0;
3541 		query_disk.Lun = 0;
3542 		query_disk.UnMapped = 0;
3543 	}
3544 
3545 	error = copyout((caddr_t)&query_disk, uptr,
3546 			sizeof(struct aac_query_disk));
3547 
3548 	mtx_unlock(&sc->aac_io_lock);
3549 	return (error);
3550 }
3551 
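/*
 * Register the logical container bus as a child device so the CAM layer can
 * enumerate the RAID containers behind it.
 */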
3552 static void
3553 aac_container_bus(struct aac_softc *sc)
3554 {
3555 	struct aac_sim *sim;
3556 	device_t child;
3557 
3558 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3559 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3560 	if (sim == NULL) {
3561 		device_printf(sc->aac_dev,
3562 		    "No memory to add container bus\n");
3563 		panic("Out of memory?!");
3564 	}
3565 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3566 	if (child == NULL) {
3567 		device_printf(sc->aac_dev,
3568 		    "device_add_child failed for container bus\n");
3569 		free(sim, M_AACRAIDBUF);
3570 		panic("Out of memory?!");
3571 	}
3572 
3573 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3574 	sim->BusNumber = 0;
3575 	sim->BusType = CONTAINER_BUS;
3576 	sim->InitiatorBusId = -1;
3577 	sim->aac_sc = sc;
3578 	sim->sim_dev = child;
3579 	sim->aac_cam = NULL;
3580 
3581 	device_set_ivars(child, sim);
3582 	device_set_desc(child, "Container Bus");
3583 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3584 	/*
3585 	device_set_desc(child, aac_describe_code(aac_container_types,
3586 			mir->MntTable[0].VolType));
3587 	*/
3588 	bus_generic_attach(sc->aac_dev);
3589 }
3590 
3591 static void
3592 aac_get_bus_info(struct aac_softc *sc)
3593 {
3594 	struct aac_fib *fib;
3595 	struct aac_ctcfg *c_cmd;
3596 	struct aac_ctcfg_resp *c_resp;
3597 	struct aac_vmioctl *vmi;
3598 	struct aac_vmi_businf_resp *vmi_resp;
3599 	struct aac_getbusinf businfo;
3600 	struct aac_sim *caminf;
3601 	device_t child;
3602 	int i, error;
3603 
3604 	mtx_lock(&sc->aac_io_lock);
3605 	aac_alloc_sync_fib(sc, &fib);
3606 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3607 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3608 
3609 	c_cmd->Command = VM_ContainerConfig;
3610 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3611 	c_cmd->param = 0;
3612 
3613 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3614 	    sizeof(struct aac_ctcfg));
3615 	if (error) {
3616 		device_printf(sc->aac_dev, "Error %d sending "
3617 		    "VM_ContainerConfig command\n", error);
3618 		aac_release_sync_fib(sc);
3619 		mtx_unlock(&sc->aac_io_lock);
3620 		return;
3621 	}
3622 
3623 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3624 	if (c_resp->Status != ST_OK) {
3625 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3626 		    c_resp->Status);
3627 		aac_release_sync_fib(sc);
3628 		mtx_unlock(&sc->aac_io_lock);
3629 		return;
3630 	}
3631 
3632 	sc->scsi_method_id = c_resp->param;
3633 
3634 	vmi = (struct aac_vmioctl *)&fib->data[0];
3635 	bzero(vmi, sizeof(struct aac_vmioctl));
3636 
3637 	vmi->Command = VM_Ioctl;
3638 	vmi->ObjType = FT_DRIVE;
3639 	vmi->MethId = sc->scsi_method_id;
3640 	vmi->ObjId = 0;
3641 	vmi->IoctlCmd = GetBusInfo;
3642 
3643 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3644 	    sizeof(struct aac_vmi_businf_resp));
3645 	if (error) {
3646 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3647 		    error);
3648 		aac_release_sync_fib(sc);
3649 		mtx_unlock(&sc->aac_io_lock);
3650 		return;
3651 	}
3652 
3653 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3654 	if (vmi_resp->Status != ST_OK) {
3655 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3656 		    vmi_resp->Status);
3657 		aac_release_sync_fib(sc);
3658 		mtx_unlock(&sc->aac_io_lock);
3659 		return;
3660 	}
3661 
3662 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3663 	aac_release_sync_fib(sc);
3664 	mtx_unlock(&sc->aac_io_lock);
3665 
3666 	for (i = 0; i < businfo.BusCount; i++) {
3667 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3668 			continue;
3669 
3670 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3671 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3672 		if (caminf == NULL) {
3673 			device_printf(sc->aac_dev,
3674 			    "No memory to add passthrough bus %d\n", i);
3675 			break;
3676 		}
3677 
3678 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3679 		if (child == NULL) {
3680 			device_printf(sc->aac_dev,
3681 			    "device_add_child failed for passthrough bus %d\n",
3682 			    i);
3683 			free(caminf, M_AACRAIDBUF);
3684 			break;
3685 		}
3686 
3687 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3688 		caminf->BusNumber = i + 1;
3689 		caminf->BusType = PASSTHROUGH_BUS;
3690 		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3691 		caminf->aac_sc = sc;
3692 		caminf->sim_dev = child;
3693 		caminf->aac_cam = NULL;
3694 
3695 		device_set_ivars(child, caminf);
3696 		device_set_desc(child, "SCSI Passthrough Bus");
3697 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3698 	}
3699 }
3700 
3701 /*
3702  * Check to see if the kernel is up and running. If we are in a
3703  * BlinkLED state, return the BlinkLED code.
3704  */
3705 static u_int32_t
3706 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3707 {
3708 	u_int32_t ret;
3709 
3710 	ret = AAC_GET_FWSTATUS(sc);
3711 
3712 	if (ret & AAC_UP_AND_RUNNING)
3713 		ret = 0;
3714 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3715 		*bled = (ret >> 16) & 0xff;
3716 
3717 	return (ret);
3718 }
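
/*
 * Minimal sketch of how a caller might consume the health check above:
 * poll the firmware status and, if a kernel panic (BlinkLED) is
 * reported, log the blink code and kick off an IOP reset.  The watchdog
 * function name is hypothetical and not part of the driver;
 * aac_check_adapter_health() and aac_reset_adapter() are defined in
 * this file, and aac_reset_adapter() expects aac_io_lock to be held.
 */
#if 0
static void
aac_health_watchdog(struct aac_softc *sc)	/* hypothetical helper */
{
	u_int8_t bled = 0;
	u_int32_t health;

	health = aac_check_adapter_health(sc, &bled);
	if (health == 0)
		return;			/* firmware is up and running */

	if (health & AAC_KERNEL_PANIC)
		device_printf(sc->aac_dev,
		    "adapter kernel panic, BlinkLED code 0x%02x\n", bled);

	mtx_lock(&sc->aac_io_lock);
	(void) aac_reset_adapter(sc);
	mtx_unlock(&sc->aac_io_lock);
}
#endif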
3719 
3720 /*
3721  * After an IOP reset the card basically has to be re-initialized as if
3722  * it were coming up from a cold boot, and the driver is responsible for
3723  * any I/O that was outstanding to the adapter at the time of the IOP
3724  * RESET.  The driver is prepared for an IOP RESET by keeping the init
3725  * code modular, with the ability to call it from multiple places.
3726  */
3727 static int
3728 aac_reset_adapter(struct aac_softc *sc)
3729 {
3730 	struct aac_command *cm;
3731 	struct aac_fib *fib;
3732 	struct aac_pause_command *pc;
3733 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3734 	int msi_enabled_orig;
3735 
3736 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3737 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3738 
3739 	if (sc->aac_state & AAC_STATE_RESET) {
3740 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3741 		return (EINVAL);
3742 	}
3743 	sc->aac_state |= AAC_STATE_RESET;
3744 
3745 	/* disable interrupt */
3746 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3747 
3748 	/*
3749 	 * Abort all pending commands:
3750 	 * a) on the controller
3751 	 */
3752 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3753 		cm->cm_flags |= AAC_CMD_RESET;
3754 
3755 		/* is there a completion handler? */
3756 		if (cm->cm_complete != NULL) {
3757 			cm->cm_complete(cm);
3758 		} else {
3759 			/* assume that someone is sleeping on this
3760 			 * command
3761 			 */
3762 			wakeup(cm);
3763 		}
3764 	}
3765 
3766 	/* b) in the waiting queues */
3767 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3768 		cm->cm_flags |= AAC_CMD_RESET;
3769 
3770 		/* is there a completion handler? */
3771 		if (cm->cm_complete != NULL) {
3772 			cm->cm_complete(cm);
3773 		} else {
3774 			/* assume that someone is sleeping on this
3775 			 * command
3776 			 */
3777 			wakeup(cm);
3778 		}
3779 	}
3780 
3781 	/* flush drives */
3782 	if (aac_check_adapter_health(sc, NULL) == 0) {
3783 		mtx_unlock(&sc->aac_io_lock);
3784 		(void) aacraid_shutdown(sc->aac_dev);
3785 		mtx_lock(&sc->aac_io_lock);
3786 	}
3787 
3788 	/* execute IOP reset */
3789 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3790 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3791 
3792 		/* We need to wait for 5 seconds before accessing the MU again;
3793 		 * each second is 10000 * 100us = 1,000,000us = 1000ms = 1s.
3794 		 */
3795 		waitCount = 5 * 10000;
3796 		while (waitCount) {
3797 			DELAY(100);			/* delay 100 microseconds */
3798 			waitCount--;
3799 		}
3800 	} else if ((aacraid_sync_command(sc,
3801 		AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
3802 		/* call IOP_RESET for older firmware */
3803 		if ((aacraid_sync_command(sc,
3804 			AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {
3805 
3806 			if (status == AAC_SRB_STS_INVALID_REQUEST)
3807 				device_printf(sc->aac_dev, "IOP_RESET not supported\n");
3808 			else
3809 				/* probably timeout */
3810 				device_printf(sc->aac_dev, "IOP_RESET failed\n");
3811 
3812 			/* unwind aac_shutdown() */
3813 			aac_alloc_sync_fib(sc, &fib);
3814 			pc = (struct aac_pause_command *)&fib->data[0];
3815 			pc->Command = VM_ContainerConfig;
3816 			pc->Type = CT_PAUSE_IO;
3817 			pc->Timeout = 1;
3818 			pc->Min = 1;
3819 			pc->NoRescan = 1;
3820 
3821 			(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
3822 				sizeof (struct aac_pause_command));
3823 			aac_release_sync_fib(sc);
3824 
3825 			goto finish;
3826 		}
3827 	} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
3828 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3829 		/*
3830 		 * We need to wait for 5 seconds before accessing the doorbell
3831 		 * again; each second is 10000 * 100us = 1,000,000us = 1000ms = 1s.
3832 		 */
3833 		waitCount = 5 * 10000;
3834 		while (waitCount) {
3835 			DELAY(100);		/* delay 100 microseconds */
3836 			waitCount--;
3837 		}
3838 	}
3839 
3840 	/*
3841 	 * Initialize the adapter.
3842 	 */
3843 	max_msix_orig = sc->aac_max_msix;
3844 	msi_enabled_orig = sc->msi_enabled;
3845 	sc->msi_enabled = FALSE;
3846 	if (aac_check_firmware(sc) != 0)
3847 		goto finish;
3848 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3849 		sc->aac_max_msix = max_msix_orig;
3850 		if (msi_enabled_orig) {
3851 			sc->msi_enabled = msi_enabled_orig;
3852 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3853 		}
3854 		mtx_unlock(&sc->aac_io_lock);
3855 		aac_init(sc);
3856 		mtx_lock(&sc->aac_io_lock);
3857 	}
3858 
3859 finish:
3860 	sc->aac_state &= ~AAC_STATE_RESET;
3861 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3862 	aacraid_startio(sc);
3863 	return (0);
3864 }
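
/*
 * The 5 second settle delay after an MU or doorbell reset is open-coded
 * twice above as 5 * 10000 iterations of DELAY(100).  A small helper
 * along these lines (the name is hypothetical, not part of the driver)
 * would factor out the arithmetic; DELAY() busy-waits for the given
 * number of microseconds.
 */
#if 0
static void
aac_settle_delay(int seconds)		/* hypothetical helper */
{
	u_int32_t count;

	/* seconds * 10000 iterations * 100us each = seconds * 1,000,000us */
	for (count = seconds * 10000; count != 0; count--)
		DELAY(100);
}
#endif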
3865