xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 7c1b51d6dc2e165ae7333373513b080f17cf79bd)
1 /*-
2  * Copyright (c) 2000 Michael Smith
3  * Copyright (c) 2001 Scott Long
4  * Copyright (c) 2000 BSDi
5  * Copyright (c) 2001-2010 Adaptec, Inc.
6  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
36  */
37 #define AAC_DRIVERNAME			"aacraid"
38 
39 #include "opt_aacraid.h"
40 
41 /* #include <stddef.h> */
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/sysctl.h>
48 #include <sys/poll.h>
49 #include <sys/ioccom.h>
50 
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/signalvar.h>
54 #include <sys/time.h>
55 #include <sys/eventhandler.h>
56 #include <sys/rman.h>
57 
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 
64 #include <dev/aacraid/aacraid_reg.h>
65 #include <sys/aac_ioctl.h>
66 #include <dev/aacraid/aacraid_debug.h>
67 #include <dev/aacraid/aacraid_var.h>
68 
69 #ifndef FILTER_HANDLED
70 #define FILTER_HANDLED	0x02
71 #endif
72 
73 static void	aac_add_container(struct aac_softc *sc,
74 				  struct aac_mntinforesp *mir, int f,
75 				  u_int32_t uid);
76 static void	aac_get_bus_info(struct aac_softc *sc);
77 static void	aac_container_bus(struct aac_softc *sc);
78 static void	aac_daemon(void *arg);
79 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
80 							  int pages, int nseg, int nseg_new);
81 
82 /* Command Processing */
83 static void	aac_timeout(struct aac_softc *sc);
84 static void	aac_command_thread(struct aac_softc *sc);
85 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
86 				     u_int32_t xferstate, struct aac_fib *fib,
87 				     u_int16_t datasize);
88 /* Command Buffer Management */
89 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
90 				       int nseg, int error);
91 static int	aac_alloc_commands(struct aac_softc *sc);
92 static void	aac_free_commands(struct aac_softc *sc);
93 static void	aac_unmap_command(struct aac_command *cm);
94 
95 /* Hardware Interface */
96 static int	aac_alloc(struct aac_softc *sc);
97 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
98 			       int error);
99 static int	aac_check_firmware(struct aac_softc *sc);
100 static void	aac_define_int_mode(struct aac_softc *sc);
101 static int	aac_init(struct aac_softc *sc);
102 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
103 static int	aac_setup_intr(struct aac_softc *sc);
104 static int	aac_check_config(struct aac_softc *sc);
105 
106 /* PMC SRC interface */
107 static int	aac_src_get_fwstatus(struct aac_softc *sc);
108 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
109 static int	aac_src_get_istatus(struct aac_softc *sc);
110 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
111 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
112 				    u_int32_t arg0, u_int32_t arg1,
113 				    u_int32_t arg2, u_int32_t arg3);
114 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
115 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
116 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
117 static int aac_src_get_outb_queue(struct aac_softc *sc);
118 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
119 
/*
 * Operations vector for PMC SRC-family controllers.  The initializer is
 * positional, so entry order must match the function-pointer layout of
 * struct aac_interface (declared in aacraid_var.h).
 */
struct aac_interface aacraid_src_interface = {
	aac_src_get_fwstatus,
	aac_src_qnotify,
	aac_src_get_istatus,
	aac_src_clear_istatus,
	aac_src_set_mailbox,
	aac_src_get_mailbox,
	aac_src_access_devreg,
	aac_src_send_command,
	aac_src_get_outb_queue,
	aac_src_set_outb_queue
};
132 
133 /* PMC SRCv interface */
134 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
135 				    u_int32_t arg0, u_int32_t arg1,
136 				    u_int32_t arg2, u_int32_t arg3);
137 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
138 
/*
 * Operations vector for PMC SRCv-family controllers.  Identical to the
 * SRC vector except for the mailbox accessors, which use the SRCv
 * register layout.  Entry order must match struct aac_interface.
 */
struct aac_interface aacraid_srcv_interface = {
	aac_src_get_fwstatus,
	aac_src_qnotify,
	aac_src_get_istatus,
	aac_src_clear_istatus,
	aac_srcv_set_mailbox,
	aac_srcv_get_mailbox,
	aac_src_access_devreg,
	aac_src_send_command,
	aac_src_get_outb_queue,
	aac_src_set_outb_queue
};
151 
152 /* Debugging and Diagnostics */
/*
 * CPU variant names for aac_describe_code()/aac_describe_controller().
 * The {NULL, 0} entry terminates the lookup; the entry after it supplies
 * the fallback string used when no code matched.
 */
static struct aac_code_lookup aac_cpu_variant[] = {
	{"i960JX",		CPUI960_JX},
	{"i960CX",		CPUI960_CX},
	{"i960HX",		CPUI960_HX},
	{"i960RX",		CPUI960_RX},
	{"i960 80303",		CPUI960_80303},
	{"StrongARM SA110",	CPUARM_SA110},
	{"PPC603e",		CPUPPC_603e},
	{"XScale 80321",	CPU_XSCALE_80321},
	{"MIPS 4KC",		CPU_MIPS_4KC},
	{"MIPS 5KC",		CPU_MIPS_5KC},
	{"Unknown StrongARM",	CPUARM_xxx},
	{"Unknown PowerPC",	CPUPPC_xxx},
	{NULL, 0},
	{"Unknown processor",	0}
};
169 
/*
 * Battery platform descriptions for aac_describe_code(); same sentinel
 * convention as aac_cpu_variant: {NULL, 0} ends the table and the final
 * entry is the unknown-code fallback.
 */
static struct aac_code_lookup aac_battery_platform[] = {
	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
	{NULL, 0},
	{"unknown battery platform",		0}
};
179 static void	aac_describe_controller(struct aac_softc *sc);
180 static char	*aac_describe_code(struct aac_code_lookup *table,
181 				   u_int32_t code);
182 
183 /* Management Interface */
184 static d_open_t		aac_open;
185 static d_ioctl_t	aac_ioctl;
186 static d_poll_t		aac_poll;
187 #if __FreeBSD_version >= 702000
188 static void		aac_cdevpriv_dtor(void *arg);
189 #else
190 static d_close_t	aac_close;
191 #endif
192 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
193 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
194 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
195 static void	aac_request_aif(struct aac_softc *sc);
196 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
197 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
198 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_return_aif(struct aac_softc *sc,
201 			       struct aac_fib_context *ctx, caddr_t uptr);
202 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
203 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
205 static void	aac_ioctl_event(struct aac_softc *sc,
206 				struct aac_event *event, void *arg);
207 static int	aac_reset_adapter(struct aac_softc *sc);
208 static int	aac_get_container_info(struct aac_softc *sc,
209 				       struct aac_fib *fib, int cid,
210 				       struct aac_mntinforesp *mir,
211 				       u_int32_t *uid);
212 static u_int32_t
213 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
214 
/*
 * Character-device switch for the /dev/aacraidN management node created
 * in aacraid_attach().
 */
static struct cdevsw aacraid_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,	/* entry points run under Giant */
	.d_open =	aac_open,
#if __FreeBSD_version < 702000
	/* pre-cdevpriv kernels need an explicit close method */
	.d_close =	aac_close,
#endif
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aacraid",
};
226 
227 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
228 
229 /* sysctl node */
230 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
231 
232 /*
233  * Device Interface
234  */
235 
236 /*
237  * Initialize the controller and softc
238  */
int
aacraid_attach(struct aac_softc *sc)
{
	int error, unit;
	struct aac_fib *fib;
	struct aac_mntinforesp mir;
	int count = 0, i = 0;
	u_int32_t uid;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->hint_flags = device_get_flags(sc->aac_dev);
	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	sc->msi_enabled = FALSE;
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

#if __FreeBSD_version >= 800000
	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
#endif
	/*
	 * Initialize the adapter: allocate DMA tags, the common area and
	 * the FIB pool, then (unless in sync mode) bring up the new-comm
	 * interface.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
		aac_define_int_mode(sc);
		if ((error = aac_init(sc)) != 0)
			return(error);
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aacraid%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Find containers */
	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	/*
	 * loop over possible containers; the first successful probe (i == 0)
	 * also tells us how many containers the firmware reports.
	 *
	 * NOTE(review): on a probe failure "continue" re-evaluates the loop
	 * condition without advancing i, so a persistently failing cid > 0
	 * would spin here — confirm the firmware cannot return a permanent
	 * per-cid error from aac_get_container_info().
	 */
	do {
		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
			continue;
		if (i == 0)
			count = mir.MntRespCount;
		aac_add_container(sc, &mir, 0, uid);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* Register with CAM for the containers */
	TAILQ_INIT(&sc->aac_sim_tqh);
	aac_container_bus(sc);
	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
		aac_get_bus_info(sc);

	/* poke the bus to actually attach the child devices */
	bus_generic_attach(sc->aac_dev);

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);

	/* arm the periodic daemon; first run in 60 seconds */
#if __FreeBSD_version >= 800000
	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);
#else
	{
		struct timeval tv;
		tv.tv_sec = 60;
		tv.tv_usec = 0;
		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
	}
#endif

	return(0);
}
365 
/*
 * Periodic housekeeping callout: pushes the current host time to the
 * adapter via a SendHostTime FIB and re-arms itself to run every 30
 * minutes.  On 8.x+ the callout was initialized with callout_init_mtx(),
 * so it is entered with the I/O lock already held; older kernels take
 * the lock explicitly here.
 */
static void
aac_daemon(void *arg)
{
	struct aac_softc *sc;
	struct timeval tv;
	struct aac_command *cm;
	struct aac_fib *fib;

	sc = arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

#if __FreeBSD_version >= 800000
	mtx_assert(&sc->aac_io_lock, MA_OWNED);
	/* bail if we are being rescheduled or the callout was stopped */
	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
#else
	mtx_lock(&sc->aac_io_lock);
#endif
	getmicrotime(&tv);

	/* best effort: silently skip this cycle if no command is free */
	if (!aacraid_alloc_command(sc, &cm)) {
		fib = cm->cm_fib;
		cm->cm_timestamp = time_uptime;
		cm->cm_datalen = 0;
		cm->cm_flags |= AAC_CMD_WAIT;

		/* payload is a single 32-bit seconds value */
		fib->Header.Size =
			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
		fib->Header.XferState =
			AAC_FIBSTATE_HOSTOWNED   |
			AAC_FIBSTATE_INITIALISED |
			AAC_FIBSTATE_EMPTY	 |
			AAC_FIBSTATE_FROMHOST	 |
			AAC_FIBSTATE_REXPECTED   |
			AAC_FIBSTATE_NORM	 |
			AAC_FIBSTATE_ASYNC	 |
			AAC_FIBSTATE_FAST_RESPONSE;
		fib->Header.Command = SendHostTime;
		*(uint32_t *)fib->data = tv.tv_sec;

		aacraid_map_command_sg(cm, NULL, 0, 0);
		aacraid_release_command(cm);
	}

	/* reschedule ourselves 30 minutes out */
#if __FreeBSD_version >= 800000
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
#else
	mtx_unlock(&sc->aac_io_lock);
	tv.tv_sec = 30 * 60;
	tv.tv_usec = 0;
	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
#endif
}
420 
421 void
422 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
423 {
424 
425 	switch (event->ev_type & AAC_EVENT_MASK) {
426 	case AAC_EVENT_CMFREE:
427 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
428 		break;
429 	default:
430 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
431 		    event->ev_type);
432 		break;
433 	}
434 
435 	return;
436 }
437 
438 /*
439  * Request information of container #cid
440  */
441 static int
442 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
443 		       struct aac_mntinforesp *mir, u_int32_t *uid)
444 {
445 	struct aac_command *cm;
446 	struct aac_fib *fib;
447 	struct aac_mntinfo *mi;
448 	struct aac_cnt_config *ccfg;
449 	int rval;
450 
451 	if (sync_fib == NULL) {
452 		if (aacraid_alloc_command(sc, &cm)) {
453 			device_printf(sc->aac_dev,
454 				"Warning, no free command available\n");
455 			return (-1);
456 		}
457 		fib = cm->cm_fib;
458 	} else {
459 		fib = sync_fib;
460 	}
461 
462 	mi = (struct aac_mntinfo *)&fib->data[0];
463 	/* 4KB support?, 64-bit LBA? */
464 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
465 		mi->Command = VM_NameServeAllBlk;
466 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
467 		mi->Command = VM_NameServe64;
468 	else
469 		mi->Command = VM_NameServe;
470 	mi->MntType = FT_FILESYS;
471 	mi->MntCount = cid;
472 
473 	if (sync_fib) {
474 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
475 			 sizeof(struct aac_mntinfo))) {
476 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
477 			return (-1);
478 		}
479 	} else {
480 		cm->cm_timestamp = time_uptime;
481 		cm->cm_datalen = 0;
482 
483 		fib->Header.Size =
484 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
485 		fib->Header.XferState =
486 			AAC_FIBSTATE_HOSTOWNED   |
487 			AAC_FIBSTATE_INITIALISED |
488 			AAC_FIBSTATE_EMPTY	 |
489 			AAC_FIBSTATE_FROMHOST	 |
490 			AAC_FIBSTATE_REXPECTED   |
491 			AAC_FIBSTATE_NORM	 |
492 			AAC_FIBSTATE_ASYNC	 |
493 			AAC_FIBSTATE_FAST_RESPONSE;
494 		fib->Header.Command = ContainerCommand;
495 		if (aacraid_wait_command(cm) != 0) {
496 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
497 			aacraid_release_command(cm);
498 			return (-1);
499 		}
500 	}
501 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
502 
503 	/* UID */
504 	*uid = cid;
505 	if (mir->MntTable[0].VolType != CT_NONE &&
506 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
507 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
508 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
509 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
510 		}
511 		ccfg = (struct aac_cnt_config *)&fib->data[0];
512 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
513 		ccfg->Command = VM_ContainerConfig;
514 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
515 		ccfg->CTCommand.param[0] = cid;
516 
517 		if (sync_fib) {
518 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
519 				sizeof(struct aac_cnt_config));
520 			if (rval == 0 && ccfg->Command == ST_OK &&
521 				ccfg->CTCommand.param[0] == CT_OK &&
522 				mir->MntTable[0].VolType != CT_PASSTHRU)
523 				*uid = ccfg->CTCommand.param[1];
524 		} else {
525 			fib->Header.Size =
526 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
527 			fib->Header.XferState =
528 				AAC_FIBSTATE_HOSTOWNED   |
529 				AAC_FIBSTATE_INITIALISED |
530 				AAC_FIBSTATE_EMPTY	 |
531 				AAC_FIBSTATE_FROMHOST	 |
532 				AAC_FIBSTATE_REXPECTED   |
533 				AAC_FIBSTATE_NORM	 |
534 				AAC_FIBSTATE_ASYNC	 |
535 				AAC_FIBSTATE_FAST_RESPONSE;
536 			fib->Header.Command = ContainerCommand;
537 			rval = aacraid_wait_command(cm);
538 			if (rval == 0 && ccfg->Command == ST_OK &&
539 				ccfg->CTCommand.param[0] == CT_OK &&
540 				mir->MntTable[0].VolType != CT_PASSTHRU)
541 				*uid = ccfg->CTCommand.param[1];
542 			aacraid_release_command(cm);
543 		}
544 	}
545 
546 	return (0);
547 }
548 
549 /*
550  * Create a device to represent a new container
551  */
552 static void
553 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
554 		  u_int32_t uid)
555 {
556 	struct aac_container *co;
557 
558 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
559 
560 	/*
561 	 * Check container volume type for validity.  Note that many of
562 	 * the possible types may never show up.
563 	 */
564 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
565 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
566 		       M_NOWAIT | M_ZERO);
567 		if (co == NULL) {
568 			panic("Out of memory?!");
569 		}
570 
571 		co->co_found = f;
572 		bcopy(&mir->MntTable[0], &co->co_mntobj,
573 		      sizeof(struct aac_mntobj));
574 		co->co_uid = uid;
575 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
576 	}
577 }
578 
579 /*
580  * Allocate resources associated with (sc)
581  */
/*
 * Allocate the controller's DMA tags, common area and initial FIB pool.
 * Returns 0 on success or ENOMEM; partially allocated resources are
 * torn down later by aacraid_free().
 */
static int
aac_alloc(struct aac_softc *sc)
{
	bus_size_t maxsize;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       sc->aac_max_sectors << 9, /* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space..
	 * The extra 31 bytes presumably leave room to round each FIB up to a
	 * 32-byte boundary within the allocation — TODO confirm against the
	 * FIB carving code in aac_alloc_commands().
	 */
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
			sizeof(struct aac_fib_xporthdr) + 31);
	else
		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr: 31-bit window */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       maxsize,  		/* maxsize */
			       1,			/* nsegments */
			       maxsize,			/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.  The
	 * tail of the allocation holds one u_int32_t per FIB (the host
	 * response ring, ac_host_rrq).
	 */
	maxsize = sizeof(struct aac_common);
	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       maxsize, 		/* maxsize */
			       1,			/* nsegments */
			       maxsize,			/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/* aac_common_map records the bus address in the softc */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, maxsize,
			aac_common_map, sc, 0);
	bzero(sc->aac_common, maxsize);

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
	mtx_lock(&sc->aac_io_lock);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	mtx_unlock(&sc->aac_io_lock);
	/* succeed as long as at least one FIB batch was allocated */
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}
683 
684 /*
685  * Free all of the resources associated with (sc)
686  *
687  * Should not be called if the controller is active.
688  */
/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.  Teardown is
 * roughly the reverse of aac_alloc()/aac_setup_intr(); each resource is
 * checked before release so this is safe after a partial attach.
 */
void
aacraid_free(struct aac_softc *sc)
{
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACRAIDBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/*
	 * disconnect the interrupt handler; vectors are allocated
	 * contiguously, so stop at the first empty IRQ slot
	 */
	for (i = 0; i < AAC_MAX_MSIX; ++i) {
		if (sc->aac_intr[i])
			bus_teardown_intr(sc->aac_dev,
				sc->aac_irq[i], sc->aac_intr[i]);
		if (sc->aac_irq[i])
			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
				sc->aac_irq_rid[i], sc->aac_irq[i]);
		else
			break;
	}
	if (sc->msi_enabled)
		pci_release_msi(sc->aac_dev);

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid0, sc->aac_regs_res0);
	if (sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid1, sc->aac_regs_res1);
}
746 
747 /*
748  * Disconnect from the controller completely, in preparation for unload.
749  */
/*
 * Disconnect from the controller completely, in preparation for unload.
 *
 * Stops the periodic daemon and the AIF kthread, deletes child devices,
 * shuts the adapter down and frees every resource.  Returns 0 or the
 * first error from child deletion/shutdown (in which case teardown is
 * left incomplete).
 */
int
aacraid_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* stop the periodic time-sync daemon */
#if __FreeBSD_version >= 800000
	callout_drain(&sc->aac_daemontime);
#else
	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
#endif
	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACRAIDBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACRAIDBUF);
	}

	/* ask the AIF thread to exit and give it up to 30s to comply */
	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
	}

	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
		panic("Cannot shutdown AIF thread");

	if ((error = aacraid_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aacraid_free(sc);

	mtx_destroy(&sc->aac_io_lock);

	return(0);
}
801 
802 /*
803  * Bring the controller down to a dormant state and detach all child devices.
804  *
805  * This function is called before detach or system shutdown.
806  *
807  * Note that we can assume that the bioq on the controller is empty, as we won't
808  * allow shutdown if any device is open.
809  */
/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aacraid_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* block further I/O submission */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	/* 0xfffffffe = close every container */
	cc->ContainerId = 0xfffffffe;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");

	/* a failed close is only logged; proceed with teardown anyway */
	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}
848 
849 /*
850  * Bring the controller to a quiescent state, ready for system suspend.
851  */
852 int
853 aacraid_suspend(device_t dev)
854 {
855 	struct aac_softc *sc;
856 
857 	sc = device_get_softc(dev);
858 
859 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
860 	sc->aac_state |= AAC_STATE_SUSPEND;
861 
862 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
863 	return(0);
864 }
865 
866 /*
867  * Bring the controller back to a state ready for operation.
868  */
869 int
870 aacraid_resume(device_t dev)
871 {
872 	struct aac_softc *sc;
873 
874 	sc = device_get_softc(dev);
875 
876 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
877 	sc->aac_state &= ~AAC_STATE_SUSPEND;
878 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
879 	return(0);
880 }
881 
882 /*
883  * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface.
884  */
void
aacraid_new_intr_type1(void *arg)
{
	struct aac_msix_ctx *ctx;
	struct aac_softc *sc;
	int vector_no;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t bellbits, bellbits_shifted, index, handle;
	int isFastResponse, isAif, noMoreAif, mode;

	ctx = (struct aac_msix_ctx *)arg;
	sc = ctx->sc;
	vector_no = ctx->vector_no;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);

	/*
	 * Decode the doorbell to decide what kind of work is pending:
	 * AIF, sync-command completion and/or normal response-ring work.
	 */
	if (sc->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		/* only vector 0 carries AIF/sync doorbell indications */
		if (vector_no == 0) {
			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
			bellbits = AAC_DB_RESPONSE_SENT_NS;
			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
		} else {
			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
			if (bellbits_shifted & AAC_DB_AIF_PENDING)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
				mode |= AAC_INT_MODE_SYNC;
		}
		/* ODR readback, Prep #238630 */
		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
	}

	/* complete the single outstanding synchronous command, if any */
	if (mode & AAC_INT_MODE_SYNC) {
		if (sc->aac_sync_cm) {
			cm = sc->aac_sync_cm;
			cm->cm_flags |= AAC_CMD_COMPLETED;
			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this command */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
			sc->aac_sync_cm = NULL;
		}
		/* sync completions carry no ring work; skip the loop below */
		mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		if (mode & AAC_INT_MODE_INTX) {
			aac_request_aif(sc);
			mode = 0;
		}
	}

	if (mode) {
		/* handle async. status: walk this vector's response ring */
		index = sc->aac_host_rrq_idx[vector_no];
		for (;;) {
			isFastResponse = isAif = noMoreAif = 0;
			/* remove toggle bit (31) */
			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			/* check AIF bit (23) */
			else if (handle & 0x00800000)
				isAif = TRUE;
			handle &= 0x0000ffff;
			/* handle 0 means the ring entry is empty — done */
			if (handle == 0)
				break;

			/* handles are 1-based command indices */
			cm = sc->aac_commands + (handle - 1);
			fib = cm->cm_fib;
			sc->aac_rrq_outstanding[vector_no]--;
			if (isAif) {
				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
				if (!noMoreAif)
					aac_handle_aif(sc, fib);
				aac_remove_busy(cm);
				aacraid_release_command(cm);
			} else {
				if (isFastResponse) {
					/* no status payload was DMAed; fake ST_OK */
					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
					*((u_int32_t *)(fib->data)) = ST_OK;
					cm->cm_flags |= AAC_CMD_FASTRESP;
				}
				aac_remove_busy(cm);
				aac_unmap_command(cm);
				cm->cm_flags |= AAC_CMD_COMPLETED;

				/* is there a completion handler? */
				if (cm->cm_complete != NULL) {
					cm->cm_complete(cm);
				} else {
					/* assume that someone is sleeping on this command */
					wakeup(cm);
				}
				sc->flags &= ~AAC_QUEUE_FRZN;
			}

			/* clear the entry and advance, wrapping within this
			 * vector's [vector_no * cap, (vector_no + 1) * cap) slice */
			sc->aac_common->ac_host_rrq[index++] = 0;
			if (index == (vector_no + 1) * sc->aac_vector_cap)
				index = vector_no * sc->aac_vector_cap;
			sc->aac_host_rrq_idx[vector_no] = index;

			if ((isAif && !noMoreAif) || sc->aif_pending)
				aac_request_aif(sc);
		}
	}

	/* MSI-mode AIF indication: re-arm AIF and ack the bit */
	if (mode & AAC_INT_MODE_AIF) {
		aac_request_aif(sc);
		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aacraid_startio(sc);
	mtx_unlock(&sc->aac_io_lock);
}
1021 
1022 /*
1023  * Handle notification of one or more FIBs coming from the controller.
1024  */
1025 static void
1026 aac_command_thread(struct aac_softc *sc)
1027 {
1028 	int retval;
1029 
1030 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1031 
1032 	mtx_lock(&sc->aac_io_lock);
1033 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1034 
1035 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1036 
1037 		retval = 0;
1038 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1039 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1040 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1041 
1042 		/*
1043 		 * First see if any FIBs need to be allocated.  This needs
1044 		 * to be called without the driver lock because contigmalloc
1045 		 * will grab Giant, and would result in an LOR.
1046 		 */
1047 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1048 			aac_alloc_commands(sc);
1049 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1050 			aacraid_startio(sc);
1051 		}
1052 
1053 		/*
1054 		 * While we're here, check to see if any commands are stuck.
1055 		 * This is pretty low-priority, so it's ok if it doesn't
1056 		 * always fire.
1057 		 */
1058 		if (retval == EWOULDBLOCK)
1059 			aac_timeout(sc);
1060 
1061 		/* Check the hardware printf message buffer */
1062 		if (sc->aac_common->ac_printf[0] != 0)
1063 			aac_print_printf(sc);
1064 	}
1065 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1066 	mtx_unlock(&sc->aac_io_lock);
1067 	wakeup(sc->aac_dev);
1068 
1069 	aac_kthread_exit(0);
1070 }
1071 
1072 /*
1073  * Submit a command to the controller, return when it completes.
1074  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1075  *     be stuck here forever.  At the same time, signals are not caught
1076  *     because there is a risk that a signal could wakeup the sleep before
1077  *     the card has a chance to complete the command.  Since there is no way
1078  *     to cancel a command that is in progress, we can't protect against the
1079  *     card completing a command late and spamming the command and data
1080  *     memory.  So, we are held hostage until the command completes.
1081  */
1082 int
1083 aacraid_wait_command(struct aac_command *cm)
1084 {
1085 	struct aac_softc *sc;
1086 	int error;
1087 
1088 	sc = cm->cm_sc;
1089 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1090 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1091 
1092 	/* Put the command on the ready queue and get things going */
1093 	aac_enqueue_ready(cm);
1094 	aacraid_startio(sc);
1095 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1096 	return(error);
1097 }
1098 
1099 /*
1100  *Command Buffer Management
1101  */
1102 
1103 /*
1104  * Allocate a command.
1105  */
1106 int
1107 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1108 {
1109 	struct aac_command *cm;
1110 
1111 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1112 
1113 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1114 		if (sc->total_fibs < sc->aac_max_fibs) {
1115 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1116 			wakeup(sc->aifthread);
1117 		}
1118 		return (EBUSY);
1119 	}
1120 
1121 	*cmp = cm;
1122 	return(0);
1123 }
1124 
1125 /*
1126  * Release a command back to the freelist.
1127  */
1128 void
1129 aacraid_release_command(struct aac_command *cm)
1130 {
1131 	struct aac_event *event;
1132 	struct aac_softc *sc;
1133 
1134 	sc = cm->cm_sc;
1135 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1136 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1137 
1138 	/* (re)initialize the command/FIB */
1139 	cm->cm_sgtable = NULL;
1140 	cm->cm_flags = 0;
1141 	cm->cm_complete = NULL;
1142 	cm->cm_ccb = NULL;
1143 	cm->cm_passthr_dmat = 0;
1144 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1145 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1146 	cm->cm_fib->Header.Unused = 0;
1147 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1148 
1149 	/*
1150 	 * These are duplicated in aac_start to cover the case where an
1151 	 * intermediate stage may have destroyed them.  They're left
1152 	 * initialized here for debugging purposes only.
1153 	 */
1154 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1155 	cm->cm_fib->Header.Handle = 0;
1156 
1157 	aac_enqueue_free(cm);
1158 
1159 	/*
1160 	 * Dequeue all events so that there's no risk of events getting
1161 	 * stranded.
1162 	 */
1163 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1164 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1165 		event->ev_callback(sc, event, event->ev_arg);
1166 	}
1167 }
1168 
1169 /*
1170  * Map helper for command/FIB allocation.
1171  */
1172 static void
1173 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1174 {
1175 	uint64_t	*fibphys;
1176 
1177 	fibphys = (uint64_t *)arg;
1178 
1179 	*fibphys = segs[0].ds_addr;
1180 }
1181 
1182 /*
1183  * Allocate and initialize commands/FIBs for this adapter.
1184  */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;
	u_int32_t maxsize;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* Never exceed the controller's FIB limit. */
	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	/* Drop the I/O lock across the DMA memory allocation and load. */
	mtx_unlock(&sc->aac_io_lock);
	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		free(fm, M_AACRAIDBUF);
		mtx_lock(&sc->aac_io_lock);
		return (ENOMEM);
	}

	/*
	 * Per-FIB slot size: max FIB size plus 31 bytes of slack for
	 * 32-byte alignment; TYPE1 also needs room for the transport
	 * header in front of each FIB.
	 */
	maxsize = sc->aac_max_fib_size + 31;
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize += sizeof(struct aac_fib_xporthdr);
	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * maxsize,
			      aac_map_command_helper, &fibphys, 0);
	mtx_lock(&sc->aac_io_lock);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		/* fm->aac_commands ends up pointing at the first cm of this map */
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i * maxsize);
		cm->cm_fibphys = fibphys + i * maxsize;
		/*
		 * Round the FIB up to a 32-byte boundary, keeping the
		 * virtual and physical addresses in step.  TYPE1 aligns
		 * past the transport header so the header precedes the
		 * aligned FIB.
		 */
		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
			u_int64_t fibphys_aligned;
			fibphys_aligned =
				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
			cm->cm_fib = (struct aac_fib *)
				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
			cm->cm_fibphys = fibphys_aligned;
		} else {
			u_int64_t fibphys_aligned;
			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
			cm->cm_fib = (struct aac_fib *)
				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
			cm->cm_fibphys = fibphys_aligned;
		}
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		/*
		 * Release all but (possibly) the last command to the free
		 * list; the last one may be reserved (e.g. as a sync FIB).
		 */
		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
			aacraid_release_command(cm);
		sc->total_fibs++;
	}

	/* If at least one command was set up, keep the map. */
	if (i > 0) {
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		return (0);
	}

	/* Nothing usable was created; roll back the DMA allocation. */
	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACRAIDBUF);
	return (ENOMEM);
}
1268 
1269 /*
1270  * Free FIBs owned by this adapter.
1271  */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 *
		 * NOTE(review): the post-decrement fires even on the
		 * evaluation that terminates the loop, so total_fibs can
		 * be left at -1 after a partially allocated block —
		 * presumably harmless since this runs at detach time, but
		 * worth confirming.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		/* Tear down the DMA mapping and release the FIB memory. */
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACRAIDBUF);
	}
}
1297 
1298 /*
1299  * Command-mapping helper function - populate this command's s/g table.
1300  */
1301 void
1302 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1303 {
1304 	struct aac_softc *sc;
1305 	struct aac_command *cm;
1306 	struct aac_fib *fib;
1307 	int i;
1308 
1309 	cm = (struct aac_command *)arg;
1310 	sc = cm->cm_sc;
1311 	fib = cm->cm_fib;
1312 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1313 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1314 
1315 	/* copy into the FIB */
1316 	if (cm->cm_sgtable != NULL) {
1317 		if (fib->Header.Command == RawIo2) {
1318 			struct aac_raw_io2 *raw;
1319 			struct aac_sge_ieee1212 *sg;
1320 			u_int32_t min_size = PAGE_SIZE, cur_size;
1321 			int conformable = TRUE;
1322 
1323 			raw = (struct aac_raw_io2 *)&fib->data[0];
1324 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1325 			raw->sgeCnt = nseg;
1326 
1327 			for (i = 0; i < nseg; i++) {
1328 				cur_size = segs[i].ds_len;
1329 				sg[i].addrHigh = 0;
1330 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1331 				sg[i].length = cur_size;
1332 				sg[i].flags = 0;
1333 				if (i == 0) {
1334 					raw->sgeFirstSize = cur_size;
1335 				} else if (i == 1) {
1336 					raw->sgeNominalSize = cur_size;
1337 					min_size = cur_size;
1338 				} else if ((i+1) < nseg &&
1339 					cur_size != raw->sgeNominalSize) {
1340 					conformable = FALSE;
1341 					if (cur_size < min_size)
1342 						min_size = cur_size;
1343 				}
1344 			}
1345 
1346 			/* not conformable: evaluate required sg elements */
1347 			if (!conformable) {
1348 				int j, err_found, nseg_new = nseg;
1349 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1350 					err_found = FALSE;
1351 					nseg_new = 2;
1352 					for (j = 1; j < nseg - 1; ++j) {
1353 						if (sg[j].length % (i*PAGE_SIZE)) {
1354 							err_found = TRUE;
1355 							break;
1356 						}
1357 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1358 					}
1359 					if (!err_found)
1360 						break;
1361 				}
1362 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1363 					!(sc->hint_flags & 4))
1364 					nseg = aac_convert_sgraw2(sc,
1365 						raw, i, nseg, nseg_new);
1366 			} else {
1367 				raw->flags |= RIO2_SGL_CONFORMANT;
1368 			}
1369 
1370 			/* update the FIB size for the s/g count */
1371 			fib->Header.Size += nseg *
1372 				sizeof(struct aac_sge_ieee1212);
1373 
1374 		} else if (fib->Header.Command == RawIo) {
1375 			struct aac_sg_tableraw *sg;
1376 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1377 			sg->SgCount = nseg;
1378 			for (i = 0; i < nseg; i++) {
1379 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1380 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1381 				sg->SgEntryRaw[i].Next = 0;
1382 				sg->SgEntryRaw[i].Prev = 0;
1383 				sg->SgEntryRaw[i].Flags = 0;
1384 			}
1385 			/* update the FIB size for the s/g count */
1386 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1387 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1388 			struct aac_sg_table *sg;
1389 			sg = cm->cm_sgtable;
1390 			sg->SgCount = nseg;
1391 			for (i = 0; i < nseg; i++) {
1392 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1393 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1394 			}
1395 			/* update the FIB size for the s/g count */
1396 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1397 		} else {
1398 			struct aac_sg_table64 *sg;
1399 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1400 			sg->SgCount = nseg;
1401 			for (i = 0; i < nseg; i++) {
1402 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1403 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1404 			}
1405 			/* update the FIB size for the s/g count */
1406 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1407 		}
1408 	}
1409 
1410 	/* Fix up the address values in the FIB.  Use the command array index
1411 	 * instead of a pointer since these fields are only 32 bits.  Shift
1412 	 * the SenderFibAddress over to make room for the fast response bit
1413 	 * and for the AIF bit
1414 	 */
1415 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1416 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1417 
1418 	/* save a pointer to the command for speedy reverse-lookup */
1419 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1420 
1421 	if (cm->cm_passthr_dmat == 0) {
1422 		if (cm->cm_flags & AAC_CMD_DATAIN)
1423 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1424 							BUS_DMASYNC_PREREAD);
1425 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1426 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1427 							BUS_DMASYNC_PREWRITE);
1428 	}
1429 
1430 	cm->cm_flags |= AAC_CMD_MAPPED;
1431 
1432 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1433 		u_int32_t wait = 0;
1434 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1435 	} else if (cm->cm_flags & AAC_CMD_WAIT) {
1436 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1437 	} else {
1438 		int count = 10000000L;
1439 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1440 			if (--count == 0) {
1441 				aac_unmap_command(cm);
1442 				sc->flags |= AAC_QUEUE_FRZN;
1443 				aac_requeue_ready(cm);
1444 			}
1445 			DELAY(5);			/* wait 5 usec. */
1446 		}
1447 	}
1448 }
1449 
1450 
/*
 * Rebuild a non-conformant RawIo2 s/g list so that every middle element
 * is exactly 'pages' pages long, in place in 'raw'.  Returns the new
 * element count, or the original 'nseg' if the scratch buffer cannot be
 * allocated (the list is then left unchanged).
 */
static int
aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
				   int pages, int nseg, int nseg_new)
{
	struct aac_sge_ieee1212 *sge;
	int i, j, pos;
	u_int32_t addr_low;

	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (sge == NULL)
		return nseg;

	/*
	 * Split each middle element (indices 1..nseg-2) into pieces of
	 * pages*PAGE_SIZE; the caller guarantees the lengths divide evenly
	 * and that the result fits in nseg_new entries.
	 */
	for (i = 1, pos = 1; i < nseg - 1; ++i) {
		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
			sge[pos].addrLow = addr_low;
			sge[pos].addrHigh = raw->sge[i].addrHigh;
			/* carry into the high word if addrLow wrapped */
			if (addr_low < raw->sge[i].addrLow)
				sge[pos].addrHigh++;
			sge[pos].length = pages * PAGE_SIZE;
			sge[pos].flags = 0;
			pos++;
		}
	}
	/* keep the (possibly short) final element as-is; pos == nseg_new-1 here */
	sge[pos] = raw->sge[nseg-1];
	/* copy back everything but element 0, which was never moved */
	for (i = 1; i < nseg_new; ++i)
		raw->sge[i] = sge[i];

	free(sge, M_AACRAIDBUF);
	raw->sgeCnt = nseg_new;
	raw->flags |= RIO2_SGL_CONFORMANT;
	raw->sgeNominalSize = pages * PAGE_SIZE;
	return nseg_new;
}
1486 
1487 
1488 /*
1489  * Unmap a command from controller-visible space.
1490  */
1491 static void
1492 aac_unmap_command(struct aac_command *cm)
1493 {
1494 	struct aac_softc *sc;
1495 
1496 	sc = cm->cm_sc;
1497 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1498 
1499 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1500 		return;
1501 
1502 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1503 		if (cm->cm_flags & AAC_CMD_DATAIN)
1504 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1505 					BUS_DMASYNC_POSTREAD);
1506 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1507 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1508 					BUS_DMASYNC_POSTWRITE);
1509 
1510 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1511 	}
1512 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1513 }
1514 
1515 /*
1516  * Hardware Interface
1517  */
1518 
1519 /*
1520  * Initialize the adapter.
1521  */
1522 static void
1523 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1524 {
1525 	struct aac_softc *sc;
1526 
1527 	sc = (struct aac_softc *)arg;
1528 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1529 
1530 	sc->aac_common_busaddr = segs[0].ds_addr;
1531 }
1532 
/*
 * Wait for the adapter firmware to become ready, sanity-check the
 * firmware revision, read the supported-options and preferred-settings
 * mailboxes, and size the driver's FIB/sg parameters accordingly.
 * Returns 0 on success or an errno on fatal incompatibility/timeout.
 */
static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, maxsize;
	u_int32_t options = 0, atu_size = 0, status, waitCount;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* check if flash update is running */
	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
		then = time_uptime;
		do {
			code = AAC_GET_FWSTATUS(sc);
			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
				device_printf(sc->aac_dev,
						  "FATAL: controller not coming ready, "
						   "status %x\n", code);
				return(ENXIO);
			}
		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
		/*
		 * Delay 10 seconds. Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		waitCount = 10 * 10000;
		while (waitCount) {
			DELAY(100);		/* delay 100 microseconds */
			waitCount--;
		}
	}

	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		/* 0xffffffff typically means the device fell off the bus */
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
					   "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL, NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}
	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			     "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		/* 64-bit s/g only when the OS, the FW and the hint all agree */
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
			&& (sizeof(bus_addr_t) > 4)
			&& (sc->hint_flags & 0x1)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		/* pick the newest communication interface the FW offers */
		if (sc->aac_if.aif_send_command) {
			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
		}
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* this driver only supports the new communication interface */
	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
		device_printf(sc->aac_dev, "Communication interface not supported!\n");
		return (ENXIO);
	}

	if (sc->hint_flags & 2) {
		device_printf(sc->aac_dev,
			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
		sc->flags |= AAC_FLAGS_SYNC_MODE;
	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
		device_printf(sc->aac_dev,
			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
		sc->flags |= AAC_FLAGS_SYNC_MODE;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
		bus_release_resource(
			sc->aac_dev, SYS_RES_MEMORY,
			sc->aac_regs_rid0, sc->aac_regs_res0);
		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
			atu_size, RF_ACTIVE);
		if (sc->aac_regs_res0 == NULL) {
			/* fall back to the original window size */
			sc->aac_regs_res0 = bus_alloc_resource_any(
				sc->aac_dev, SYS_RES_MEMORY,
				&sc->aac_regs_rid0, RF_ACTIVE);
			if (sc->aac_regs_res0 == NULL) {
				device_printf(sc->aac_dev,
					"couldn't allocate register window\n");
				return (ENXIO);
			}
		}
		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	sc->aac_max_aif = 1;
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite64))
		 / sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		 - sizeof(struct aac_blockwrite))
		 / sizeof(struct aac_sg_entry);

	/* Firmware-preferred values override the defaults above. */
	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
			sc->aac_max_fibs = (options & 0xFFFF);
		options = AAC_GET_MAILBOX(sc, 4);
		sc->aac_max_aif = (options & 0xFFFF);
		options = AAC_GET_MAILBOX(sc, 5);
		sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
	}

	/* Clamp the per-FIB slot (FIB + alignment slack) to one page. */
	maxsize = sc->aac_max_fib_size + 31;
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
		maxsize += sizeof(struct aac_fib_xporthdr);
	if (maxsize > PAGE_SIZE) {
    	sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
		maxsize = PAGE_SIZE;
	}
	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

#ifdef AACRAID_DEBUG
	aacraid_get_fw_debug_buffer(sc);
#endif
	return (0);
}
1735 
/*
 * Build the init structure describing the driver's shared-memory layout
 * and hand it to the controller via the INITSTRUCT sync command, then
 * verify the container configuration.  Returns 0 or an errno.
 */
static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init	*ip;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* reset rrq index */
	sc->aac_fibs_pushed_no = 0;
	for (i = 0; i < sc->aac_max_msix; i++)
		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->NoOfMSIXVectors = sc->aac_max_msix;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
 	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
	/* pick the init-structure revision matching the comm. interface */
	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
	}
	ip->MaxNumAif = sc->aac_max_aif;
	ip->HostRRQ_AddrLow =
		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
	/* always 32-bit address */
	ip->HostRRQ_AddrHigh = 0;

	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
		device_printf(sc->aac_dev, "Power Management enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Do controller-type-specific initialisation
	 */
	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);

	/*
	 * Give the init structure to the controller.
	 */
	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
			     sc->aac_common_busaddr +
			     offsetof(struct aac_common, ac_init), 0, 0, 0,
			     NULL, NULL)) {
		device_printf(sc->aac_dev,
			      "error establishing init structure\n");
		error = EIO;
		goto out;
	}

	/*
	 * Check configuration issues
	 */
	if ((error = aac_check_config(sc)) != 0)
		goto out;

	error = 0;
out:
	return(error);
}
1840 
/*
 * Decide the interrupt mode (MSI-X, MSI, or legacy INTx) from what the
 * firmware, the PCI device and the OS each support, and set
 * aac_max_msix/aac_vector_cap accordingly.
 */
static void
aac_define_int_mode(struct aac_softc *sc)
{
	device_t dev;
	int cap, msi_count, error = 0;
	uint32_t val;

	dev = sc->aac_dev;

	/* max. vectors from AAC_MONKER_GETCOMMPREF */
	if (sc->aac_max_msix == 0) {
		/* firmware offered no vectors: single-vector legacy setup */
		sc->aac_max_msix = 1;
		sc->aac_vector_cap = sc->aac_max_fibs;
		return;
	}

	/* OS capability */
	msi_count = pci_msix_count(dev);
	if (msi_count > AAC_MAX_MSIX)
		msi_count = AAC_MAX_MSIX;
	if (msi_count > sc->aac_max_msix)
		msi_count = sc->aac_max_msix;
	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
				   "will try MSI\n", msi_count, error);
		pci_release_msi(dev);
	} else {
		sc->msi_enabled = TRUE;
		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
			msi_count);
	}

	/* MSI-X failed: fall back to a single MSI vector. */
	if (!sc->msi_enabled) {
		msi_count = 1;
		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
			device_printf(dev, "alloc msi failed - err=%d; "
				           "will use INTx\n", error);
			pci_release_msi(dev);
		} else {
			sc->msi_enabled = TRUE;
			device_printf(dev, "using MSI interrupts\n");
		}
	}

	if (sc->msi_enabled) {
		/* now read controller capability from PCI config. space */
		cap = aac_find_pci_capability(sc, PCIY_MSIX);
		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
		if (!(val & AAC_PCI_MSI_ENABLE)) {
			/* device did not actually enable MSI: revert to INTx */
			pci_release_msi(dev);
			sc->msi_enabled = FALSE;
		}
	}

	if (!sc->msi_enabled) {
		device_printf(dev, "using legacy interrupts\n");
		sc->aac_max_msix = 1;
	} else {
		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
		if (sc->aac_max_msix > msi_count)
			sc->aac_max_msix = msi_count;
	}
	/* split the FIB pool evenly across the vectors we ended up with */
	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;

	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
		sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
}
1908 
1909 static int
1910 aac_find_pci_capability(struct aac_softc *sc, int cap)
1911 {
1912 	device_t dev;
1913 	uint32_t status;
1914 	uint8_t ptr;
1915 
1916 	dev = sc->aac_dev;
1917 
1918 	status = pci_read_config(dev, PCIR_STATUS, 2);
1919 	if (!(status & PCIM_STATUS_CAPPRESENT))
1920 		return (0);
1921 
1922 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1923 	switch (status & PCIM_HDRTYPE) {
1924 	case 0:
1925 	case 1:
1926 		ptr = PCIR_CAP_PTR;
1927 		break;
1928 	case 2:
1929 		ptr = PCIR_CAP_PTR_2;
1930 		break;
1931 	default:
1932 		return (0);
1933 		break;
1934 	}
1935 	ptr = pci_read_config(dev, ptr, 1);
1936 
1937 	while (ptr != 0) {
1938 		int next, val;
1939 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1940 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1941 		if (val == cap)
1942 			return (ptr);
1943 		ptr = next;
1944 	}
1945 
1946 	return (0);
1947 }
1948 
1949 static int
1950 aac_setup_intr(struct aac_softc *sc)
1951 {
1952 	int i, msi_count, rid;
1953 	struct resource *res;
1954 	void *tag;
1955 
1956 	msi_count = sc->aac_max_msix;
1957 	rid = (sc->msi_enabled ? 1:0);
1958 
1959 	for (i = 0; i < msi_count; i++, rid++) {
1960 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1961 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1962 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1963 			return (EINVAL);
1964 		}
1965 		sc->aac_irq_rid[i] = rid;
1966 		sc->aac_irq[i] = res;
1967 		if (aac_bus_setup_intr(sc->aac_dev, res,
1968 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1969 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1970 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1971 			return (EINVAL);
1972 		}
1973 		sc->aac_msix[i].vector_no = i;
1974 		sc->aac_msix[i].sc = sc;
1975 		sc->aac_intr[i] = tag;
1976 	}
1977 
1978 	return (0);
1979 }
1980 
/*
 * Query the adapter's configuration status and, when the firmware
 * reports it is safe to do so, auto-commit the configuration.
 * Returns 0 on success, -1 if the status query failed, -2 if the
 * commit failed, and -3 if the adapter flagged the configuration as
 * too dangerous to auto-commit.
 */
static int
aac_check_config(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_cnt_config *ccfg;
	struct aac_cf_status_hdr *cf_shdr;
	int rval;

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* Build a CT_GET_CONFIG_STATUS request in the sync FIB's data area. */
	ccfg = (struct aac_cnt_config *)&fib->data[0];
	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
	ccfg->Command = VM_ContainerConfig;
	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);

	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
		sizeof (struct aac_cnt_config));
	/* The status header is returned in the CT command's data area. */
	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
	if (rval == 0 && ccfg->Command == ST_OK &&
		ccfg->CTCommand.param[0] == CT_OK) {
		/* Only commit when the firmware's suggested action is benign. */
		if (cf_shdr->action <= CFACT_PAUSE) {
			/* Reuse the same FIB for the CT_COMMIT_CONFIG request. */
			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
			ccfg->Command = VM_ContainerConfig;
			ccfg->CTCommand.command = CT_COMMIT_CONFIG;

			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
				sizeof (struct aac_cnt_config));
			if (rval == 0 && ccfg->Command == ST_OK &&
				ccfg->CTCommand.param[0] == CT_OK) {
				/* successful completion */
				rval = 0;
			} else {
				/* auto commit aborted due to error(s) */
				rval = -2;
			}
		} else {
			/* auto commit aborted due to adapter indicating
			   config. issues too dangerous to auto commit  */
			rval = -3;
		}
	} else {
		/* error */
		rval = -1;
	}

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);
	return(rval);
}
2032 
2033 /*
2034  * Send a synchronous command to the controller and wait for a result.
2035  * Indicate if the controller completed the command with an error status.
2036  */
int
aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
		 u_int32_t *sp, u_int32_t *r1)
{
	time_t then;
	u_int32_t status;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* populate the mailbox */
	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);

	/* ensure the sync command doorbell flag is cleared */
	if (!sc->msi_enabled)
		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* then set it to signal the adapter */
	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);

	/*
	 * A SYNCFIB issued with sp != NULL and *sp == 0 skips the wait
	 * entirely — presumably a fire-and-forget submission; confirm
	 * against callers before relying on this.
	 */
	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
		/* spin waiting for the command to complete, up to
		 * AAC_SYNC_TIMEOUT seconds of wall-clock time */
		then = time_uptime;
		do {
			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
				return(EIO);
			}
		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));

		/* clear the completion flag */
		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

		/* get the command status from mailbox word 0 */
		status = AAC_GET_MAILBOX(sc, 0);
		if (sp != NULL)
			*sp = status;

		/* optional return parameter from mailbox word 1 */
		if (r1 != NULL)
			*r1 = AAC_GET_MAILBOX(sc, 1);

		/* adapter-reported failure is signalled to the caller as -1 */
		if (status != AAC_SRB_STS_SUCCESS)
			return (-1);
	}
	return(0);
}
2084 
/*
 * Build and submit a synchronous FIB through the dedicated sync-FIB
 * slot in the common area, then wait for the controller to complete it.
 * Must be called with the io lock held and the sync FIB allocated.
 * Returns 0 on success, EINVAL for an oversized payload, EIO on a
 * controller error or timeout.
 */
static int
aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
		 struct aac_fib *fib, u_int16_t datasize)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (datasize > AAC_FIB_DATASIZE)
		return(EINVAL);

	/*
	 * Set up the sync FIB
	 */
	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
				AAC_FIBSTATE_INITIALISED |
				AAC_FIBSTATE_EMPTY;
	fib->Header.XferState |= xferstate;
	fib->Header.Command = command;
	fib->Header.StructType = AAC_FIBTYPE_TFIB;
	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
	fib->Header.SenderSize = sizeof(struct aac_fib);
	fib->Header.SenderFibAddress = 0;	/* Not needed */
	/* Physical address of the sync-FIB slot within the common area. */
	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
		offsetof(struct aac_common, ac_sync_fib);

	/*
	 * Give the FIB to the controller, wait for a response.
	 */
	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
		return(EIO);
	}

	return (0);
}
2121 
2122 /*
2123  * Check for commands that have been outstanding for a suspiciously long time,
2124  * and complain about them.
2125  */
2126 static void
2127 aac_timeout(struct aac_softc *sc)
2128 {
2129 	struct aac_command *cm;
2130 	time_t deadline;
2131 	int timedout;
2132 
2133 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2134 	/*
2135 	 * Traverse the busy command list, bitch about late commands once
2136 	 * only.
2137 	 */
2138 	timedout = 0;
2139 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2140 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2141 		if (cm->cm_timestamp < deadline) {
2142 			device_printf(sc->aac_dev,
2143 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2144 				      cm, (int)(time_uptime-cm->cm_timestamp));
2145 			AAC_PRINT_FIB(sc, cm->cm_fib);
2146 			timedout++;
2147 		}
2148 	}
2149 
2150 	if (timedout)
2151 		aac_reset_adapter(sc);
2152 	aacraid_print_queues(sc);
2153 }
2154 
2155 /*
2156  * Interface Function Vectors
2157  */
2158 
2159 /*
2160  * Read the current firmware status word.
2161  */
static int
aac_src_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* The firmware status word is read from the AAC_SRC_OMR register. */
	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
}
2169 
2170 /*
2171  * Notify the controller of a change in a given queue
2172  */
static void
aac_src_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Post the queue-change bit to the inbound doorbell register. */
	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
}
2180 
2181 /*
2182  * Get the interrupt reason bits
2183  */
2184 static int
2185 aac_src_get_istatus(struct aac_softc *sc)
2186 {
2187 	int val;
2188 
2189 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2190 
2191 	if (sc->msi_enabled) {
2192 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2193 		if (val & AAC_MSI_SYNC_STATUS)
2194 			val = AAC_DB_SYNC_COMMAND;
2195 		else
2196 			val = 0;
2197 	} else {
2198 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2199 	}
2200 	return(val);
2201 }
2202 
2203 /*
2204  * Clear some interrupt reason bits
2205  */
2206 static void
2207 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2208 {
2209 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2210 
2211 	if (sc->msi_enabled) {
2212 		if (mask == AAC_DB_SYNC_COMMAND)
2213 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2214 	} else {
2215 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2216 	}
2217 }
2218 
2219 /*
2220  * Populate the mailbox and set the command word
2221  */
2222 static void
2223 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2224 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2225 {
2226 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2227 
2228 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2229 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2230 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2231 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2232 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2233 }
2234 
2235 static void
2236 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2237 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2238 {
2239 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2240 
2241 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2242 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2243 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2244 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2245 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2246 }
2247 
2248 /*
2249  * Fetch the immediate command status word
2250  */
static int
aac_src_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Mailbox registers are consecutive 32-bit words; 'mb' indexes them. */
	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
}
2258 
static int
aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* SRCv variant: same word indexing at the SRCv mailbox base. */
	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
}
2266 
2267 /*
2268  * Set/clear interrupt masks
2269  */
/*
 * Manipulate the SRC interrupt-mask and inbound-doorbell registers
 * according to 'mode'.  NOTE(review): each write to AAC_SRC_IDBR is
 * followed by a read-back of the same register — presumably to flush
 * the posted write before proceeding; confirm against the hardware
 * documentation.
 */
static void
aac_src_access_devreg(struct aac_softc *sc, int mode)
{
	u_int32_t val;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		/* Unmask the interrupt type matching the current mode. */
		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
				           AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		val |= 0x40;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		val &= ~0x40;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		val |= 0x20;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		val |= 0x10;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		val |= 0x80;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
			val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}
2344 
2345 /*
2346  * New comm. interface: Send command functions
2347  */
static int
aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	struct aac_fib_xporthdr *pFibX;
	u_int32_t fibsize, high_addr;
	u_int64_t address;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");

	/*
	 * With multiple MSI-X vectors, pick a response vector for this FIB
	 * by round-robin, preferring vectors that still have outstanding-
	 * request capacity (aac_vector_cap).  Vector 0 is skipped during
	 * the search and used only as a fallback; AIF requests bypass the
	 * selection entirely and thus stay on vector 0.  The chosen
	 * vector rides in the upper 16 bits of the FIB handle.
	 */
	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
		sc->aac_max_msix > 1) {
		u_int16_t vector_no, first_choice = 0xffff;

		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
		do {
			vector_no += 1;
			if (vector_no == sc->aac_max_msix)
				vector_no = 1;
			if (sc->aac_rrq_outstanding[vector_no] <
				sc->aac_vector_cap)
				break;
			/* remember where we started; stop after a full lap */
			if (0xffff == first_choice)
				first_choice = vector_no;
			else if (vector_no == first_choice)
				break;
		} while (1);
		if (vector_no == first_choice)
			vector_no = 0;
		sc->aac_rrq_outstanding[vector_no]++;
		if (sc->aac_fibs_pushed_no == 0xffffffff)
			sc->aac_fibs_pushed_no = 0;
		else
			sc->aac_fibs_pushed_no++;

		cm->cm_fib->Header.Handle += (vector_no << 16);
	}

	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
		/* Calculate the amount to the fibsize bits */
		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
		/* Fill new FIB header; a 64-bit physical address needs
		 * the TFIB2_64 struct type to carry the high word. */
		address = cm->cm_fibphys;
		high_addr = (u_int32_t)(address >> 32);
		if (high_addr == 0L) {
			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
			cm->cm_fib->Header.u.TimeStamp = 0L;
		} else {
			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
		}
		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
	} else {
		/* Calculate the amount to the fibsize bits */
		fibsize = (sizeof(struct aac_fib_xporthdr) +
		   cm->cm_fib->Header.Size + 127) / 128 - 1;
		/* Fill XPORT header, which sits immediately before the FIB */
		pFibX = (struct aac_fib_xporthdr *)
			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
		pFibX->Handle = cm->cm_fib->Header.Handle;
		pFibX->HostAddress = cm->cm_fibphys;
		pFibX->Size = cm->cm_fib->Header.Size;
		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
		high_addr = (u_int32_t)(address >> 32);
	}

	/*
	 * The size (in 128-byte units, minus one) is encoded in the low
	 * bits of the inbound-queue address, capped at 31.
	 */
	if (fibsize > 31)
		fibsize = 31;
	aac_enqueue_busy(cm);
	if (high_addr) {
		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
	}
	return 0;
}
2424 
2425 /*
2426  * New comm. interface: get, set outbound queue index
2427  */
static int
aac_src_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Not used by the new comm. type1 interface; always -1. */
	return(-1);
}
2435 
static void
aac_src_set_outb_queue(struct aac_softc *sc, int index)
{
	/* Intentional no-op: the new comm. type1 interface has no
	 * host-managed outbound queue index. */
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
}
2441 
2442 /*
2443  * Debugging and Diagnostics
2444  */
2445 
2446 /*
2447  * Print some information about the controller.
2448  */
static void
aac_describe_controller(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_adapter_info	*info;
	char *adapter_type = "Adaptec RAID controller";

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/*
	 * If available, fetch the supplement adapter info for the adapter
	 * type string plus the feature bits and option flags cached on
	 * the softc for later use.
	 */
	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
		fib->data[0] = 0;
		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
		else {
			struct aac_supplement_adapter_info *supp_info;

			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
			adapter_type = (char *)supp_info->AdapterTypeText;
			sc->aac_feature_bits = supp_info->FeatureBits;
			sc->aac_support_opt2 = supp_info->SupportedOptions2;
		}
	}
	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
		adapter_type,
		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);

	fib->data[0] = 0;
	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	/* save the kernel revision structure for later use */
	info = (struct aac_adapter_info *)&fib->data[0];
	sc->aac_revision = info->KernelRevision;

	/* The remaining output is verbose-boot only. */
	if (bootverbose) {
		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
		    "(%dMB cache, %dMB execution), %s\n",
		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
		    info->BufferMem / (1024 * 1024),
		    info->ExecutionMem / (1024 * 1024),
		    aac_describe_code(aac_battery_platform,
		    info->batteryPlatform));

		device_printf(sc->aac_dev,
		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
		    info->KernelRevision.external.comp.major,
		    info->KernelRevision.external.comp.minor,
		    info->KernelRevision.external.comp.dash,
		    info->KernelRevision.buildNumber,
		    (u_int32_t)(info->SerialNumber & 0xffffff));

		/* %b format: decode the option bits into flag names. */
		device_printf(sc->aac_dev, "Supported Options=%b\n",
			      sc->supported_options,
			      "\20"
			      "\1SNAPSHOT"
			      "\2CLUSTERS"
			      "\3WCACHE"
			      "\4DATA64"
			      "\5HOSTTIME"
			      "\6RAID50"
			      "\7WINDOW4GB"
			      "\10SCSIUPGD"
			      "\11SOFTERR"
			      "\12NORECOND"
			      "\13SGMAP64"
			      "\14ALARM"
			      "\15NONDASD"
			      "\16SCSIMGT"
			      "\17RAIDSCSI"
			      "\21ADPTINFO"
			      "\22NEWCOMM"
			      "\23ARRAY64BIT"
			      "\24HEATSENSOR");
	}

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);
}
2536 
2537 /*
2538  * Look up a text description of a numeric error code and return a pointer to
2539  * same.
2540  */
static char *
aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
{
	int i;

	for (i = 0; table[i].string != NULL; i++)
		if (table[i].code == code)
			return(table[i].string);
	/*
	 * Fall back to the catch-all entry that follows the NULL
	 * sentinel.  NOTE(review): this relies on every lookup table
	 * being laid out as { ..., { NULL, ... }, { "unknown...", ... } };
	 * verify against the table definitions before changing.
	 */
	return(table[i + 1].string);
}
2551 
2552 /*
2553  * Management Interface
2554  */
2555 
static int
aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if __FreeBSD_version >= 702000
	/*
	 * Hold the device busy while the cdev is open; the registered
	 * cdevpriv destructor releases it when the last reference to
	 * this open goes away.
	 */
	device_busy(sc->aac_dev);
	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
#endif
	return 0;
}
2569 
/*
 * Management-interface ioctl dispatcher.  For the native FSACTL_*
 * commands the argument is a pointer-to-pointer that is dereferenced
 * once (arg = *(caddr_t*)arg) before intentionally falling through to
 * the Linux-compatible FSACTL_LNX_* handler for the same command.
 */
static int
aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			      sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
	break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust the underland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}
2673 
2674 static int
2675 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2676 {
2677 	struct aac_softc *sc;
2678 	struct aac_fib_context *ctx;
2679 	int revents;
2680 
2681 	sc = dev->si_drv1;
2682 	revents = 0;
2683 
2684 	mtx_lock(&sc->aac_io_lock);
2685 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2686 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2687 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2688 				revents |= poll_events & (POLLIN | POLLRDNORM);
2689 				break;
2690 			}
2691 		}
2692 	}
2693 	mtx_unlock(&sc->aac_io_lock);
2694 
2695 	if (revents == 0) {
2696 		if (poll_events & (POLLIN | POLLRDNORM))
2697 			selrecord(td, &sc->rcv_select);
2698 	}
2699 
2700 	return (revents);
2701 }
2702 
2703 static void
2704 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2705 {
2706 
2707 	switch (event->ev_type) {
2708 	case AAC_EVENT_CMFREE:
2709 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2710 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2711 			aacraid_add_event(sc, event);
2712 			return;
2713 		}
2714 		free(event, M_AACRAIDBUF);
2715 		wakeup(arg);
2716 		break;
2717 	default:
2718 		break;
2719 	}
2720 }
2721 
2722 /*
2723  * Send a FIB supplied from userspace
2724  */
static int
aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
{
	struct aac_command *cm;
	int size, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;

	/*
	 * Get a command; if none is free, register a CMFREE event and
	 * sleep until one becomes available.
	 */
	mtx_lock(&sc->aac_io_lock);
	if (aacraid_alloc_command(sc, &cm)) {
		struct aac_event *event;

		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
		    M_NOWAIT | M_ZERO);
		if (event == NULL) {
			error = EBUSY;
			mtx_unlock(&sc->aac_io_lock);
			goto out;
		}
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aacraid_add_event(sc, event);
		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
	}
	mtx_unlock(&sc->aac_io_lock);

	/*
	 * Fetch the FIB header, then re-copy to get data as well.
	 */
	if ((error = copyin(ufib, cm->cm_fib,
			    sizeof(struct aac_fib_header))) != 0)
		goto out;
	/* Clamp an oversized user-supplied FIB to the adapter maximum. */
	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
		goto out;
	cm->cm_fib->Header.Size = size;
	cm->cm_timestamp = time_uptime;
	cm->cm_datalen = 0;

	/*
	 * Pass the FIB to the controller, wait for it to complete.
	 */
	mtx_lock(&sc->aac_io_lock);
	error = aacraid_wait_command(cm);
	mtx_unlock(&sc->aac_io_lock);
	if (error != 0) {
		device_printf(sc->aac_dev,
			      "aacraid_wait_command return %d\n", error);
		goto out;
	}

	/*
	 * Copy the FIB and data back out to the caller.
	 */
	size = cm->cm_fib->Header.Size;
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	error = copyout(cm->cm_fib, ufib, size);

out:
	/* Release the command whether or not the exchange succeeded. */
	if (cm != NULL) {
		mtx_lock(&sc->aac_io_lock);
		aacraid_release_command(cm);
		mtx_unlock(&sc->aac_io_lock);
	}
	return(error);
}
2806 
2807 /*
2808  * Send a passthrough FIB supplied from userspace
2809  */
2810 static int
2811 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2812 {
2813 	struct aac_command *cm;
2814 	struct aac_fib *fib;
2815 	struct aac_srb *srbcmd;
2816 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2817 	void *user_reply;
2818 	int error, transfer_data = 0;
2819 	bus_dmamap_t orig_map = 0;
2820 	u_int32_t fibsize = 0;
2821 	u_int64_t srb_sg_address;
2822 	u_int32_t srb_sg_bytecount;
2823 
2824 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2825 
2826 	cm = NULL;
2827 
2828 	mtx_lock(&sc->aac_io_lock);
2829 	if (aacraid_alloc_command(sc, &cm)) {
2830 		struct aac_event *event;
2831 
2832 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2833 		    M_NOWAIT | M_ZERO);
2834 		if (event == NULL) {
2835 			error = EBUSY;
2836 			mtx_unlock(&sc->aac_io_lock);
2837 			goto out;
2838 		}
2839 		event->ev_type = AAC_EVENT_CMFREE;
2840 		event->ev_callback = aac_ioctl_event;
2841 		event->ev_arg = &cm;
2842 		aacraid_add_event(sc, event);
2843 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2844 	}
2845 	mtx_unlock(&sc->aac_io_lock);
2846 
2847 	cm->cm_data = NULL;
2848 	/* save original dma map */
2849 	orig_map = cm->cm_datamap;
2850 
2851 	fib = cm->cm_fib;
2852 	srbcmd = (struct aac_srb *)fib->data;
2853 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2854 		sizeof (u_int32_t)) != 0))
2855 		goto out;
2856 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2857 		error = EINVAL;
2858 		goto out;
2859 	}
2860 	if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0))
2861 		goto out;
2862 
2863 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2864 	srbcmd->retry_limit = 0;	/* obsolete */
2865 
2866 	/* only one sg element from userspace supported */
2867 	if (srbcmd->sg_map.SgCount > 1) {
2868 		error = EINVAL;
2869 		goto out;
2870 	}
2871 	/* check fibsize */
2872 	if (fibsize == (sizeof(struct aac_srb) +
2873 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2874 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2875 		struct aac_sg_entry sg;
2876 
2877 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2878 			goto out;
2879 
2880 		srb_sg_bytecount = sg.SgByteCount;
2881 		srb_sg_address = (u_int64_t)sg.SgAddress;
2882 	} else if (fibsize == (sizeof(struct aac_srb) +
2883 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2884 #ifdef __LP64__
2885 		struct aac_sg_entry64 *sgp =
2886 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2887 		struct aac_sg_entry64 sg;
2888 
2889 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2890 			goto out;
2891 
2892 		srb_sg_bytecount = sg.SgByteCount;
2893 		srb_sg_address = sg.SgAddress;
2894 		if (srb_sg_address > 0xffffffffull &&
2895 			!(sc->flags & AAC_FLAGS_SG_64BIT))
2896 #endif
2897 		{
2898 			error = EINVAL;
2899 			goto out;
2900 		}
2901 	} else {
2902 		error = EINVAL;
2903 		goto out;
2904 	}
2905 	user_reply = (char *)arg + fibsize;
2906 	srbcmd->data_len = srb_sg_bytecount;
2907 	if (srbcmd->sg_map.SgCount == 1)
2908 		transfer_data = 1;
2909 
2910 	if (transfer_data) {
2911 		/*
2912 		 * Create DMA tag for the passthr. data buffer and allocate it.
2913 		 */
2914 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2915 			1, 0,			/* algnmnt, boundary */
2916 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2917 			BUS_SPACE_MAXADDR_32BIT :
2918 			0x7fffffff,		/* lowaddr */
2919 			BUS_SPACE_MAXADDR, 	/* highaddr */
2920 			NULL, NULL, 		/* filter, filterarg */
2921 			srb_sg_bytecount, 	/* size */
2922 			sc->aac_sg_tablesize,	/* nsegments */
2923 			srb_sg_bytecount, 	/* maxsegsize */
2924 			0,			/* flags */
2925 			NULL, NULL,		/* No locking needed */
2926 			&cm->cm_passthr_dmat)) {
2927 			error = ENOMEM;
2928 			goto out;
2929 		}
2930 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2931 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2932 			error = ENOMEM;
2933 			goto out;
2934 		}
2935 		/* fill some cm variables */
2936 		cm->cm_datalen = srb_sg_bytecount;
2937 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2938 			cm->cm_flags |= AAC_CMD_DATAIN;
2939 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2940 			cm->cm_flags |= AAC_CMD_DATAOUT;
2941 
2942 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2943 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2944 				cm->cm_data, cm->cm_datalen)) != 0)
2945 				goto out;
2946 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2947 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2948 				BUS_DMASYNC_PREWRITE);
2949 		}
2950 	}
2951 
2952 	/* build the FIB */
2953 	fib->Header.Size = sizeof(struct aac_fib_header) +
2954 		sizeof(struct aac_srb);
2955 	fib->Header.XferState =
2956 		AAC_FIBSTATE_HOSTOWNED   |
2957 		AAC_FIBSTATE_INITIALISED |
2958 		AAC_FIBSTATE_EMPTY	 |
2959 		AAC_FIBSTATE_FROMHOST	 |
2960 		AAC_FIBSTATE_REXPECTED   |
2961 		AAC_FIBSTATE_NORM	 |
2962 		AAC_FIBSTATE_ASYNC;
2963 
2964 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2965 		ScsiPortCommandU64 : ScsiPortCommand;
2966 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2967 
2968 	/* send command */
2969 	if (transfer_data) {
2970 		bus_dmamap_load(cm->cm_passthr_dmat,
2971 			cm->cm_datamap, cm->cm_data,
2972 			cm->cm_datalen,
2973 			aacraid_map_command_sg, cm, 0);
2974 	} else {
2975 		aacraid_map_command_sg(cm, NULL, 0, 0);
2976 	}
2977 
2978 	/* wait for completion */
2979 	mtx_lock(&sc->aac_io_lock);
2980 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2981 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2982 	mtx_unlock(&sc->aac_io_lock);
2983 
2984 	/* copy data */
2985 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2986 		if ((error = copyout(cm->cm_data,
2987 			(void *)(uintptr_t)srb_sg_address,
2988 			cm->cm_datalen)) != 0)
2989 			goto out;
2990 		/* sync required for bus_dmamem_alloc() allocated mem.? */
2991 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2992 				BUS_DMASYNC_POSTREAD);
2993 	}
2994 
2995 	/* status */
2996 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2997 
2998 out:
2999 	if (cm && cm->cm_data) {
3000 		if (transfer_data)
3001 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3002 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3003 		cm->cm_datamap = orig_map;
3004 	}
3005 	if (cm && cm->cm_passthr_dmat)
3006 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3007 	if (cm) {
3008 		mtx_lock(&sc->aac_io_lock);
3009 		aacraid_release_command(cm);
3010 		mtx_unlock(&sc->aac_io_lock);
3011 	}
3012 	return(error);
3013 }
3014 
3015 /*
3016  * Request an AIF from the controller (new comm. type1)
3017  */
3018 static void
3019 aac_request_aif(struct aac_softc *sc)
3020 {
3021 	struct aac_command *cm;
3022 	struct aac_fib *fib;
3023 
3024 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3025 
3026 	if (aacraid_alloc_command(sc, &cm)) {
3027 		sc->aif_pending = 1;
3028 		return;
3029 	}
3030 	sc->aif_pending = 0;
3031 
3032 	/* build the FIB */
3033 	fib = cm->cm_fib;
3034 	fib->Header.Size = sizeof(struct aac_fib);
3035 	fib->Header.XferState =
3036         AAC_FIBSTATE_HOSTOWNED   |
3037         AAC_FIBSTATE_INITIALISED |
3038         AAC_FIBSTATE_EMPTY	 |
3039         AAC_FIBSTATE_FROMHOST	 |
3040         AAC_FIBSTATE_REXPECTED   |
3041         AAC_FIBSTATE_NORM	 |
3042         AAC_FIBSTATE_ASYNC;
3043 	/* set AIF marker */
3044 	fib->Header.Handle = 0x00800000;
3045 	fib->Header.Command = AifRequest;
3046 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3047 
3048 	aacraid_map_command_sg(cm, NULL, 0, 0);
3049 }
3050 
3051 
3052 #if __FreeBSD_version >= 702000
3053 /*
3054  * cdevpriv interface private destructor.
3055  */
3056 static void
3057 aac_cdevpriv_dtor(void *arg)
3058 {
3059 	struct aac_softc *sc;
3060 
3061 	sc = arg;
3062 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3063 	mtx_lock(&Giant);
3064 	device_unbusy(sc->aac_dev);
3065 	mtx_unlock(&Giant);
3066 }
3067 #else
3068 static int
3069 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3070 {
3071 	struct aac_softc *sc;
3072 
3073 	sc = dev->si_drv1;
3074 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3075 	return 0;
3076 }
3077 #endif
3078 
3079 /*
3080  * Handle an AIF sent to us by the controller; queue it for later reference.
3081  * If the queue fills up, then drop the older entries.
3082  */
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
	struct aac_aif_command *aif;
	struct aac_container *co, *co_next;
	struct aac_fib_context *ctx;
	struct aac_fib *sync_fib;
	struct aac_mntinforesp mir;
	int next, current, found;
	int count = 0, changed = 0, i = 0;
	u_int32_t channel, uid;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	aif = (struct aac_aif_command*)&fib->data[0];
	aacraid_print_aif(sc, aif);

	/* Is it an event that we should care about? */
	switch (aif->command) {
	case AifCmdEventNotify:
		switch (aif->data.EN.type) {
		case AifEnAddContainer:
		case AifEnDeleteContainer:
			/*
			 * A container was added or deleted, but the message
			 * doesn't tell us anything else!  Re-enumerate the
			 * containers and sort things out.
			 */
			aac_alloc_sync_fib(sc, &sync_fib);
			do {
				/*
				 * Ask the controller for its containers one at
				 * a time.
				 * XXX What if the controller's list changes
				 * midway through this enumaration?
				 * XXX This should be done async.
				 *
				 * NOTE(review): on failure 'continue' jumps to
				 * the loop condition WITHOUT incrementing i;
				 * a persistently failing query after count has
				 * been set would loop forever — TODO confirm.
				 */
				if (aac_get_container_info(sc, sync_fib, i,
					&mir, &uid) != 0)
					continue;
				if (i == 0)
					count = mir.MntRespCount;
				/*
				 * Check the container against our list.
				 * co->co_found was already set to 0 in a
				 * previous run.
				 */
				if ((mir.Status == ST_OK) &&
				    (mir.MntTable[0].VolType != CT_NONE)) {
					found = 0;
					TAILQ_FOREACH(co,
						      &sc->aac_container_tqh,
						      co_link) {
						if (co->co_mntobj.ObjectId ==
						    mir.MntTable[0].ObjectId) {
							co->co_found = 1;
							found = 1;
							break;
						}
					}
					/*
					 * If the container matched, continue
					 * in the list.
					 */
					if (found) {
						i++;
						continue;
					}

					/*
					 * This is a new container.  Do all the
					 * appropriate things to set it up.
					 */
					aac_add_container(sc, &mir, 1, uid);
					changed = 1;
				}
				i++;
			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
			aac_release_sync_fib(sc);

			/*
			 * Go through our list of containers and see which ones
			 * were not marked 'found'.  Since the controller didn't
			 * list them they must have been deleted.  Do the
			 * appropriate steps to destroy the device.  Also reset
			 * the co->co_found field.
			 */
			co = TAILQ_FIRST(&sc->aac_container_tqh);
			while (co != NULL) {
				if (co->co_found == 0) {
					co_next = TAILQ_NEXT(co, co_link);
					TAILQ_REMOVE(&sc->aac_container_tqh, co,
						     co_link);
					free(co, M_AACRAIDBUF);
					changed = 1;
					co = co_next;
				} else {
					co->co_found = 0;
					co = TAILQ_NEXT(co, co_link);
				}
			}

			/* Attach the newly created containers */
			if (changed) {
				if (sc->cam_rescan_cb != NULL)
					sc->cam_rescan_cb(sc, 0,
				    	AAC_CAM_TARGET_WILDCARD);
			}

			break;

		case AifEnEnclosureManagement:
			switch (aif->data.EN.data.EEE.eventType) {
			case AIF_EM_DRIVE_INSERTION:
			case AIF_EM_DRIVE_REMOVAL:
				channel = aif->data.EN.data.EEE.unitID;
				if (sc->cam_rescan_cb != NULL)
					sc->cam_rescan_cb(sc,
					    ((channel>>24) & 0xF) + 1,
					    (channel & 0xFFFF));
				break;
			}
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
		case AifRawDeviceRemove:
			channel = aif->data.EN.data.ECE.container;
			if (sc->cam_rescan_cb != NULL)
				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
				    AAC_CAM_TARGET_WILDCARD);
			break;

		default:
			break;
		}

	/*
	 * NOTE(review): case AifCmdEventNotify falls through to the default
	 * label here (no break); harmless since default does nothing.
	 */
	default:
		break;
	}

	/* Copy the AIF data to the AIF queue for ioctl retrieval */
	current = sc->aifq_idx;
	next = (current + 1) % AAC_AIFQ_LENGTH;
	if (next == 0)
		/* Write cursor wrapped: the ring is full from now on. */
		sc->aifq_filled = 1;
	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
	/* modify AIF contexts */
	if (sc->aifq_filled) {
		/*
		 * Once full, the writer overwrites the oldest slot: mark a
		 * reader context wrapped when the write cursor reaches its
		 * read position, and push an already-wrapped context's read
		 * cursor past the slot just overwritten.
		 */
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (next == ctx->ctx_idx)
				ctx->ctx_wrap = 1;
			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
				ctx->ctx_idx = next;
		}
	}
	sc->aifq_idx = next;
	/* On the off chance that someone is sleeping for an aif... */
	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
		wakeup(sc->aac_aifq);
	/* Wakeup any poll()ers */
	selwakeuppri(&sc->rcv_select, PRIBIO);

	return;
}
3248 
3249 /*
3250  * Return the Revision of the driver to userspace and check to see if the
3251  * userspace app is possibly compatible.  This is extremely bogus since
3252  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3253  * returning what the card reported.
3254  */
3255 static int
3256 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3257 {
3258 	struct aac_rev_check rev_check;
3259 	struct aac_rev_check_resp rev_check_resp;
3260 	int error = 0;
3261 
3262 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3263 
3264 	/*
3265 	 * Copyin the revision struct from userspace
3266 	 */
3267 	if ((error = copyin(udata, (caddr_t)&rev_check,
3268 			sizeof(struct aac_rev_check))) != 0) {
3269 		return error;
3270 	}
3271 
3272 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3273 	      rev_check.callingRevision.buildNumber);
3274 
3275 	/*
3276 	 * Doctor up the response struct.
3277 	 */
3278 	rev_check_resp.possiblyCompatible = 1;
3279 	rev_check_resp.adapterSWRevision.external.comp.major =
3280 	    AAC_DRIVER_MAJOR_VERSION;
3281 	rev_check_resp.adapterSWRevision.external.comp.minor =
3282 	    AAC_DRIVER_MINOR_VERSION;
3283 	rev_check_resp.adapterSWRevision.external.comp.type =
3284 	    AAC_DRIVER_TYPE;
3285 	rev_check_resp.adapterSWRevision.external.comp.dash =
3286 	    AAC_DRIVER_BUGFIX_LEVEL;
3287 	rev_check_resp.adapterSWRevision.buildNumber =
3288 	    AAC_DRIVER_BUILD;
3289 
3290 	return(copyout((caddr_t)&rev_check_resp, udata,
3291 			sizeof(struct aac_rev_check_resp)));
3292 }
3293 
3294 /*
3295  * Pass the fib context to the caller
3296  */
static int
aac_open_aif(struct aac_softc *sc, caddr_t arg)
{
	struct aac_fib_context *fibctx, *ctx;
	int error = 0;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Allocate a zeroed per-caller AIF reader context. */
	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (fibctx == NULL)
		return (ENOMEM);

	mtx_lock(&sc->aac_io_lock);
	/* all elements are already 0, add to queue */
	if (sc->fibctx == NULL)
		sc->fibctx = fibctx;
	else {
		/* Append to the tail of the existing context list. */
		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
			;
		ctx->next = fibctx;
		fibctx->prev = ctx;
	}

	/* evaluate unique value */
	/*
	 * NOTE(review): this dereferences the address of the local pointer
	 * variable, i.e. it reads the low-order 32 bits of the heap pointer
	 * value (on little-endian machines) as the context ID — a
	 * type-punning hack, not a hash.  aac_close_aif() matches IDs the
	 * same way.
	 */
	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
	ctx = sc->fibctx;
	/* Increment the ID until it collides with no earlier context. */
	while (ctx != fibctx) {
		if (ctx->unique == fibctx->unique) {
			fibctx->unique++;
			ctx = sc->fibctx;
		} else {
			ctx = ctx->next;
		}
	}

	/* Hand the unique ID back to the caller. */
	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
	mtx_unlock(&sc->aac_io_lock);
	if (error)
		/*
		 * NOTE(review): cleanup matches on the low 32 bits of the
		 * context pointer, which differ from fibctx->unique when the
		 * collision loop above incremented it; the context would then
		 * leak.  TODO confirm and fix.
		 */
		aac_close_aif(sc, (caddr_t)ctx);
	return error;
}
3338 
3339 /*
3340  * Close the caller's fib context
3341  */
3342 static int
3343 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3344 {
3345 	struct aac_fib_context *ctx;
3346 
3347 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3348 
3349 	mtx_lock(&sc->aac_io_lock);
3350 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3351 		if (ctx->unique == *(uint32_t *)&arg) {
3352 			if (ctx == sc->fibctx)
3353 				sc->fibctx = NULL;
3354 			else {
3355 				ctx->prev->next = ctx->next;
3356 				if (ctx->next)
3357 					ctx->next->prev = ctx->prev;
3358 			}
3359 			break;
3360 		}
3361 	}
3362 	if (ctx)
3363 		free(ctx, M_AACRAIDBUF);
3364 
3365 	mtx_unlock(&sc->aac_io_lock);
3366 	return 0;
3367 }
3368 
3369 /*
3370  * Pass the caller the next AIF in their queue
3371  */
static int
aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
{
	struct get_adapter_fib_ioctl agf;
	struct aac_fib_context *ctx;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
		/* Locate the caller's registered context by unique ID. */
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (agf.AdapterFibContext == ctx->unique)
				break;
		}
		if (!ctx) {
			mtx_unlock(&sc->aac_io_lock);
			return (EFAULT);
		}

		error = aac_return_aif(sc, ctx, agf.AifFib);
		if (error == EAGAIN && agf.Wait) {
			/*
			 * Queue empty and the caller asked to block: sleep
			 * until aac_handle_aif() wakes us, then retry.  The
			 * io lock is dropped across tsleep(); PCATCH lets a
			 * signal interrupt the wait, in which case tsleep()
			 * returns non-zero and the loop exits with that error.
			 */
			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
			while (error == EAGAIN) {
				mtx_unlock(&sc->aac_io_lock);
				error = tsleep(sc->aac_aifq, PRIBIO |
					       PCATCH, "aacaif", 0);
				mtx_lock(&sc->aac_io_lock);
				if (error == 0)
					error = aac_return_aif(sc, ctx, agf.AifFib);
			}
			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
		}
	}
	mtx_unlock(&sc->aac_io_lock);
	return(error);
}
3410 
3411 /*
3412  * Hand the next AIF off the top of the queue out to userspace.
3413  */
3414 static int
3415 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3416 {
3417 	int current, error;
3418 
3419 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3420 
3421 	current = ctx->ctx_idx;
3422 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3423 		/* empty */
3424 		return (EAGAIN);
3425 	}
3426 	error =
3427 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3428 	if (error)
3429 		device_printf(sc->aac_dev,
3430 		    "aac_return_aif: copyout returned %d\n", error);
3431 	else {
3432 		ctx->ctx_wrap = 0;
3433 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3434 	}
3435 	return(error);
3436 }
3437 
3438 static int
3439 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3440 {
3441 	struct aac_pci_info {
3442 		u_int32_t bus;
3443 		u_int32_t slot;
3444 	} pciinf;
3445 	int error;
3446 
3447 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3448 
3449 	pciinf.bus = pci_get_bus(sc->aac_dev);
3450 	pciinf.slot = pci_get_slot(sc->aac_dev);
3451 
3452 	error = copyout((caddr_t)&pciinf, uptr,
3453 			sizeof(struct aac_pci_info));
3454 
3455 	return (error);
3456 }
3457 
3458 static int
3459 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3460 {
3461 	struct aac_features f;
3462 	int error;
3463 
3464 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3465 
3466 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3467 		return (error);
3468 
3469 	/*
3470 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3471 	 * ALL zero in the featuresState, the driver will return the current
3472 	 * state of all the supported features, the data field will not be
3473 	 * valid.
3474 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3475 	 * a specific bit set in the featuresState, the driver will return the
3476 	 * current state of this specific feature and whatever data that are
3477 	 * associated with the feature in the data field or perform whatever
3478 	 * action needed indicates in the data field.
3479 	 */
3480 	 if (f.feat.fValue == 0) {
3481 		f.feat.fBits.largeLBA =
3482 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3483 		f.feat.fBits.JBODSupport = 1;
3484 		/* TODO: In the future, add other features state here as well */
3485 	} else {
3486 		if (f.feat.fBits.largeLBA)
3487 			f.feat.fBits.largeLBA =
3488 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3489 		/* TODO: Add other features state and data in the future */
3490 	}
3491 
3492 	error = copyout(&f, uptr, sizeof (f));
3493 	return (error);
3494 }
3495 
3496 /*
3497  * Give the userland some information about the container.  The AAC arch
3498  * expects the driver to be a SCSI passthrough type driver, so it expects
3499  * the containers to have b:t:l numbers.  Fake it.
3500  */
3501 static int
3502 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3503 {
3504 	struct aac_query_disk query_disk;
3505 	struct aac_container *co;
3506 	int error, id;
3507 
3508 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3509 
3510 	mtx_lock(&sc->aac_io_lock);
3511 	error = copyin(uptr, (caddr_t)&query_disk,
3512 		       sizeof(struct aac_query_disk));
3513 	if (error) {
3514 		mtx_unlock(&sc->aac_io_lock);
3515 		return (error);
3516 	}
3517 
3518 	id = query_disk.ContainerNumber;
3519 	if (id == -1) {
3520 		mtx_unlock(&sc->aac_io_lock);
3521 		return (EINVAL);
3522 	}
3523 
3524 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3525 		if (co->co_mntobj.ObjectId == id)
3526 			break;
3527 		}
3528 
3529 	if (co == NULL) {
3530 			query_disk.Valid = 0;
3531 			query_disk.Locked = 0;
3532 			query_disk.Deleted = 1;		/* XXX is this right? */
3533 	} else {
3534 		query_disk.Valid = 1;
3535 		query_disk.Locked = 1;
3536 		query_disk.Deleted = 0;
3537 		query_disk.Bus = device_get_unit(sc->aac_dev);
3538 		query_disk.Target = 0;
3539 		query_disk.Lun = 0;
3540 		query_disk.UnMapped = 0;
3541 	}
3542 
3543 	error = copyout((caddr_t)&query_disk, uptr,
3544 			sizeof(struct aac_query_disk));
3545 
3546 	mtx_unlock(&sc->aac_io_lock);
3547 	return (error);
3548 }
3549 
3550 static void
3551 aac_container_bus(struct aac_softc *sc)
3552 {
3553 	struct aac_sim *sim;
3554 	device_t child;
3555 
3556 	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3557 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3558 	if (sim == NULL) {
3559 		device_printf(sc->aac_dev,
3560 	    	"No memory to add container bus\n");
3561 		panic("Out of memory?!");
3562 	}
3563 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3564 	if (child == NULL) {
3565 		device_printf(sc->aac_dev,
3566 	    	"device_add_child failed for container bus\n");
3567 		free(sim, M_AACRAIDBUF);
3568 		panic("Out of memory?!");
3569 	}
3570 
3571 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3572 	sim->BusNumber = 0;
3573 	sim->BusType = CONTAINER_BUS;
3574 	sim->InitiatorBusId = -1;
3575 	sim->aac_sc = sc;
3576 	sim->sim_dev = child;
3577 	sim->aac_cam = NULL;
3578 
3579 	device_set_ivars(child, sim);
3580 	device_set_desc(child, "Container Bus");
3581 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3582 	/*
3583 	device_set_desc(child, aac_describe_code(aac_container_types,
3584 			mir->MntTable[0].VolType));
3585 	*/
3586 	bus_generic_attach(sc->aac_dev);
3587 }
3588 
3589 static void
3590 aac_get_bus_info(struct aac_softc *sc)
3591 {
3592 	struct aac_fib *fib;
3593 	struct aac_ctcfg *c_cmd;
3594 	struct aac_ctcfg_resp *c_resp;
3595 	struct aac_vmioctl *vmi;
3596 	struct aac_vmi_businf_resp *vmi_resp;
3597 	struct aac_getbusinf businfo;
3598 	struct aac_sim *caminf;
3599 	device_t child;
3600 	int i, error;
3601 
3602 	mtx_lock(&sc->aac_io_lock);
3603 	aac_alloc_sync_fib(sc, &fib);
3604 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3605 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3606 
3607 	c_cmd->Command = VM_ContainerConfig;
3608 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3609 	c_cmd->param = 0;
3610 
3611 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3612 	    sizeof(struct aac_ctcfg));
3613 	if (error) {
3614 		device_printf(sc->aac_dev, "Error %d sending "
3615 		    "VM_ContainerConfig command\n", error);
3616 		aac_release_sync_fib(sc);
3617 		mtx_unlock(&sc->aac_io_lock);
3618 		return;
3619 	}
3620 
3621 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3622 	if (c_resp->Status != ST_OK) {
3623 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3624 		    c_resp->Status);
3625 		aac_release_sync_fib(sc);
3626 		mtx_unlock(&sc->aac_io_lock);
3627 		return;
3628 	}
3629 
3630 	sc->scsi_method_id = c_resp->param;
3631 
3632 	vmi = (struct aac_vmioctl *)&fib->data[0];
3633 	bzero(vmi, sizeof(struct aac_vmioctl));
3634 
3635 	vmi->Command = VM_Ioctl;
3636 	vmi->ObjType = FT_DRIVE;
3637 	vmi->MethId = sc->scsi_method_id;
3638 	vmi->ObjId = 0;
3639 	vmi->IoctlCmd = GetBusInfo;
3640 
3641 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3642 	    sizeof(struct aac_vmi_businf_resp));
3643 	if (error) {
3644 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3645 		    error);
3646 		aac_release_sync_fib(sc);
3647 		mtx_unlock(&sc->aac_io_lock);
3648 		return;
3649 	}
3650 
3651 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3652 	if (vmi_resp->Status != ST_OK) {
3653 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3654 		    vmi_resp->Status);
3655 		aac_release_sync_fib(sc);
3656 		mtx_unlock(&sc->aac_io_lock);
3657 		return;
3658 	}
3659 
3660 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3661 	aac_release_sync_fib(sc);
3662 	mtx_unlock(&sc->aac_io_lock);
3663 
3664 	for (i = 0; i < businfo.BusCount; i++) {
3665 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3666 			continue;
3667 
3668 		caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim),
3669 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3670 		if (caminf == NULL) {
3671 			device_printf(sc->aac_dev,
3672 			    "No memory to add passthrough bus %d\n", i);
3673 			break;
3674 		}
3675 
3676 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3677 		if (child == NULL) {
3678 			device_printf(sc->aac_dev,
3679 			    "device_add_child failed for passthrough bus %d\n",
3680 			    i);
3681 			free(caminf, M_AACRAIDBUF);
3682 			break;
3683 		}
3684 
3685 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3686 		caminf->BusNumber = i+1;
3687 		caminf->BusType = PASSTHROUGH_BUS;
3688 		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3689 		caminf->aac_sc = sc;
3690 		caminf->sim_dev = child;
3691 		caminf->aac_cam = NULL;
3692 
3693 		device_set_ivars(child, caminf);
3694 		device_set_desc(child, "SCSI Passthrough Bus");
3695 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3696 	}
3697 }
3698 
3699 /*
3700  * Check to see if the kernel is up and running. If we are in a
3701  * BlinkLED state, return the BlinkLED code.
3702  */
3703 static u_int32_t
3704 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3705 {
3706 	u_int32_t ret;
3707 
3708 	ret = AAC_GET_FWSTATUS(sc);
3709 
3710 	if (ret & AAC_UP_AND_RUNNING)
3711 		ret = 0;
3712 	else if (ret & AAC_KERNEL_PANIC && bled)
3713 		*bled = (ret >> 16) & 0xff;
3714 
3715 	return (ret);
3716 }
3717 
3718 /*
3719  * Once do an IOP reset, basically have to re-initialize the card as
3720  * if coming up from a cold boot, and the driver is responsible for
3721  * any IO that was outstanding to the adapter at the time of the IOP
3722  * RESET. And prepare the driver for IOP RESET by making the init code
3723  * modular with the ability to call it from multiple places.
3724  */
static int
aac_reset_adapter(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_pause_command *pc;
	u_int32_t status, reset_mask, waitCount, max_msix_orig;
	int msi_enabled_orig;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* Only one reset may be in flight at a time. */
	if (sc->aac_state & AAC_STATE_RESET) {
		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
		return (EINVAL);
	}
	sc->aac_state |= AAC_STATE_RESET;

	/* disable interrupt */
	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);

	/*
	 * Abort all pending commands:
	 * a) on the controller
	 */
	while ((cm = aac_dequeue_busy(sc)) != NULL) {
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this
			 * command
			 */
			wakeup(cm);
		}
	}

	/* b) in the waiting queues */
	while ((cm = aac_dequeue_ready(sc)) != NULL) {
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this
			 * command
			 */
			wakeup(cm);
		}
	}

	/* flush drives */
	if (aac_check_adapter_health(sc, NULL) == 0) {
		/*
		 * Drop the io lock across the shutdown call — presumably
		 * aacraid_shutdown() may sleep; TODO confirm.
		 */
		mtx_unlock(&sc->aac_io_lock);
		(void) aacraid_shutdown(sc->aac_dev);
		mtx_lock(&sc->aac_io_lock);
	}

	/* execute IOP reset */
	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
		/* Hard reset via the message-unit register. */
		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);

		/* We need to wait for 5 seconds before accessing the MU again
		 * 10000 * 100us = 1000,000us = 1000ms = 1s
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);			/* delay 100 microseconds */
			waitCount--;
		}
	} else if ((aacraid_sync_command(sc,
		AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
		/* call IOP_RESET for older firmware */
		if ((aacraid_sync_command(sc,
			AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {

			if (status == AAC_SRB_STS_INVALID_REQUEST)
				device_printf(sc->aac_dev, "IOP_RESET not supported\n");
			else
				/* probably timeout */
				device_printf(sc->aac_dev, "IOP_RESET failed\n");

			/* unwind aac_shutdown() */
			aac_alloc_sync_fib(sc, &fib);
			pc = (struct aac_pause_command *)&fib->data[0];
			pc->Command = VM_ContainerConfig;
			pc->Type = CT_PAUSE_IO;
			pc->Timeout = 1;
			pc->Min = 1;
			pc->NoRescan = 1;

			(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
				sizeof (struct aac_pause_command));
			aac_release_sync_fib(sc);

			goto finish;
		}
	} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
		/*
		 * IOP_RESET_ALWAYS succeeded (returned 0) and filled in
		 * reset_mask; write it to the doorbell to trigger the reset.
		 */
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
		/*
		 * We need to wait for 5 seconds before accessing the doorbell
		 * again, 10000 * 100us = 1000,000us = 1000ms = 1s
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);		/* delay 100 microseconds */
			waitCount--;
		}
	}

	/*
	 * Initialize the adapter.
	 */
	/* Save MSI-X/MSI settings; firmware is re-probed with MSI off and
	 * the originals are restored below if init proceeds. */
	max_msix_orig = sc->aac_max_msix;
	msi_enabled_orig = sc->msi_enabled;
	sc->msi_enabled = FALSE;
	if (aac_check_firmware(sc) != 0)
		goto finish;
	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
		sc->aac_max_msix = max_msix_orig;
		if (msi_enabled_orig) {
			sc->msi_enabled = msi_enabled_orig;
			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
		}
		/*
		 * Drop the io lock across aac_init() — presumably it may
		 * sleep during re-initialization; TODO confirm.
		 */
		mtx_unlock(&sc->aac_io_lock);
		aac_init(sc);
		mtx_lock(&sc->aac_io_lock);
	}

finish:
	/* Clear the reset flag, re-enable interrupts and restart I/O. */
	sc->aac_state &= ~AAC_STATE_RESET;
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
	aacraid_startio(sc);
	return (0);
}
3863