xref: /freebsd/sys/dev/aacraid/aacraid.c (revision adc56f5a383771f594829b7db9c263b6f0dcf1bd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
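/*
 * FILTER_HANDLED is normally provided by the kernel's interrupt filter API;
 * the fallback below is presumably for build environments that lack it.
 */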
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
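/*
 * Register-access method table for PMC SRC-based controllers.  The
 * initializers are positional, so they must stay in the same order as the
 * function pointers in struct aac_interface; they are invoked indirectly
 * through the AAC_GET_FWSTATUS()/AAC_ACCESS_DEVREG()-style accessor macros
 * used throughout this file.
 */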
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
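/*
 * Code/string lookup tables for aac_describe_code(): the {NULL, 0} entry
 * terminates the search and the entry that follows it supplies the default
 * string returned for unrecognized codes.
 */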
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 #if __FreeBSD_version >= 702000
192 static void		aac_cdevpriv_dtor(void *arg);
193 #else
194 static d_close_t	aac_close;
195 #endif
196 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
197 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
198 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
199 static void	aac_request_aif(struct aac_softc *sc);
200 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
201 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
202 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
203 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
204 static int	aac_return_aif(struct aac_softc *sc,
205 			       struct aac_fib_context *ctx, caddr_t uptr);
206 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
207 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
208 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
209 static void	aac_ioctl_event(struct aac_softc *sc,
210 				struct aac_event *event, void *arg);
211 static int	aac_reset_adapter(struct aac_softc *sc);
212 static int	aac_get_container_info(struct aac_softc *sc,
213 				       struct aac_fib *fib, int cid,
214 				       struct aac_mntinforesp *mir,
215 				       u_int32_t *uid);
216 static u_int32_t
217 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
218 
219 static struct cdevsw aacraid_cdevsw = {
220 	.d_version =	D_VERSION,
221 	.d_flags =	0,
222 	.d_open =	aac_open,
223 #if __FreeBSD_version < 702000
224 	.d_close =	aac_close,
225 #endif
226 	.d_ioctl =	aac_ioctl,
227 	.d_poll =	aac_poll,
228 	.d_name =	"aacraid",
229 };
230 
231 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
232 
233 /* sysctl node */
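/* The node is visible from userland as the hw.aacraid sysctl tree. */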
234 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
235 
236 /*
237  * Device Interface
238  */
239 
240 /*
241  * Initialize the controller and softc
242  */
243 int
244 aacraid_attach(struct aac_softc *sc)
245 {
246 	int error, unit;
247 	struct aac_fib *fib;
248 	struct aac_mntinforesp mir;
249 	int count = 0, i = 0;
250 	u_int32_t uid;
251 
252 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
253 	sc->hint_flags = device_get_flags(sc->aac_dev);
254 	/*
255 	 * Initialize per-controller queues.
256 	 */
257 	aac_initq_free(sc);
258 	aac_initq_ready(sc);
259 	aac_initq_busy(sc);
260 
261 	/* mark controller as suspended until we get ourselves organised */
262 	sc->aac_state |= AAC_STATE_SUSPEND;
263 
264 	/*
265 	 * Check that the firmware on the card is supported.
266 	 */
267 	sc->msi_enabled = sc->msi_tupelo = FALSE;
268 	if ((error = aac_check_firmware(sc)) != 0)
269 		return(error);
270 
271 	/*
272 	 * Initialize locks
273 	 */
274 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
275 	TAILQ_INIT(&sc->aac_container_tqh);
276 	TAILQ_INIT(&sc->aac_ev_cmfree);
277 
278 #if __FreeBSD_version >= 800000
279 	/* Initialize the clock daemon callout. */
280 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
281 #endif
282 	/*
283 	 * Initialize the adapter.
284 	 */
285 	if ((error = aac_alloc(sc)) != 0)
286 		return(error);
287 	aac_define_int_mode(sc);
288 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
289 		if ((error = aac_init(sc)) != 0)
290 			return(error);
291 	}
292 
293 	/*
294 	 * Allocate and connect our interrupt.
295 	 */
296 	if ((error = aac_setup_intr(sc)) != 0)
297 		return(error);
298 
299 	/*
300 	 * Print a little information about the controller.
301 	 */
302 	aac_describe_controller(sc);
303 
304 	/*
305 	 * Make the control device.
306 	 */
307 	unit = device_get_unit(sc->aac_dev);
308 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
309 				 0640, "aacraid%d", unit);
310 	sc->aac_dev_t->si_drv1 = sc;
311 
312 	/* Create the AIF thread */
313 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
314 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
315 		panic("Could not create AIF thread");
316 
317 	/* Register the shutdown method to only be called post-dump */
318 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
319 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
320 		device_printf(sc->aac_dev,
321 			      "shutdown event registration failed\n");
322 
323 	/* Find containers */
324 	mtx_lock(&sc->aac_io_lock);
325 	aac_alloc_sync_fib(sc, &fib);
326 	/* loop over possible containers */
327 	do {
328 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
329 			continue;
330 		if (i == 0)
331 			count = mir.MntRespCount;
332 		aac_add_container(sc, &mir, 0, uid);
333 		i++;
334 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
335 	aac_release_sync_fib(sc);
336 	mtx_unlock(&sc->aac_io_lock);
337 
338 	/* Register with CAM for the containers */
339 	TAILQ_INIT(&sc->aac_sim_tqh);
340 	aac_container_bus(sc);
341 	/* Register with CAM for the non-DASD devices */
342 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
343 		aac_get_bus_info(sc);
344 
345 	/* poke the bus to actually attach the child devices */
346 	bus_generic_attach(sc->aac_dev);
347 
348 	/* mark the controller up */
349 	sc->aac_state &= ~AAC_STATE_SUSPEND;
350 
351 	/* enable interrupts now */
352 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
353 
354 #if __FreeBSD_version >= 800000
355 	mtx_lock(&sc->aac_io_lock);
356 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
357 	mtx_unlock(&sc->aac_io_lock);
358 #else
359 	{
360 		struct timeval tv;
361 		tv.tv_sec = 60;
362 		tv.tv_usec = 0;
363 		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
364 	}
365 #endif
366 
367 	return(0);
368 }
369 
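/*
 * Periodic housekeeping callout: push the current host time to the
 * controller via a SendHostTime FIB and re-arm for 30 minutes (the first
 * run is scheduled 60 seconds after attach).
 */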
370 static void
371 aac_daemon(void *arg)
372 {
373 	struct aac_softc *sc;
374 	struct timeval tv;
375 	struct aac_command *cm;
376 	struct aac_fib *fib;
377 
378 	sc = arg;
379 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
380 
381 #if __FreeBSD_version >= 800000
382 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
383 	if (callout_pending(&sc->aac_daemontime) ||
384 	    callout_active(&sc->aac_daemontime) == 0)
385 		return;
386 #else
387 	mtx_lock(&sc->aac_io_lock);
388 #endif
389 	getmicrotime(&tv);
390 
391 	if (!aacraid_alloc_command(sc, &cm)) {
392 		fib = cm->cm_fib;
393 		cm->cm_timestamp = time_uptime;
394 		cm->cm_datalen = 0;
395 		cm->cm_flags |= AAC_CMD_WAIT;
396 
397 		fib->Header.Size =
398 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
399 		fib->Header.XferState =
400 			AAC_FIBSTATE_HOSTOWNED   |
401 			AAC_FIBSTATE_INITIALISED |
402 			AAC_FIBSTATE_EMPTY	 |
403 			AAC_FIBSTATE_FROMHOST	 |
404 			AAC_FIBSTATE_REXPECTED   |
405 			AAC_FIBSTATE_NORM	 |
406 			AAC_FIBSTATE_ASYNC	 |
407 			AAC_FIBSTATE_FAST_RESPONSE;
408 		fib->Header.Command = SendHostTime;
409 		*(uint32_t *)fib->data = tv.tv_sec;
410 
411 		aacraid_map_command_sg(cm, NULL, 0, 0);
412 		aacraid_release_command(cm);
413 	}
414 
415 #if __FreeBSD_version >= 800000
416 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
417 #else
418 	mtx_unlock(&sc->aac_io_lock);
419 	tv.tv_sec = 30 * 60;
420 	tv.tv_usec = 0;
421 	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
422 #endif
423 }
424 
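/*
 * Queue an event notification; AAC_EVENT_CMFREE callbacks are invoked from
 * aacraid_release_command() once a command returns to the free list.
 */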
425 void
426 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
427 {
428 
429 	switch (event->ev_type & AAC_EVENT_MASK) {
430 	case AAC_EVENT_CMFREE:
431 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
432 		break;
433 	default:
434 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
435 		    event->ev_type);
436 		break;
437 	}
438 
439 	return;
440 }
441 
442 /*
443  * Request information about container #cid
444  */
445 static int
446 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
447 		       struct aac_mntinforesp *mir, u_int32_t *uid)
448 {
449 	struct aac_command *cm;
450 	struct aac_fib *fib;
451 	struct aac_mntinfo *mi;
452 	struct aac_cnt_config *ccfg;
453 	int rval;
454 
455 	if (sync_fib == NULL) {
456 		if (aacraid_alloc_command(sc, &cm)) {
457 			device_printf(sc->aac_dev,
458 				"Warning, no free command available\n");
459 			return (-1);
460 		}
461 		fib = cm->cm_fib;
462 	} else {
463 		fib = sync_fib;
464 	}
465 
466 	mi = (struct aac_mntinfo *)&fib->data[0];
467 	/* 4KB sector support? 64-bit LBA? */
468 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
469 		mi->Command = VM_NameServeAllBlk;
470 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
471 		mi->Command = VM_NameServe64;
472 	else
473 		mi->Command = VM_NameServe;
474 	mi->MntType = FT_FILESYS;
475 	mi->MntCount = cid;
476 
477 	if (sync_fib) {
478 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
479 			 sizeof(struct aac_mntinfo))) {
480 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
481 			return (-1);
482 		}
483 	} else {
484 		cm->cm_timestamp = time_uptime;
485 		cm->cm_datalen = 0;
486 
487 		fib->Header.Size =
488 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
489 		fib->Header.XferState =
490 			AAC_FIBSTATE_HOSTOWNED   |
491 			AAC_FIBSTATE_INITIALISED |
492 			AAC_FIBSTATE_EMPTY	 |
493 			AAC_FIBSTATE_FROMHOST	 |
494 			AAC_FIBSTATE_REXPECTED   |
495 			AAC_FIBSTATE_NORM	 |
496 			AAC_FIBSTATE_ASYNC	 |
497 			AAC_FIBSTATE_FAST_RESPONSE;
498 		fib->Header.Command = ContainerCommand;
499 		if (aacraid_wait_command(cm) != 0) {
500 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
501 			aacraid_release_command(cm);
502 			return (-1);
503 		}
504 	}
505 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
506 
507 	/* Container UID: default to cid, replaced by the firmware's 32-bit UID below if available */
508 	*uid = cid;
509 	if (mir->MntTable[0].VolType != CT_NONE &&
510 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
511 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
512 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
513 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
514 		}
515 		ccfg = (struct aac_cnt_config *)&fib->data[0];
516 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
517 		ccfg->Command = VM_ContainerConfig;
518 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
519 		ccfg->CTCommand.param[0] = cid;
520 
521 		if (sync_fib) {
522 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
523 				sizeof(struct aac_cnt_config));
524 			if (rval == 0 && ccfg->Command == ST_OK &&
525 				ccfg->CTCommand.param[0] == CT_OK &&
526 				mir->MntTable[0].VolType != CT_PASSTHRU)
527 				*uid = ccfg->CTCommand.param[1];
528 		} else {
529 			fib->Header.Size =
530 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
531 			fib->Header.XferState =
532 				AAC_FIBSTATE_HOSTOWNED   |
533 				AAC_FIBSTATE_INITIALISED |
534 				AAC_FIBSTATE_EMPTY	 |
535 				AAC_FIBSTATE_FROMHOST	 |
536 				AAC_FIBSTATE_REXPECTED   |
537 				AAC_FIBSTATE_NORM	 |
538 				AAC_FIBSTATE_ASYNC	 |
539 				AAC_FIBSTATE_FAST_RESPONSE;
540 			fib->Header.Command = ContainerCommand;
541 			rval = aacraid_wait_command(cm);
542 			if (rval == 0 && ccfg->Command == ST_OK &&
543 				ccfg->CTCommand.param[0] == CT_OK &&
544 				mir->MntTable[0].VolType != CT_PASSTHRU)
545 				*uid = ccfg->CTCommand.param[1];
546 			aacraid_release_command(cm);
547 		}
548 	}
549 
550 	return (0);
551 }
552 
553 /*
554  * Create a device to represent a new container
555  */
556 static void
557 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
558 		  u_int32_t uid)
559 {
560 	struct aac_container *co;
561 
562 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
563 
564 	/*
565 	 * Check container volume type for validity.  Note that many of
566 	 * the possible types may never show up.
567 	 */
568 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
569 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
570 		       M_NOWAIT | M_ZERO);
571 		if (co == NULL) {
572 			panic("Out of memory?!");
573 		}
574 
575 		co->co_found = f;
576 		bcopy(&mir->MntTable[0], &co->co_mntobj,
577 		      sizeof(struct aac_mntobj));
578 		co->co_uid = uid;
579 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
580 	}
581 }
582 
583 /*
584  * Allocate resources associated with (sc)
585  */
586 static int
587 aac_alloc(struct aac_softc *sc)
588 {
589 	bus_size_t maxsize;
590 
591 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
592 
593 	/*
594 	 * Create DMA tag for mapping buffers into controller-addressable space.
595 	 */
596 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
597 			       1, 0, 			/* algnmnt, boundary */
598 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
599 			       BUS_SPACE_MAXADDR :
600 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
601 			       BUS_SPACE_MAXADDR, 	/* highaddr */
602 			       NULL, NULL, 		/* filter, filterarg */
603 			       sc->aac_max_sectors << 9, /* maxsize */
604 			       sc->aac_sg_tablesize,	/* nsegments */
605 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
606 			       BUS_DMA_ALLOCNOW,	/* flags */
607 			       busdma_lock_mutex,	/* lockfunc */
608 			       &sc->aac_io_lock,	/* lockfuncarg */
609 			       &sc->aac_buffer_dmat)) {
610 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
611 		return (ENOMEM);
612 	}
613 
614 	/*
615 	 * Create DMA tag for mapping FIBs into controller-addressable space.
616 	 */
617 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
618 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
619 			sizeof(struct aac_fib_xporthdr) + 31);
620 	else
621 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
622 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
623 			       1, 0, 			/* algnmnt, boundary */
624 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
625 			       BUS_SPACE_MAXADDR_32BIT :
626 			       0x7fffffff,		/* lowaddr */
627 			       BUS_SPACE_MAXADDR, 	/* highaddr */
628 			       NULL, NULL, 		/* filter, filterarg */
629 			       maxsize,  		/* maxsize */
630 			       1,			/* nsegments */
631 			       maxsize,			/* maxsegsize */
632 			       0,			/* flags */
633 			       NULL, NULL,		/* No locking needed */
634 			       &sc->aac_fib_dmat)) {
635 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
636 		return (ENOMEM);
637 	}
638 
639 	/*
640 	 * Create DMA tag for the common structure and allocate it.
641 	 */
642 	maxsize = sizeof(struct aac_common);
643 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
644 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
645 			       1, 0,			/* algnmnt, boundary */
646 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
647 			       BUS_SPACE_MAXADDR_32BIT :
648 			       0x7fffffff,		/* lowaddr */
649 			       BUS_SPACE_MAXADDR, 	/* highaddr */
650 			       NULL, NULL, 		/* filter, filterarg */
651 			       maxsize, 		/* maxsize */
652 			       1,			/* nsegments */
653 			       maxsize,			/* maxsegsize */
654 			       0,			/* flags */
655 			       NULL, NULL,		/* No locking needed */
656 			       &sc->aac_common_dmat)) {
657 		device_printf(sc->aac_dev,
658 			      "can't allocate common structure DMA tag\n");
659 		return (ENOMEM);
660 	}
661 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
662 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
663 		device_printf(sc->aac_dev, "can't allocate common structure\n");
664 		return (ENOMEM);
665 	}
666 
667 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
668 			sc->aac_common, maxsize,
669 			aac_common_map, sc, 0);
670 	bzero(sc->aac_common, maxsize);
671 
672 	/* Allocate some FIBs and associated command structs */
673 	TAILQ_INIT(&sc->aac_fibmap_tqh);
674 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
675 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
676 	mtx_lock(&sc->aac_io_lock);
677 	while (sc->total_fibs < sc->aac_max_fibs) {
678 		if (aac_alloc_commands(sc) != 0)
679 			break;
680 	}
681 	mtx_unlock(&sc->aac_io_lock);
682 	if (sc->total_fibs == 0)
683 		return (ENOMEM);
684 
685 	return (0);
686 }
687 
688 /*
689  * Free all of the resources associated with (sc)
690  *
691  * Should not be called if the controller is active.
692  */
693 void
694 aacraid_free(struct aac_softc *sc)
695 {
696 	int i;
697 
698 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
699 
700 	/* remove the control device */
701 	if (sc->aac_dev_t != NULL)
702 		destroy_dev(sc->aac_dev_t);
703 
704 	/* throw away any FIB buffers, discard the FIB DMA tag */
705 	aac_free_commands(sc);
706 	if (sc->aac_fib_dmat)
707 		bus_dma_tag_destroy(sc->aac_fib_dmat);
708 
709 	free(sc->aac_commands, M_AACRAIDBUF);
710 
711 	/* destroy the common area */
712 	if (sc->aac_common) {
713 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
714 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
715 				sc->aac_common_dmamap);
716 	}
717 	if (sc->aac_common_dmat)
718 		bus_dma_tag_destroy(sc->aac_common_dmat);
719 
720 	/* disconnect the interrupt handler */
721 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
722 		if (sc->aac_intr[i])
723 			bus_teardown_intr(sc->aac_dev,
724 				sc->aac_irq[i], sc->aac_intr[i]);
725 		if (sc->aac_irq[i])
726 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
727 				sc->aac_irq_rid[i], sc->aac_irq[i]);
728 		else
729 			break;
730 	}
731 	if (sc->msi_enabled || sc->msi_tupelo)
732 		pci_release_msi(sc->aac_dev);
733 
734 	/* destroy data-transfer DMA tag */
735 	if (sc->aac_buffer_dmat)
736 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
737 
738 	/* destroy the parent DMA tag */
739 	if (sc->aac_parent_dmat)
740 		bus_dma_tag_destroy(sc->aac_parent_dmat);
741 
742 	/* release the register window mapping */
743 	if (sc->aac_regs_res0 != NULL)
744 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 				     sc->aac_regs_rid0, sc->aac_regs_res0);
746 	if (sc->aac_regs_res1 != NULL)
747 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
748 				     sc->aac_regs_rid1, sc->aac_regs_res1);
749 }
750 
751 /*
752  * Disconnect from the controller completely, in preparation for unload.
753  */
754 int
755 aacraid_detach(device_t dev)
756 {
757 	struct aac_softc *sc;
758 	struct aac_container *co;
759 	struct aac_sim	*sim;
760 	int error;
761 
762 	sc = device_get_softc(dev);
763 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
764 
765 #if __FreeBSD_version >= 800000
766 	callout_drain(&sc->aac_daemontime);
767 #else
768 	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
769 #endif
770 	/* Remove the child containers */
771 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
772 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
773 		free(co, M_AACRAIDBUF);
774 	}
775 
776 	/* Remove the CAM SIMs */
777 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
778 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
779 		error = device_delete_child(dev, sim->sim_dev);
780 		if (error)
781 			return (error);
782 		free(sim, M_AACRAIDBUF);
783 	}
784 
785 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
786 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
787 		wakeup(sc->aifthread);
788 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
789 	}
790 
791 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
792 		panic("Cannot shutdown AIF thread");
793 
794 	if ((error = aacraid_shutdown(dev)))
795 		return(error);
796 
797 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
798 
799 	aacraid_free(sc);
800 
801 	mtx_destroy(&sc->aac_io_lock);
802 
803 	return(0);
804 }
805 
806 /*
807  * Bring the controller down to a dormant state and detach all child devices.
808  *
809  * This function is called before detach or system shutdown.
810  *
811  * Note that we can assume that the bioq on the controller is empty, as we won't
812  * allow shutdown if any device is open.
813  */
814 int
815 aacraid_shutdown(device_t dev)
816 {
817 	struct aac_softc *sc;
818 	struct aac_fib *fib;
819 	struct aac_close_command *cc;
820 
821 	sc = device_get_softc(dev);
822 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
823 
824 	sc->aac_state |= AAC_STATE_SUSPEND;
825 
826 	/*
827 	 * Send a Container shutdown (VM_CloseAll) FIB to the controller to
828 	 * convince it that we don't want to talk to it anymore.
829 	 * We've been closed and all I/O completed already
830 	 */
831 	device_printf(sc->aac_dev, "shutting down controller...");
832 
833 	mtx_lock(&sc->aac_io_lock);
834 	aac_alloc_sync_fib(sc, &fib);
835 	cc = (struct aac_close_command *)&fib->data[0];
836 
837 	bzero(cc, sizeof(struct aac_close_command));
838 	cc->Command = VM_CloseAll;
839 	cc->ContainerId = 0xfffffffe;
840 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
841 	    sizeof(struct aac_close_command)))
842 		printf("FAILED.\n");
843 	else
844 		printf("done\n");
845 
846 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
847 	aac_release_sync_fib(sc);
848 	mtx_unlock(&sc->aac_io_lock);
849 
850 	return(0);
851 }
852 
853 /*
854  * Bring the controller to a quiescent state, ready for system suspend.
855  */
856 int
857 aacraid_suspend(device_t dev)
858 {
859 	struct aac_softc *sc;
860 
861 	sc = device_get_softc(dev);
862 
863 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
864 	sc->aac_state |= AAC_STATE_SUSPEND;
865 
866 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
867 	return(0);
868 }
869 
870 /*
871  * Bring the controller back to a state ready for operation.
872  */
873 int
874 aacraid_resume(device_t dev)
875 {
876 	struct aac_softc *sc;
877 
878 	sc = device_get_softc(dev);
879 
880 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
881 	sc->aac_state &= ~AAC_STATE_SUSPEND;
882 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
883 	return(0);
884 }
885 
886 /*
887  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
888  */
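/*
 * Host RRQ entries decoded below use the following layout: bit 31 is a
 * toggle bit, bit 30 flags a fast response, bit 23 flags an AIF, and the
 * low 16 bits hold the command index + 1 (0 means no further entries).
 */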
889 void
890 aacraid_new_intr_type1(void *arg)
891 {
892 	struct aac_msix_ctx *ctx;
893 	struct aac_softc *sc;
894 	int vector_no;
895 	struct aac_command *cm;
896 	struct aac_fib *fib;
897 	u_int32_t bellbits, bellbits_shifted, index, handle;
898 	int isFastResponse, isAif, noMoreAif, mode;
899 
900 	ctx = (struct aac_msix_ctx *)arg;
901 	sc = ctx->sc;
902 	vector_no = ctx->vector_no;
903 
904 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
905 	mtx_lock(&sc->aac_io_lock);
906 
907 	if (sc->msi_enabled) {
908 		mode = AAC_INT_MODE_MSI;
909 		if (vector_no == 0) {
910 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
911 			if (bellbits & 0x40000)
912 				mode |= AAC_INT_MODE_AIF;
913 			else if (bellbits & 0x1000)
914 				mode |= AAC_INT_MODE_SYNC;
915 		}
916 	} else {
917 		mode = AAC_INT_MODE_INTX;
918 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
919 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
920 			bellbits = AAC_DB_RESPONSE_SENT_NS;
921 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922 		} else {
923 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
924 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
925 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
926 				mode |= AAC_INT_MODE_AIF;
927 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
928 				mode |= AAC_INT_MODE_SYNC;
929 		}
930 		/* ODR readback, Prep #238630 */
931 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
932 	}
933 
934 	if (mode & AAC_INT_MODE_SYNC) {
935 		if (sc->aac_sync_cm) {
936 			cm = sc->aac_sync_cm;
937 			cm->cm_flags |= AAC_CMD_COMPLETED;
938 			/* is there a completion handler? */
939 			if (cm->cm_complete != NULL) {
940 				cm->cm_complete(cm);
941 			} else {
942 				/* assume that someone is sleeping on this command */
943 				wakeup(cm);
944 			}
945 			sc->flags &= ~AAC_QUEUE_FRZN;
946 			sc->aac_sync_cm = NULL;
947 		}
948 		mode = 0;
949 	}
950 
951 	if (mode & AAC_INT_MODE_AIF) {
952 		if (mode & AAC_INT_MODE_INTX) {
953 			aac_request_aif(sc);
954 			mode = 0;
955 		}
956 	}
957 
958 	if (mode) {
959 		/* handle async. status */
960 		index = sc->aac_host_rrq_idx[vector_no];
961 		for (;;) {
962 			isFastResponse = isAif = noMoreAif = 0;
963 			/* remove toggle bit (31) */
964 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
965 			/* check fast response bit (30) */
966 			if (handle & 0x40000000)
967 				isFastResponse = 1;
968 			/* check AIF bit (23) */
969 			else if (handle & 0x00800000)
970 				isAif = TRUE;
971 			handle &= 0x0000ffff;
972 			if (handle == 0)
973 				break;
974 
975 			cm = sc->aac_commands + (handle - 1);
976 			fib = cm->cm_fib;
977 			sc->aac_rrq_outstanding[vector_no]--;
978 			if (isAif) {
979 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
980 				if (!noMoreAif)
981 					aac_handle_aif(sc, fib);
982 				aac_remove_busy(cm);
983 				aacraid_release_command(cm);
984 			} else {
985 				if (isFastResponse) {
986 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
987 					*((u_int32_t *)(fib->data)) = ST_OK;
988 					cm->cm_flags |= AAC_CMD_FASTRESP;
989 				}
990 				aac_remove_busy(cm);
991 				aac_unmap_command(cm);
992 				cm->cm_flags |= AAC_CMD_COMPLETED;
993 
994 				/* is there a completion handler? */
995 				if (cm->cm_complete != NULL) {
996 					cm->cm_complete(cm);
997 				} else {
998 					/* assume that someone is sleeping on this command */
999 					wakeup(cm);
1000 				}
1001 				sc->flags &= ~AAC_QUEUE_FRZN;
1002 			}
1003 
1004 			sc->aac_common->ac_host_rrq[index++] = 0;
1005 			if (index == (vector_no + 1) * sc->aac_vector_cap)
1006 				index = vector_no * sc->aac_vector_cap;
1007 			sc->aac_host_rrq_idx[vector_no] = index;
1008 
1009 			if ((isAif && !noMoreAif) || sc->aif_pending)
1010 				aac_request_aif(sc);
1011 		}
1012 	}
1013 
1014 	if (mode & AAC_INT_MODE_AIF) {
1015 		aac_request_aif(sc);
1016 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1017 		mode = 0;
1018 	}
1019 
1020 	/* see if we can start some more I/O */
1021 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1022 		aacraid_startio(sc);
1023 	mtx_unlock(&sc->aac_io_lock);
1024 }
1025 
1026 /*
1027  * AIF/worker thread: allocate FIBs on demand, time out stuck commands, drain the printf buffer.
1028  */
1029 static void
1030 aac_command_thread(struct aac_softc *sc)
1031 {
1032 	int retval;
1033 
1034 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1035 
1036 	mtx_lock(&sc->aac_io_lock);
1037 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1038 
1039 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1040 
1041 		retval = 0;
1042 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1043 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1044 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1045 
1046 		/*
1047 		 * First see if any FIBs need to be allocated.
1048 		 */
1049 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1050 			aac_alloc_commands(sc);
1051 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1052 			aacraid_startio(sc);
1053 		}
1054 
1055 		/*
1056 		 * While we're here, check to see if any commands are stuck.
1057 		 * This is pretty low-priority, so it's ok if it doesn't
1058 		 * always fire.
1059 		 */
1060 		if (retval == EWOULDBLOCK)
1061 			aac_timeout(sc);
1062 
1063 		/* Check the hardware printf message buffer */
1064 		if (sc->aac_common->ac_printf[0] != 0)
1065 			aac_print_printf(sc);
1066 	}
1067 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1068 	mtx_unlock(&sc->aac_io_lock);
1069 	wakeup(sc->aac_dev);
1070 
1071 	aac_kthread_exit(0);
1072 }
1073 
1074 /*
1075  * Submit a command to the controller, return when it completes.
1076  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1077  *     be stuck here forever.  At the same time, signals are not caught
1078  *     because there is a risk that a signal could wakeup the sleep before
1079  *     the card has a chance to complete the command.  Since there is no way
1080  *     to cancel a command that is in progress, we can't protect against the
1081  *     card completing a command late and spamming the command and data
1082  *     memory.  So, we are held hostage until the command completes.
1083  */
1084 int
1085 aacraid_wait_command(struct aac_command *cm)
1086 {
1087 	struct aac_softc *sc;
1088 	int error;
1089 
1090 	sc = cm->cm_sc;
1091 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1092 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1093 
1094 	/* Put the command on the ready queue and get things going */
1095 	aac_enqueue_ready(cm);
1096 	aacraid_startio(sc);
1097 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1098 	return(error);
1099 }
1100 
1101 /*
1102  * Command Buffer Management
1103  */
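/*
 * Commands cycle through the per-controller queues set up in
 * aacraid_attach(): taken from the free queue by aacraid_alloc_command(),
 * placed on the ready queue for submission, removed from the busy queue on
 * completion, and finally returned to the free queue by
 * aacraid_release_command().
 */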
1104 
1105 /*
1106  * Allocate a command.
1107  */
1108 int
1109 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1110 {
1111 	struct aac_command *cm;
1112 
1113 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1114 
1115 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1116 		if (sc->total_fibs < sc->aac_max_fibs) {
1117 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1118 			wakeup(sc->aifthread);
1119 		}
1120 		return (EBUSY);
1121 	}
1122 
1123 	*cmp = cm;
1124 	return(0);
1125 }
1126 
1127 /*
1128  * Release a command back to the freelist.
1129  */
1130 void
1131 aacraid_release_command(struct aac_command *cm)
1132 {
1133 	struct aac_event *event;
1134 	struct aac_softc *sc;
1135 
1136 	sc = cm->cm_sc;
1137 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1138 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1139 
1140 	/* (re)initialize the command/FIB */
1141 	cm->cm_sgtable = NULL;
1142 	cm->cm_flags = 0;
1143 	cm->cm_complete = NULL;
1144 	cm->cm_ccb = NULL;
1145 	cm->cm_passthr_dmat = 0;
1146 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1147 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1148 	cm->cm_fib->Header.Unused = 0;
1149 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1150 
1151 	/*
1152 	 * These are duplicated in aac_start to cover the case where an
1153 	 * intermediate stage may have destroyed them.  They're left
1154 	 * initialized here for debugging purposes only.
1155 	 */
1156 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1157 	cm->cm_fib->Header.Handle = 0;
1158 
1159 	aac_enqueue_free(cm);
1160 
1161 	/*
1162 	 * Dequeue all events so that there's no risk of events getting
1163 	 * stranded.
1164 	 */
1165 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1166 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1167 		event->ev_callback(sc, event, event->ev_arg);
1168 	}
1169 }
1170 
1171 /*
1172  * Map helper for command/FIB allocation.
1173  */
1174 static void
1175 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1176 {
1177 	uint64_t	*fibphys;
1178 
1179 	fibphys = (uint64_t *)arg;
1180 
1181 	*fibphys = segs[0].ds_addr;
1182 }
1183 
1184 /*
1185  * Allocate and initialize commands/FIBs for this adapter.
1186  */
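/*
 * FIBs are carved out of a single DMA allocation in blocks of
 * aac_max_fibs_alloc; each FIB is rounded up to a 32-byte boundary (hence
 * the +31 padding), and NEW_COMM_TYPE1 adapters additionally reserve room
 * for a struct aac_fib_xporthdr in front of each FIB.
 */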
1187 static int
1188 aac_alloc_commands(struct aac_softc *sc)
1189 {
1190 	struct aac_command *cm;
1191 	struct aac_fibmap *fm;
1192 	uint64_t fibphys;
1193 	int i, error;
1194 	u_int32_t maxsize;
1195 
1196 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1197 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1198 
1199 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1200 		return (ENOMEM);
1201 
1202 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1203 	if (fm == NULL)
1204 		return (ENOMEM);
1205 
1206 	mtx_unlock(&sc->aac_io_lock);
1207 	/* allocate the FIBs in DMAable memory and load them */
1208 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1209 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1210 		device_printf(sc->aac_dev,
1211 			      "Not enough contiguous memory available.\n");
1212 		free(fm, M_AACRAIDBUF);
1213 		mtx_lock(&sc->aac_io_lock);
1214 		return (ENOMEM);
1215 	}
1216 
1217 	maxsize = sc->aac_max_fib_size + 31;
1218 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1219 		maxsize += sizeof(struct aac_fib_xporthdr);
1220 	/* Ignore errors since this doesn't bounce */
1221 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1222 			      sc->aac_max_fibs_alloc * maxsize,
1223 			      aac_map_command_helper, &fibphys, 0);
1224 	mtx_lock(&sc->aac_io_lock);
1225 
1226 	/* initialize constant fields in the command structure */
1227 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1228 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1229 		cm = sc->aac_commands + sc->total_fibs;
1230 		fm->aac_commands = cm;
1231 		cm->cm_sc = sc;
1232 		cm->cm_fib = (struct aac_fib *)
1233 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1234 		cm->cm_fibphys = fibphys + i * maxsize;
1235 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1236 			u_int64_t fibphys_aligned;
1237 			fibphys_aligned =
1238 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1239 			cm->cm_fib = (struct aac_fib *)
1240 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1241 			cm->cm_fibphys = fibphys_aligned;
1242 		} else {
1243 			u_int64_t fibphys_aligned;
1244 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1245 			cm->cm_fib = (struct aac_fib *)
1246 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1247 			cm->cm_fibphys = fibphys_aligned;
1248 		}
1249 		cm->cm_index = sc->total_fibs;
1250 
1251 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1252 					       &cm->cm_datamap)) != 0)
1253 			break;
1254 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1255 			aacraid_release_command(cm);
1256 		sc->total_fibs++;
1257 	}
1258 
1259 	if (i > 0) {
1260 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1261 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1262 		return (0);
1263 	}
1264 
1265 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1266 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1267 	free(fm, M_AACRAIDBUF);
1268 	return (ENOMEM);
1269 }
1270 
1271 /*
1272  * Free FIBs owned by this adapter.
1273  */
1274 static void
1275 aac_free_commands(struct aac_softc *sc)
1276 {
1277 	struct aac_fibmap *fm;
1278 	struct aac_command *cm;
1279 	int i;
1280 
1281 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1282 
1283 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1284 
1285 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1286 		/*
1287 		 * We check against total_fibs to handle partially
1288 		 * allocated blocks.
1289 		 */
1290 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1291 			cm = fm->aac_commands + i;
1292 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1293 		}
1294 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1295 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1296 		free(fm, M_AACRAIDBUF);
1297 	}
1298 }
1299 
1300 /*
1301  * Command-mapping helper function - populate this command's s/g table.
1302  */
1303 void
1304 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1305 {
1306 	struct aac_softc *sc;
1307 	struct aac_command *cm;
1308 	struct aac_fib *fib;
1309 	int i;
1310 
1311 	cm = (struct aac_command *)arg;
1312 	sc = cm->cm_sc;
1313 	fib = cm->cm_fib;
1314 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1315 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1316 
1317 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1318 		return;
1319 
1320 	/* copy into the FIB */
1321 	if (cm->cm_sgtable != NULL) {
1322 		if (fib->Header.Command == RawIo2) {
1323 			struct aac_raw_io2 *raw;
1324 			struct aac_sge_ieee1212 *sg;
1325 			u_int32_t min_size = PAGE_SIZE, cur_size;
1326 			int conformable = TRUE;
1327 
1328 			raw = (struct aac_raw_io2 *)&fib->data[0];
1329 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1330 			raw->sgeCnt = nseg;
1331 
1332 			for (i = 0; i < nseg; i++) {
1333 				cur_size = segs[i].ds_len;
1334 				sg[i].addrHigh = 0;
1335 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1336 				sg[i].length = cur_size;
1337 				sg[i].flags = 0;
1338 				if (i == 0) {
1339 					raw->sgeFirstSize = cur_size;
1340 				} else if (i == 1) {
1341 					raw->sgeNominalSize = cur_size;
1342 					min_size = cur_size;
1343 				} else if ((i+1) < nseg &&
1344 					cur_size != raw->sgeNominalSize) {
1345 					conformable = FALSE;
1346 					if (cur_size < min_size)
1347 						min_size = cur_size;
1348 				}
1349 			}
1350 
1351 			/* not conformable: evaluate required sg elements */
1352 			if (!conformable) {
1353 				int j, err_found, nseg_new = nseg;
1354 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1355 					err_found = FALSE;
1356 					nseg_new = 2;
1357 					for (j = 1; j < nseg - 1; ++j) {
1358 						if (sg[j].length % (i*PAGE_SIZE)) {
1359 							err_found = TRUE;
1360 							break;
1361 						}
1362 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1363 					}
1364 					if (!err_found)
1365 						break;
1366 				}
1367 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1368 					!(sc->hint_flags & 4))
1369 					nseg = aac_convert_sgraw2(sc,
1370 						raw, i, nseg, nseg_new);
1371 			} else {
1372 				raw->flags |= RIO2_SGL_CONFORMANT;
1373 			}
1374 
1375 			/* update the FIB size for the s/g count */
1376 			fib->Header.Size += nseg *
1377 				sizeof(struct aac_sge_ieee1212);
1378 
1379 		} else if (fib->Header.Command == RawIo) {
1380 			struct aac_sg_tableraw *sg;
1381 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1382 			sg->SgCount = nseg;
1383 			for (i = 0; i < nseg; i++) {
1384 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1385 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1386 				sg->SgEntryRaw[i].Next = 0;
1387 				sg->SgEntryRaw[i].Prev = 0;
1388 				sg->SgEntryRaw[i].Flags = 0;
1389 			}
1390 			/* update the FIB size for the s/g count */
1391 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1392 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1393 			struct aac_sg_table *sg;
1394 			sg = cm->cm_sgtable;
1395 			sg->SgCount = nseg;
1396 			for (i = 0; i < nseg; i++) {
1397 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1398 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1399 			}
1400 			/* update the FIB size for the s/g count */
1401 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1402 		} else {
1403 			struct aac_sg_table64 *sg;
1404 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1405 			sg->SgCount = nseg;
1406 			for (i = 0; i < nseg; i++) {
1407 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1408 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1409 			}
1410 			/* update the FIB size for the s/g count */
1411 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1412 		}
1413 	}
1414 
1415 	/* Fix up the address values in the FIB.  Use the command array index
1416 	 * instead of a pointer since these fields are only 32 bits.  Shift
1417 	 * the SenderFibAddress over to make room for the fast response bit
1418 	 * and for the AIF bit.
1419 	 */
1420 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1421 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1422 
1423 	/* save the command index (+1) in the Handle for speedy reverse-lookup */
1424 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1425 
1426 	if (cm->cm_passthr_dmat == 0) {
1427 		if (cm->cm_flags & AAC_CMD_DATAIN)
1428 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1429 							BUS_DMASYNC_PREREAD);
1430 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1431 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1432 							BUS_DMASYNC_PREWRITE);
1433 	}
1434 
1435 	cm->cm_flags |= AAC_CMD_MAPPED;
1436 
1437 	if (cm->cm_flags & AAC_CMD_WAIT) {
1438 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1439 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1440 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1441 		u_int32_t wait = 0;
1442 		sc->aac_sync_cm = cm;
1443 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1444 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1445 	} else {
1446 		int count = 10000000L;
1447 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1448 			if (--count == 0) {
1449 				aac_unmap_command(cm);
1450 				sc->flags |= AAC_QUEUE_FRZN;
1451 				aac_requeue_ready(cm);
1452 			}
1453 			DELAY(5);			/* wait 5 usec. */
1454 		}
1455 	}
1456 }
1457 
1458 
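/*
 * Rebuild a RawIo2 scatter/gather list whose middle elements differ in size
 * into one where every middle element spans 'pages' pages, allowing the
 * list to be marked RIO2_SGL_CONFORMANT for the firmware.
 */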
1459 static int
1460 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1461 				   int pages, int nseg, int nseg_new)
1462 {
1463 	struct aac_sge_ieee1212 *sge;
1464 	int i, j, pos;
1465 	u_int32_t addr_low;
1466 
1467 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1468 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1469 	if (sge == NULL)
1470 		return nseg;
1471 
1472 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1473 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1474 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1475 			sge[pos].addrLow = addr_low;
1476 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1477 			if (addr_low < raw->sge[i].addrLow)
1478 				sge[pos].addrHigh++;
1479 			sge[pos].length = pages * PAGE_SIZE;
1480 			sge[pos].flags = 0;
1481 			pos++;
1482 		}
1483 	}
1484 	sge[pos] = raw->sge[nseg-1];
1485 	for (i = 1; i < nseg_new; ++i)
1486 		raw->sge[i] = sge[i];
1487 
1488 	free(sge, M_AACRAIDBUF);
1489 	raw->sgeCnt = nseg_new;
1490 	raw->flags |= RIO2_SGL_CONFORMANT;
1491 	raw->sgeNominalSize = pages * PAGE_SIZE;
1492 	return nseg_new;
1493 }
1494 
1495 
1496 /*
1497  * Unmap a command from controller-visible space.
1498  */
1499 static void
1500 aac_unmap_command(struct aac_command *cm)
1501 {
1502 	struct aac_softc *sc;
1503 
1504 	sc = cm->cm_sc;
1505 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1506 
1507 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1508 		return;
1509 
1510 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1511 		if (cm->cm_flags & AAC_CMD_DATAIN)
1512 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1513 					BUS_DMASYNC_POSTREAD);
1514 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1515 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1516 					BUS_DMASYNC_POSTWRITE);
1517 
1518 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1519 	}
1520 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1521 }
1522 
1523 /*
1524  * Hardware Interface
1525  */
1526 
1527 /*
1528  * Bus DMA map callback: record the bus address of the common structure.
1529  */
1530 static void
1531 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1532 {
1533 	struct aac_softc *sc;
1534 
1535 	sc = (struct aac_softc *)arg;
1536 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1537 
1538 	sc->aac_common_busaddr = segs[0].ds_addr;
1539 }
1540 
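/*
 * Wait for the firmware to come ready, verify that it is supported, and
 * read back the adapter's capabilities and preferred limits (max FIB size,
 * s/g table size, max FIBs, MSI-X vectors) used to size the driver's
 * resources.
 */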
1541 static int
1542 aac_check_firmware(struct aac_softc *sc)
1543 {
1544 	u_int32_t code, major, minor, maxsize;
1545 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1546 	time_t then;
1547 
1548 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1549 
1550 	/* check if flash update is running */
1551 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1552 		then = time_uptime;
1553 		do {
1554 			code = AAC_GET_FWSTATUS(sc);
1555 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1556 				device_printf(sc->aac_dev,
1557 						  "FATAL: controller not coming ready, "
1558 						   "status %x\n", code);
1559 				return(ENXIO);
1560 			}
1561 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1562 		/*
1563 		 * Delay 10 seconds.  The firmware is performing a soft reset right
1564 		 * now, so do not read the scratch pad register during this window.
1565 		 */
1566 		waitCount = 10 * 10000;
1567 		while (waitCount) {
1568 			DELAY(100);		/* delay 100 microseconds */
1569 			waitCount--;
1570 		}
1571 	}
1572 
1573 	/*
1574 	 * Wait for the adapter to come ready.
1575 	 */
1576 	then = time_uptime;
1577 	do {
1578 		code = AAC_GET_FWSTATUS(sc);
1579 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1580 			device_printf(sc->aac_dev,
1581 				      "FATAL: controller not coming ready, "
1582 					   "status %x\n", code);
1583 			return(ENXIO);
1584 		}
1585 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1586 
1587 	/*
1588 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1589 	 * firmware version 1.x are not compatible with this driver.
1590 	 */
1591 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1592 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1593 				     NULL, NULL)) {
1594 			device_printf(sc->aac_dev,
1595 				      "Error reading firmware version\n");
1596 			return (EIO);
1597 		}
1598 
1599 		/* These numbers are stored as ASCII! */
1600 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1601 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1602 		if (major == 1) {
1603 			device_printf(sc->aac_dev,
1604 			    "Firmware version %d.%d is not supported.\n",
1605 			    major, minor);
1606 			return (EINVAL);
1607 		}
1608 	}
1609 	/*
1610 	 * Retrieve the capabilities/supported options word so we know what
1611 	 * work-arounds to enable.  Some firmware revs don't support this
1612 	 * command.
1613 	 */
1614 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1615 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1616 			device_printf(sc->aac_dev,
1617 			     "RequestAdapterInfo failed\n");
1618 			return (EIO);
1619 		}
1620 	} else {
1621 		options = AAC_GET_MAILBOX(sc, 1);
1622 		atu_size = AAC_GET_MAILBOX(sc, 2);
1623 		sc->supported_options = options;
1624 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1625 
1626 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1627 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1628 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1629 		if (options & AAC_SUPPORTED_NONDASD)
1630 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1631 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1632 			&& (sizeof(bus_addr_t) > 4)
1633 			&& (sc->hint_flags & 0x1)) {
1634 			device_printf(sc->aac_dev,
1635 			    "Enabling 64-bit address support\n");
1636 			sc->flags |= AAC_FLAGS_SG_64BIT;
1637 		}
1638 		if (sc->aac_if.aif_send_command) {
1639 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1640 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1641 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1642 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1643 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1644 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1645 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1646 		}
1647 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1648 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1649 	}
1650 
1651 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1652 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1653 		return (ENXIO);
1654 	}
1655 
1656 	if (sc->hint_flags & 2) {
1657 		device_printf(sc->aac_dev,
1658 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1659 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1660 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1661 		device_printf(sc->aac_dev,
1662 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1663 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1664 	}
1665 
1666 	/* Check for broken hardware that only supports a lower number of commands */
1667 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1668 
1669 	/* Remap mem. resource, if required */
1670 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1671 		bus_release_resource(
1672 			sc->aac_dev, SYS_RES_MEMORY,
1673 			sc->aac_regs_rid0, sc->aac_regs_res0);
1674 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1675 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1676 			atu_size, RF_ACTIVE);
1677 		if (sc->aac_regs_res0 == NULL) {
1678 			sc->aac_regs_res0 = bus_alloc_resource_any(
1679 				sc->aac_dev, SYS_RES_MEMORY,
1680 				&sc->aac_regs_rid0, RF_ACTIVE);
1681 			if (sc->aac_regs_res0 == NULL) {
1682 				device_printf(sc->aac_dev,
1683 					"couldn't allocate register window\n");
1684 				return (ENXIO);
1685 			}
1686 		}
1687 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1688 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1689 	}
1690 
1691 	/* Read preferred settings */
1692 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1693 	sc->aac_max_sectors = 128;				/* 64KB */
1694 	sc->aac_max_aif = 1;
1695 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1696 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1697 		 - sizeof(struct aac_blockwrite64))
1698 		 / sizeof(struct aac_sg_entry64);
1699 	else
1700 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1701 		 - sizeof(struct aac_blockwrite))
1702 		 / sizeof(struct aac_sg_entry);
1703 
1704 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1705 		options = AAC_GET_MAILBOX(sc, 1);
1706 		sc->aac_max_fib_size = (options & 0xFFFF);
1707 		sc->aac_max_sectors = (options >> 16) << 1;
1708 		options = AAC_GET_MAILBOX(sc, 2);
1709 		sc->aac_sg_tablesize = (options >> 16);
1710 		options = AAC_GET_MAILBOX(sc, 3);
1711 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1712 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1713 			sc->aac_max_fibs = (options & 0xFFFF);
1714 		options = AAC_GET_MAILBOX(sc, 4);
1715 		sc->aac_max_aif = (options & 0xFFFF);
1716 		options = AAC_GET_MAILBOX(sc, 5);
1717 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1718 	}
1719 
1720 	maxsize = sc->aac_max_fib_size + 31;
1721 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1722 		maxsize += sizeof(struct aac_fib_xporthdr);
1723 	if (maxsize > PAGE_SIZE) {
1724 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1725 		maxsize = PAGE_SIZE;
1726 	}
1727 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1728 
1729 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1730 		sc->flags |= AAC_FLAGS_RAW_IO;
1731 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1732 	}
1733 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1734 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1735 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1736 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1737 	}
1738 
1739 #ifdef AACRAID_DEBUG
1740 	aacraid_get_fw_debug_buffer(sc);
1741 #endif
1742 	return (0);
1743 }
1744 
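/*
 * Build the adapter init structure in host memory, hand it to the firmware
 * with AAC_MONKER_INITSTRUCT and verify the resulting configuration.
 */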
1745 static int
1746 aac_init(struct aac_softc *sc)
1747 {
1748 	struct aac_adapter_init	*ip;
1749 	int i, error;
1750 
1751 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1752 
1753 	/* reset the host RRQ indexes; each vector's index starts at its own slice of aac_vector_cap entries */
1754 	sc->aac_fibs_pushed_no = 0;
1755 	for (i = 0; i < sc->aac_max_msix; i++)
1756 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1757 
1758 	/*
1759 	 * Fill in the init structure.  This tells the adapter about the
1760 	 * physical location of various important shared data structures.
1761 	 */
1762 	ip = &sc->aac_common->ac_init;
1763 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1764 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1765 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1766 		sc->flags |= AAC_FLAGS_RAW_IO;
1767 	}
1768 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1769 
1770 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1771 					 offsetof(struct aac_common, ac_fibs);
1772 	ip->AdapterFibsVirtualAddress = 0;
1773 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1774 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1775 
1776 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1777 				  offsetof(struct aac_common, ac_printf);
1778 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1779 
1780 	/*
1781 	 * The adapter assumes that pages are 4K in size, except on some
1782  	 * broken firmware versions that do the page->byte conversion twice,
1783 	 * therefore 'assuming' that this value is in 16MB units (2^24).
1784 	 * Round up since the granularity is so high.
1785 	 */
1786 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1787 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1788 		ip->HostPhysMemPages =
1789 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1790 	}
1791 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1792 
1793 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1794 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1795 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1796 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1797 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1798 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1799 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1800 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1801 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1802 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1803 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1804 	}
1805 	ip->MaxNumAif = sc->aac_max_aif;
1806 	ip->HostRRQ_AddrLow =
1807 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1808 	/* always 32-bit address */
1809 	ip->HostRRQ_AddrHigh = 0;
1810 
1811 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1812 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1813 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1814 		device_printf(sc->aac_dev, "Power Management enabled\n");
1815 	}
1816 
1817 	ip->MaxIoCommands = sc->aac_max_fibs;
1818 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1819 	ip->MaxFibSize = sc->aac_max_fib_size;
1820 
1821 	/*
1822 	 * Do controller-type-specific initialisation
1823 	 */
1824 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1825 
1826 	/*
1827 	 * Give the init structure to the controller.
1828 	 */
1829 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1830 			     sc->aac_common_busaddr +
1831 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1832 			     NULL, NULL)) {
1833 		device_printf(sc->aac_dev,
1834 			      "error establishing init structure\n");
1835 		error = EIO;
1836 		goto out;
1837 	}
1838 
1839 	/*
1840 	 * Check configuration issues
1841 	 */
1842 	if ((error = aac_check_config(sc)) != 0)
1843 		goto out;
1844 
1845 	error = 0;
1846 out:
1847 	return(error);
1848 }
1849 
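/*
 * Select the interrupt delivery mode (MSI-X, MSI or legacy INTx) based on
 * controller and OS capabilities, and size the per-vector FIB quota.
 */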
1850 static void
1851 aac_define_int_mode(struct aac_softc *sc)
1852 {
1853 	device_t dev;
1854 	int cap, msi_count, error = 0;
1855 	uint32_t val;
1856 
1857 	dev = sc->aac_dev;
1858 
1859 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1860 		device_printf(dev, "using line interrupts\n");
1861 		sc->aac_max_msix = 1;
1862 		sc->aac_vector_cap = sc->aac_max_fibs;
1863 		return;
1864 	}
1865 
1866 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1867 	if (sc->aac_max_msix == 0) {
1868 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1869 			msi_count = 1;
1870 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1871 				device_printf(dev, "alloc msi failed - err=%d; "
1872 				    "will use INTx\n", error);
1873 				pci_release_msi(dev);
1874 			} else {
1875 				sc->msi_tupelo = TRUE;
1876 			}
1877 		}
1878 		if (sc->msi_tupelo)
1879 			device_printf(dev, "using MSI interrupts\n");
1880 		else
1881 			device_printf(dev, "using line interrupts\n");
1882 
1883 		sc->aac_max_msix = 1;
1884 		sc->aac_vector_cap = sc->aac_max_fibs;
1885 		return;
1886 	}
1887 
1888 	/* OS capability */
1889 	msi_count = pci_msix_count(dev);
1890 	if (msi_count > AAC_MAX_MSIX)
1891 		msi_count = AAC_MAX_MSIX;
1892 	if (msi_count > sc->aac_max_msix)
1893 		msi_count = sc->aac_max_msix;
1894 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1895 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1896 				   "will try MSI\n", msi_count, error);
1897 		pci_release_msi(dev);
1898 	} else {
1899 		sc->msi_enabled = TRUE;
1900 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1901 			msi_count);
1902 	}
1903 
1904 	if (!sc->msi_enabled) {
1905 		msi_count = 1;
1906 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1907 			device_printf(dev, "alloc msi failed - err=%d; "
1908 				           "will use INTx\n", error);
1909 			pci_release_msi(dev);
1910 		} else {
1911 			sc->msi_enabled = TRUE;
1912 			device_printf(dev, "using MSI interrupts\n");
1913 		}
1914 	}
1915 
1916 	if (sc->msi_enabled) {
1917 		/* now read controller capability from PCI config. space */
1918 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1919 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1920 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1921 			pci_release_msi(dev);
1922 			sc->msi_enabled = FALSE;
1923 		}
1924 	}
1925 
1926 	if (!sc->msi_enabled) {
1927 		device_printf(dev, "using legacy interrupts\n");
1928 		sc->aac_max_msix = 1;
1929 	} else {
1930 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1931 		if (sc->aac_max_msix > msi_count)
1932 			sc->aac_max_msix = msi_count;
1933 	}
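	/* Split the outstanding-FIB budget evenly across the vectors in use. */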
1934 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1935 
1936 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1937 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1938 }
1939 
1940 static int
1941 aac_find_pci_capability(struct aac_softc *sc, int cap)
1942 {
1943 	device_t dev;
1944 	uint32_t status;
1945 	uint8_t ptr;
1946 
1947 	dev = sc->aac_dev;
1948 
1949 	status = pci_read_config(dev, PCIR_STATUS, 2);
1950 	if (!(status & PCIM_STATUS_CAPPRESENT))
1951 		return (0);
1952 
1953 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1954 	switch (status & PCIM_HDRTYPE) {
1955 	case 0:
1956 	case 1:
1957 		ptr = PCIR_CAP_PTR;
1958 		break;
1959 	case 2:
1960 		ptr = PCIR_CAP_PTR_2;
1961 		break;
1962 	default:
1963 		return (0);
1964 		break;
1965 	}
1966 	ptr = pci_read_config(dev, ptr, 1);
1967 
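	/* Walk the PCI capability list until the requested capability ID is found. */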
1968 	while (ptr != 0) {
1969 		int next, val;
1970 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1971 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1972 		if (val == cap)
1973 			return (ptr);
1974 		ptr = next;
1975 	}
1976 
1977 	return (0);
1978 }
1979 
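/*
 * Allocate an IRQ resource and hook up an interrupt handler for every
 * vector in use (a single one for INTx/MSI).
 */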
1980 static int
1981 aac_setup_intr(struct aac_softc *sc)
1982 {
1983 	int i, msi_count, rid;
1984 	struct resource *res;
1985 	void *tag;
1986 
1987 	msi_count = sc->aac_max_msix;
1988 	rid = ((sc->msi_enabled || sc->msi_tupelo) ? 1 : 0);
1989 
1990 	for (i = 0; i < msi_count; i++, rid++) {
1991 		if ((res = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &rid,
1992 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1993 			device_printf(sc->aac_dev, "can't allocate interrupt\n");
1994 			return (EINVAL);
1995 		}
1996 		sc->aac_irq_rid[i] = rid;
1997 		sc->aac_irq[i] = res;
1998 		if (aac_bus_setup_intr(sc->aac_dev, res,
1999 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
2000 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
2001 			device_printf(sc->aac_dev, "can't set up interrupt\n");
2002 			return (EINVAL);
2003 		}
2004 		sc->aac_msix[i].vector_no = i;
2005 		sc->aac_msix[i].sc = sc;
2006 		sc->aac_intr[i] = tag;
2007 	}
2008 
2009 	return (0);
2010 }
2011 
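/*
 * Query the adapter's configuration status and, if the adapter reports that
 * it is safe to do so, commit the configuration.
 */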
2012 static int
2013 aac_check_config(struct aac_softc *sc)
2014 {
2015 	struct aac_fib *fib;
2016 	struct aac_cnt_config *ccfg;
2017 	struct aac_cf_status_hdr *cf_shdr;
2018 	int rval;
2019 
2020 	mtx_lock(&sc->aac_io_lock);
2021 	aac_alloc_sync_fib(sc, &fib);
2022 
2023 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2024 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2025 	ccfg->Command = VM_ContainerConfig;
2026 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2027 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2028 
2029 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2030 		sizeof (struct aac_cnt_config));
2031 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2032 	if (rval == 0 && ccfg->Command == ST_OK &&
2033 		ccfg->CTCommand.param[0] == CT_OK) {
2034 		if (cf_shdr->action <= CFACT_PAUSE) {
2035 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2036 			ccfg->Command = VM_ContainerConfig;
2037 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2038 
2039 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2040 				sizeof (struct aac_cnt_config));
2041 			if (rval == 0 && ccfg->Command == ST_OK &&
2042 				ccfg->CTCommand.param[0] == CT_OK) {
2043 				/* successful completion */
2044 				rval = 0;
2045 			} else {
2046 				/* auto commit aborted due to error(s) */
2047 				rval = -2;
2048 			}
2049 		} else {
2050 			/* auto commit aborted due to adapter indicating
2051 			   config. issues too dangerous to auto commit  */
2052 			rval = -3;
2053 		}
2054 	} else {
2055 		/* error */
2056 		rval = -1;
2057 	}
2058 
2059 	aac_release_sync_fib(sc);
2060 	mtx_unlock(&sc->aac_io_lock);
2061 	return(rval);
2062 }
2063 
2064 /*
2065  * Send a synchronous command to the controller and wait for a result.
2066  * Indicate if the controller completed the command with an error status.
2067  */
2068 int
2069 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2070 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2071 		 u_int32_t *sp, u_int32_t *r1)
2072 {
2073 	time_t then;
2074 	u_int32_t status;
2075 
2076 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2077 
2078 	/* populate the mailbox */
2079 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2080 
2081 	/* ensure the sync command doorbell flag is cleared */
2082 	if (!sc->msi_enabled)
2083 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2084 
2085 	/* then set it to signal the adapter */
2086 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2087 
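	/*
	 * Unless this is an AAC_MONKER_SYNCFIB request whose caller passed a
	 * status pointer containing 0 (in which case the wait is skipped),
	 * spin until the adapter raises the sync-command doorbell and then
	 * collect the status and return parameter.
	 */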
2088 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2089 		/* spin waiting for the command to complete */
2090 		then = time_uptime;
2091 		do {
2092 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2093 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2094 				return(EIO);
2095 			}
2096 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2097 
2098 		/* clear the completion flag */
2099 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2100 
2101 		/* get the command status */
2102 		status = AAC_GET_MAILBOX(sc, 0);
2103 		if (sp != NULL)
2104 			*sp = status;
2105 
2106 		/* return parameter */
2107 		if (r1 != NULL)
2108 			*r1 = AAC_GET_MAILBOX(sc, 1);
2109 
2110 		if (status != AAC_SRB_STS_SUCCESS)
2111 			return (-1);
2112 	}
2113 	return(0);
2114 }
2115 
2116 static int
2117 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2118 		 struct aac_fib *fib, u_int16_t datasize)
2119 {
2120 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2121 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2122 
2123 	if (datasize > AAC_FIB_DATASIZE)
2124 		return(EINVAL);
2125 
2126 	/*
2127 	 * Set up the sync FIB
2128 	 */
2129 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2130 				AAC_FIBSTATE_INITIALISED |
2131 				AAC_FIBSTATE_EMPTY;
2132 	fib->Header.XferState |= xferstate;
2133 	fib->Header.Command = command;
2134 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2135 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2136 	fib->Header.SenderSize = sizeof(struct aac_fib);
2137 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2138 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2139 		offsetof(struct aac_common, ac_sync_fib);
2140 
2141 	/*
2142 	 * Give the FIB to the controller, wait for a response.
2143 	 */
2144 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2145 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2146 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2147 		return(EIO);
2148 	}
2149 
2150 	return (0);
2151 }
2152 
2153 /*
2154  * Check for commands that have been outstanding for a suspiciously long time,
2155  * and complain about them.
2156  */
2157 static void
2158 aac_timeout(struct aac_softc *sc)
2159 {
2160 	struct aac_command *cm;
2161 	time_t deadline;
2162 	int timedout;
2163 
2164 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2165 	/*
2166 	 * Traverse the busy command list and complain about late commands,
2167 	 * but only once.
2168 	 */
2169 	timedout = 0;
2170 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2171 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2172 		if (cm->cm_timestamp < deadline) {
2173 			device_printf(sc->aac_dev,
2174 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2175 				      cm, (int)(time_uptime-cm->cm_timestamp));
2176 			AAC_PRINT_FIB(sc, cm->cm_fib);
2177 			timedout++;
2178 		}
2179 	}
2180 
2181 	if (timedout)
2182 		aac_reset_adapter(sc);
2183 	aacraid_print_queues(sc);
2184 }
2185 
2186 /*
2187  * Interface Function Vectors
2188  */
2189 
2190 /*
2191  * Read the current firmware status word.
2192  */
2193 static int
2194 aac_src_get_fwstatus(struct aac_softc *sc)
2195 {
2196 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2197 
2198 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2199 }
2200 
2201 /*
2202  * Notify the controller of a change in a given queue
2203  */
2204 static void
2205 aac_src_qnotify(struct aac_softc *sc, int qbit)
2206 {
2207 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2208 
2209 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2210 }
2211 
2212 /*
2213  * Get the interrupt reason bits
2214  */
2215 static int
2216 aac_src_get_istatus(struct aac_softc *sc)
2217 {
2218 	int val;
2219 
2220 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2221 
2222 	if (sc->msi_enabled) {
2223 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2224 		if (val & AAC_MSI_SYNC_STATUS)
2225 			val = AAC_DB_SYNC_COMMAND;
2226 		else
2227 			val = 0;
2228 	} else {
2229 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2230 	}
2231 	return(val);
2232 }
2233 
2234 /*
2235  * Clear some interrupt reason bits
2236  */
2237 static void
2238 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2239 {
2240 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2241 
2242 	if (sc->msi_enabled) {
2243 		if (mask == AAC_DB_SYNC_COMMAND)
2244 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2245 	} else {
2246 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2247 	}
2248 }
2249 
2250 /*
2251  * Populate the mailbox and set the command word
2252  */
2253 static void
2254 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2255 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2256 {
2257 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2258 
2259 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2260 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2261 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2262 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2263 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2264 }
2265 
2266 static void
2267 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2268 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2269 {
2270 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2271 
2272 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2273 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2274 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2275 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2276 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2277 }
2278 
2279 /*
2280  * Fetch the immediate command status word
2281  */
2282 static int
2283 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2284 {
2285 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2286 
2287 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2288 }
2289 
2290 static int
2291 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2292 {
2293 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2294 
2295 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2296 }
2297 
2298 /*
2299  * Set/clear interrupt masks
2300  */
2301 static void
2302 aac_src_access_devreg(struct aac_softc *sc, int mode)
2303 {
2304 	u_int32_t val;
2305 
2306 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2307 
2308 	switch (mode) {
2309 	case AAC_ENABLE_INTERRUPT:
2310 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2311 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2312 				           AAC_INT_ENABLE_TYPE1_INTX));
2313 		break;
2314 
2315 	case AAC_DISABLE_INTERRUPT:
2316 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2317 		break;
2318 
2319 	case AAC_ENABLE_MSIX:
2320 		/* set bit 6 */
2321 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2322 		val |= 0x40;
2323 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2324 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2325 		/* unmask int. */
2326 		val = PMC_ALL_INTERRUPT_BITS;
2327 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2328 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2329 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2330 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2331 		break;
2332 
2333 	case AAC_DISABLE_MSIX:
2334 		/* reset bit 6 */
2335 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2336 		val &= ~0x40;
2337 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2338 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2339 		break;
2340 
2341 	case AAC_CLEAR_AIF_BIT:
2342 		/* set bit 5 */
2343 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2344 		val |= 0x20;
2345 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2346 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2347 		break;
2348 
2349 	case AAC_CLEAR_SYNC_BIT:
2350 		/* set bit 4 */
2351 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2352 		val |= 0x10;
2353 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2354 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2355 		break;
2356 
2357 	case AAC_ENABLE_INTX:
2358 		/* set bit 7 */
2359 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2360 		val |= 0x80;
2361 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2362 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2363 		/* unmask int. */
2364 		val = PMC_ALL_INTERRUPT_BITS;
2365 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2366 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2367 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2368 			val & (~(PMC_GLOBAL_INT_BIT2)));
2369 		break;
2370 
2371 	default:
2372 		break;
2373 	}
2374 }
2375 
2376 /*
2377  * New comm. interface: Send command functions
2378  */
2379 static int
2380 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2381 {
2382 	struct aac_fib_xporthdr *pFibX;
2383 	u_int32_t fibsize, high_addr;
2384 	u_int64_t address;
2385 
2386 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2387 
2388 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2389 		sc->aac_max_msix > 1) {
2390 		u_int16_t vector_no, first_choice = 0xffff;
2391 
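		/*
		 * Round-robin reply vector selection: starting after the last
		 * vector used, pick the first vector (1..aac_max_msix-1) whose
		 * outstanding count is below aac_vector_cap; if all are full,
		 * fall back to vector 0.  The chosen vector is encoded in the
		 * upper 16 bits of the FIB handle.
		 */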
2392 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2393 		do {
2394 			vector_no += 1;
2395 			if (vector_no == sc->aac_max_msix)
2396 				vector_no = 1;
2397 			if (sc->aac_rrq_outstanding[vector_no] <
2398 				sc->aac_vector_cap)
2399 				break;
2400 			if (0xffff == first_choice)
2401 				first_choice = vector_no;
2402 			else if (vector_no == first_choice)
2403 				break;
2404 		} while (1);
2405 		if (vector_no == first_choice)
2406 			vector_no = 0;
2407 		sc->aac_rrq_outstanding[vector_no]++;
2408 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2409 			sc->aac_fibs_pushed_no = 0;
2410 		else
2411 			sc->aac_fibs_pushed_no++;
2412 
2413 		cm->cm_fib->Header.Handle += (vector_no << 16);
2414 	}
2415 
2416 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2417 		/* Calculate the value for the fibsize bits (FIB size in 128-byte units, minus one) */
2418 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2419 		/* Fill new FIB header */
2420 		address = cm->cm_fibphys;
2421 		high_addr = (u_int32_t)(address >> 32);
2422 		if (high_addr == 0L) {
2423 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2424 			cm->cm_fib->Header.u.TimeStamp = 0L;
2425 		} else {
2426 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2427 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2428 		}
2429 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2430 	} else {
2431 		/* Calculate the value for the fibsize bits (transport header plus FIB size, in 128-byte units, minus one) */
2432 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2433 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2434 		/* Fill XPORT header */
2435 		pFibX = (struct aac_fib_xporthdr *)
2436 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2437 		pFibX->Handle = cm->cm_fib->Header.Handle;
2438 		pFibX->HostAddress = cm->cm_fibphys;
2439 		pFibX->Size = cm->cm_fib->Header.Size;
2440 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2441 		high_addr = (u_int32_t)(address >> 32);
2442 	}
2443 
2444 	if (fibsize > 31)
2445 		fibsize = 31;
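	/*
	 * The size code (capped at 31) is combined with the FIB bus address
	 * in the inbound queue register write below.
	 */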
2446 	aac_enqueue_busy(cm);
2447 	if (high_addr) {
2448 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2449 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2450 	} else {
2451 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2452 	}
2453 	return 0;
2454 }
2455 
2456 /*
2457  * New comm. interface: get, set outbound queue index
2458  */
2459 static int
2460 aac_src_get_outb_queue(struct aac_softc *sc)
2461 {
2462 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2463 
2464 	return(-1);
2465 }
2466 
2467 static void
2468 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2469 {
2470 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2471 }
2472 
2473 /*
2474  * Debugging and Diagnostics
2475  */
2476 
2477 /*
2478  * Print some information about the controller.
2479  */
2480 static void
2481 aac_describe_controller(struct aac_softc *sc)
2482 {
2483 	struct aac_fib *fib;
2484 	struct aac_adapter_info	*info;
2485 	char *adapter_type = "Adaptec RAID controller";
2486 
2487 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2488 
2489 	mtx_lock(&sc->aac_io_lock);
2490 	aac_alloc_sync_fib(sc, &fib);
2491 
2492 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2493 		fib->data[0] = 0;
2494 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2495 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2496 		else {
2497 			struct aac_supplement_adapter_info *supp_info;
2498 
2499 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2500 			adapter_type = (char *)supp_info->AdapterTypeText;
2501 			sc->aac_feature_bits = supp_info->FeatureBits;
2502 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2503 		}
2504 	}
2505 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2506 		adapter_type,
2507 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2508 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2509 
2510 	fib->data[0] = 0;
2511 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2512 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2513 		aac_release_sync_fib(sc);
2514 		mtx_unlock(&sc->aac_io_lock);
2515 		return;
2516 	}
2517 
2518 	/* save the kernel revision structure for later use */
2519 	info = (struct aac_adapter_info *)&fib->data[0];
2520 	sc->aac_revision = info->KernelRevision;
2521 
2522 	if (bootverbose) {
2523 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2524 		    "(%dMB cache, %dMB execution), %s\n",
2525 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2526 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2527 		    info->BufferMem / (1024 * 1024),
2528 		    info->ExecutionMem / (1024 * 1024),
2529 		    aac_describe_code(aac_battery_platform,
2530 		    info->batteryPlatform));
2531 
2532 		device_printf(sc->aac_dev,
2533 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2534 		    info->KernelRevision.external.comp.major,
2535 		    info->KernelRevision.external.comp.minor,
2536 		    info->KernelRevision.external.comp.dash,
2537 		    info->KernelRevision.buildNumber,
2538 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2539 
2540 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2541 			      sc->supported_options,
2542 			      "\20"
2543 			      "\1SNAPSHOT"
2544 			      "\2CLUSTERS"
2545 			      "\3WCACHE"
2546 			      "\4DATA64"
2547 			      "\5HOSTTIME"
2548 			      "\6RAID50"
2549 			      "\7WINDOW4GB"
2550 			      "\10SCSIUPGD"
2551 			      "\11SOFTERR"
2552 			      "\12NORECOND"
2553 			      "\13SGMAP64"
2554 			      "\14ALARM"
2555 			      "\15NONDASD"
2556 			      "\16SCSIMGT"
2557 			      "\17RAIDSCSI"
2558 			      "\21ADPTINFO"
2559 			      "\22NEWCOMM"
2560 			      "\23ARRAY64BIT"
2561 			      "\24HEATSENSOR");
2562 	}
2563 
2564 	aac_release_sync_fib(sc);
2565 	mtx_unlock(&sc->aac_io_lock);
2566 }
2567 
2568 /*
2569  * Look up a text description of a numeric error code and return a pointer to
2570  * same.
2571  */
2572 static char *
2573 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2574 {
2575 	int i;
2576 
2577 	for (i = 0; table[i].string != NULL; i++)
2578 		if (table[i].code == code)
2579 			return(table[i].string);
2580 	return(table[i + 1].string);
2581 }
2582 
2583 /*
2584  * Management Interface
2585  */
2586 
2587 static int
2588 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2589 {
2590 	struct aac_softc *sc;
2591 
2592 	sc = dev->si_drv1;
2593 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2594 #if __FreeBSD_version >= 702000
2595 	device_busy(sc->aac_dev);
2596 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2597 #endif
2598 	return 0;
2599 }
2600 
2601 static int
2602 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2603 {
2604 	union aac_statrequest *as;
2605 	struct aac_softc *sc;
2606 	int error = 0;
2607 
2608 	as = (union aac_statrequest *)arg;
2609 	sc = dev->si_drv1;
2610 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2611 
2612 	switch (cmd) {
2613 	case AACIO_STATS:
2614 		switch (as->as_item) {
2615 		case AACQ_FREE:
2616 		case AACQ_READY:
2617 		case AACQ_BUSY:
2618 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2619 			      sizeof(struct aac_qstat));
2620 			break;
2621 		default:
2622 			error = ENOENT;
2623 			break;
2624 		}
2625 		break;
2626 
2627 	case FSACTL_SENDFIB:
2628 	case FSACTL_SEND_LARGE_FIB:
2629 		arg = *(caddr_t*)arg;
2630 	case FSACTL_LNX_SENDFIB:
2631 	case FSACTL_LNX_SEND_LARGE_FIB:
2632 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2633 		error = aac_ioctl_sendfib(sc, arg);
2634 		break;
2635 	case FSACTL_SEND_RAW_SRB:
2636 		arg = *(caddr_t*)arg;
2637 	case FSACTL_LNX_SEND_RAW_SRB:
2638 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2639 		error = aac_ioctl_send_raw_srb(sc, arg);
2640 		break;
2641 	case FSACTL_AIF_THREAD:
2642 	case FSACTL_LNX_AIF_THREAD:
2643 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2644 		error = EINVAL;
2645 		break;
2646 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2647 		arg = *(caddr_t*)arg;
2648 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2649 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2650 		error = aac_open_aif(sc, arg);
2651 		break;
2652 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2653 		arg = *(caddr_t*)arg;
2654 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2655 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2656 		error = aac_getnext_aif(sc, arg);
2657 		break;
2658 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2662 		error = aac_close_aif(sc, arg);
2663 		break;
2664 	case FSACTL_MINIPORT_REV_CHECK:
2665 		arg = *(caddr_t*)arg;
2666 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2668 		error = aac_rev_check(sc, arg);
2669 		break;
2670 	case FSACTL_QUERY_DISK:
2671 		arg = *(caddr_t*)arg;
2672 	case FSACTL_LNX_QUERY_DISK:
2673 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2674 		error = aac_query_disk(sc, arg);
2675 		break;
2676 	case FSACTL_DELETE_DISK:
2677 	case FSACTL_LNX_DELETE_DISK:
2678 		/*
2679 		 * We don't trust userland to tell us when to delete a
2680 		 * container, rather we rely on an AIF coming from the
2681 		 * controller
2682 		 */
2683 		error = 0;
2684 		break;
2685 	case FSACTL_GET_PCI_INFO:
2686 		arg = *(caddr_t*)arg;
2687 	case FSACTL_LNX_GET_PCI_INFO:
2688 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2689 		error = aac_get_pci_info(sc, arg);
2690 		break;
2691 	case FSACTL_GET_FEATURES:
2692 		arg = *(caddr_t*)arg;
2693 	case FSACTL_LNX_GET_FEATURES:
2694 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2695 		error = aac_supported_features(sc, arg);
2696 		break;
2697 	default:
2698 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2699 		error = EINVAL;
2700 		break;
2701 	}
2702 	return(error);
2703 }
2704 
2705 static int
2706 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2707 {
2708 	struct aac_softc *sc;
2709 	struct aac_fib_context *ctx;
2710 	int revents;
2711 
2712 	sc = dev->si_drv1;
2713 	revents = 0;
2714 
2715 	mtx_lock(&sc->aac_io_lock);
2716 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2717 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2718 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2719 				revents |= poll_events & (POLLIN | POLLRDNORM);
2720 				break;
2721 			}
2722 		}
2723 	}
2724 	mtx_unlock(&sc->aac_io_lock);
2725 
2726 	if (revents == 0) {
2727 		if (poll_events & (POLLIN | POLLRDNORM))
2728 			selrecord(td, &sc->rcv_select);
2729 	}
2730 
2731 	return (revents);
2732 }
2733 
2734 static void
2735 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2736 {
2737 
2738 	switch (event->ev_type) {
2739 	case AAC_EVENT_CMFREE:
2740 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2741 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2742 			aacraid_add_event(sc, event);
2743 			return;
2744 		}
2745 		free(event, M_AACRAIDBUF);
2746 		wakeup(arg);
2747 		break;
2748 	default:
2749 		break;
2750 	}
2751 }
2752 
2753 /*
2754  * Send a FIB supplied from userspace
2755  */
2756 static int
2757 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2758 {
2759 	struct aac_command *cm;
2760 	int size, error;
2761 
2762 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2763 
2764 	cm = NULL;
2765 
2766 	/*
2767 	 * Get a command
2768 	 */
2769 	mtx_lock(&sc->aac_io_lock);
2770 	if (aacraid_alloc_command(sc, &cm)) {
2771 		struct aac_event *event;
2772 
2773 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2774 		    M_NOWAIT | M_ZERO);
2775 		if (event == NULL) {
2776 			error = EBUSY;
2777 			mtx_unlock(&sc->aac_io_lock);
2778 			goto out;
2779 		}
2780 		event->ev_type = AAC_EVENT_CMFREE;
2781 		event->ev_callback = aac_ioctl_event;
2782 		event->ev_arg = &cm;
2783 		aacraid_add_event(sc, event);
2784 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2785 	}
2786 	mtx_unlock(&sc->aac_io_lock);
2787 
2788 	/*
2789 	 * Fetch the FIB header, then re-copy to get data as well.
2790 	 */
2791 	if ((error = copyin(ufib, cm->cm_fib,
2792 			    sizeof(struct aac_fib_header))) != 0)
2793 		goto out;
2794 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2795 	if (size > sc->aac_max_fib_size) {
2796 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2797 			      size, sc->aac_max_fib_size);
2798 		size = sc->aac_max_fib_size;
2799 	}
2800 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2801 		goto out;
2802 	cm->cm_fib->Header.Size = size;
2803 	cm->cm_timestamp = time_uptime;
2804 	cm->cm_datalen = 0;
2805 
2806 	/*
2807 	 * Pass the FIB to the controller, wait for it to complete.
2808 	 */
2809 	mtx_lock(&sc->aac_io_lock);
2810 	error = aacraid_wait_command(cm);
2811 	mtx_unlock(&sc->aac_io_lock);
2812 	if (error != 0) {
2813 		device_printf(sc->aac_dev,
2814 			      "aacraid_wait_command return %d\n", error);
2815 		goto out;
2816 	}
2817 
2818 	/*
2819 	 * Copy the FIB and data back out to the caller.
2820 	 */
2821 	size = cm->cm_fib->Header.Size;
2822 	if (size > sc->aac_max_fib_size) {
2823 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2824 			      size, sc->aac_max_fib_size);
2825 		size = sc->aac_max_fib_size;
2826 	}
2827 	error = copyout(cm->cm_fib, ufib, size);
2828 
2829 out:
2830 	if (cm != NULL) {
2831 		mtx_lock(&sc->aac_io_lock);
2832 		aacraid_release_command(cm);
2833 		mtx_unlock(&sc->aac_io_lock);
2834 	}
2835 	return(error);
2836 }
2837 
2838 /*
2839  * Send a passthrough FIB supplied from userspace
2840  */
2841 static int
2842 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2843 {
2844 	struct aac_command *cm;
2845 	struct aac_fib *fib;
2846 	struct aac_srb *srbcmd;
2847 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2848 	void *user_reply;
2849 	int error, transfer_data = 0;
2850 	bus_dmamap_t orig_map = 0;
2851 	u_int32_t fibsize = 0;
2852 	u_int64_t srb_sg_address;
2853 	u_int32_t srb_sg_bytecount;
2854 
2855 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2856 
2857 	cm = NULL;
2858 
2859 	mtx_lock(&sc->aac_io_lock);
2860 	if (aacraid_alloc_command(sc, &cm)) {
2861 		struct aac_event *event;
2862 
2863 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2864 		    M_NOWAIT | M_ZERO);
2865 		if (event == NULL) {
2866 			error = EBUSY;
2867 			mtx_unlock(&sc->aac_io_lock);
2868 			goto out;
2869 		}
2870 		event->ev_type = AAC_EVENT_CMFREE;
2871 		event->ev_callback = aac_ioctl_event;
2872 		event->ev_arg = &cm;
2873 		aacraid_add_event(sc, event);
2874 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2875 	}
2876 	mtx_unlock(&sc->aac_io_lock);
2877 
2878 	cm->cm_data = NULL;
2879 	/* save original dma map */
2880 	orig_map = cm->cm_datamap;
2881 
2882 	fib = cm->cm_fib;
2883 	srbcmd = (struct aac_srb *)fib->data;
2884 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2885 	    sizeof (u_int32_t))) != 0)
2886 		goto out;
2887 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2888 		error = EINVAL;
2889 		goto out;
2890 	}
2891 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2892 		goto out;
2893 
2894 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2895 	srbcmd->retry_limit = 0;	/* obsolete */
2896 
2897 	/* only one sg element from userspace supported */
2898 	if (srbcmd->sg_map.SgCount > 1) {
2899 		error = EINVAL;
2900 		goto out;
2901 	}
2902 	/* check fibsize */
2903 	if (fibsize == (sizeof(struct aac_srb) +
2904 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2905 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2906 		struct aac_sg_entry sg;
2907 
2908 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2909 			goto out;
2910 
2911 		srb_sg_bytecount = sg.SgByteCount;
2912 		srb_sg_address = (u_int64_t)sg.SgAddress;
2913 	} else if (fibsize == (sizeof(struct aac_srb) +
2914 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2915 #ifdef __LP64__
2916 		struct aac_sg_entry64 *sgp =
2917 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2918 		struct aac_sg_entry64 sg;
2919 
2920 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2921 			goto out;
2922 
2923 		srb_sg_bytecount = sg.SgByteCount;
2924 		srb_sg_address = sg.SgAddress;
2925 #else
2926 		error = EINVAL;
2927 		goto out;
2928 #endif
2929 	} else {
2930 		error = EINVAL;
2931 		goto out;
2932 	}
2933 	user_reply = (char *)arg + fibsize;
2934 	srbcmd->data_len = srb_sg_bytecount;
2935 	if (srbcmd->sg_map.SgCount == 1)
2936 		transfer_data = 1;
2937 
2938 	if (transfer_data) {
2939 		/*
2940 		 * Create DMA tag for the passthr. data buffer and allocate it.
2941 		 */
2942 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2943 			1, 0,			/* algnmnt, boundary */
2944 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2945 			BUS_SPACE_MAXADDR_32BIT :
2946 			0x7fffffff,		/* lowaddr */
2947 			BUS_SPACE_MAXADDR, 	/* highaddr */
2948 			NULL, NULL, 		/* filter, filterarg */
2949 			srb_sg_bytecount, 	/* size */
2950 			sc->aac_sg_tablesize,	/* nsegments */
2951 			srb_sg_bytecount, 	/* maxsegsize */
2952 			0,			/* flags */
2953 			NULL, NULL,		/* No locking needed */
2954 			&cm->cm_passthr_dmat)) {
2955 			error = ENOMEM;
2956 			goto out;
2957 		}
2958 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2959 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2960 			error = ENOMEM;
2961 			goto out;
2962 		}
2963 		/* fill some cm variables */
2964 		cm->cm_datalen = srb_sg_bytecount;
2965 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2966 			cm->cm_flags |= AAC_CMD_DATAIN;
2967 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2968 			cm->cm_flags |= AAC_CMD_DATAOUT;
2969 
2970 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2971 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2972 				cm->cm_data, cm->cm_datalen)) != 0)
2973 				goto out;
2974 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2975 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2976 				BUS_DMASYNC_PREWRITE);
2977 		}
2978 	}
2979 
2980 	/* build the FIB */
2981 	fib->Header.Size = sizeof(struct aac_fib_header) +
2982 		sizeof(struct aac_srb);
2983 	fib->Header.XferState =
2984 		AAC_FIBSTATE_HOSTOWNED   |
2985 		AAC_FIBSTATE_INITIALISED |
2986 		AAC_FIBSTATE_EMPTY	 |
2987 		AAC_FIBSTATE_FROMHOST	 |
2988 		AAC_FIBSTATE_REXPECTED   |
2989 		AAC_FIBSTATE_NORM	 |
2990 		AAC_FIBSTATE_ASYNC;
2991 
2992 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2993 		ScsiPortCommandU64 : ScsiPortCommand;
2994 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2995 
2996 	/* send command */
2997 	if (transfer_data) {
2998 		bus_dmamap_load(cm->cm_passthr_dmat,
2999 			cm->cm_datamap, cm->cm_data,
3000 			cm->cm_datalen,
3001 			aacraid_map_command_sg, cm, 0);
3002 	} else {
3003 		aacraid_map_command_sg(cm, NULL, 0, 0);
3004 	}
3005 
3006 	/* wait for completion */
3007 	mtx_lock(&sc->aac_io_lock);
3008 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3009 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3010 	mtx_unlock(&sc->aac_io_lock);
3011 
3012 	/* copy data */
3013 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
3014 		if ((error = copyout(cm->cm_data,
3015 			(void *)(uintptr_t)srb_sg_address,
3016 			cm->cm_datalen)) != 0)
3017 			goto out;
3018 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3019 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3020 				BUS_DMASYNC_POSTREAD);
3021 	}
3022 
3023 	/* status */
3024 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3025 
3026 out:
3027 	if (cm && cm->cm_data) {
3028 		if (transfer_data)
3029 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3030 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3031 		cm->cm_datamap = orig_map;
3032 	}
3033 	if (cm && cm->cm_passthr_dmat)
3034 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3035 	if (cm) {
3036 		mtx_lock(&sc->aac_io_lock);
3037 		aacraid_release_command(cm);
3038 		mtx_unlock(&sc->aac_io_lock);
3039 	}
3040 	return(error);
3041 }
3042 
3043 /*
3044  * Request an AIF from the controller (new comm. type1)
3045  */
3046 static void
3047 aac_request_aif(struct aac_softc *sc)
3048 {
3049 	struct aac_command *cm;
3050 	struct aac_fib *fib;
3051 
3052 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3053 
3054 	if (aacraid_alloc_command(sc, &cm)) {
3055 		sc->aif_pending = 1;
3056 		return;
3057 	}
3058 	sc->aif_pending = 0;
3059 
3060 	/* build the FIB */
3061 	fib = cm->cm_fib;
3062 	fib->Header.Size = sizeof(struct aac_fib);
3063 	fib->Header.XferState =
3064 		AAC_FIBSTATE_HOSTOWNED   |
3065 		AAC_FIBSTATE_INITIALISED |
3066 		AAC_FIBSTATE_EMPTY	 |
3067 		AAC_FIBSTATE_FROMHOST	 |
3068 		AAC_FIBSTATE_REXPECTED   |
3069 		AAC_FIBSTATE_NORM	 |
3070 		AAC_FIBSTATE_ASYNC;
3071 	/* set AIF marker */
3072 	fib->Header.Handle = 0x00800000;
3073 	fib->Header.Command = AifRequest;
3074 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3075 
3076 	aacraid_map_command_sg(cm, NULL, 0, 0);
3077 }
3078 
3079 
3080 #if __FreeBSD_version >= 702000
3081 /*
3082  * cdevpriv interface private destructor.
3083  */
3084 static void
3085 aac_cdevpriv_dtor(void *arg)
3086 {
3087 	struct aac_softc *sc;
3088 
3089 	sc = arg;
3090 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3091 	device_unbusy(sc->aac_dev);
3092 }
3093 #else
3094 static int
3095 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3096 {
3097 	struct aac_softc *sc;
3098 
3099 	sc = dev->si_drv1;
3100 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3101 	return 0;
3102 }
3103 #endif
3104 
3105 /*
3106  * Handle an AIF sent to us by the controller; queue it for later reference.
3107  * If the queue fills up, then drop the older entries.
3108  */
3109 static void
3110 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3111 {
3112 	struct aac_aif_command *aif;
3113 	struct aac_container *co, *co_next;
3114 	struct aac_fib_context *ctx;
3115 	struct aac_fib *sync_fib;
3116 	struct aac_mntinforesp mir;
3117 	int next, current, found;
3118 	int count = 0, changed = 0, i = 0;
3119 	u_int32_t channel, uid;
3120 
3121 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3122 
3123 	aif = (struct aac_aif_command*)&fib->data[0];
3124 	aacraid_print_aif(sc, aif);
3125 
3126 	/* Is it an event that we should care about? */
3127 	switch (aif->command) {
3128 	case AifCmdEventNotify:
3129 		switch (aif->data.EN.type) {
3130 		case AifEnAddContainer:
3131 		case AifEnDeleteContainer:
3132 			/*
3133 			 * A container was added or deleted, but the message
3134 			 * doesn't tell us anything else!  Re-enumerate the
3135 			 * containers and sort things out.
3136 			 */
3137 			aac_alloc_sync_fib(sc, &sync_fib);
3138 			do {
3139 				/*
3140 				 * Ask the controller for its containers one at
3141 				 * a time.
3142 				 * XXX What if the controller's list changes
3143 				 * midway through this enumeration?
3144 				 * XXX This should be done async.
3145 				 */
3146 				if (aac_get_container_info(sc, sync_fib, i,
3147 					&mir, &uid) != 0)
3148 					continue;
3149 				if (i == 0)
3150 					count = mir.MntRespCount;
3151 				/*
3152 				 * Check the container against our list.
3153 				 * co->co_found was already set to 0 in a
3154 				 * previous run.
3155 				 */
3156 				if ((mir.Status == ST_OK) &&
3157 				    (mir.MntTable[0].VolType != CT_NONE)) {
3158 					found = 0;
3159 					TAILQ_FOREACH(co,
3160 						      &sc->aac_container_tqh,
3161 						      co_link) {
3162 						if (co->co_mntobj.ObjectId ==
3163 						    mir.MntTable[0].ObjectId) {
3164 							co->co_found = 1;
3165 							found = 1;
3166 							break;
3167 						}
3168 					}
3169 					/*
3170 					 * If the container matched, continue
3171 					 * in the list.
3172 					 */
3173 					if (found) {
3174 						i++;
3175 						continue;
3176 					}
3177 
3178 					/*
3179 					 * This is a new container.  Do all the
3180 					 * appropriate things to set it up.
3181 					 */
3182 					aac_add_container(sc, &mir, 1, uid);
3183 					changed = 1;
3184 				}
3185 				i++;
3186 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3187 			aac_release_sync_fib(sc);
3188 
3189 			/*
3190 			 * Go through our list of containers and see which ones
3191 			 * were not marked 'found'.  Since the controller didn't
3192 			 * list them they must have been deleted.  Do the
3193 			 * appropriate steps to destroy the device.  Also reset
3194 			 * the co->co_found field.
3195 			 */
3196 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3197 			while (co != NULL) {
3198 				if (co->co_found == 0) {
3199 					co_next = TAILQ_NEXT(co, co_link);
3200 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3201 						     co_link);
3202 					free(co, M_AACRAIDBUF);
3203 					changed = 1;
3204 					co = co_next;
3205 				} else {
3206 					co->co_found = 0;
3207 					co = TAILQ_NEXT(co, co_link);
3208 				}
3209 			}
3210 
3211 			/* Attach the newly created containers */
3212 			if (changed) {
3213 				if (sc->cam_rescan_cb != NULL)
3214 					sc->cam_rescan_cb(sc, 0,
3215 				    	AAC_CAM_TARGET_WILDCARD);
3216 			}
3217 
3218 			break;
3219 
3220 		case AifEnEnclosureManagement:
3221 			switch (aif->data.EN.data.EEE.eventType) {
3222 			case AIF_EM_DRIVE_INSERTION:
3223 			case AIF_EM_DRIVE_REMOVAL:
3224 				channel = aif->data.EN.data.EEE.unitID;
3225 				if (sc->cam_rescan_cb != NULL)
3226 					sc->cam_rescan_cb(sc,
3227 					    ((channel>>24) & 0xF) + 1,
3228 					    (channel & 0xFFFF));
3229 				break;
3230 			}
3231 			break;
3232 
3233 		case AifEnAddJBOD:
3234 		case AifEnDeleteJBOD:
3235 		case AifRawDeviceRemove:
3236 			channel = aif->data.EN.data.ECE.container;
3237 			if (sc->cam_rescan_cb != NULL)
3238 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3239 				    AAC_CAM_TARGET_WILDCARD);
3240 			break;
3241 
3242 		default:
3243 			break;
3244 		}
3245 
3246 	default:
3247 		break;
3248 	}
3249 
3250 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3251 	current = sc->aifq_idx;
3252 	next = (current + 1) % AAC_AIFQ_LENGTH;
3253 	if (next == 0)
3254 		sc->aifq_filled = 1;
3255 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3256 	/* modify AIF contexts */
3257 	/* adjust any open AIF contexts the write index has caught up with */
3258 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3259 			if (next == ctx->ctx_idx)
3260 				ctx->ctx_wrap = 1;
3261 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3262 				ctx->ctx_idx = next;
3263 		}
3264 	}
3265 	sc->aifq_idx = next;
3266 	/* On the off chance that someone is sleeping for an aif... */
3267 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3268 		wakeup(sc->aac_aifq);
3269 	/* Wakeup any poll()ers */
3270 	selwakeuppri(&sc->rcv_select, PRIBIO);
3271 
3272 	return;
3273 }
3274 
3275 /*
3276  * Return the Revision of the driver to userspace and check to see if the
3277  * userspace app is possibly compatible.  This is extremely bogus since
3278  * our driver doesn't follow Adaptec's versioning system.  Cheat by
3279  * claiming compatibility and returning the driver's own version numbers.
3280  */
3281 static int
3282 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3283 {
3284 	struct aac_rev_check rev_check;
3285 	struct aac_rev_check_resp rev_check_resp;
3286 	int error = 0;
3287 
3288 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3289 
3290 	/*
3291 	 * Copyin the revision struct from userspace
3292 	 */
3293 	if ((error = copyin(udata, (caddr_t)&rev_check,
3294 			sizeof(struct aac_rev_check))) != 0) {
3295 		return error;
3296 	}
3297 
3298 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3299 	      rev_check.callingRevision.buildNumber);
3300 
3301 	/*
3302 	 * Doctor up the response struct.
3303 	 */
3304 	rev_check_resp.possiblyCompatible = 1;
3305 	rev_check_resp.adapterSWRevision.external.comp.major =
3306 	    AAC_DRIVER_MAJOR_VERSION;
3307 	rev_check_resp.adapterSWRevision.external.comp.minor =
3308 	    AAC_DRIVER_MINOR_VERSION;
3309 	rev_check_resp.adapterSWRevision.external.comp.type =
3310 	    AAC_DRIVER_TYPE;
3311 	rev_check_resp.adapterSWRevision.external.comp.dash =
3312 	    AAC_DRIVER_BUGFIX_LEVEL;
3313 	rev_check_resp.adapterSWRevision.buildNumber =
3314 	    AAC_DRIVER_BUILD;
3315 
3316 	return(copyout((caddr_t)&rev_check_resp, udata,
3317 			sizeof(struct aac_rev_check_resp)));
3318 }
3319 
3320 /*
3321  * Pass the fib context to the caller
3322  */
3323 static int
3324 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3325 {
3326 	struct aac_fib_context *fibctx, *ctx;
3327 	int error = 0;
3328 
3329 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3330 
3331 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3332 	if (fibctx == NULL)
3333 		return (ENOMEM);
3334 
3335 	mtx_lock(&sc->aac_io_lock);
3336 	/* all elements are already 0, add to queue */
3337 	if (sc->fibctx == NULL)
3338 		sc->fibctx = fibctx;
3339 	else {
3340 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3341 			;
3342 		ctx->next = fibctx;
3343 		fibctx->prev = ctx;
3344 	}
3345 
3346 	/* derive a unique value from the context pointer and bump it until no other context shares it */
3347 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3348 	ctx = sc->fibctx;
3349 	while (ctx != fibctx) {
3350 		if (ctx->unique == fibctx->unique) {
3351 			fibctx->unique++;
3352 			ctx = sc->fibctx;
3353 		} else {
3354 			ctx = ctx->next;
3355 		}
3356 	}
3357 
3358 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3359 	mtx_unlock(&sc->aac_io_lock);
3360 	if (error)
3361 		aac_close_aif(sc, (caddr_t)ctx);
3362 	return error;
3363 }
3364 
3365 /*
3366  * Close the caller's fib context
3367  */
3368 static int
3369 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3370 {
3371 	struct aac_fib_context *ctx;
3372 
3373 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3374 
3375 	mtx_lock(&sc->aac_io_lock);
3376 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3377 		if (ctx->unique == *(uint32_t *)&arg) {
3378 			if (ctx == sc->fibctx)
3379 				sc->fibctx = NULL;
3380 			else {
3381 				ctx->prev->next = ctx->next;
3382 				if (ctx->next)
3383 					ctx->next->prev = ctx->prev;
3384 			}
3385 			break;
3386 		}
3387 	}
3388 	if (ctx)
3389 		free(ctx, M_AACRAIDBUF);
3390 
3391 	mtx_unlock(&sc->aac_io_lock);
3392 	return 0;
3393 }
3394 
3395 /*
3396  * Pass the caller the next AIF in their queue
3397  */
3398 static int
3399 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3400 {
3401 	struct get_adapter_fib_ioctl agf;
3402 	struct aac_fib_context *ctx;
3403 	int error;
3404 
3405 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3406 
3407 	mtx_lock(&sc->aac_io_lock);
3408 #ifdef COMPAT_FREEBSD32
3409 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3410 		struct get_adapter_fib_ioctl32 agf32;
3411 		error = copyin(arg, &agf32, sizeof(agf32));
3412 		if (error == 0) {
3413 			agf.AdapterFibContext = agf32.AdapterFibContext;
3414 			agf.Wait = agf32.Wait;
3415 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3416 		}
3417 	} else
3418 #endif
3419 		error = copyin(arg, &agf, sizeof(agf));
3420 	if (error == 0) {
3421 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3422 			if (agf.AdapterFibContext == ctx->unique)
3423 				break;
3424 		}
3425 		if (!ctx) {
3426 			mtx_unlock(&sc->aac_io_lock);
3427 			return (EFAULT);
3428 		}
3429 
3430 		error = aac_return_aif(sc, ctx, agf.AifFib);
3431 		if (error == EAGAIN && agf.Wait) {
3432 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3433 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3434 			while (error == EAGAIN) {
3435 				mtx_unlock(&sc->aac_io_lock);
3436 				error = tsleep(sc->aac_aifq, PRIBIO |
3437 					       PCATCH, "aacaif", 0);
3438 				mtx_lock(&sc->aac_io_lock);
3439 				if (error == 0)
3440 					error = aac_return_aif(sc, ctx, agf.AifFib);
3441 			}
3442 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3443 		}
3444 	}
3445 	mtx_unlock(&sc->aac_io_lock);
3446 	return(error);
3447 }
3448 
3449 /*
3450  * Hand the next AIF off the top of the queue out to userspace.
3451  */
3452 static int
3453 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3454 {
3455 	int current, error;
3456 
3457 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3458 
3459 	current = ctx->ctx_idx;
3460 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3461 		/* empty */
3462 		return (EAGAIN);
3463 	}
3464 	error =
3465 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3466 	if (error)
3467 		device_printf(sc->aac_dev,
3468 		    "aac_return_aif: copyout returned %d\n", error);
3469 	else {
3470 		ctx->ctx_wrap = 0;
3471 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3472 	}
3473 	return(error);
3474 }
3475 
3476 static int
3477 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3478 {
3479 	struct aac_pci_info {
3480 		u_int32_t bus;
3481 		u_int32_t slot;
3482 	} pciinf;
3483 	int error;
3484 
3485 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3486 
3487 	pciinf.bus = pci_get_bus(sc->aac_dev);
3488 	pciinf.slot = pci_get_slot(sc->aac_dev);
3489 
3490 	error = copyout((caddr_t)&pciinf, uptr,
3491 			sizeof(struct aac_pci_info));
3492 
3493 	return (error);
3494 }
3495 
3496 static int
3497 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3498 {
3499 	struct aac_features f;
3500 	int error;
3501 
3502 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3503 
3504 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3505 		return (error);
3506 
3507 	/*
3508 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3509 	 * ALL zeroes in the featuresState, the driver will return the current
3510 	 * state of all supported features; the data field will not be
3511 	 * valid.
3512 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3513 	 * a specific bit set in the featuresState, the driver will return the
3514 	 * current state of that specific feature and any data associated
3515 	 * with the feature in the data field, or perform whatever action
3516 	 * the data field indicates.
3517 	 */
3518 	if (f.feat.fValue == 0) {
3519 		f.feat.fBits.largeLBA =
3520 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3521 		f.feat.fBits.JBODSupport = 1;
3522 		/* TODO: In the future, add other features state here as well */
3523 	} else {
3524 		if (f.feat.fBits.largeLBA)
3525 			f.feat.fBits.largeLBA =
3526 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3527 		/* TODO: Add other features state and data in the future */
3528 	}
3529 
3530 	error = copyout(&f, uptr, sizeof (f));
3531 	return (error);
3532 }
3533 
3534 /*
3535  * Give the userland some information about the container.  The AAC arch
3536  * expects the driver to be a SCSI passthrough type driver, so it expects
3537  * the containers to have b:t:l numbers.  Fake it.
3538  */
3539 static int
3540 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3541 {
3542 	struct aac_query_disk query_disk;
3543 	struct aac_container *co;
3544 	int error, id;
3545 
3546 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3547 
3548 	mtx_lock(&sc->aac_io_lock);
3549 	error = copyin(uptr, (caddr_t)&query_disk,
3550 		       sizeof(struct aac_query_disk));
3551 	if (error) {
3552 		mtx_unlock(&sc->aac_io_lock);
3553 		return (error);
3554 	}
3555 
3556 	id = query_disk.ContainerNumber;
3557 	if (id == -1) {
3558 		mtx_unlock(&sc->aac_io_lock);
3559 		return (EINVAL);
3560 	}
3561 
3562 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3563 		if (co->co_mntobj.ObjectId == id)
3564 			break;
3565 	}
3566 
3567 	if (co == NULL) {
3568 		query_disk.Valid = 0;
3569 		query_disk.Locked = 0;
3570 		query_disk.Deleted = 1;		/* XXX is this right? */
3571 	} else {
3572 		query_disk.Valid = 1;
3573 		query_disk.Locked = 1;
3574 		query_disk.Deleted = 0;
3575 		query_disk.Bus = device_get_unit(sc->aac_dev);
3576 		query_disk.Target = 0;
3577 		query_disk.Lun = 0;
3578 		query_disk.UnMapped = 0;
3579 	}
3580 
3581 	error = copyout((caddr_t)&query_disk, uptr,
3582 			sizeof(struct aac_query_disk));
3583 
3584 	mtx_unlock(&sc->aac_io_lock);
3585 	return (error);
3586 }
3587 
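/*
 * Create the "aacraidp" child device (SIM) for the container bus, i.e. the
 * virtual bus on which the RAID containers are exposed to CAM.
 */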
3588 static void
3589 aac_container_bus(struct aac_softc *sc)
3590 {
3591 	struct aac_sim *sim;
3592 	device_t child;
3593 
3594 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3595 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3596 	if (sim == NULL) {
3597 		device_printf(sc->aac_dev,
3598 		    "No memory to add container bus\n");
3599 		panic("Out of memory?!");
3600 	}
3601 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3602 	if (child == NULL) {
3603 		device_printf(sc->aac_dev,
3604 		    "device_add_child failed for container bus\n");
3605 		free(sim, M_AACRAIDBUF);
3606 		panic("Out of memory?!");
3607 	}
3608 
3609 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3610 	sim->BusNumber = 0;
3611 	sim->BusType = CONTAINER_BUS;
3612 	sim->InitiatorBusId = -1;
3613 	sim->aac_sc = sc;
3614 	sim->sim_dev = child;
3615 	sim->aac_cam = NULL;
3616 
3617 	device_set_ivars(child, sim);
3618 	device_set_desc(child, "Container Bus");
3619 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3620 	/*
3621 	device_set_desc(child, aac_describe_code(aac_container_types,
3622 			mir->MntTable[0].VolType));
3623 	*/
3624 	bus_generic_attach(sc->aac_dev);
3625 }
3626 
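/*
 * Query the adapter for its physical buses and create an "aacraidp" child
 * device (SIM) for each valid SCSI passthrough bus.
 */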
3627 static void
3628 aac_get_bus_info(struct aac_softc *sc)
3629 {
3630 	struct aac_fib *fib;
3631 	struct aac_ctcfg *c_cmd;
3632 	struct aac_ctcfg_resp *c_resp;
3633 	struct aac_vmioctl *vmi;
3634 	struct aac_vmi_businf_resp *vmi_resp;
3635 	struct aac_getbusinf businfo;
3636 	struct aac_sim *caminf;
3637 	device_t child;
3638 	int i, error;
3639 
3640 	mtx_lock(&sc->aac_io_lock);
3641 	aac_alloc_sync_fib(sc, &fib);
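	/* First ask the firmware for its SCSI method ID (CT_GET_SCSI_METHOD). */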
3642 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3643 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3644 
3645 	c_cmd->Command = VM_ContainerConfig;
3646 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3647 	c_cmd->param = 0;
3648 
3649 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3650 	    sizeof(struct aac_ctcfg));
3651 	if (error) {
3652 		device_printf(sc->aac_dev, "Error %d sending "
3653 		    "VM_ContainerConfig command\n", error);
3654 		aac_release_sync_fib(sc);
3655 		mtx_unlock(&sc->aac_io_lock);
3656 		return;
3657 	}
3658 
3659 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3660 	if (c_resp->Status != ST_OK) {
3661 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3662 		    c_resp->Status);
3663 		aac_release_sync_fib(sc);
3664 		mtx_unlock(&sc->aac_io_lock);
3665 		return;
3666 	}
3667 
3668 	sc->scsi_method_id = c_resp->param;
3669 
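	/* Use the method ID to fetch bus information (VM_Ioctl/GetBusInfo). */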
3670 	vmi = (struct aac_vmioctl *)&fib->data[0];
3671 	bzero(vmi, sizeof(struct aac_vmioctl));
3672 
3673 	vmi->Command = VM_Ioctl;
3674 	vmi->ObjType = FT_DRIVE;
3675 	vmi->MethId = sc->scsi_method_id;
3676 	vmi->ObjId = 0;
3677 	vmi->IoctlCmd = GetBusInfo;
3678 
3679 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3680 	    sizeof(struct aac_vmi_businf_resp));
3681 	if (error) {
3682 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3683 		    error);
3684 		aac_release_sync_fib(sc);
3685 		mtx_unlock(&sc->aac_io_lock);
3686 		return;
3687 	}
3688 
3689 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3690 	if (vmi_resp->Status != ST_OK) {
3691 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3692 		    vmi_resp->Status);
3693 		aac_release_sync_fib(sc);
3694 		mtx_unlock(&sc->aac_io_lock);
3695 		return;
3696 	}
3697 
3698 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3699 	aac_release_sync_fib(sc);
3700 	mtx_unlock(&sc->aac_io_lock);
3701 
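	/* Register a passthrough SIM for each bus the firmware reports as valid. */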
3702 	for (i = 0; i < businfo.BusCount; i++) {
3703 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3704 			continue;
3705 
3706 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3707 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3708 		if (caminf == NULL) {
3709 			device_printf(sc->aac_dev,
3710 			    "No memory to add passthrough bus %d\n", i);
3711 			break;
3712 		}
3713 
3714 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3715 		if (child == NULL) {
3716 			device_printf(sc->aac_dev,
3717 			    "device_add_child failed for passthrough bus %d\n",
3718 			    i);
3719 			free(caminf, M_AACRAIDBUF);
3720 			break;
3721 		}
3722 
3723 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3724 		caminf->BusNumber = i + 1;
3725 		caminf->BusType = PASSTHROUGH_BUS;
3726 		caminf->InitiatorBusId = -1;
3727 		caminf->aac_sc = sc;
3728 		caminf->sim_dev = child;
3729 		caminf->aac_cam = NULL;
3730 
3731 		device_set_ivars(child, caminf);
3732 		device_set_desc(child, "SCSI Passthrough Bus");
3733 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3734 	}
3735 }
3736 
3737 /*
3738  * Check to see if the kernel is up and running. If we are in a
3739  * BlinkLED state, return the BlinkLED code.
3740  */
3741 static u_int32_t
3742 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3743 {
3744 	u_int32_t ret;
3745 
3746 	ret = AAC_GET_FWSTATUS(sc);
3747 
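	/*
	 * If the adapter kernel has panicked (BlinkLED state), the BlinkLED
	 * code is reported in bits 23:16 of the firmware status.
	 */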
3748 	if (ret & AAC_UP_AND_RUNNING)
3749 		ret = 0;
3750 	else if (ret & AAC_KERNEL_PANIC && bled)
3751 		*bled = (ret >> 16) & 0xff;
3752 
3753 	return (ret);
3754 }
3755 
3756 /*
3757  * After an IOP reset the card basically has to be re-initialized as if
3758  * it were coming up from a cold boot, and the driver is responsible for
3759  * any I/O that was outstanding to the adapter at the time of the IOP
3760  * reset.  The init code is kept modular so that it can be called from
3761  * multiple places, which prepares the driver for an IOP reset.
3762  */
3763 static int
3764 aac_reset_adapter(struct aac_softc *sc)
3765 {
3766 	struct aac_command *cm;
3767 	struct aac_fib *fib;
3768 	struct aac_pause_command *pc;
3769 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3770 	int ret, msi_enabled_orig;
3771 
3772 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3773 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3774 
3775 	if (sc->aac_state & AAC_STATE_RESET) {
3776 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3777 		return (EINVAL);
3778 	}
3779 	sc->aac_state |= AAC_STATE_RESET;
3780 
3781 	/* disable interrupt */
3782 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3783 
3784 	/*
3785 	 * Abort all pending commands:
3786 	 * a) on the controller
3787 	 */
3788 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3789 		cm->cm_flags |= AAC_CMD_RESET;
3790 
3791 		/* is there a completion handler? */
3792 		if (cm->cm_complete != NULL) {
3793 			cm->cm_complete(cm);
3794 		} else {
3795 			/* assume that someone is sleeping on this
3796 			 * command
3797 			 */
3798 			wakeup(cm);
3799 		}
3800 	}
3801 
3802 	/* b) in the waiting queues */
3803 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3804 		cm->cm_flags |= AAC_CMD_RESET;
3805 
3806 		/* is there a completion handler? */
3807 		if (cm->cm_complete != NULL) {
3808 			cm->cm_complete(cm);
3809 		} else {
3810 			/* assume that someone is sleeping on this
3811 			 * command
3812 			 */
3813 			wakeup(cm);
3814 		}
3815 	}
3816 
3817 	/* flush drives */
3818 	if (aac_check_adapter_health(sc, NULL) == 0) {
3819 		mtx_unlock(&sc->aac_io_lock);
3820 		(void) aacraid_shutdown(sc->aac_dev);
3821 		mtx_lock(&sc->aac_io_lock);
3822 	}
3823 
3824 	/* execute IOP reset */
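	/*
	 * Newer firmware supports a MU (cores) reset through the IRCSR
	 * register; otherwise fall back to the IOP_RESET_ALWAYS/IOP_RESET
	 * synchronous commands and, if supported, the doorbell reset.
	 */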
3825 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3826 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3827 
3828 		/* We need to wait for 5 seconds before accessing the MU again
3829 		 * (5 * 10000 iterations of a 100us delay = 5,000,000us = 5s)
3830 		 */
3831 		waitCount = 5 * 10000;
3832 		while (waitCount) {
3833 			DELAY(100);			/* delay 100 microseconds */
3834 			waitCount--;
3835 		}
3836 	} else {
3837 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3838 			0, 0, 0, 0, &status, &reset_mask);
3839 		if (ret && !sc->doorbell_mask) {
3840 			/* call IOP_RESET for older firmware */
3841 			if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0,
3842 			    &status, NULL)) != 0) {
3843 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3844 					device_printf(sc->aac_dev,
3845 					    "IOP_RESET not supported\n");
3846 				} else {
3847 					/* probably timeout */
3848 					device_printf(sc->aac_dev,
3849 					    "IOP_RESET failed\n");
3850 				}
3851 
3852 				/* unwind aac_shutdown() */
3853 				aac_alloc_sync_fib(sc, &fib);
3854 				pc = (struct aac_pause_command *)&fib->data[0];
3855 				pc->Command = VM_ContainerConfig;
3856 				pc->Type = CT_PAUSE_IO;
3857 				pc->Timeout = 1;
3858 				pc->Min = 1;
3859 				pc->NoRescan = 1;
3860 
3861 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3862 				    fib, sizeof (struct aac_pause_command));
3863 				aac_release_sync_fib(sc);
3864 
3865 				goto finish;
3866 			}
3867 		} else if (sc->doorbell_mask) {
3868 			ret = 0;
3869 			reset_mask = sc->doorbell_mask;
3870 		}
3871 		if (!ret &&
3872 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3873 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3874 			/*
3875 			 * We need to wait for 5 seconds before accessing the
3876 			 * doorbell again: 5 * 10000 iterations of a 100us
3877 			 * delay = 5,000,000us = 5s
3878 			 */
3879 			waitCount = 5 * 10000;
3880 			while (waitCount) {
3881 				DELAY(100);	/* delay 100 microseconds */
3882 				waitCount--;
3883 			}
3884 		}
3885 	}
3886 
3887 	/*
3888 	 * Initialize the adapter.
3889 	 */
3890 	max_msix_orig = sc->aac_max_msix;
3891 	msi_enabled_orig = sc->msi_enabled;
3892 	sc->msi_enabled = FALSE;
3893 	if (aac_check_firmware(sc) != 0)
3894 		goto finish;
3895 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3896 		sc->aac_max_msix = max_msix_orig;
3897 		if (msi_enabled_orig) {
3898 			sc->msi_enabled = msi_enabled_orig;
3899 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3900 		}
3901 		mtx_unlock(&sc->aac_io_lock);
3902 		aac_init(sc);
3903 		mtx_lock(&sc->aac_io_lock);
3904 	}
3905 
3906 finish:
3907 	sc->aac_state &= ~AAC_STATE_RESET;
3908 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3909 	aacraid_startio(sc);
3910 	return (0);
3911 }
3912