xref: /illumos-gate/usr/src/uts/common/io/aac/aac.c (revision 22f5594a529d50114d839d4ddecc2c499731a3d7)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-06 Adaptec, Inc.
8  * Copyright (c) 2005-06 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 #include <sys/modctl.h>
38 #include <sys/conf.h>
39 #include <sys/cmn_err.h>
40 #include <sys/ddi.h>
41 #include <sys/devops.h>
42 #include <sys/pci.h>
43 #include <sys/types.h>
44 #include <sys/ddidmareq.h>
45 #include <sys/scsi/scsi.h>
46 #include <sys/ksynch.h>
47 #include <sys/sunddi.h>
48 #include <sys/byteorder.h>
49 #include "aac_regs.h"
50 #include "aac.h"
51 
52 /*
53  * FMA header files
54  */
55 #include <sys/ddifm.h>
56 #include <sys/fm/protocol.h>
57 #include <sys/fm/util.h>
58 #include <sys/fm/io/ddi.h>
59 
60 /*
61  * For minor nodes created by the SCSA framework, minor numbers are
62  * formed by left-shifting the instance number by INST_MINOR_SHIFT and
63  * ORing in a number less than 64.
64  *
65  * To support cfgadm, we need to conform to the SCSA framework by creating
66  * devctl/scsi and driver-specific minor nodes in the SCSA format, and
67  * calling the scsi_hba_xxx() functions accordingly.
68  */
69 
70 #define	AAC_MINOR		32
71 #define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
72 #define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
73 #define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
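/*
 * Worked example of the minor-number layout (a sketch; it assumes
 * INST_MINOR_SHIFT is 6, consistent with the "number less than 64" rule
 * above, and is not taken from this file):
 *
 *	INST2AAC(2) = (2 << 6) | AAC_MINOR = 0xa0
 *
 * i.e. the upper bits of a minor number select the instance and the low
 * bits select the per-instance node (devctl, scsi or the aac ioctl node).
 */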
74 
75 #define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
76 #define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
77 #define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
78 #define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
79 #define	AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
80 		if (!(cond)) { \
81 			int count = (timeout) * 10; \
82 			while (count) { \
83 				drv_usecwait(100); \
84 				if (cond) \
85 					break; \
86 				count--; \
87 			} \
88 			(timeout) = (count + 9) / 10; \
89 		} \
90 	}
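/*
 * Usage sketch for AAC_BUSYWAIT (hypothetical caller; "ready_cond" stands
 * for whatever condition the caller needs to poll):
 *
 *	int timeout = 1000;	(milliseconds)
 *	AAC_BUSYWAIT(ready_cond, timeout);
 *	if (timeout == 0)
 *		... the condition never became true within ~1 second ...
 *
 * On return timeout holds roughly the milliseconds remaining, since the
 * macro polls in 100us steps and rounds the leftover count back up.
 */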
91 
92 #define	AAC_SENSE_DATA_DESCR_LEN \
93 	(sizeof (struct scsi_descr_sense_hdr) + \
94 	sizeof (struct scsi_information_sense_descr))
95 #define	AAC_ARQ64_LENGTH \
96 	(sizeof (struct scsi_arq_status) + \
97 	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
98 
99 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
100 #define	AAC_GETGXADDR(cmdlen, cdbp) \
101 	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
102 	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
103 	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
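/*
 * Sketch of how AAC_GETGXADDR() assembles the LBA: a 6-byte (group 0) CDB
 * yields a 21-bit address via GETG0ADDR(), a 10-byte (group 1) CDB a
 * 32-bit address via GETG1ADDR(), and a 16-byte (group 4) CDB a full
 * 64-bit address, e.g. LBA 0x0000000100000000 comes back as
 * GETG4ADDR() == 0x1 (high half) and GETG4ADDRTL() == 0x0 (low half).
 * The uint32_t casts avoid sign extension of the int32_t GETG4ADDRTL()
 * value noted above.
 */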
104 
105 #define	AAC_CDB_INQUIRY_CMDDT	0x02
106 #define	AAC_CDB_INQUIRY_EVPD	0x01
107 #define	AAC_VPD_PAGE_CODE	1
108 #define	AAC_VPD_PAGE_LENGTH	3
109 #define	AAC_VPD_PAGE_DATA	4
110 #define	AAC_VPD_ID_CODESET	0
111 #define	AAC_VPD_ID_TYPE		1
112 #define	AAC_VPD_ID_LENGTH	3
113 #define	AAC_VPD_ID_DATA		4
114 
115 /* Return the size of a FIB whose data part has type data_type */
116 #define	AAC_FIB_SIZEOF(data_type) \
117 	(sizeof (struct aac_fib_header) + sizeof (data_type))
118 /* Return the container size defined in mir */
119 #define	AAC_MIR_SIZE(softs, acc, mir) \
120 	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
121 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
122 	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
123 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
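/*
 * Example (sketch): with AAC_FLAGS_LBA_64BIT set, Capacity == 0 and
 * CapacityHigh == 1 give AAC_MIR_SIZE() == 0x100000000 blocks; without
 * the flag only the 32-bit Capacity field is used.
 */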
124 
125 /* The last entry of aac_cards[] is for unknown cards */
126 #define	AAC_UNKNOWN_CARD \
127 	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
128 #define	CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
129 #define	BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
130 #define	AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
131 #define	AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))
132 
133 #define	PCI_MEM_GET32(softs, off) \
134 	ddi_get32((softs)->pci_mem_handle, \
135 	    (uint32_t *)((softs)->pci_mem_base_vaddr + (off)))
136 #define	PCI_MEM_PUT32(softs, off, val) \
137 	ddi_put32((softs)->pci_mem_handle, \
138 	    (uint32_t *)((softs)->pci_mem_base_vaddr + (off)), \
139 	    (uint32_t)(val))
140 #define	PCI_MEM_GET16(softs, off) \
141 	ddi_get16((softs)->pci_mem_handle, \
142 	(uint16_t *)((softs)->pci_mem_base_vaddr + (off)))
143 #define	PCI_MEM_PUT16(softs, off, val) \
144 	ddi_put16((softs)->pci_mem_handle, \
145 	(uint16_t *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
146 /* Write count bytes of host data from valp to device mem[off] */
147 #define	PCI_MEM_REP_PUT8(softs, off, valp, count) \
148 	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
149 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
150 	    count, DDI_DEV_AUTOINCR)
151 /* Read count bytes of device data from mem[off] into host buffer valp */
152 #define	PCI_MEM_REP_GET8(softs, off, valp, count) \
153 	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
154 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
155 	    count, DDI_DEV_AUTOINCR)
156 #define	AAC_GET_FIELD8(acc, d, s, field) \
157 	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
158 #define	AAC_GET_FIELD32(acc, d, s, field) \
159 	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
160 #define	AAC_GET_FIELD64(acc, d, s, field) \
161 	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
162 #define	AAC_REP_GET_FIELD8(acc, d, s, field, r) \
163 	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
164 	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
165 #define	AAC_REP_GET_FIELD32(acc, d, s, field, r) \
166 	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
167 	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
168 
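/*
 * OIMR is the outbound interrupt mask register: writing the complement of
 * the doorbell interrupt bits unmasks those interrupts, while writing ~0
 * (as AAC_DISABLE_INTR does) masks them all.
 */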
169 #define	AAC_ENABLE_INTR(softs) { \
170 		if (softs->flags & AAC_FLAGS_NEW_COMM) \
171 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
172 		else \
173 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
174 	}
175 
176 #define	AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
177 #define	AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
178 #define	AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
179 #define	AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
180 #define	AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
181 #define	AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
182 #define	AAC_FWSTATUS_GET(softs)	\
183 	((softs)->aac_if.aif_get_fwstatus(softs))
184 #define	AAC_MAILBOX_GET(softs, mb) \
185 	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
186 #define	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
187 	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
188 	    (arg0), (arg1), (arg2), (arg3)))
189 
190 #define	AAC_THROTTLE_DRAIN	-1
191 
192 #define	AAC_QUIESCE_TICK	1	/* 1 second */
193 #define	AAC_QUIESCE_TIMEOUT	60	/* 60 seconds */
194 #define	AAC_DEFAULT_TICK	10	/* 10 seconds */
195 #define	AAC_SYNC_TICK		(30*60)	/* 30 minutes */
196 
197 /* Poll time for aac_do_poll_io() */
198 #define	AAC_POLL_TIME		60	/* 60 seconds */
199 
200 /*
201  * Hardware access functions
202  */
203 static int aac_rx_get_fwstatus(struct aac_softstate *);
204 static int aac_rx_get_mailbox(struct aac_softstate *, int);
205 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
206     uint32_t, uint32_t, uint32_t);
207 static int aac_rkt_get_fwstatus(struct aac_softstate *);
208 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
209 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
210     uint32_t, uint32_t, uint32_t);
211 
212 /*
213  * SCSA function prototypes
214  */
215 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
216 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
217 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
218 
219 /*
220  * Interrupt handler functions
221  */
222 static uint_t aac_intr_old(caddr_t);
223 static uint_t aac_intr_new(caddr_t);
224 static uint_t aac_softintr(caddr_t);
225 
226 /*
227  * Internal functions in attach
228  */
229 static int aac_check_card_type(struct aac_softstate *);
230 static int aac_check_firmware(struct aac_softstate *);
231 static int aac_common_attach(struct aac_softstate *);
232 static void aac_common_detach(struct aac_softstate *);
233 static int aac_probe_containers(struct aac_softstate *);
234 static int aac_alloc_comm_space(struct aac_softstate *);
235 static int aac_setup_comm_space(struct aac_softstate *);
236 static void aac_free_comm_space(struct aac_softstate *);
237 static int aac_hba_setup(struct aac_softstate *);
238 
239 /*
240  * Sync FIB operation functions
241  */
242 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
243     uint32_t, uint32_t, uint32_t, uint32_t *);
244 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
245 
246 /*
247  * Command queue operation functions
248  */
249 static void aac_cmd_initq(struct aac_cmd_queue *);
250 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
251 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
252 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
253 
254 /*
255  * FIB queue operation functions
256  */
257 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
258 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
259 
260 /*
261  * Slot operation functions
262  */
263 static int aac_create_slots(struct aac_softstate *);
264 static void aac_destroy_slots(struct aac_softstate *);
265 static void aac_alloc_fibs(struct aac_softstate *);
266 static void aac_destroy_fibs(struct aac_softstate *);
267 static struct aac_slot *aac_get_slot(struct aac_softstate *);
268 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
269 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
270 static void aac_free_fib(struct aac_slot *);
271 
272 /*
273  * Internal functions
274  */
275 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
276     uint16_t, uint16_t);
277 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
278 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
279 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
280 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
281 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
282 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
283 static void aac_start_waiting_io(struct aac_softstate *);
284 static void aac_drain_comp_q(struct aac_softstate *);
285 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
286 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
287 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
288 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
289 static void aac_cmd_timeout(struct aac_softstate *);
290 static int aac_dma_sync_ac(struct aac_cmd *);
291 static int aac_shutdown(struct aac_softstate *);
292 static int aac_reset_adapter(struct aac_softstate *);
293 static int aac_do_quiesce(struct aac_softstate *softs);
294 static int aac_do_unquiesce(struct aac_softstate *softs);
295 static void aac_unhold_bus(struct aac_softstate *, int);
296 static void aac_set_throttle(struct aac_softstate *, struct aac_container *,
297     int, int);
298 
299 /*
300  * Adapter Initiated FIB handling function
301  */
302 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);
303 
304 /*
305  * Timeout handling thread function
306  */
307 static void aac_daemon(void *);
308 
309 /*
310  * IOCTL interface related functions
311  */
312 static int aac_open(dev_t *, int, int, cred_t *);
313 static int aac_close(dev_t, int, int, cred_t *);
314 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
315 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
316 
317 /*
318  * FMA Prototypes
319  */
320 static void aac_fm_init(struct aac_softstate *);
321 static void aac_fm_fini(struct aac_softstate *);
322 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
323 int aac_check_acc_handle(ddi_acc_handle_t);
324 int aac_check_dma_handle(ddi_dma_handle_t);
325 void aac_fm_ereport(struct aac_softstate *, char *);
326 
327 #ifdef DEBUG
328 /*
329  * UART	debug output support
330  */
331 
332 #define	AAC_PRINT_BUFFER_SIZE		512
333 #define	AAC_PRINT_TIMEOUT		250	/* 1/4 sec. = 250 msec. */
334 
335 #define	AAC_FW_DBG_STRLEN_OFFSET	0x00
336 #define	AAC_FW_DBG_FLAGS_OFFSET		0x04
337 #define	AAC_FW_DBG_BLED_OFFSET		0x08
338 
339 static int aac_get_fw_debug_buffer(struct aac_softstate *);
340 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
341 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
342 
343 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
344 static char aac_fmt[] = " %s";
345 static char aac_fmt_header[] = " %s.%d: %s";
346 static kmutex_t aac_prt_mutex;
347 
348 /*
349  * Debug flags to be put into the softstate flags field
350  * when initialized
351  */
352 uint32_t aac_debug_flags =
353 /*    AACDB_FLAGS_KERNEL_PRINT | */
354 /*    AACDB_FLAGS_FW_PRINT |	*/
355 /*    AACDB_FLAGS_MISC |	*/
356 /*    AACDB_FLAGS_FUNC1 |	*/
357 /*    AACDB_FLAGS_FUNC2 |	*/
358 /*    AACDB_FLAGS_SCMD |	*/
359 /*    AACDB_FLAGS_AIF |		*/
360 /*    AACDB_FLAGS_FIB |		*/
361 /*    AACDB_FLAGS_IOCTL |	*/
362 0;
363 
364 #endif /* DEBUG */
365 
366 static struct cb_ops aac_cb_ops = {
367 	aac_open,	/* open */
368 	aac_close,	/* close */
369 	nodev,		/* strategy */
370 	nodev,		/* print */
371 	nodev,		/* dump */
372 	nodev,		/* read */
373 	nodev,		/* write */
374 	aac_ioctl,	/* ioctl */
375 	nodev,		/* devmap */
376 	nodev,		/* mmap */
377 	nodev,		/* segmap */
378 	nochpoll,	/* poll */
379 	ddi_prop_op,	/* cb_prop_op */
380 	NULL,		/* streamtab */
381 	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
382 	CB_REV,		/* cb_rev */
383 	nodev,		/* async I/O read entry point */
384 	nodev		/* async I/O write entry point */
385 };
386 
387 static struct dev_ops aac_dev_ops = {
388 	DEVO_REV,
389 	0,
390 	nodev,
391 	nulldev,
392 	nulldev,
393 	aac_attach,
394 	aac_detach,
395 	aac_reset,
396 	&aac_cb_ops,
397 	NULL,
398 	NULL
399 };
400 
401 static struct modldrv aac_modldrv = {
402 	&mod_driverops,
403 	"AAC Driver " AAC_DRIVER_VERSION,
404 	&aac_dev_ops,
405 };
406 
407 static struct modlinkage aac_modlinkage = {
408 	MODREV_1,
409 	&aac_modldrv,
410 	NULL
411 };
412 
413 static struct aac_softstate  *aac_softstatep;
414 
415 /*
416  * Supported card list
417  * ordered by vendor id, subvendor id, subdevice id, and device id
418  */
419 static struct aac_card_type aac_cards[] = {
420 	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
421 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
422 	    "Dell", "PERC 3/Di"},
423 	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
424 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
425 	    "Dell", "PERC 3/Di"},
426 	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
427 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
428 	    "Dell", "PERC 3/Si"},
429 	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
430 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
431 	    "Dell", "PERC 3/Di"},
432 	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
433 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
434 	    "Dell", "PERC 3/Si"},
435 	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
436 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
437 	    "Dell", "PERC 3/Di"},
438 	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
439 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
440 	    "Dell", "PERC 3/Di"},
441 	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
442 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
443 	    "Dell", "PERC 3/Di"},
444 	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
445 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
446 	    "Dell", "PERC 3/Di"},
447 	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
448 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
449 	    "Dell", "PERC 3/Di"},
450 	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
451 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
452 	    "Dell", "PERC 320/DC"},
453 	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
454 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
455 
456 	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
457 	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
458 	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
459 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
460 	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
461 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
462 
463 	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
464 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
465 	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
466 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
467 
468 	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
469 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
470 	    "Adaptec", "2200S"},
471 	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
472 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
473 	    "Adaptec", "2120S"},
474 	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
475 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
476 	    "Adaptec", "2200S"},
477 	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
478 	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
479 	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
480 	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
481 	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
482 	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
483 	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
484 	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
485 	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
486 	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
487 	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
488 	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
489 	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
490 	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
491 	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
492 	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
493 	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
494 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
495 	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
496 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
497 	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
498 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
499 	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
500 	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
501 	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
502 	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
503 	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
504 	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
505 	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
506 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
507 	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
508 	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
509 	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
510 	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
511 	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
512 	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
513 	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
514 	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
515 	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
516 	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
517 	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
518 	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
519 	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
520 	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
521 	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
522 	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
523 	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
524 	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
525 	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
526 	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
527 	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
528 	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
529 	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
530 	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
531 	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
532 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
533 	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
534 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
535 	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
536 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
537 	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
538 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
539 	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
540 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
541 	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
542 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
543 
544 	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
545 	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
546 };
547 
548 /*
549  * Hardware access functions for i960 based cards
550  */
551 static struct aac_interface aac_rx_interface = {
552 	aac_rx_get_fwstatus,
553 	aac_rx_get_mailbox,
554 	aac_rx_set_mailbox
555 };
556 
557 /*
558  * Hardware access functions for Rocket based cards
559  */
560 static struct aac_interface aac_rkt_interface = {
561 	aac_rkt_get_fwstatus,
562 	aac_rkt_get_mailbox,
563 	aac_rkt_set_mailbox
564 };
565 
566 ddi_device_acc_attr_t aac_acc_attr = {
567 	DDI_DEVICE_ATTR_V0,
568 	DDI_STRUCTURE_LE_ACC,
569 	DDI_STRICTORDER_ACC,
570 	DDI_FLAGERR_ACC
571 };
572 
573 static struct {
574 	int	size;
575 	int	notify;
576 } aac_qinfo[] = {
577 	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
578 	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
579 	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
580 	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
581 	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
582 	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
583 	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
584 	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
585 };
586 
587 /*
588  * Default aac dma attributes
589  */
590 static ddi_dma_attr_t aac_dma_attr = {
591 	DMA_ATTR_V0,
592 	0,		/* lowest usable address */
593 	0xffffffffull,	/* high DMA address range */
594 	0xffffffffull,	/* DMA counter register */
595 	AAC_DMA_ALIGN,	/* DMA address alignment */
596 	1,		/* DMA burstsizes */
597 	1,		/* min effective DMA size */
598 	0xffffffffull,	/* max DMA xfer size */
599 	0xffffffffull,	/* segment boundary */
600 	1,		/* s/g list length */
601 	AAC_BLK_SIZE,	/* granularity of device */
602 	DDI_DMA_FLAGERR	/* DMA transfer flags */
603 };
604 
605 static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
606 static uint32_t aac_timebase = 0;	/* internal timer in seconds */
607 static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */
608 
609 /*
610  * Warlock directives
611  *
612  * Different variables of the same type have to be protected by the
613  * same mutex; otherwise, warlock will complain that "variables don't
614  * seem to be protected consistently". For example,
615  * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue and are
616  * protected by aac_softstate::{io_lock, q_comp_mutex} respectively. We
617  * have to declare them as protected explicitly in aac_cmd_dequeue().
618  */
619 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
620     scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
621     mode_format mode_geometry mode_header aac_cmd))
622 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
623     aac_sge))
624 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
625     aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
626     aac_sg_table aac_srb))
627 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
628 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
629 
630 int
631 _init(void)
632 {
633 	int rval = 0;
634 
635 #ifdef DEBUG
636 	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
637 #endif
638 	DBCALLED(NULL, 1);
639 
640 	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
641 	    sizeof (struct aac_softstate), 0)) != 0)
642 		goto error;
643 
644 	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
645 		ddi_soft_state_fini((void *)&aac_softstatep);
646 		goto error;
647 	}
648 
649 	if ((rval = mod_install(&aac_modlinkage)) != 0) {
650 		ddi_soft_state_fini((void *)&aac_softstatep);
651 		scsi_hba_fini(&aac_modlinkage);
652 		goto error;
653 	}
654 	return (rval);
655 
656 error:
657 	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
658 #ifdef DEBUG
659 	mutex_destroy(&aac_prt_mutex);
660 #endif
661 	return (rval);
662 }
663 
664 int
665 _info(struct modinfo *modinfop)
666 {
667 	DBCALLED(NULL, 1);
668 	return (mod_info(&aac_modlinkage, modinfop));
669 }
670 
671 /*
672  * An HBA driver cannot be unloaded without a reboot,
673  * so this function is of little practical use.
674  */
675 int
676 _fini(void)
677 {
678 	int rval;
679 
680 	DBCALLED(NULL, 1);
681 
682 	if ((rval = mod_remove(&aac_modlinkage)) != 0)
683 		goto error;
684 
685 	scsi_hba_fini(&aac_modlinkage);
686 	ddi_soft_state_fini((void *)&aac_softstatep);
687 #ifdef DEBUG
688 	mutex_destroy(&aac_prt_mutex);
689 #endif
690 	return (0);
691 
692 error:
693 	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
694 	return (rval);
695 }
696 
697 static int
698 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
699 {
700 	int instance, i;
701 	struct aac_softstate *softs = NULL;
702 	int attach_state = 0;
703 
704 	DBCALLED(NULL, 1);
705 
706 	switch (cmd) {
707 	case DDI_ATTACH:
708 		break;
709 	case DDI_RESUME:
710 		return (DDI_FAILURE);
711 	default:
712 		return (DDI_FAILURE);
713 	}
714 
715 	instance = ddi_get_instance(dip);
716 
717 	/* Get soft state */
718 	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
719 		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
720 		goto error;
721 	}
722 	softs = ddi_get_soft_state(aac_softstatep, instance);
723 	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
724 
725 	softs->instance = instance;
726 	softs->devinfo_p = dip;
727 	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
728 	softs->addr_dma_attr.dma_attr_granular = 1;
729 	softs->card = AAC_UNKNOWN_CARD;
730 #ifdef DEBUG
731 	softs->debug_flags = aac_debug_flags;
732 #endif
733 
734 	/* Check the card type */
735 	if (aac_check_card_type(softs) == AACERR) {
736 		AACDB_PRINT(softs, CE_WARN, "Card not supported");
737 		goto error;
738 	}
739 	/* We have found the right card and everything is OK */
740 	attach_state |= AAC_ATTACH_CARD_DETECTED;
741 
742 	/*
743 	 * Initialize FMA
744 	 */
745 	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
746 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
747 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
748 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
749 
750 	aac_fm_init(softs);
751 
752 	/* Map PCI mem space */
753 	if (ddi_regs_map_setup(dip, 1,
754 	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
755 	    softs->map_size_min, &aac_acc_attr,
756 	    &softs->pci_mem_handle) != DDI_SUCCESS)
757 		goto error;
758 
759 	softs->map_size = softs->map_size_min;
760 	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
761 
762 	AAC_DISABLE_INTR(softs);
763 
764 	if (ddi_intr_hilevel(dip, 0)) {
765 		AACDB_PRINT(softs, CE_WARN,
766 		    "High level interrupt is not supported!");
767 		goto error;
768 	}
769 
770 	/* Init mutexes */
771 	if (ddi_get_iblock_cookie(dip, 0, &softs->iblock_cookie) !=
772 	    DDI_SUCCESS) {
773 		AACDB_PRINT(softs, CE_WARN,
774 		    "Can not get interrupt block cookie!");
775 		goto error;
776 	}
777 	mutex_init(&softs->q_comp_mutex, NULL,
778 	    MUTEX_DRIVER, (void *)softs->iblock_cookie);
779 	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
780 	mutex_init(&softs->aifq_mutex, NULL,
781 	    MUTEX_DRIVER, (void *)softs->iblock_cookie);
782 	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
783 	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
784 	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
785 	    (void *)softs->iblock_cookie);
786 	attach_state |= AAC_ATTACH_KMUTEX_INITED;
787 
788 	/*
789 	 * Everything needed so far has been set up;
790 	 * now do the common attach work.
791 	 */
792 	if (aac_common_attach(softs) == AACERR)
793 		goto error;
794 	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
795 
796 	/* Init the cmd queues */
797 	for (i = 0; i < AAC_CMDQ_NUM; i++)
798 		aac_cmd_initq(&softs->q_wait[i]);
799 	aac_cmd_initq(&softs->q_busy);
800 	aac_cmd_initq(&softs->q_comp);
801 
802 	if (aac_hba_setup(softs) != AACOK)
803 		goto error;
804 	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
805 
806 	/* Connect interrupt handlers */
807 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
808 	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
809 		AACDB_PRINT(softs, CE_WARN,
810 		    "Can not setup soft interrupt handler!");
811 		goto error;
812 	}
813 	attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;
814 
815 	if (ddi_add_intr(dip, 0, &softs->iblock_cookie,
816 	    (ddi_idevice_cookie_t *)0,
817 	    (softs->flags & AAC_FLAGS_NEW_COMM) ?
818 	    aac_intr_new : aac_intr_old, (caddr_t)softs) != DDI_SUCCESS) {
819 		AACDB_PRINT(softs, CE_WARN, "Can not setup interrupt handler!");
820 		goto error;
821 	}
822 	attach_state |= AAC_ATTACH_HARD_INTR_SETUP;
823 
824 	/* Create devctl/scsi nodes for cfgadm */
825 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
826 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
827 		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
828 		goto error;
829 	}
830 	attach_state |= AAC_ATTACH_CREATE_DEVCTL;
831 
832 	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
833 	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
834 		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
835 		goto error;
836 	}
837 	attach_state |= AAC_ATTACH_CREATE_SCSI;
838 
839 	/* Create aac node for app. to issue ioctls */
840 	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
841 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
842 		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
843 		goto error;
844 	}
845 
846 	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
847 	softs->state = AAC_STATE_RUN;
848 
849 	/* Create a thread for command timeout */
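	/*
	 * (60 * drv_usectohz(1000000)) clock ticks corresponds to 60 seconds,
	 * so aac_daemon first runs one minute after attach.
	 */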
850 	softs->timeout_id = timeout(aac_daemon, (void *)softs,
851 	    (60 * drv_usectohz(1000000)));
852 
853 	/* Common attach is OK, so we are attached! */
854 	AAC_ENABLE_INTR(softs);
855 	ddi_report_dev(dip);
856 	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
857 	return (DDI_SUCCESS);
858 
859 error:
860 	if (attach_state & AAC_ATTACH_CREATE_SCSI)
861 		ddi_remove_minor_node(dip, "scsi");
862 	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
863 		ddi_remove_minor_node(dip, "devctl");
864 	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
865 		aac_common_detach(softs);
866 	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
867 		(void) scsi_hba_detach(dip);
868 		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
869 	}
870 	if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
871 		ddi_remove_intr(dip, 0, softs->iblock_cookie);
872 	if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
873 		ddi_remove_softintr(softs->softint_id);
874 	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
875 		mutex_destroy(&softs->q_comp_mutex);
876 		cv_destroy(&softs->event);
877 		mutex_destroy(&softs->aifq_mutex);
878 		cv_destroy(&softs->aifv);
879 		cv_destroy(&softs->drain_cv);
880 		mutex_destroy(&softs->io_lock);
881 	}
882 	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
883 		ddi_regs_map_free(&softs->pci_mem_handle);
884 	aac_fm_fini(softs);
885 	if (attach_state & AAC_ATTACH_CARD_DETECTED)
886 		softs->card = AACERR;
887 	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
888 		ddi_soft_state_free(aac_softstatep, instance);
889 	return (DDI_FAILURE);
890 }
891 
892 static int
893 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
894 {
895 	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
896 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
897 
898 	DBCALLED(softs, 1);
899 
900 	switch (cmd) {
901 	case DDI_DETACH:
902 		break;
903 	case DDI_SUSPEND:
904 		return (DDI_FAILURE);
905 	default:
906 		return (DDI_FAILURE);
907 	}
908 
909 	mutex_enter(&softs->io_lock);
910 	AAC_DISABLE_INTR(softs);
911 	softs->state = AAC_STATE_STOPPED;
912 
913 	mutex_exit(&softs->io_lock);
914 	(void) untimeout(softs->timeout_id);
915 	mutex_enter(&softs->io_lock);
916 	softs->timeout_id = 0;
917 
918 	ddi_remove_minor_node(dip, "aac");
919 	ddi_remove_minor_node(dip, "scsi");
920 	ddi_remove_minor_node(dip, "devctl");
921 
922 	mutex_exit(&softs->io_lock);
923 	ddi_remove_intr(dip, 0, softs->iblock_cookie);
924 	ddi_remove_softintr(softs->softint_id);
925 
926 	aac_common_detach(softs);
927 
928 	(void) scsi_hba_detach(dip);
929 	scsi_hba_tran_free(tran);
930 
931 	mutex_destroy(&softs->q_comp_mutex);
932 	cv_destroy(&softs->event);
933 	mutex_destroy(&softs->aifq_mutex);
934 	cv_destroy(&softs->aifv);
935 	cv_destroy(&softs->drain_cv);
936 	mutex_destroy(&softs->io_lock);
937 
938 	ddi_regs_map_free(&softs->pci_mem_handle);
939 	aac_fm_fini(softs);
940 	softs->hwif = AAC_HWIF_UNKNOWN;
941 	softs->card = AAC_UNKNOWN_CARD;
942 	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
943 
944 	return (DDI_SUCCESS);
945 }
946 
947 /*ARGSUSED*/
948 static int
949 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
950 {
951 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
952 
953 	DBCALLED(softs, 1);
954 
955 	mutex_enter(&softs->io_lock);
956 	(void) aac_shutdown(softs);
957 	mutex_exit(&softs->io_lock);
958 
959 	return (DDI_SUCCESS);
960 }
961 
962 /*
963  * Bring the controller down to a dormant state and detach all child devices.
964  * This function is called before detach or system shutdown.
965  * Note: we can assume that the q_wait on the controller is empty, as we
966  * won't allow shutdown if any device is open.
967  */
968 static int
969 aac_shutdown(struct aac_softstate *softs)
970 {
971 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
972 	struct aac_close_command *cc = (struct aac_close_command *) \
973 	    &softs->sync_slot.fibp->data[0];
974 	int rval;
975 
976 	ddi_put32(acc, &cc->Command, VM_CloseAll);
977 	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
978 
979 	/* Flush all caches, set FW to write through mode */
980 	rval = aac_sync_fib(softs, ContainerCommand,
981 	    AAC_FIB_SIZEOF(struct aac_close_command));
982 
983 	AACDB_PRINT(softs, CE_NOTE,
984 	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
985 	return (rval);
986 }
987 
988 static uint_t
989 aac_softintr(caddr_t arg)
990 {
991 	struct aac_softstate *softs = (struct aac_softstate *)arg;
992 
993 	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
994 		aac_drain_comp_q(softs);
995 		return (DDI_INTR_CLAIMED);
996 	} else {
997 		return (DDI_INTR_UNCLAIMED);
998 	}
999 }
1000 
1001 /*
1002  * Setup auto sense data for pkt
1003  */
1004 static void
1005 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1006     uchar_t add_code, uchar_t qual_code, uint64_t info)
1007 {
1008 	struct scsi_arq_status *arqstat;
1009 
1010 	pkt->pkt_state |= STATE_GOT_STATUS | STATE_ARQ_DONE;
1011 
1012 	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
1013 	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
1014 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
1015 	arqstat->sts_rqpkt_resid = 0;
1016 	arqstat->sts_rqpkt_state =
1017 	    STATE_GOT_BUS |
1018 	    STATE_GOT_TARGET |
1019 	    STATE_SENT_CMD |
1020 	    STATE_XFERRED_DATA;
1021 	arqstat->sts_rqpkt_statistics = 0;
1022 
1023 	if (info <= 0xfffffffful) {
1024 		arqstat->sts_sensedata.es_valid = 1;
1025 		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1026 		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1027 		arqstat->sts_sensedata.es_key = key;
1028 		arqstat->sts_sensedata.es_add_code = add_code;
1029 		arqstat->sts_sensedata.es_qual_code = qual_code;
1030 
1031 		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1032 		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1033 		arqstat->sts_sensedata.es_info_3 = (info >>  8) & 0xFF;
1034 		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1035 	} else { /* 64-bit LBA */
1036 		struct scsi_descr_sense_hdr *dsp;
1037 		struct scsi_information_sense_descr *isd;
1038 
1039 		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1040 		dsp->ds_class = CLASS_EXTENDED_SENSE;
1041 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1042 		dsp->ds_key = key;
1043 		dsp->ds_add_code = add_code;
1044 		dsp->ds_qual_code = qual_code;
1045 		dsp->ds_addl_sense_length =
1046 		    sizeof (struct scsi_information_sense_descr);
1047 
1048 		isd = (struct scsi_information_sense_descr *)(dsp+1);
1049 		isd->isd_descr_type = DESCR_INFORMATION;
1050 		isd->isd_valid = 1;
1051 		isd->isd_information[0] = (info >> 56) & 0xFF;
1052 		isd->isd_information[1] = (info >> 48) & 0xFF;
1053 		isd->isd_information[2] = (info >> 40) & 0xFF;
1054 		isd->isd_information[3] = (info >> 32) & 0xFF;
1055 		isd->isd_information[4] = (info >> 24) & 0xFF;
1056 		isd->isd_information[5] = (info >> 16) & 0xFF;
1057 		isd->isd_information[6] = (info >>  8) & 0xFF;
1058 		isd->isd_information[7] = (info) & 0xFF;
1059 	}
1060 }
1061 
1062 /*
1063  * Setup auto sense data for HARDWARE ERROR
1064  */
1065 static void
1066 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1067 {
1068 	union scsi_cdb *cdbp;
1069 	uint64_t err_blkno;
1070 
1071 	cdbp = (union scsi_cdb *)acp->pkt->pkt_cdbp;
1072 	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1073 	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1074 }
1075 
1076 /*
1077  * Setup auto sense data for UNIT ATTENTION
1078  */
1079 /*ARGSUSED*/
1080 static void
1081 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
1082 {
1083 	struct aac_container *dvp = acp->dvp;
1084 
1085 	if (dvp->reset) {
1086 		dvp->reset = 0;
1087 		aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
1088 	}
1089 }
1090 
1091 /*
1092  * Send a command to the adapter via the New Comm. interface
1093  */
1094 static int
1095 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1096 {
1097 	uint32_t index, device;
1098 
1099 	index = PCI_MEM_GET32(softs, AAC_IQUE);
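	/*
	 * Fetch a free entry from the adapter's inbound queue; a returned
	 * value of 0xffffffff means no entry is available right now, in
	 * which case the read below is retried once before giving up.
	 */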
1100 	if (index == 0xffffffffUL) {
1101 		index = PCI_MEM_GET32(softs, AAC_IQUE);
1102 		if (index == 0xffffffffUL)
1103 			return (AACERR);
1104 	}
1105 
1106 	device = index;
1107 	PCI_MEM_PUT32(softs, device,
1108 	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1109 	device += 4;
1110 	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1111 	device += 4;
1112 	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1113 	PCI_MEM_PUT32(softs, AAC_IQUE, index);
1114 	return (AACOK);
1115 }
1116 
1117 static void
1118 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1119 {
1120 	struct aac_container *dvp = acp->dvp;
1121 	int q = AAC_CMDQ(acp);
1122 
1123 	if (acp->slotp) { /* outstanding cmd */
1124 		aac_release_slot(softs, acp->slotp);
1125 		acp->slotp = NULL;
1126 		if (dvp) {
1127 			dvp->ncmds[q]--;
1128 			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1129 			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1130 				aac_set_throttle(softs, dvp, q,
1131 				    softs->total_slots);
1132 		}
1133 		softs->bus_ncmds[q]--;
1134 		(void) aac_cmd_delete(&softs->q_busy, acp);
1135 	} else { /* cmd in waiting queue */
1136 		aac_cmd_delete(&softs->q_wait[q], acp);
1137 	}
1138 
1139 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1140 		mutex_enter(&softs->q_comp_mutex);
1141 		aac_cmd_enqueue(&softs->q_comp, acp);
1142 		mutex_exit(&softs->q_comp_mutex);
1143 	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1144 		cv_broadcast(&softs->event);
1145 	}
1146 }
1147 
1148 static void
1149 aac_handle_io(struct aac_softstate *softs, int index)
1150 {
1151 	struct aac_slot *slotp;
1152 	struct aac_cmd *acp;
1153 	uint32_t fast;
1154 
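	/*
	 * The completion value reported by the firmware packs flag bits
	 * (such as the fast-response flag) into its low two bits; the
	 * remaining bits carry the slot index, hence the mask and shift
	 * below.
	 */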
1155 	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1156 	index >>= 2;
1157 
1158 	/* Make sure firmware reported index is valid */
1159 	ASSERT(index >= 0 && index < softs->total_slots);
1160 	slotp = &softs->io_slot[index];
1161 	ASSERT(slotp->index == index);
1162 	acp = slotp->acp;
1163 	ASSERT(acp != NULL && acp->slotp == slotp);
1164 
1165 	acp->flags |= AAC_CMD_CMPLT;
1166 	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1167 
1168 	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1169 		/*
1170 		 * For fast response IO, the firmware does not return any FIB
1171 		 * data, so we need to fill in the FIB status and state so that
1172 		 * FIB users can handle it correctly.
1173 		 */
1174 		if (fast) {
1175 			uint32_t state;
1176 
1177 			state = ddi_get32(slotp->fib_acc_handle,
1178 			    &slotp->fibp->Header.XferState);
1179 			/*
1180 			 * Update state for CPU not for device, no DMA sync
1181 			 * needed
1182 			 */
1183 			ddi_put32(slotp->fib_acc_handle,
1184 			    &slotp->fibp->Header.XferState,
1185 			    state | AAC_FIBSTATE_DONEADAP);
1186 			ddi_put32(slotp->fib_acc_handle,
1187 			    (uint32_t *)&slotp->fibp->data[0], ST_OK);
1188 		}
1189 
1190 		/* Handle completed ac */
1191 		acp->ac_comp(softs, acp);
1192 	} else {
1193 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1194 		acp->flags |= AAC_CMD_ERR;
1195 		if (acp->pkt) {
1196 			acp->pkt->pkt_reason = CMD_TRAN_ERR;
1197 			acp->pkt->pkt_statistics = 0;
1198 		}
1199 	}
1200 	aac_end_io(softs, acp);
1201 }
1202 
1203 /*
1204  * Interrupt handler for New Comm. interface
1205  * The New Comm. interface uses a different interrupt mechanism: there are
1206  * no explicit message queues, and the driver only needs to access the
1207  * mapped PCI mem space to find the completed FIB or AIF.
1208  */
1209 static int
1210 aac_process_intr_new(struct aac_softstate *softs)
1211 {
1212 	uint32_t index;
1213 
1214 	index = AAC_OUTB_GET(softs);
1215 	if (index == 0xfffffffful)
1216 		index = AAC_OUTB_GET(softs);
1217 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1218 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1219 		return (DDI_INTR_UNCLAIMED);
1220 	}
1221 	if (index != 0xfffffffful) {
1222 		do {
1223 			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1224 				aac_handle_io(softs, index);
1225 			} else if (index != 0xfffffffeul) {
1226 				struct aac_fib *fibp;	/* FIB in AIF queue */
1227 				uint16_t fib_size, fib_size0;
1228 
1229 				/*
1230 				 * 0xfffffffe means that the controller wants
1231 				 * more work; ignore it for now. Otherwise, an
1232 				 * AIF has been received.
1233 				 */
1234 				index &= ~2;
1235 
1236 				mutex_enter(&softs->aifq_mutex);
1237 				/*
1238 				 * Copy AIF from adapter to the empty AIF slot
1239 				 */
1240 				fibp = &softs->aifq[softs->aifq_idx].d;
1241 				fib_size0 = PCI_MEM_GET16(softs, index + \
1242 				    offsetof(struct aac_fib, Header.Size));
1243 				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1244 				    AAC_FIB_SIZE : fib_size0;
1245 				PCI_MEM_REP_GET8(softs, index, fibp,
1246 				    fib_size);
1247 
1248 				if (aac_check_acc_handle(softs-> \
1249 				    pci_mem_handle) == DDI_SUCCESS)
1250 					(void) aac_handle_aif(softs, fibp);
1251 				else
1252 					ddi_fm_service_impact(softs->devinfo_p,
1253 					    DDI_SERVICE_UNAFFECTED);
1254 				mutex_exit(&softs->aifq_mutex);
1255 
1256 				/*
1257 				 * AIF memory is owned by the adapter, so let it
1258 				 * know that we are done with it.
1259 				 */
1260 				AAC_OUTB_SET(softs, index);
1261 				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1262 			}
1263 
1264 			index = AAC_OUTB_GET(softs);
1265 		} while (index != 0xfffffffful);
1266 
1267 		/*
1268 		 * Process waiting cmds before starting new ones to
1269 		 * ensure earlier IOs are serviced first.
1270 		 */
1271 		aac_start_waiting_io(softs);
1272 		return (AAC_DB_COMMAND_READY);
1273 	} else {
1274 		return (0);
1275 	}
1276 }
1277 
1278 static uint_t
1279 aac_intr_new(caddr_t arg)
1280 {
1281 	struct aac_softstate *softs = (struct aac_softstate *)arg;
1282 	uint_t rval;
1283 
1284 	mutex_enter(&softs->io_lock);
1285 	if (aac_process_intr_new(softs))
1286 		rval = DDI_INTR_CLAIMED;
1287 	else
1288 		rval = DDI_INTR_UNCLAIMED;
1289 	mutex_exit(&softs->io_lock);
1290 
1291 	aac_drain_comp_q(softs);
1292 	return (rval);
1293 }
1294 
1295 /*
1296  * Interrupt handler for old interface
1297  * Explicit message queues are used to send FIBs to and get completed FIBs
1298  * from the adapter. The driver and the adapter maintain the queues in a
1299  * producer/consumer manner; the driver queries the queues for completed FIBs.
1300  */
1301 static int
1302 aac_process_intr_old(struct aac_softstate *softs)
1303 {
1304 	uint16_t status;
1305 
1306 	status = AAC_STATUS_GET(softs);
1307 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1308 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1309 		return (DDI_INTR_UNCLAIMED);
1310 	}
1311 	if (status & AAC_DB_RESPONSE_READY) {
1312 		int slot_idx;
1313 
1314 		/* ACK the intr */
1315 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1316 		(void) AAC_STATUS_GET(softs);
1317 		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1318 		    &slot_idx) == AACOK)
1319 			aac_handle_io(softs, slot_idx);
1320 
1321 		/*
1322 		 * Process waiting cmds before starting new ones to
1323 		 * ensure earlier IOs are serviced first.
1324 		 */
1325 		aac_start_waiting_io(softs);
1326 		return (AAC_DB_RESPONSE_READY);
1327 	} else if (status & AAC_DB_COMMAND_READY) {
1328 		int aif_idx;
1329 
1330 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1331 		(void) AAC_STATUS_GET(softs);
1332 		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1333 		    AACOK) {
1334 			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1335 			struct aac_fib *fibp;	/* FIB in AIF queue */
1336 			struct aac_fib *fibp0;	/* FIB in communication space */
1337 			uint16_t fib_size, fib_size0;
1338 			uint32_t fib_xfer_state;
1339 			uint32_t addr, size;
1340 
1341 			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1342 
1343 #define	AAC_SYNC_AIF(softs, aif_idx, type) \
1344 	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1345 	    offsetof(struct aac_comm_space, \
1346 	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1347 	    (type)); }
1348 
1349 			mutex_enter(&softs->aifq_mutex);
1350 			/* Copy AIF from adapter to the empty AIF slot */
1351 			fibp = &softs->aifq[softs->aifq_idx].d;
1352 			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1353 			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1354 			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1355 			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1356 			    AAC_FIB_SIZE : fib_size0;
1357 			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
1358 			    fib_size, DDI_DEV_AUTOINCR);
1359 
1360 			(void) aac_handle_aif(softs, fibp);
1361 			mutex_exit(&softs->aifq_mutex);
1362 
1363 			/* Complete AIF back to adapter with good status */
1364 			fib_xfer_state = LE_32(fibp->Header.XferState);
1365 			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1366 				ddi_put32(acc, &fibp0->Header.XferState,
1367 				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1368 				ddi_put32(acc, (uint32_t *)&fibp0->data[0],
1369 				    ST_OK);
1370 				if (fib_size0 > AAC_FIB_SIZE)
1371 					ddi_put16(acc, &fibp0->Header.Size,
1372 					    AAC_FIB_SIZE);
1373 				AAC_SYNC_AIF(softs, aif_idx,
1374 				    DDI_DMA_SYNC_FORDEV);
1375 			}
1376 
1377 			/* Put the AIF response on the response queue */
1378 			addr = ddi_get32(acc,
1379 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1380 			    Header.SenderFibAddress);
1381 			size = (uint32_t)ddi_get16(acc,
1382 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1383 			    Header.Size);
1384 			ddi_put32(acc,
1385 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1386 			    Header.ReceiverFibAddress, addr);
1387 			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1388 			    addr, size) == AACERR)
1389 				cmn_err(CE_NOTE, "!AIF ack failed");
1390 		}
1391 		return (AAC_DB_COMMAND_READY);
1392 	} else if (status & AAC_DB_PRINTF_READY) {
1393 		/* ACK the intr */
1394 		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1395 		(void) AAC_STATUS_GET(softs);
1396 		(void) ddi_dma_sync(softs->comm_space_dma_handle,
1397 		    offsetof(struct aac_comm_space, adapter_print_buf),
1398 		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1399 		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1400 		    DDI_SUCCESS)
1401 			cmn_err(CE_NOTE, "MSG From Adapter: %s",
1402 			    softs->comm_space->adapter_print_buf);
1403 		else
1404 			ddi_fm_service_impact(softs->devinfo_p,
1405 			    DDI_SERVICE_UNAFFECTED);
1406 		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1407 		return (AAC_DB_PRINTF_READY);
1408 	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
1409 		/*
1410 		 * Without these two condition statements, the OS could hang
1411 		 * after a while, especially if there are a lot of AIFs to
1412 		 * handle, for instance if a drive is pulled from an array
1413 		 * under heavy load.
1414 		 */
1415 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1416 		return (AAC_DB_COMMAND_NOT_FULL);
1417 	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1418 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1419 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1420 		return (AAC_DB_RESPONSE_NOT_FULL);
1421 	} else {
1422 		return (0);
1423 	}
1424 }
1425 
1426 static uint_t
1427 aac_intr_old(caddr_t arg)
1428 {
1429 	struct aac_softstate *softs = (struct aac_softstate *)arg;
1430 	int rval;
1431 
1432 	mutex_enter(&softs->io_lock);
1433 	if (aac_process_intr_old(softs))
1434 		rval = DDI_INTR_CLAIMED;
1435 	else
1436 		rval = DDI_INTR_UNCLAIMED;
1437 	mutex_exit(&softs->io_lock);
1438 
1439 	aac_drain_comp_q(softs);
1440 	return (rval);
1441 }
1442 
1443 /*
1444  * Set pkt_reason and OR in pkt_statistics flag
1445  */
1446 static void
1447 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1448     uchar_t reason, uint_t stat)
1449 {
1450 #ifndef __lock_lint
1451 	_NOTE(ARGUNUSED(softs))
1452 #endif
1453 	AACDB_PRINT(softs, CE_NOTE, "acp=0x%p, reason=%x, stat=%x",
1454 	    (void *)acp, reason, stat);
1455 	if (acp->pkt->pkt_reason == CMD_CMPLT)
1456 		acp->pkt->pkt_reason = reason;
1457 	acp->pkt->pkt_statistics |= stat;
1458 }
1459 
1460 /*
1461  * Handle a finished pkt of soft SCMD
1462  */
1463 static void
1464 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1465 {
1466 	ASSERT(acp->pkt);
1467 
1468 	acp->flags |= AAC_CMD_CMPLT;
1469 
1470 	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1471 	    STATE_SENT_CMD;
1472 	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1473 		acp->pkt->pkt_resid = 0;
1474 
1475 	/* AAC_CMD_NO_INTR means no complete callback */
1476 	if (!(acp->flags & AAC_CMD_NO_INTR)) {
1477 		mutex_enter(&softs->q_comp_mutex);
1478 		aac_cmd_enqueue(&softs->q_comp, acp);
1479 		mutex_exit(&softs->q_comp_mutex);
1480 		ddi_trigger_softintr(softs->softint_id);
1481 	}
1482 }
1483 
1484 /*
1485  * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1486  */
1487 
1488 /*
1489  * Handle completed logical device IO command
1490  */
1491 /*ARGSUSED*/
1492 static void
1493 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1494 {
1495 	struct aac_slot *slotp = acp->slotp;
1496 	struct aac_blockread_response *resp;
1497 	uint32_t status;
1498 
1499 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1500 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1501 
1502 	/*
1503 	 * block_read and block_write have similar response headers, so the
1504 	 * blockread response structure is used for both.
1505 	 */
1506 	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1507 	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1508 	if (status == ST_OK) {
1509 		acp->pkt->pkt_resid = 0;
1510 		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1511 	} else {
1512 		aac_set_arq_data_hwerr(acp);
1513 	}
1514 }
1515 
1516 /*
1517  * Handle completed IOCTL command
1518  */
1519 /*ARGSUSED*/
1520 void
1521 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1522 {
1523 	struct aac_slot *slotp = acp->slotp;
1524 
1525 	/*
1526 	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
1527 	 * may wait on softs->event, so use cv_broadcast() instead
1528 	 * of cv_signal().
1529 	 */
1530 	ASSERT(acp->flags & AAC_CMD_SYNC);
1531 	ASSERT(acp->flags & AAC_CMD_NO_CB);
1532 
1533 	/* Get the size of the response FIB from its FIB.Header.Size field */
1534 	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
1535 	    &slotp->fibp->Header.Size);
1536 
1537 	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
1538 	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
1539 	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
1540 }
1541 
1542 /*
1543  * Handle completed Flush command
1544  */
1545 /*ARGSUSED*/
1546 static void
1547 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1548 {
1549 	struct aac_slot *slotp = acp->slotp;
1550 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
1551 	struct aac_synchronize_reply *resp;
1552 	uint32_t status;
1553 
1554 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1555 
1556 	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
1557 	status = ddi_get32(acc, &resp->Status);
1558 	if (status != CT_OK)
1559 		aac_set_arq_data_hwerr(acp);
1560 }
1561 
1562 /*
1563  * Access PCI space to see if the driver can support the card
1564  */
1565 static int
1566 aac_check_card_type(struct aac_softstate *softs)
1567 {
1568 	ddi_acc_handle_t pci_config_handle;
1569 	int card_index;
1570 	uint32_t pci_cmd;
1571 
1572 	/* Map pci configuration space */
1573 	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
1574 	    DDI_SUCCESS) {
1575 		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
1576 		return (AACERR);
1577 	}
1578 
1579 	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
1580 	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
1581 	softs->subvendid = pci_config_get16(pci_config_handle,
1582 	    PCI_CONF_SUBVENID);
1583 	softs->subsysid = pci_config_get16(pci_config_handle,
1584 	    PCI_CONF_SUBSYSID);
1585 
1586 	card_index = 0;
1587 	while (!CARD_IS_UNKNOWN(card_index)) {
1588 		if ((aac_cards[card_index].vendor == softs->vendid) &&
1589 		    (aac_cards[card_index].device == softs->devid) &&
1590 		    (aac_cards[card_index].subvendor == softs->subvendid) &&
1591 		    (aac_cards[card_index].subsys == softs->subsysid)) {
1592 			break;
1593 		}
1594 		card_index++;
1595 	}
1596 
1597 	softs->card = card_index;
1598 	softs->hwif = aac_cards[card_index].hwif;
1599 
1600 	/*
1601 	 * Unknown aac card:
1602 	 * do a generic match based on the VendorID and DeviceID to
1603 	 * support new cards in the aac family.
1604 	 */
1605 	if (CARD_IS_UNKNOWN(card_index)) {
1606 		if (softs->vendid != 0x9005) {
1607 			AACDB_PRINT(softs, CE_WARN,
1608 			    "Unknown vendor 0x%x", softs->vendid);
1609 			goto error;
1610 		}
1611 		switch (softs->devid) {
1612 		case 0x285:
1613 			softs->hwif = AAC_HWIF_I960RX;
1614 			break;
1615 		case 0x286:
1616 			softs->hwif = AAC_HWIF_RKT;
1617 			break;
1618 		default:
1619 			AACDB_PRINT(softs, CE_WARN,
1620 			    "Unknown device \"pci9005,%x\"", softs->devid);
1621 			goto error;
1622 		}
1623 	}
1624 
1625 	/* Set hardware dependent interface */
1626 	switch (softs->hwif) {
1627 	case AAC_HWIF_I960RX:
1628 		softs->aac_if = aac_rx_interface;
1629 		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
1630 		break;
1631 	case AAC_HWIF_RKT:
1632 		softs->aac_if = aac_rkt_interface;
1633 		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
1634 		break;
1635 	default:
1636 		AACDB_PRINT(softs, CE_WARN,
1637 		    "Unknown hardware interface %d", softs->hwif);
1638 		goto error;
1639 	}
1640 
1641 	/* Set card names */
1642 	(void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
1643 	    AAC_VENDOR_LEN);
1644 	(void) strncpy(softs->product_name, aac_cards[card_index].desc,
1645 	    AAC_PRODUCT_LEN);
1646 
1647 	/* Set up quirks */
1648 	softs->flags = aac_cards[card_index].quirks;
1649 
1650 	/* Force the busmaster enable bit on */
1651 	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
1652 	if ((pci_cmd & PCI_COMM_ME) == 0) {
1653 		pci_cmd |= PCI_COMM_ME;
1654 		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
1655 		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
1656 		if ((pci_cmd & PCI_COMM_ME) == 0) {
1657 			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
1658 			goto error;
1659 		}
1660 	}
1661 
1662 	/* Set memory base to map */
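	/*
	 * The low bits of a PCI memory BAR encode type/prefetch flags rather
	 * than address bits; mask them off to get the physical base address.
	 */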
1663 	softs->pci_mem_base_paddr = 0xfffffff0UL & \
1664 	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
1665 
1666 	pci_config_teardown(&pci_config_handle);
1667 
1668 	return (AACOK); /* card type detected */
1669 error:
1670 	pci_config_teardown(&pci_config_handle);
1671 	return (AACERR); /* no matched card found */
1672 }
1673 
1674 /*
1675  * Check the firmware to determine the features to support and the FIB
1676  * parameters to use.
1677  */
1678 static int
1679 aac_check_firmware(struct aac_softstate *softs)
1680 {
1681 	uint32_t options;
1682 	uint32_t atu_size;
1683 	ddi_acc_handle_t pci_handle;
1684 	char *pci_mbr;
1685 	uint32_t max_fibs;
1686 	uint32_t max_fib_size;
1687 	uint32_t sg_tablesize;
1688 	uint32_t max_sectors;
1689 	uint32_t status;
1690 
1691 	/* Get supported options */
1692 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
1693 	    &status)) != AACOK) {
1694 		if (status != SRB_STATUS_INVALID_REQUEST) {
1695 			cmn_err(CE_CONT,
1696 			    "?Fatal error: request adapter info error");
1697 			return (AACERR);
1698 		}
1699 		options = 0;
1700 		atu_size = 0;
1701 	} else {
1702 		options = AAC_MAILBOX_GET(softs, 1);
1703 		atu_size = AAC_MAILBOX_GET(softs, 2);
1704 	}
1705 
1706 	if (softs->state & AAC_STATE_RESET) {
1707 		if ((softs->support_opt == options) &&
1708 		    (softs->atu_size == atu_size))
1709 			return (AACOK);
1710 
1711 		cmn_err(CE_WARN,
1712 		    "?Fatal error: firmware changed, system needs reboot");
1713 		return (AACERR);
1714 	}
1715 
1716 	/*
1717 	 * The following critical settings are initialized only once during
1718 	 * driver attachment.
1719 	 */
1720 	softs->support_opt = options;
1721 	softs->atu_size = atu_size;
1722 
1723 	/* Process supported options */
1724 	if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1725 	    (softs->flags & AAC_FLAGS_NO4GB) == 0) {
1726 		AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
1727 		softs->flags |= AAC_FLAGS_4GB_WINDOW;
1728 	} else {
1729 		/*
1730 		 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
1731 		 * only. IO is handled by the DMA engine which does not suffer
1732 		 * from the ATU window programming workarounds necessary for
1733 		 * CPU copy operations.
1734 		 */
1735 		softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
1736 		softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
1737 	}
1738 
1739 	if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
1740 		AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
1741 		softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
1742 		softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
1743 		softs->flags |= AAC_FLAGS_SG_64BIT;
1744 	}
1745 
1746 	if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
1747 		softs->flags |= AAC_FLAGS_ARRAY_64BIT;
1748 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
1749 	}
1750 
1751 	/* Read preferred settings */
1752 	max_fib_size = 0;
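	/*
	 * As parsed below, GETCOMMPREF packs its reply into the mailbox
	 * registers: word 1 carries the maximum FIB size (low 16 bits) and
	 * half the maximum sector count (high 16 bits), word 2 the SG table
	 * size (high 16 bits), and word 3 the maximum FIB count (low 16 bits).
	 */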
1753 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
1754 	    0, 0, 0, 0, NULL)) == AACOK) {
1755 		options = AAC_MAILBOX_GET(softs, 1);
1756 		max_fib_size = (options & 0xffff);
1757 		max_sectors = (options >> 16) << 1;
1758 		options = AAC_MAILBOX_GET(softs, 2);
1759 		sg_tablesize = (options >> 16);
1760 		options = AAC_MAILBOX_GET(softs, 3);
1761 		max_fibs = (options & 0xffff);
1762 	}
1763 
1764 	/* Enable new comm. and rawio at the same time */
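	/*
	 * If the adapter reports an ATU window larger than the area mapped at
	 * attach time, the registers are remapped with the larger size here;
	 * new comm. is only enabled once the whole ATU window is mapped.
	 */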
1765 	if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
1766 	    (max_fib_size != 0)) {
1767 		if ((atu_size > softs->map_size) &&
1768 		    (ddi_regs_map_setup(softs->devinfo_p, 1,
1769 		    (caddr_t *)&pci_mbr, 0, atu_size, &aac_acc_attr,
1770 		    &pci_handle) == DDI_SUCCESS)) {
1771 			ddi_regs_map_free(&softs->pci_mem_handle);
1772 			softs->pci_mem_handle = pci_handle;
1773 			softs->pci_mem_base_vaddr = pci_mbr;
1774 			softs->map_size = atu_size;
1775 		}
1776 		if (atu_size == softs->map_size) {
1777 			softs->flags |= AAC_FLAGS_NEW_COMM;
1778 			AACDB_PRINT(softs, CE_NOTE,
1779 			    "!Enable New Comm. interface");
1780 		}
1781 	}
1782 
1783 	/* Set FIB parameters */
1784 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
1785 		softs->aac_max_fibs = max_fibs;
1786 		softs->aac_max_fib_size = max_fib_size;
1787 		softs->aac_max_sectors = max_sectors;
1788 		softs->aac_sg_tablesize = sg_tablesize;
1789 
1790 		softs->flags |= AAC_FLAGS_RAW_IO;
1791 		AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
1792 	} else {
1793 		softs->aac_max_fibs =
1794 		    (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
1795 		softs->aac_max_fib_size = AAC_FIB_SIZE;
1796 		softs->aac_max_sectors = 128;	/* 64K */
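		/*
		 * The SG table size below is either fixed by a quirk (17 or
		 * 34 entries) or derived from how many SG entries fit in the
		 * FIB data area after the blockwrite command; one entry size
		 * is added back because the command structure presumably
		 * already embeds the first SG entry.
		 */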
1797 		if (softs->flags & AAC_FLAGS_17SG)
1798 			softs->aac_sg_tablesize = 17;
1799 		else if (softs->flags & AAC_FLAGS_34SG)
1800 			softs->aac_sg_tablesize = 34;
1801 		else if (softs->flags & AAC_FLAGS_SG_64BIT)
1802 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
1803 			    sizeof (struct aac_blockwrite64) +
1804 			    sizeof (struct aac_sg_entry64)) /
1805 			    sizeof (struct aac_sg_entry64);
1806 		else
1807 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
1808 			    sizeof (struct aac_blockwrite) +
1809 			    sizeof (struct aac_sg_entry)) /
1810 			    sizeof (struct aac_sg_entry);
1811 	}
1812 
1813 	if ((softs->flags & AAC_FLAGS_RAW_IO) &&
1814 	    (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
1815 		softs->flags |= AAC_FLAGS_LBA_64BIT;
1816 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
1817 	}
1818 	softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
1819 	softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
1820 	/*
1821 	 * 64K maximum segment size in scatter gather list is controlled by
1822 	 * the NEW_COMM bit in the adapter information. If not set, the card
1823 	 * can only accept a maximum of 64K. It is not recommended to permit
1824 	 * more than 128KB of total transfer size to the adapters because
1825 	 * performance is negatively impacted.
1826 	 *
1827 	 * For new comm, segment size equals max xfer size. For old comm,
1828 	 * we use 64K for both.
1829 	 */
1830 	softs->buf_dma_attr.dma_attr_count_max =
1831 	    softs->buf_dma_attr.dma_attr_maxxfer - 1;
1832 
1833 	/* Setup FIB operations for logical devices */
1834 	if (softs->flags & AAC_FLAGS_RAW_IO)
1835 		softs->aac_cmd_fib = aac_cmd_fib_rawio;
1836 	else if (softs->flags & AAC_FLAGS_SG_64BIT)
1837 		softs->aac_cmd_fib = aac_cmd_fib_brw64;
1838 	else
1839 		softs->aac_cmd_fib = aac_cmd_fib_brw;
1840 	softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
1841 	    aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
1842 
1843 	/* 64-bit LBA needs descriptor format sense data */
1844 	softs->slen = sizeof (struct scsi_arq_status);
1845 	if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
1846 	    softs->slen < AAC_ARQ64_LENGTH)
1847 		softs->slen = AAC_ARQ64_LENGTH;
1848 
1849 	AACDB_PRINT(softs, CE_NOTE,
1850 	    "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
1851 	    softs->aac_max_fibs, softs->aac_max_fib_size,
1852 	    softs->aac_max_sectors, softs->aac_sg_tablesize);
1853 
1854 	return (AACOK);
1855 }
1856 
1857 static void
1858 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
1859     struct FsaRev *fsarev1)
1860 {
1861 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1862 
1863 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
1864 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
1865 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
1866 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
1867 	AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
1868 }
1869 
1870 /*
1871  * The following function comes from Adaptec:
1872  *
1873  * Query adapter information and supplement adapter information
1874  */
1875 static int
1876 aac_get_adapter_info(struct aac_softstate *softs,
1877     struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
1878 {
1879 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
1880 	struct aac_fib *fibp = softs->sync_slot.fibp;
1881 	struct aac_adapter_info *ainfp;
1882 	struct aac_supplement_adapter_info *sinfp;
1883 
1884 	ddi_put8(acc, &fibp->data[0], 0);
1885 	if (aac_sync_fib(softs, RequestAdapterInfo,
1886 	    sizeof (struct aac_fib_header)) != AACOK) {
1887 		AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
1888 		return (AACERR);
1889 	}
1890 	ainfp = (struct aac_adapter_info *)fibp->data;
1891 	if (ainfr) {
1892 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
1893 		AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
1894 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
1895 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
1896 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
1897 		AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
1898 		AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
1899 		AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
1900 		aac_fsa_rev(softs, &ainfp->KernelRevision,
1901 		    &ainfr->KernelRevision);
1902 		aac_fsa_rev(softs, &ainfp->MonitorRevision,
1903 		    &ainfr->MonitorRevision);
1904 		aac_fsa_rev(softs, &ainfp->HardwareRevision,
1905 		    &ainfr->HardwareRevision);
1906 		aac_fsa_rev(softs, &ainfp->BIOSRevision,
1907 		    &ainfr->BIOSRevision);
1908 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
1909 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
1910 		AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
1911 		AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
1912 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
1913 		AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
1914 	}
1915 	if (sinfr) {
1916 		if (!(softs->support_opt &
1917 		    AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
1918 			AACDB_PRINT(softs, CE_WARN,
1919 			    "SupplementAdapterInfo not supported");
1920 			return (AACERR);
1921 		}
1922 		ddi_put8(acc, &fibp->data[0], 0);
1923 		if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
1924 		    sizeof (struct aac_fib_header)) != AACOK) {
1925 			AACDB_PRINT(softs, CE_WARN,
1926 			    "RequestSupplementAdapterInfo failed");
1927 			return (AACERR);
1928 		}
1929 		sinfp = (struct aac_supplement_adapter_info *)fibp->data;
1930 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
1931 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
1932 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
1933 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
1934 		AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
1935 		AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
1936 		AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
1937 		AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
1938 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
1939 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
1940 		AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
1941 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
1942 		    sizeof (struct vpd_info));
1943 		aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
1944 		    &sinfr->FlashFirmwareRevision);
1945 		AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
1946 		aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
1947 		    &sinfr->FlashFirmwareBootRevision);
1948 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
1949 		    MFG_PCBA_SERIAL_NUMBER_WIDTH);
1950 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
1951 		    MFG_WWN_WIDTH);
1952 		AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2);
1953 	}
1954 	return (AACOK);
1955 }
1956 
1957 /*
1958  * The following function comes from Adaptec:
1959  *
1960  * Routine to be called during initialization of communications with
1961  * the adapter to handle possible adapter configuration issues. When
1962  * the adapter first boots up, it examines attached drives, etc, and
1963  * potentially comes up with a new or revised configuration (relative to
1964  * what's stored in it's NVRAM). Additionally it may discover problems
1965  * what's stored in its NVRAM). Additionally it may discover problems
1966  * applicable only to cluster configuration issues).
1967  *
1968  * If there are no configuration issues or the issues are considered
1969  * trivial by the adapter, it will set its configuration status to
1970  * "FSACT_CONTINUE" and execute the "commit configuration" action
1971  * automatically on its own.
1972  *
1973  * However, if there are non-trivial issues, the adapter will set its
1974  * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
1975  * and wait for some agent on the host to issue the "\ContainerCommand
1976  * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
1977  * adapter to commit the new/updated configuration and enable
1978  * un-inhibited operation.  The host agent should first issue the
1979  * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
1980  * command to obtain information about config issues detected by
1981  * the adapter.
1982  *
1983  * Normally the adapter's PC BIOS will execute on the host following
1984  * adapter poweron and reset and will be responsible for querying the
1985  * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
1986  * command if appropriate.
1987  *
1988  * However, with the introduction of IOP reset support, the adapter may
1989  * boot up without the benefit of the adapter's PC BIOS host agent.
1990  * This routine is intended to take care of these issues in situations
1991  * where BIOS doesn't execute following adapter poweron or reset.  The
1992  * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
1993  * there is no harm in doing this when it's already been done.
1994  */
1995 static int
1996 aac_handle_adapter_config_issues(struct aac_softstate *softs)
1997 {
1998 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
1999 	struct aac_fib *fibp = softs->sync_slot.fibp;
2000 	struct aac_Container *cmd;
2001 	struct aac_Container_resp *resp;
2002 	struct aac_cf_status_header *cfg_sts_hdr;
2003 	uint32_t resp_status;
2004 	uint32_t ct_status;
2005 	uint32_t cfg_stat_action;
2006 	int rval;
2007 
2008 	/* Get adapter config status */
2009 	cmd = (struct aac_Container *)&fibp->data[0];
2010 
2011 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2012 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2013 	ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2014 	ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2015 	    sizeof (struct aac_cf_status_header));
2016 	rval = aac_sync_fib(softs, ContainerCommand,
2017 	    AAC_FIB_SIZEOF(struct aac_Container));
2018 	resp = (struct aac_Container_resp *)cmd;
2019 	cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2020 
2021 	resp_status = ddi_get32(acc, &resp->Status);
2022 	ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2023 	if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2024 		cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2025 
2026 		/* Commit configuration if it's reasonable to do so. */
2027 		if (cfg_stat_action <= CFACT_PAUSE) {
2028 			bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2029 			ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2030 			ddi_put32(acc, &cmd->CTCommand.command,
2031 			    CT_COMMIT_CONFIG);
2032 			rval = aac_sync_fib(softs, ContainerCommand,
2033 			    AAC_FIB_SIZEOF(struct aac_Container));
2034 
2035 			resp_status = ddi_get32(acc, &resp->Status);
2036 			ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2037 			if ((rval == AACOK) && (resp_status == 0) &&
2038 			    (ct_status == CT_OK))
2039 				/* Successful completion */
2040 				rval = AACMPE_OK;
2041 			else
2042 				/* Auto-commit aborted due to error(s). */
2043 				rval = AACMPE_COMMIT_CONFIG;
2044 		} else {
2045 			/*
2046 			 * Auto-commit aborted due to adapter indicating
2047 			 * configuration issue(s) too dangerous to auto-commit.
2048 			 */
2049 			rval = AACMPE_CONFIG_STATUS;
2050 		}
2051 	} else {
2052 		cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2053 		rval = AACMPE_CONFIG_STATUS;
2054 	}
2055 	return (rval);
2056 }
2057 
2058 /*
2059  * Hardware initialization and resource allocation
2060  */
2061 static int
2062 aac_common_attach(struct aac_softstate *softs)
2063 {
2064 	uint32_t status;
2065 	int i;
2066 
2067 	DBCALLED(softs, 1);
2068 
2069 	/*
2070 	 * Do a little check here to make sure there aren't any outstanding
2071 	 * FIBs in the message queue. At this point there should not be any;
2072 	 * if there are, they were probably left over from another instance of
2073 	 * the driver, e.g. when the system crashes and the crash dump driver
2074 	 * gets loaded.
2075 	 */
2076 	while (AAC_OUTB_GET(softs) != 0xfffffffful)
2077 		;
2078 
2079 	/*
2080 	 * Wait for the card to finish booting before doing anything that
2081 	 * attempts to communicate with it.
2082 	 */
2083 	status = AAC_FWSTATUS_GET(softs);
2084 	if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2085 		goto error;
2086 	i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2087 	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2088 	if (i == 0) {
2089 		cmn_err(CE_CONT, "?Fatal error: controller not ready");
2090 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2091 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2092 		goto error;
2093 	}
2094 
2095 	/* Read and set card supported options and settings */
2096 	if (aac_check_firmware(softs) == AACERR) {
2097 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2098 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2099 		goto error;
2100 	}
2101 
2102 	/* Clear out all interrupts */
2103 	AAC_STATUS_CLR(softs, ~0);
2104 
2105 	/* Setup communication space with the card */
2106 	if (softs->comm_space_dma_handle == NULL) {
2107 		if (aac_alloc_comm_space(softs) != AACOK)
2108 			goto error;
2109 	}
2110 	if (aac_setup_comm_space(softs) != AACOK) {
2111 		cmn_err(CE_CONT, "?Setup communication space failed");
2112 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2113 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2114 		goto error;
2115 	}
2116 
2117 #ifdef DEBUG
2118 	if (aac_get_fw_debug_buffer(softs) != AACOK)
2119 		cmn_err(CE_CONT, "?firmware UART trace not supported");
2120 #endif
2121 
2122 	/* Allocate slots */
2123 	if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2124 		cmn_err(CE_CONT, "?Fatal error: slot allocation failed");
2125 		goto error;
2126 	}
2127 	AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2128 
2129 	/* Allocate FIBs */
2130 	if (softs->total_fibs < softs->total_slots) {
2131 		aac_alloc_fibs(softs);
2132 		if (softs->total_fibs == 0)
2133 			goto error;
2134 		AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2135 		    softs->total_fibs);
2136 	}
2137 
2138 	/* Get adapter names */
2139 	if (CARD_IS_UNKNOWN(softs->card)) {
2140 		struct aac_supplement_adapter_info sinf;
2141 
2142 		if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) {
2143 			cmn_err(CE_CONT, "?Query adapter information failed");
2144 		} else {
2145 			char *p, *p0, *p1;
2146 
2147 			/*
2148 			 * Now find the controller name in supp_adapter_info->
2149 			 * AdapterTypeText. Use the first word as the vendor
2150 			 * and the other words as the product name.
2151 			 */
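			/*
			 * For example (illustrative only), an AdapterTypeText
			 * of "Adaptec 2200S" would yield vendor_name "Adaptec"
			 * and product_name "2200S".
			 */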
2152 			AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2153 			    "\"%s\"", sinf.AdapterTypeText);
2154 			p = sinf.AdapterTypeText;
2155 			p0 = p1 = NULL;
2156 			/* Skip leading spaces */
2157 			while (*p && (*p == ' ' || *p == '\t'))
2158 				p++;
2159 			p0 = p;
2160 			while (*p && (*p != ' ' && *p != '\t'))
2161 				p++;
2162 			/* Zero the separating spaces (terminates the vendor word) */
2163 			while (*p && (*p == ' ' || *p == '\t'))
2164 				*p++ = 0;
2165 			p1 = p;
2166 			/* Remove trailing spaces */
2167 			p = p1 + strlen(p1) - 1;
2168 			while (p > p1 && (*p == ' ' || *p == '\t'))
2169 				*p-- = 0;
2170 			if (*p0 && *p1) {
2171 				(void) strncpy(softs->vendor_name, p0,
2172 				    AAC_VENDOR_LEN);
2173 				(void) strncpy(softs->product_name, p1,
2174 				    AAC_PRODUCT_LEN);
2175 			} else {
2176 				cmn_err(CE_WARN,
2177 				    "?adapter name mis-formatted\n");
2178 				if (*p0)
2179 					(void) strncpy(softs->product_name,
2180 					    p0, AAC_PRODUCT_LEN);
2181 			}
2182 		}
2183 	}
2184 
2185 	cmn_err(CE_NOTE,
2186 	    "!aac driver %d.%02d.%02d-%d, found card: " \
2187 	    "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2188 	    AAC_DRIVER_MAJOR_VERSION,
2189 	    AAC_DRIVER_MINOR_VERSION,
2190 	    AAC_DRIVER_BUGFIX_LEVEL,
2191 	    AAC_DRIVER_BUILD,
2192 	    softs->vendor_name, softs->product_name,
2193 	    softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2194 	    softs->pci_mem_base_paddr);
2195 
2196 	/* Perform acceptance of adapter-detected config changes if possible */
2197 	if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2198 		cmn_err(CE_CONT, "?Handle adapter config issues failed");
2199 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2200 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2201 		goto error;
2202 	}
2203 
2204 	/* Setup containers */
2205 	bzero(softs->containers, sizeof (struct aac_container) * AAC_MAX_LD);
2206 	softs->container_count = 0;
2207 	if (aac_probe_containers(softs) != AACOK) {
2208 		cmn_err(CE_CONT, "?Fatal error: get container info error");
2209 		goto error;
2210 	}
2211 
2212 	/* Check dma & acc handles allocated in attach */
2213 	if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2214 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2215 		goto error;
2216 	}
2217 
2218 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2219 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2220 		goto error;
2221 	}
2222 
2223 	for (i = 0; i < softs->total_slots; i++) {
2224 		if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2225 		    DDI_SUCCESS) {
2226 			ddi_fm_service_impact(softs->devinfo_p,
2227 			    DDI_SERVICE_LOST);
2228 			goto error;
2229 		}
2230 	}
2231 
2232 	return (AACOK);
2233 
2234 error:
2235 	if (softs->state & AAC_STATE_RESET)
2236 		return (AACERR);
2237 	if (softs->total_fibs > 0)
2238 		aac_destroy_fibs(softs);
2239 	if (softs->total_slots > 0)
2240 		aac_destroy_slots(softs);
2241 	if (softs->comm_space_dma_handle)
2242 		aac_free_comm_space(softs);
2243 	return (AACERR);
2244 }
2245 
2246 /*
2247  * Hardware shutdown and resource release
2248  */
2249 static void
2250 aac_common_detach(struct aac_softstate *softs)
2251 {
2252 	DBCALLED(softs, 1);
2253 
2254 	(void) aac_shutdown(softs);
2255 
2256 	aac_destroy_fibs(softs);
2257 	aac_destroy_slots(softs);
2258 	aac_free_comm_space(softs);
2259 }
2260 
2261 /*
2262  * Send a synchronous command to the controller and wait for a result.
2263  * Indicate if the controller completed the command with an error status.
2264  */
2265 int
2266 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
2267     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
2268     uint32_t *statusp)
2269 {
2270 	int timeout;
2271 	uint32_t status;
2272 
2273 	if (statusp != NULL)
2274 		*statusp = SRB_STATUS_SUCCESS;
2275 
2276 	/* Fill in mailbox */
2277 	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
2278 
2279 	/* Ensure the sync command doorbell flag is cleared */
2280 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2281 
2282 	/* Then set it to signal the adapter */
2283 	AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
2284 
2285 	/* Spin waiting for the command to complete */
2286 	timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
2287 	AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
2288 	if (!timeout) {
2289 		AACDB_PRINT(softs, CE_WARN,
2290 		    "Sync command timed out after %d seconds (0x%x)!",
2291 		    AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
2292 		return (AACERR);
2293 	}
2294 
2295 	/* Clear the completion flag */
2296 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2297 
2298 	/* Get the command status */
2299 	status = AAC_MAILBOX_GET(softs, 0);
2300 	if (statusp != NULL)
2301 		*statusp = status;
2302 	if (status != SRB_STATUS_SUCCESS) {
2303 		AACDB_PRINT(softs, CE_WARN,
2304 		    "Sync command fail: status = 0x%x", status);
2305 		return (AACERR);
2306 	}
2307 
2308 	return (AACOK);
2309 }
2310 
2311 /*
2312  * Send a synchronous FIB to the adapter and wait for its completion
2313  */
2314 static int
2315 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
2316 {
2317 	struct aac_slot *slotp = &softs->sync_slot;
2318 	ddi_dma_handle_t dma = slotp->fib_dma_handle;
2319 	uint32_t status;
2320 	int rval;
2321 
2322 	/* Sync fib only supports 512 bytes */
2323 	if (fibsize > AAC_FIB_SIZE)
2324 		return (AACERR);
2325 
2326 	/*
2327 	 * Set up the sync FIB.
2328 	 * There is no need to reinitialize the FIB header if it has already
2329 	 * been filled in by others such as aac_cmd_fib_scsi via an aac_cmd.
2330 	 */
2331 	if (slotp->acp == NULL)
2332 		aac_cmd_fib_header(softs, slotp, cmd, fibsize);
2333 
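	/* Flush the FIB to memory so the adapter sees the latest contents */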
2334 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2335 	    fibsize, DDI_DMA_SYNC_FORDEV);
2336 
2337 	/* Give the FIB to the controller, wait for a response. */
2338 	rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB,
2339 	    slotp->fib_phyaddr, 0, 0, 0, &status);
2340 	if (rval == AACERR) {
2341 		AACDB_PRINT(softs, CE_WARN,
2342 		    "Send sync fib to controller failed");
2343 		return (AACERR);
2344 	}
2345 
2346 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2347 	    AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU);
2348 
2349 	if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) ||
2350 	    (aac_check_dma_handle(dma) != DDI_SUCCESS)) {
2351 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2352 		return (AACERR);
2353 	}
2354 
2355 	return (AACOK);
2356 }
2357 
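/*
 * The aac_cmd queues are kept as tail-linked lists. An empty queue's tail
 * points at q_head cast to an aac_cmd so that enqueue can always update
 * q_tail->next; this presumably relies on the next pointer being the first
 * member of struct aac_cmd (see aac.h).
 */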
2358 static void
2359 aac_cmd_initq(struct aac_cmd_queue *q)
2360 {
2361 	q->q_head = NULL;
2362 	q->q_tail = (struct aac_cmd *)&q->q_head;
2363 }
2364 
2365 /*
2366  * Remove a cmd from the head of q
2367  */
2368 static struct aac_cmd *
2369 aac_cmd_dequeue(struct aac_cmd_queue *q)
2370 {
2371 	struct aac_cmd *acp;
2372 
2373 	_NOTE(ASSUMING_PROTECTED(*q))
2374 
2375 	if ((acp = q->q_head) != NULL) {
2376 		if ((q->q_head = acp->next) != NULL)
2377 			acp->next = NULL;
2378 		else
2379 			q->q_tail = (struct aac_cmd *)&q->q_head;
2380 		acp->prev = NULL;
2381 	}
2382 	return (acp);
2383 }
2384 
2385 /*
2386  * Add a cmd to the tail of q
2387  */
2388 static void
2389 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
2390 {
2391 	ASSERT(acp->next == NULL);
2392 	acp->prev = q->q_tail;
2393 	q->q_tail->next = acp;
2394 	q->q_tail = acp;
2395 }
2396 
2397 /*
2398  * Remove the cmd acp from q
2399  */
2400 static void
2401 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
2402 {
2403 	if (acp->prev) {
2404 		if ((acp->prev->next = acp->next) != NULL) {
2405 			acp->next->prev = acp->prev;
2406 			acp->next = NULL;
2407 		} else {
2408 			q->q_tail = acp->prev;
2409 		}
2410 		acp->prev = NULL;
2411 	}
2412 	/* otherwise acp is not in the queue */
2413 }
2414 
2415 /*
2416  * Atomically insert an entry into the nominated queue; returns AACOK on
2417  * success or AACERR if the queue is full.
2418  *
2419  * Note: it would be more efficient to defer notifying the controller in
2420  *	 the case where we may be inserting several entries in rapid succession,
2421  *	 but implementing this usefully may be difficult (it would involve a
2422  *	 separate queue/notify interface).
2423  */
2424 static int
2425 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
2426     uint32_t fib_size)
2427 {
2428 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
2429 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2430 	uint32_t pi, ci;
2431 
2432 	DBCALLED(softs, 2);
2433 
2434 	ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
2435 
2436 	/* Get the producer/consumer indices */
2437 	(void) ddi_dma_sync(dma, (uint8_t *)softs->qtablep->qt_qindex[queue] - \
2438 	    (uint8_t *)softs->comm_space, sizeof (uint32_t) * 2,
2439 	    DDI_DMA_SYNC_FORCPU);
2440 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
2441 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2442 		return (AACERR);
2443 	}
2444 
2445 	pi = ddi_get32(acc,
2446 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
2447 	ci = ddi_get32(acc,
2448 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
2449 
2450 	/*
2451 	 * Wrap the producer index first, before checking whether
2452 	 * the queue is full
2453 	 */
2454 	if (pi >= aac_qinfo[queue].size)
2455 		pi = 0;
2456 
2457 	/* XXX queue full: next producer slot would hit the consumer index */
2458 	if ((pi + 1) == ci)
2459 		return (AACERR);
2460 
2461 	/* Fill in queue entry */
2462 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
2463 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
2464 	(void) ddi_dma_sync(dma, (uint8_t *)(softs->qentries[queue] + pi) - \
2465 	    (uint8_t *)softs->comm_space, sizeof (struct aac_queue_entry),
2466 	    DDI_DMA_SYNC_FORDEV);
2467 
2468 	/* Update producer index */
2469 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
2470 	    pi + 1);
2471 	(void) ddi_dma_sync(dma,
2472 	    (uint8_t *)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
2473 	    (uint8_t *)softs->comm_space, sizeof (uint32_t),
2474 	    DDI_DMA_SYNC_FORDEV);
2475 
2476 	if (aac_qinfo[queue].notify != 0)
2477 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
2478 	return (AACOK);
2479 }
2480 
2481 /*
2482  * Atomically remove one entry from the nominated queue; returns AACOK on
2483  * success or AACERR if the queue is empty.
2484  */
2485 static int
2486 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
2487 {
2488 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2489 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
2490 	uint32_t pi, ci;
2491 	int unfull = 0;
2492 
2493 	DBCALLED(softs, 2);
2494 
2495 	ASSERT(idxp);
2496 
2497 	/* Get the producer/consumer indices */
2498 	(void) ddi_dma_sync(dma, (uint8_t *)softs->qtablep->qt_qindex[queue] - \
2499 	    (uint8_t *)softs->comm_space, sizeof (uint32_t) * 2,
2500 	    DDI_DMA_SYNC_FORCPU);
2501 	pi = ddi_get32(acc,
2502 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
2503 	ci = ddi_get32(acc,
2504 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
2505 
2506 	/* Check for queue empty */
2507 	if (ci == pi)
2508 		return (AACERR);
2509 
2510 	if (pi >= aac_qinfo[queue].size)
2511 		pi = 0;
2512 
2513 	/* Check for queue full */
2514 	if (ci == pi + 1)
2515 		unfull = 1;
2516 
2517 	/*
2518 	 * The controller does not wrap the queue,
2519 	 * so we have to do it ourselves
2520 	 */
2521 	if (ci >= aac_qinfo[queue].size)
2522 		ci = 0;
2523 
2524 	/* Fetch the entry */
2525 	(void) ddi_dma_sync(dma, (uint8_t *)(softs->qentries[queue] + pi) - \
2526 	    (uint8_t *)softs->comm_space, sizeof (struct aac_queue_entry),
2527 	    DDI_DMA_SYNC_FORCPU);
2528 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
2529 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2530 		return (AACERR);
2531 	}
2532 
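	/*
	 * For host response queues the aq_fib_addr value is returned
	 * unchanged; for adapter-initiated command queues it is a byte
	 * offset into the FIB area and is converted to a FIB index below.
	 */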
2533 	switch (queue) {
2534 	case AAC_HOST_NORM_RESP_Q:
2535 	case AAC_HOST_HIGH_RESP_Q:
2536 		*idxp = ddi_get32(acc,
2537 		    &(softs->qentries[queue] + ci)->aq_fib_addr);
2538 		break;
2539 
2540 	case AAC_HOST_NORM_CMD_Q:
2541 	case AAC_HOST_HIGH_CMD_Q:
2542 		*idxp = ddi_get32(acc,
2543 		    &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
2544 		break;
2545 
2546 	default:
2547 		cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
2548 		return (AACERR);
2549 	}
2550 
2551 	/* Update consumer index */
2552 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
2553 	    ci + 1);
2554 	(void) ddi_dma_sync(dma,
2555 	    (uint8_t *)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
2556 	    (uint8_t *)softs->comm_space, sizeof (uint32_t),
2557 	    DDI_DMA_SYNC_FORDEV);
2558 
2559 	if (unfull && aac_qinfo[queue].notify != 0)
2560 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
2561 	return (AACOK);
2562 }
2563 
2564 /*
2565  * Request information about container cid
2566  */
2567 static struct aac_mntinforesp *
2568 aac_get_container_info(struct aac_softstate *softs, int cid)
2569 {
2570 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2571 	struct aac_fib *fibp = softs->sync_slot.fibp;
2572 	struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
2573 	struct aac_mntinforesp *mir;
2574 
2575 	ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
2576 	    (softs->flags & AAC_FLAGS_LBA_64BIT) ?
2577 	    VM_NameServe64 : VM_NameServe);
2578 	ddi_put32(acc, &mi->MntType, FT_FILESYS);
2579 	ddi_put32(acc, &mi->MntCount, cid);
2580 
2581 	if (aac_sync_fib(softs, ContainerCommand,
2582 	    AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
2583 		AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid);
2584 		return (NULL);
2585 	}
2586 
2587 	mir = (struct aac_mntinforesp *)&fibp->data[0];
2588 	if (ddi_get32(acc, &mir->Status) == ST_OK)
2589 		return (mir);
2590 	return (NULL);
2591 }
2592 
2593 static int
2594 aac_get_container_count(struct aac_softstate *softs, int *count)
2595 {
2596 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2597 	struct aac_mntinforesp *mir;
2598 
2599 	if ((mir = aac_get_container_info(softs, 0)) == NULL)
2600 		return (AACERR);
2601 	*count = ddi_get32(acc, &mir->MntRespCount);
2602 	if (*count > AAC_MAX_LD) {
2603 		AACDB_PRINT(softs, CE_CONT,
2604 		    "container count(%d) > AAC_MAX_LD", *count);
2605 		return (AACERR);
2606 	}
2607 	return (AACOK);
2608 }
2609 
2610 static int
2611 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
2612 {
2613 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2614 	struct aac_Container *ct = (struct aac_Container *) \
2615 	    &softs->sync_slot.fibp->data[0];
2616 
2617 	bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
2618 	ddi_put32(acc, &ct->Command, VM_ContainerConfig);
2619 	ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
2620 	ddi_put32(acc, &ct->CTCommand.param[0], cid);
2621 
2622 	if (aac_sync_fib(softs, ContainerCommand,
2623 	    AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
2624 		return (AACERR);
2625 	if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
2626 		return (AACERR);
2627 
2628 	*uid = ddi_get32(acc, &ct->CTCommand.param[1]);
2629 	return (AACOK);
2630 }
2631 
2632 static int
2633 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
2634 {
2635 	struct aac_container *dvp = &softs->containers[cid];
2636 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2637 	struct aac_mntinforesp *mir;
2638 	uint64_t size;
2639 	uint32_t uid;
2640 
2641 	/* Get container basic info */
2642 	if ((mir = aac_get_container_info(softs, cid)) == NULL)
2643 		return (AACERR);
2644 
2645 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
2646 		if (dvp->valid) {
2647 			AACDB_PRINT(softs, CE_NOTE,
2648 			    ">>> Container %d deleted", cid);
2649 			dvp->valid = 0;
2650 		}
2651 	} else {
2652 		size = AAC_MIR_SIZE(softs, acc, mir);
2653 		AACDB_PRINT(softs, CE_NOTE, "Container #%d found: " \
2654 		    "size=0x%x.%08x, type=%d, name=%s",
2655 		    cid,
2656 		    ddi_get32(acc, &mir->MntObj.CapacityHigh),
2657 		    ddi_get32(acc, &mir->MntObj.Capacity),
2658 		    ddi_get32(acc, &mir->MntObj.VolType),
2659 		    mir->MntObj.FileSystemName);
2660 
2661 		/* Get container UID */
2662 		if (aac_get_container_uid(softs, cid, &uid) == AACERR) {
2663 			AACDB_PRINT(softs, CE_CONT,
2664 			    "query container %d uid failed", cid);
2665 			return (AACERR);
2666 		}
2667 		AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid);
2668 
2669 		if (dvp->valid) {
2670 			if (dvp->uid != uid) {
2671 				AACDB_PRINT(softs, CE_WARN,
2672 				    ">>> Container %u uid changed to %d",
2673 				    cid, uid);
2674 				dvp->uid = uid;
2675 			}
2676 			if (dvp->size != size) {
2677 				AACDB_PRINT(softs, CE_NOTE,
2678 				    ">>> Container %u size changed to %"PRIu64,
2679 				    cid, size);
2680 				dvp->size = size;
2681 			}
2682 		} else { /* Init new container */
2683 			AACDB_PRINT(softs, CE_NOTE,
2684 			    ">>> Container %d added", cid);
2685 			dvp->valid = 1;
2686 
2687 			dvp->cid = cid;
2688 			dvp->uid = uid;
2689 			dvp->size = size;
2690 			dvp->locked = 0;
2691 			dvp->deleted = 0;
2692 		}
2693 	}
2694 	return (AACOK);
2695 }
2696 
2697 /*
2698  * Do a rescan of all the possible containers and update the container list
2699  * with newly online/offline containers.
2700  */
2701 static int
2702 aac_probe_containers(struct aac_softstate *softs)
2703 {
2704 	int i, count, total;
2705 
2706 	/* Loop over possible containers */
2707 	count = softs->container_count;
2708 	if (aac_get_container_count(softs, &count) == AACERR)
2709 		return (AACERR);
2710 	for (i = total = 0; i < count; i++) {
2711 		if (aac_probe_container(softs, i) == AACOK)
2712 			total++;
2713 	}
2714 	if (count < softs->container_count) {
2715 		struct aac_container *dvp;
2716 
2717 		for (dvp = &softs->containers[count];
2718 		    dvp < &softs->containers[softs->container_count]; dvp++) {
2719 			if (dvp->valid == 0)
2720 				continue;
2721 			AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
2722 			    dvp->cid);
2723 			dvp->valid = 0;
2724 		}
2725 	}
2726 	softs->container_count = count;
2727 	AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
2728 	return (AACOK);
2729 }
2730 
2731 static int
2732 aac_alloc_comm_space(struct aac_softstate *softs)
2733 {
2734 	size_t rlen;
2735 	ddi_dma_cookie_t cookie;
2736 	uint_t cookien;
2737 
2738 	/* Allocate DMA for comm. space */
2739 	if (ddi_dma_alloc_handle(
2740 	    softs->devinfo_p,
2741 	    &softs->addr_dma_attr,
2742 	    DDI_DMA_SLEEP,
2743 	    NULL,
2744 	    &softs->comm_space_dma_handle) != DDI_SUCCESS) {
2745 		AACDB_PRINT(softs, CE_WARN,
2746 		    "Cannot alloc dma handle for communication area");
2747 		goto error;
2748 	}
2749 	if (ddi_dma_mem_alloc(
2750 	    softs->comm_space_dma_handle,
2751 	    sizeof (struct aac_comm_space),
2752 	    &aac_acc_attr,
2753 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2754 	    DDI_DMA_SLEEP,
2755 	    NULL,
2756 	    (caddr_t *)&softs->comm_space,
2757 	    &rlen,
2758 	    &softs->comm_space_acc_handle) != DDI_SUCCESS) {
2759 		AACDB_PRINT(softs, CE_WARN,
2760 		    "Cannot alloc mem for communication area");
2761 		goto error;
2762 	}
2763 	if (ddi_dma_addr_bind_handle(
2764 	    softs->comm_space_dma_handle,
2765 	    NULL,
2766 	    (caddr_t)softs->comm_space,
2767 	    sizeof (struct aac_comm_space),
2768 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2769 	    DDI_DMA_SLEEP,
2770 	    NULL,
2771 	    &cookie,
2772 	    &cookien) != DDI_DMA_MAPPED) {
2773 		AACDB_PRINT(softs, CE_WARN,
2774 		    "DMA bind failed for communication area");
2775 		goto error;
2776 	}
2777 	softs->comm_space_phyaddr = cookie.dmac_address;
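	/*
	 * The communication area is assumed to bind to a single DMA cookie
	 * (the address DMA attributes presumably request one contiguous
	 * segment), so its physical address can be handed to the adapter
	 * directly.
	 */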
2778 
2779 	/* Setup sync FIB space */
2780 	softs->sync_slot.fibp = &softs->comm_space->sync_fib;
2781 	softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \
2782 	    offsetof(struct aac_comm_space, sync_fib);
2783 	softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle;
2784 	softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle;
2785 
2786 	return (AACOK);
2787 error:
2788 	if (softs->comm_space_acc_handle) {
2789 		ddi_dma_mem_free(&softs->comm_space_acc_handle);
2790 		softs->comm_space_acc_handle = NULL;
2791 	}
2792 	if (softs->comm_space_dma_handle) {
2793 		ddi_dma_free_handle(&softs->comm_space_dma_handle);
2794 		softs->comm_space_dma_handle = NULL;
2795 	}
2796 	return (AACERR);
2797 }
2798 
2799 static void
2800 aac_free_comm_space(struct aac_softstate *softs)
2801 {
2802 	softs->sync_slot.fibp = NULL;
2803 	softs->sync_slot.fib_phyaddr = NULL;
2804 	softs->sync_slot.fib_acc_handle = NULL;
2805 	softs->sync_slot.fib_dma_handle = NULL;
2806 
2807 	(void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
2808 	ddi_dma_mem_free(&softs->comm_space_acc_handle);
2809 	softs->comm_space_acc_handle = NULL;
2810 	ddi_dma_free_handle(&softs->comm_space_dma_handle);
2811 	softs->comm_space_dma_handle = NULL;
2812 	softs->comm_space_phyaddr = NULL;
2813 }
2814 
2815 /*
2816  * Initialize the data structures that are required for the communication
2817  * interface to operate
2818  */
2819 static int
2820 aac_setup_comm_space(struct aac_softstate *softs)
2821 {
2822 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
2823 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2824 	uint32_t comm_space_phyaddr;
2825 	struct aac_adapter_init *initp;
2826 	int qoffset;
2827 
2828 	comm_space_phyaddr = softs->comm_space_phyaddr;
2829 
2830 	/* Setup adapter init struct */
2831 	initp = &softs->comm_space->init_data;
2832 	bzero(initp, sizeof (struct aac_adapter_init));
2833 
2834 	ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
2835 	ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
2836 
2837 	/* Setup new/old comm. specific data */
2838 	if (softs->flags & AAC_FLAGS_RAW_IO) {
2839 		ddi_put32(acc, &initp->InitStructRevision,
2840 		    AAC_INIT_STRUCT_REVISION_4);
2841 		ddi_put32(acc, &initp->InitFlags,
2842 		    (softs->flags & AAC_FLAGS_NEW_COMM) ?
2843 		    AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0);
2844 		/* Setup the preferred settings */
2845 		ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
2846 		ddi_put32(acc, &initp->MaxIoSize,
2847 		    (softs->aac_max_sectors << 9));
2848 		ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
2849 	} else {
2850 		/*
2851 		 * Tells the adapter about the physical location of various
2852 		 * important shared data structures
2853 		 */
2854 		ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
2855 		    comm_space_phyaddr + \
2856 		    offsetof(struct aac_comm_space, adapter_fibs));
2857 		ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
2858 		ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
2859 		ddi_put32(acc, &initp->AdapterFibsSize,
2860 		    AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
2861 		ddi_put32(acc, &initp->PrintfBufferAddress,
2862 		    comm_space_phyaddr + \
2863 		    offsetof(struct aac_comm_space, adapter_print_buf));
2864 		ddi_put32(acc, &initp->PrintfBufferSize,
2865 		    AAC_ADAPTER_PRINT_BUFSIZE);
2866 		ddi_put32(acc, &initp->MiniPortRevision,
2867 		    AAC_INIT_STRUCT_MINIPORT_REVISION);
2868 		ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
2869 
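		/*
		 * Place the queue table on the next AAC_QUEUE_ALIGN boundary
		 * within the communication space; qoffset is the padding
		 * needed to round the physical address up.
		 */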
2870 		qoffset = (comm_space_phyaddr + \
2871 		    offsetof(struct aac_comm_space, qtable)) % \
2872 		    AAC_QUEUE_ALIGN;
2873 		if (qoffset)
2874 			qoffset = AAC_QUEUE_ALIGN - qoffset;
2875 		softs->qtablep = (struct aac_queue_table *) \
2876 		    ((char *)&softs->comm_space->qtable + qoffset);
2877 		ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
2878 		    offsetof(struct aac_comm_space, qtable) + qoffset);
2879 
2880 		/* Init queue table */
2881 		ddi_put32(acc, &softs->qtablep-> \
2882 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
2883 		    AAC_HOST_NORM_CMD_ENTRIES);
2884 		ddi_put32(acc, &softs->qtablep-> \
2885 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
2886 		    AAC_HOST_NORM_CMD_ENTRIES);
2887 		ddi_put32(acc, &softs->qtablep-> \
2888 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
2889 		    AAC_HOST_HIGH_CMD_ENTRIES);
2890 		ddi_put32(acc, &softs->qtablep-> \
2891 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
2892 		    AAC_HOST_HIGH_CMD_ENTRIES);
2893 		ddi_put32(acc, &softs->qtablep-> \
2894 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
2895 		    AAC_ADAP_NORM_CMD_ENTRIES);
2896 		ddi_put32(acc, &softs->qtablep-> \
2897 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
2898 		    AAC_ADAP_NORM_CMD_ENTRIES);
2899 		ddi_put32(acc, &softs->qtablep-> \
2900 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
2901 		    AAC_ADAP_HIGH_CMD_ENTRIES);
2902 		ddi_put32(acc, &softs->qtablep-> \
2903 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
2904 		    AAC_ADAP_HIGH_CMD_ENTRIES);
2905 		ddi_put32(acc, &softs->qtablep-> \
2906 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
2907 		    AAC_HOST_NORM_RESP_ENTRIES);
2908 		ddi_put32(acc, &softs->qtablep-> \
2909 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
2910 		    AAC_HOST_NORM_RESP_ENTRIES);
2911 		ddi_put32(acc, &softs->qtablep-> \
2912 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
2913 		    AAC_HOST_HIGH_RESP_ENTRIES);
2914 		ddi_put32(acc, &softs->qtablep-> \
2915 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
2916 		    AAC_HOST_HIGH_RESP_ENTRIES);
2917 		ddi_put32(acc, &softs->qtablep-> \
2918 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
2919 		    AAC_ADAP_NORM_RESP_ENTRIES);
2920 		ddi_put32(acc, &softs->qtablep-> \
2921 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
2922 		    AAC_ADAP_NORM_RESP_ENTRIES);
2923 		ddi_put32(acc, &softs->qtablep-> \
2924 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
2925 		    AAC_ADAP_HIGH_RESP_ENTRIES);
2926 		ddi_put32(acc, &softs->qtablep-> \
2927 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
2928 		    AAC_ADAP_HIGH_RESP_ENTRIES);
2929 
2930 		/* Init queue entries */
2931 		softs->qentries[AAC_HOST_NORM_CMD_Q] =
2932 		    &softs->qtablep->qt_HostNormCmdQueue[0];
2933 		softs->qentries[AAC_HOST_HIGH_CMD_Q] =
2934 		    &softs->qtablep->qt_HostHighCmdQueue[0];
2935 		softs->qentries[AAC_ADAP_NORM_CMD_Q] =
2936 		    &softs->qtablep->qt_AdapNormCmdQueue[0];
2937 		softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
2938 		    &softs->qtablep->qt_AdapHighCmdQueue[0];
2939 		softs->qentries[AAC_HOST_NORM_RESP_Q] =
2940 		    &softs->qtablep->qt_HostNormRespQueue[0];
2941 		softs->qentries[AAC_HOST_HIGH_RESP_Q] =
2942 		    &softs->qtablep->qt_HostHighRespQueue[0];
2943 		softs->qentries[AAC_ADAP_NORM_RESP_Q] =
2944 		    &softs->qtablep->qt_AdapNormRespQueue[0];
2945 		softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
2946 		    &softs->qtablep->qt_AdapHighRespQueue[0];
2947 	}
2948 	(void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
2949 
2950 	/* Send init structure to the card */
2951 	if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
2952 	    comm_space_phyaddr + \
2953 	    offsetof(struct aac_comm_space, init_data),
2954 	    0, 0, 0, NULL) == AACERR) {
2955 		AACDB_PRINT(softs, CE_WARN,
2956 		    "Cannot send init structure to adapter");
2957 		return (AACERR);
2958 	}
2959 
2960 	return (AACOK);
2961 }
2962 
2963 static uchar_t *
2964 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
2965 {
2966 	(void) memset(buf, ' ', AAC_VENDOR_LEN);
2967 	bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
2968 	return (buf + AAC_VENDOR_LEN);
2969 }
2970 
2971 static uchar_t *
2972 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
2973 {
2974 	(void) memset(buf, ' ', AAC_PRODUCT_LEN);
2975 	bcopy(softs->product_name, buf, strlen(softs->product_name));
2976 	return (buf + AAC_PRODUCT_LEN);
2977 }
2978 
2979 /*
2980  * Construct unit serial number from container uid
2981  */
2982 static uchar_t *
2983 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
2984 {
2985 	int i, d;
2986 	uint32_t uid = softs->containers[tgt].uid;
2987 
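	/*
	 * Render the 32-bit container uid as 8 uppercase hex digits, most
	 * significant nibble first, e.g. uid 0x1A2B3C4D becomes "1A2B3C4D".
	 * The output is not NUL-terminated; a pointer just past the 8 bytes
	 * is returned.
	 */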
2988 	for (i = 7; i >= 0; i--) {
2989 		d = uid & 0xf;
2990 		buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
2991 		uid >>= 4;
2992 	}
2993 	return (buf + 8);
2994 }
2995 
2996 /*
2997  * SPC-3 7.5 INQUIRY command implementation
2998  */
2999 static void
3000 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3001     union scsi_cdb *cdbp, struct buf *bp)
3002 {
3003 	int tgt = pkt->pkt_address.a_target;
3004 	char *b_addr = NULL;
3005 	uchar_t page = cdbp->cdb_opaque[2];
3006 
3007 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3008 		/* Command Support Data is not supported */
3009 		aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3010 		return;
3011 	}
3012 
3013 	if (bp && bp->b_un.b_addr && bp->b_bcount) {
3014 		if (bp->b_flags & (B_PHYS | B_PAGEIO))
3015 			bp_mapin(bp);
3016 		b_addr = bp->b_un.b_addr;
3017 	}
3018 
3019 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3020 		uchar_t *vpdp = (uchar_t *)b_addr;
3021 		uchar_t *idp, *sp;
3022 
3023 		/* SPC-3 8.4 Vital product data parameters */
3024 		switch (page) {
3025 		case 0x00:
3026 			/* Supported VPD pages */
3027 			if (vpdp == NULL ||
3028 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3029 				return;
3030 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3031 			vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3032 			vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3033 
3034 			vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3035 			vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3036 			vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3037 
3038 			pkt->pkt_state |= STATE_XFERRED_DATA;
3039 			break;
3040 
3041 		case 0x80:
3042 			/* Unit serial number page */
3043 			if (vpdp == NULL ||
3044 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3045 				return;
3046 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3047 			vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3048 			vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3049 
3050 			sp = &vpdp[AAC_VPD_PAGE_DATA];
3051 			(void) aac_lun_serialno(softs, tgt, sp);
3052 
3053 			pkt->pkt_state |= STATE_XFERRED_DATA;
3054 			break;
3055 
3056 		case 0x83:
3057 			/* Device identification page */
3058 			if (vpdp == NULL ||
3059 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3060 				return;
3061 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3062 			vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3063 
3064 			idp = &vpdp[AAC_VPD_PAGE_DATA];
3065 			bzero(idp, AAC_VPD_ID_LENGTH);
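			/*
			 * Code set 0x02 = ASCII; identifier type 0x01 =
			 * T10 vendor ID based (SPC-3).
			 */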
3066 			idp[AAC_VPD_ID_CODESET] = 0x02;
3067 			idp[AAC_VPD_ID_TYPE] = 0x01;
3068 
3069 			/*
3070 			 * SPC-3 Table 111 - Identifier type
3071 			 * One recommended method of constructing the remainder
3072 			 * of the identifier field is to concatenate the product
3073 			 * identification field from the standard INQUIRY data
3074 			 * field and the product serial number field from the
3075 			 * unit serial number page.
3076 			 */
3077 			sp = &idp[AAC_VPD_ID_DATA];
3078 			sp = aac_vendor_id(softs, sp);
3079 			sp = aac_product_id(softs, sp);
3080 			sp = aac_lun_serialno(softs, tgt, sp);
3081 			idp[AAC_VPD_ID_LENGTH] = sp - &idp[AAC_VPD_ID_DATA];
3082 
3083 			vpdp[AAC_VPD_PAGE_LENGTH] =
3084 			    sp - &vpdp[AAC_VPD_PAGE_DATA];
3085 			pkt->pkt_state |= STATE_XFERRED_DATA;
3086 			break;
3087 
3088 		default:
3089 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3090 			    0x24, 0x00, 0);
3091 			break;
3092 		}
3093 	} else {
3094 		struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3095 		size_t len = sizeof (struct scsi_inquiry);
3096 
3097 		if (page != 0) {
3098 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3099 			    0x24, 0x00, 0);
3100 			return;
3101 		}
3102 		if (inqp == NULL || bp->b_bcount < len)
3103 			return;
3104 
3105 		bzero(inqp, len);
3106 		inqp->inq_len = AAC_ADDITIONAL_LEN;
3107 		inqp->inq_ansi = AAC_ANSI_VER;
3108 		inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3109 		(void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3110 		(void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3111 		bcopy("V1.0", inqp->inq_revision, 4);
3112 		inqp->inq_cmdque = 1; /* enable tagged-queuing */
3113 		/*
3114 		 * Set for the "sd-max-xfer-size" property, which may impact
3115 		 * performance as the number of I/O threads increases.
3116 		 */
3117 		inqp->inq_wbus32 = 1;
3118 
3119 		pkt->pkt_state |= STATE_XFERRED_DATA;
3120 	}
3121 }
3122 
3123 /*
3124  * SPC-3 7.10 MODE SENSE command implementation
3125  */
3126 static void
3127 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3128     union scsi_cdb *cdbp, struct buf *bp, int capacity)
3129 {
3130 	uchar_t pagecode;
3131 	struct mode_header *headerp;
3132 	struct mode_header_g1 *g1_headerp;
3133 	unsigned int ncyl;
3134 	caddr_t sense_data;
3135 	caddr_t next_page;
3136 	size_t sdata_size;
3137 	size_t pages_size;
3138 	int unsupport_page = 0;
3139 
3140 	ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3141 	    cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3142 
3143 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3144 		return;
3145 
3146 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
3147 		bp_mapin(bp);
3148 	pkt->pkt_state |= STATE_XFERRED_DATA;
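	/* Mask off the PC (page control) bits, keeping only the page code */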
3149 	pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3150 
3151 	/* Calculate the size of the needed buffer */
3152 	if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3153 		sdata_size = MODE_HEADER_LENGTH;
3154 	else /* must be SCMD_MODE_SENSE_G1 */
3155 		sdata_size = MODE_HEADER_LENGTH_G1;
3156 
3157 	pages_size = 0;
3158 	switch (pagecode) {
3159 	case SD_MODE_SENSE_PAGE3_CODE:
3160 		pages_size += sizeof (struct mode_format);
3161 		break;
3162 
3163 	case SD_MODE_SENSE_PAGE4_CODE:
3164 		pages_size += sizeof (struct mode_geometry);
3165 		break;
3166 
3167 	case MODEPAGE_CTRL_MODE:
3168 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3169 			pages_size += sizeof (struct mode_control_scsi3);
3170 		} else {
3171 			unsupport_page = 1;
3172 		}
3173 		break;
3174 
3175 	case MODEPAGE_ALLPAGES:
3176 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3177 			pages_size += sizeof (struct mode_format) +
3178 			    sizeof (struct mode_geometry) +
3179 			    sizeof (struct mode_control_scsi3);
3180 		} else {
3181 			pages_size += sizeof (struct mode_format) +
3182 			    sizeof (struct mode_geometry);
3183 		}
3184 		break;
3185 
3186 	default:
3187 		/* unsupported pages */
3188 		unsupport_page = 1;
3189 	}
3190 
3191 	/* Allocate a buffer to hold the sense data */
3192 	sdata_size += pages_size;
3193 	sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
3194 
3195 	if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
3196 		headerp = (struct mode_header *)sense_data;
3197 		headerp->length = MODE_HEADER_LENGTH + pages_size -
3198 		    sizeof (headerp->length);
3199 		headerp->bdesc_length = 0;
3200 		next_page = sense_data + sizeof (struct mode_header);
3201 	} else {
3202 		g1_headerp = (struct mode_header_g1 *)sense_data;
3203 		g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
3204 		    sizeof (g1_headerp->length));
3205 		g1_headerp->bdesc_length = 0;
3206 		next_page = sense_data + sizeof (struct mode_header_g1);
3207 	}
3208 
3209 	if (unsupport_page)
3210 		goto finish;
3211 
3212 	if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
3213 	    pagecode == MODEPAGE_ALLPAGES) {
3214 		/* SBC-3 7.1.3.3 Format device page */
3215 		struct mode_format *page3p;
3216 
3217 		page3p = (struct mode_format *)next_page;
3218 		page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
3219 		page3p->mode_page.length = sizeof (struct mode_format);
3220 		page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
3221 		page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
3222 
3223 		next_page += sizeof (struct mode_format);
3224 	}
3225 
3226 	if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
3227 	    pagecode == MODEPAGE_ALLPAGES) {
3228 		/* SBC-3 7.1.3.8 Rigid disk device geometry page */
3229 		struct mode_geometry *page4p;
3230 
3231 		page4p = (struct mode_geometry *)next_page;
3232 		page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
3233 		page4p->mode_page.length = sizeof (struct mode_geometry);
3234 		page4p->heads = AAC_NUMBER_OF_HEADS;
3235 		page4p->rpm = BE_16(AAC_ROTATION_SPEED);
3236 		ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
3237 		page4p->cyl_lb = ncyl & 0xff;
3238 		page4p->cyl_mb = (ncyl >> 8) & 0xff;
3239 		page4p->cyl_ub = (ncyl >> 16) & 0xff;
3240 
3241 		next_page += sizeof (struct mode_geometry);
3242 	}
3243 
3244 	if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
3245 	    softs->flags & AAC_FLAGS_LBA_64BIT) {
3246 		/* 64-bit LBA need large sense data */
3247 		struct mode_control_scsi3 *mctl;
3248 
3249 		mctl = (struct mode_control_scsi3 *)next_page;
3250 		mctl->mode_page.code = MODEPAGE_CTRL_MODE;
3251 		mctl->mode_page.length =
3252 		    sizeof (struct mode_control_scsi3) -
3253 		    sizeof (struct mode_page);
3254 		mctl->d_sense = 1;
3255 	}
3256 
3257 finish:
3258 	/* copyout the valid data. */
3259 	bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
3260 	kmem_free(sense_data, sdata_size);
3261 }
3262 
3263 /*ARGSUSED*/
3264 static int
3265 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3266     scsi_hba_tran_t *tran, struct scsi_device *sd)
3267 {
3268 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
3269 #if defined(DEBUG) || defined(__lock_lint)
3270 	int ctl = ddi_get_instance(softs->devinfo_p);
3271 #endif
3272 	int tgt = sd->sd_address.a_target;
3273 	int lun = sd->sd_address.a_lun;
3274 	struct aac_container *dvp;
3275 
3276 	DBCALLED(softs, 2);
3277 
3278 	if ((0 > tgt) || (tgt >= AAC_MAX_LD)) {
3279 		AACDB_PRINT(softs, CE_NOTE,
3280 		    "aac_tran_tgt_init: c%t%dL%d out", ctl, tgt, lun);
3281 		return (DDI_FAILURE);
3282 	}
3283 
3284 	/*
3285 	 * Only support container that has been detected and valid
3286 	 */
3287 	mutex_enter(&softs->io_lock);
3288 	dvp = &softs->containers[tgt];
3289 	if (dvp->valid && lun == 0) {
3290 		AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d ok",
3291 		    ctl, tgt, lun);
3292 		mutex_exit(&softs->io_lock);
3293 		return (DDI_SUCCESS);
3294 	} else {
3295 		AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%t%dL%d",
3296 		    ctl, tgt, lun);
3297 		mutex_exit(&softs->io_lock);
3298 		return (DDI_FAILURE);
3299 	}
3300 }
3301 
3302 /*
3303  * Check if the firmware is Up And Running. If it is in the Kernel Panic
3304  * state, (BlinkLED code + 1) is returned.
3305  *    0 -- firmware up and running
3306  *   -1 -- firmware dead
3307  *   >0 -- firmware kernel panic
3308  */
3309 static int
3310 aac_check_adapter_health(struct aac_softstate *softs)
3311 {
3312 	int rval;
3313 
3314 	rval = PCI_MEM_GET32(softs, AAC_OMR0);
3315 
3316 	if (rval & AAC_KERNEL_UP_AND_RUNNING) {
3317 		rval = 0;
3318 	} else if (rval & AAC_KERNEL_PANIC) {
3319 		cmn_err(CE_WARN, "firmware panic");
3320 		rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
3321 	} else {
3322 		cmn_err(CE_WARN, "firmware dead");
3323 		rval = -1;
3324 	}
3325 	return (rval);
3326 }
3327 
3328 static void
3329 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
3330     uchar_t reason)
3331 {
3332 	acp->flags |= AAC_CMD_ABORT;
3333 
3334 	if (acp->pkt) {
3335 		/*
3336 		 * Each lun should generate a unit attention
3337 		 * condition when reset.
3338 		 * Phys. drives are treated as logical ones
3339 		 * during error recovery.
3340 		 */
3341 		if (softs->flags & AAC_STATE_RESET)
3342 			aac_set_arq_data_reset(softs, acp);
3343 
3344 		switch (reason) {
3345 		case CMD_TIMEOUT:
3346 			aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
3347 			    STAT_TIMEOUT | STAT_BUS_RESET);
3348 			break;
3349 		case CMD_RESET:
3350 			/* aac support only RESET_ALL */
3351 			aac_set_pkt_reason(softs, acp, CMD_RESET,
3352 			    STAT_BUS_RESET);
3353 			break;
3354 		case CMD_ABORTED:
3355 			aac_set_pkt_reason(softs, acp, CMD_ABORTED,
3356 			    STAT_ABORTED);
3357 			break;
3358 		}
3359 	}
3360 	aac_end_io(softs, acp);
3361 }
3362 
3363 /*
3364  * Abort all the pending commands of type iocmd or just the command pkt
3365  * corresponding to pkt
3366  */
3367 static void
3368 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
3369     int reason)
3370 {
3371 	struct aac_cmd *ac_arg, *acp;
3372 	int i;
3373 
3374 	if (pkt == NULL) {
3375 		ac_arg = NULL;
3376 	} else {
3377 		ac_arg = PKT2AC(pkt);
3378 		iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
3379 		    AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
3380 	}
3381 
3382 	/*
3383 	 * a) outstanding commands on the controller
3384 	 * Note: should abort outstanding commands only after one
3385 	 * IOP reset has been done.
3386 	 */
3387 	if (iocmd & AAC_IOCMD_OUTSTANDING) {
3388 		struct aac_cmd *acp;
3389 
3390 		for (i = 0; i < AAC_MAX_LD; i++) {
3391 			if (softs->containers[i].valid)
3392 				softs->containers[i].reset = 1;
3393 		}
3394 		while ((acp = softs->q_busy.q_head) != NULL)
3395 			aac_abort_iocmd(softs, acp, reason);
3396 	}
3397 
3398 	/* b) commands in the waiting queues */
3399 	for (i = 0; i < AAC_CMDQ_NUM; i++) {
3400 		if (iocmd & (1 << i)) {
3401 			if (ac_arg) {
3402 				aac_abort_iocmd(softs, ac_arg, reason);
3403 			} else {
3404 				while ((acp = softs->q_wait[i].q_head) != NULL)
3405 					aac_abort_iocmd(softs, acp, reason);
3406 			}
3407 		}
3408 	}
3409 }
3410 
3411 /*
3412  * The draining thread is shared among quiesce threads. It terminates
3413  * when the adapter is quiesced or stopped by aac_stop_drain().
3414  */
3415 static void
3416 aac_check_drain(void *arg)
3417 {
3418 	struct aac_softstate *softs = arg;
3419 
3420 	mutex_enter(&softs->io_lock);
3421 	if (softs->ndrains) {
3422 		/*
3423 		 * If both ASYNC and SYNC bus throttle are held,
3424 		 * wake up threads only when both are drained out.
3425 		 */
3426 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
3427 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
3428 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
3429 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
3430 			cv_broadcast(&softs->drain_cv);
3431 		else
3432 			softs->drain_timeid = timeout(aac_check_drain, softs,
3433 			    AAC_QUIESCE_TICK * drv_usectohz(1000000));
3434 	}
3435 	mutex_exit(&softs->io_lock);
3436 }
3437 
3438 /*
3439  * If not draining the outstanding cmds, drain them. Otherwise,
3440  * only update ndrains.
3441  */
3442 static void
3443 aac_start_drain(struct aac_softstate *softs)
3444 {
3445 	if (softs->ndrains == 0) {
3446 		softs->drain_timeid = timeout(aac_check_drain, softs,
3447 		    AAC_QUIESCE_TICK * drv_usectohz(1000000));
3448 	}
3449 	softs->ndrains++;
3450 }
3451 
3452 /*
3453  * Stop the draining thread when no other threads use it any longer.
3454  * Side effect: io_lock may be released in the middle.
3455  */
3456 static void
3457 aac_stop_drain(struct aac_softstate *softs)
3458 {
3459 	softs->ndrains--;
3460 	if (softs->ndrains == 0) {
3461 		if (softs->drain_timeid != 0) {
3462 			timeout_id_t tid = softs->drain_timeid;
3463 
3464 			softs->drain_timeid = 0;
3465 			mutex_exit(&softs->io_lock);
3466 			(void) untimeout(tid);
3467 			mutex_enter(&softs->io_lock);
3468 		}
3469 	}
3470 }
3471 
3472 /*
3473  * The following function comes from Adaptec:
3474  *
3475  * Once do an IOP reset, basically the driver have to re-initialize the card
3476  * as if up from a cold boot, and the driver is responsible for any IO that
3477  * is outstanding to the adapter at the time of the IOP RESET. And prepare
3478  * for IOP RESET by making the init code modular with the ability to call it
3479  * from multiple places.
3480  */
3481 static int
3482 aac_reset_adapter(struct aac_softstate *softs)
3483 {
3484 	int health;
3485 	uint32_t status;
3486 	int rval = AACERR;
3487 
3488 	DBCALLED(softs, 1);
3489 
3490 	ASSERT(softs->state & AAC_STATE_RESET);
3491 
3492 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
3493 	/* Disable interrupt */
3494 	AAC_DISABLE_INTR(softs);
3495 
3496 	health = aac_check_adapter_health(softs);
3497 	if (health == -1) {
3498 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
3499 		goto finish;
3500 	}
3501 	if (health == 0) /* flush drives if possible */
3502 		(void) aac_shutdown(softs);
3503 
3504 	/* Execute IOP reset */
3505 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
3506 	    &status)) != AACOK) {
3507 		ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3508 		struct aac_fib *fibp;
3509 		struct aac_pause_command *pc;
3510 
3511 		if ((status & 0xf) == 0xf) {
3512 			uint32_t wait_count;
3513 
3514 			/*
3515 			 * Sunrise Lake has dual cores and we must drag the
3516 			 * other core with us to reset simultaneously. There
3517 			 * are 2 bits in the Inbound Reset Control and Status
3518 			 * Register (offset 0x38) of the Sunrise Lake to reset
3519 			 * the chip without clearing out the PCI configuration
3520 			 * info (COMMAND & BARS).
3521 			 */
3522 			PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3523 
3524 			/*
3525 			 * We need to wait for 5 seconds before accessing the MU
3526 			 * again 10000 * 100us = 1000,000us = 1000ms = 1s
3527 			 */
3528 			wait_count = 5 * 10000;
3529 			while (wait_count) {
3530 				drv_usecwait(100); /* delay 100 microseconds */
3531 				wait_count--;
3532 			}
3533 		} else {
3534 			if (status == SRB_STATUS_INVALID_REQUEST)
3535 				cmn_err(CE_WARN, "!IOP_RESET not supported");
3536 			else /* probably timeout */
3537 				cmn_err(CE_WARN, "!IOP_RESET failed");
3538 
3539 			/* Unwind aac_shutdown() */
3540 			fibp = softs->sync_slot.fibp;
3541 			pc = (struct aac_pause_command *)&fibp->data[0];
3542 
3543 			bzero(pc, sizeof (*pc));
3544 			ddi_put32(acc, &pc->Command, VM_ContainerConfig);
3545 			ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
3546 			ddi_put32(acc, &pc->Timeout, 1);
3547 			ddi_put32(acc, &pc->Min, 1);
3548 			ddi_put32(acc, &pc->NoRescan, 1);
3549 
3550 			(void) aac_sync_fib(softs, ContainerCommand,
3551 			    AAC_FIB_SIZEOF(struct aac_pause_command));
3552 
3553 			ddi_fm_service_impact(softs->devinfo_p,
3554 			    DDI_SERVICE_LOST);
3555 			goto finish;
3556 		}
3557 	}
3558 
3559 	/*
3560 	 * Re-read and renegotiate the FIB parameters, as one of the actions
3561 	 * that can result from an IOP reset is the running of a new firmware
3562 	 * image.
3563 	 */
3564 	if (aac_common_attach(softs) != AACOK)
3565 		goto finish;
3566 
3567 	rval = AACOK;
3568 
3569 finish:
3570 	AAC_ENABLE_INTR(softs);
3571 	return (rval);
3572 }
3573 
3574 static void
3575 aac_set_throttle(struct aac_softstate *softs, struct aac_container *dvp, int q,
3576     int throttle)
3577 {
3578 	/*
3579 	 * If the bus is draining/quiesced, no changes to the throttles
3580 	 * are allowed. All throttles should have been set to 0.
3581 	 */
3582 	if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
3583 		return;
3584 	dvp->throttle[q] = throttle;
3585 }
3586 
3587 static void
3588 aac_hold_bus(struct aac_softstate *softs, int iocmds)
3589 {
3590 	int i, q;
3591 
3592 	/* Hold bus by holding every device on the bus */
3593 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
3594 		if (iocmds & (1 << q)) {
3595 			softs->bus_throttle[q] = 0;
3596 			for (i = 0; i < AAC_MAX_LD; i++)
3597 				aac_set_throttle(softs, &softs->containers[i],
3598 				    q, 0);
3599 		}
3600 	}
3601 }
3602 
3603 static void
3604 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
3605 {
3606 	int i, q;
3607 
3608 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
3609 		if (iocmds & (1 << q)) {
3610 			/*
3611 			 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been
3612 			 * quiesced or being drained by possibly some quiesce
3613 			 * threads.
3614 			 */
3615 			if (q == AAC_CMDQ_ASYNC && ((softs->state &
3616 			    AAC_STATE_QUIESCED) || softs->ndrains))
3617 				continue;
3618 			softs->bus_throttle[q] = softs->total_slots;
3619 			for (i = 0; i < AAC_MAX_LD; i++)
3620 				aac_set_throttle(softs, &softs->containers[i],
3621 				    q, softs->total_slots);
3622 		}
3623 	}
3624 }
3625 
3626 static int
3627 aac_do_reset(struct aac_softstate *softs)
3628 {
3629 	int health;
3630 	int rval;
3631 
3632 	softs->state |= AAC_STATE_RESET;
3633 	health = aac_check_adapter_health(softs);
3634 
3635 	/*
3636 	 * Hold off new io commands and wait all outstanding io
3637 	 * commands to complete.
3638 	 */
3639 	if (health == 0 && (softs->bus_ncmds[AAC_CMDQ_SYNC] ||
3640 	    softs->bus_ncmds[AAC_CMDQ_ASYNC])) {
3641 		/*
3642 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
3643 		 * to complete the outstanding io commands
3644 		 */
3645 		int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
3646 		int (*intr_handler)(struct aac_softstate *);
3647 
3648 		aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
3649 		/*
3650 		 * Poll the adapter by ourselves in case interrupt is disabled
3651 		 * and to avoid releasing the io_lock.
3652 		 */
3653 		intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
3654 		    aac_process_intr_new : aac_process_intr_old;
3655 		while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
3656 		    softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
3657 			drv_usecwait(100);
3658 			(void) intr_handler(softs);
3659 			timeout--;
3660 		}
3661 		aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
3662 	}
3663 
3664 	/*
3665 	 * If a longer waiting time still can't drain all outstanding io
3666 	 * commands, do IOP reset.
3667 	 */
3668 	if (softs->bus_ncmds[AAC_CMDQ_SYNC] ||
3669 	    softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
3670 		if ((rval = aac_reset_adapter(softs)) != AACOK)
3671 			softs->state |= AAC_STATE_DEAD;
3672 	} else {
3673 		rval = AACOK;
3674 	}
3675 
3676 	softs->state &= ~AAC_STATE_RESET;
3677 	return (rval);
3678 }
3679 
3680 static int
3681 aac_tran_reset(struct scsi_address *ap, int level)
3682 {
3683 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
3684 	int rval;
3685 
3686 	DBCALLED(softs, 1);
3687 
3688 	if (level != RESET_ALL) {
3689 		cmn_err(CE_NOTE, "!reset target/lun not supported");
3690 		return (0);
3691 	}
3692 
3693 	mutex_enter(&softs->io_lock);
3694 	rval = (aac_do_reset(softs) == AACOK) ? 1 : 0;
3695 	if (rval == 1 && !ddi_in_panic()) {
3696 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
3697 		    NULL, CMD_RESET);
3698 		aac_start_waiting_io(softs);
3699 	} else {
3700 		/* Abort IOCTL cmds when system panic or adapter dead */
3701 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
3702 	}
3703 	mutex_exit(&softs->io_lock);
3704 
3705 	aac_drain_comp_q(softs);
3706 	return (rval);
3707 }
3708 
3709 static int
3710 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
3711 {
3712 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
3713 
3714 	DBCALLED(softs, 1);
3715 
3716 	mutex_enter(&softs->io_lock);
3717 	aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
3718 	mutex_exit(&softs->io_lock);
3719 
3720 	aac_drain_comp_q(softs);
3721 	return (1);
3722 }
3723 
3724 void
3725 aac_free_dmamap(struct aac_cmd *acp)
3726 {
3727 	/* Free dma mapping */
3728 	if (acp->flags & AAC_CMD_DMA_VALID) {
3729 		ASSERT(acp->buf_dma_handle);
3730 		(void) ddi_dma_unbind_handle(acp->buf_dma_handle);
3731 		acp->flags &= ~AAC_CMD_DMA_VALID;
3732 	}
3733 
3734 	if (acp->abp != NULL) { /* free non-aligned buf DMA */
3735 		ASSERT(acp->buf_dma_handle);
3736 		if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
3737 			ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
3738 			    (uint8_t *)acp->abp, acp->bp->b_bcount,
3739 			    DDI_DEV_AUTOINCR);
3740 		ddi_dma_mem_free(&acp->abh);
3741 		acp->abp = NULL;
3742 	}
3743 
3744 	if (acp->buf_dma_handle) {
3745 		ddi_dma_free_handle(&acp->buf_dma_handle);
3746 		acp->buf_dma_handle = NULL;
3747 	}
3748 }
3749 
3750 static void
3751 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
3752 {
3753 	AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
3754 	    ((union scsi_cdb *)acp->pkt->pkt_cdbp)->scc_cmd);
3755 	aac_free_dmamap(acp);
3756 	aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
3757 	aac_soft_callback(softs, acp);
3758 }
3759 
3760 /*
3761  * Handle command to logical device
3762  */
3763 static int
3764 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
3765 {
3766 	struct aac_container *dvp;
3767 	struct scsi_pkt *pkt;
3768 	union scsi_cdb *cdbp;
3769 	struct buf *bp;
3770 	int rval;
3771 
3772 	dvp = acp->dvp;
3773 	pkt = acp->pkt;
3774 	cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
3775 	bp = acp->bp;
3776 
3777 	switch (cdbp->scc_cmd) {
3778 	case SCMD_INQUIRY: /* inquiry */
3779 		aac_free_dmamap(acp);
3780 		aac_inquiry(softs, pkt, cdbp, bp);
3781 		aac_soft_callback(softs, acp);
3782 		rval = TRAN_ACCEPT;
3783 		break;
3784 
3785 	case SCMD_READ_CAPACITY: /* read capacity */
3786 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
3787 			struct scsi_capacity cap;
3788 			uint64_t last_lba;
3789 
3790 			/* check 64-bit LBA */
3791 			last_lba = dvp->size - 1;
3792 			if (last_lba > 0xffffffffull) {
3793 				cap.capacity = 0xfffffffful;
3794 			} else {
3795 				cap.capacity = BE_32(last_lba);
3796 			}
3797 			cap.lbasize = BE_32(AAC_SECTOR_SIZE);
3798 
3799 			aac_free_dmamap(acp);
3800 			if (bp->b_flags & (B_PHYS|B_PAGEIO))
3801 				bp_mapin(bp);
3802 			bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
3803 			pkt->pkt_state |= STATE_XFERRED_DATA;
3804 		}
3805 		aac_soft_callback(softs, acp);
3806 		rval = TRAN_ACCEPT;
3807 		break;
3808 
3809 	case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
3810 		/* Check if containers need 64-bit LBA support */
3811 		if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
3812 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
3813 				struct scsi_capacity_16 cap16;
3814 				int cap_len = sizeof (struct scsi_capacity_16);
3815 
3816 				bzero(&cap16, cap_len);
3817 				cap16.sc_capacity = BE_64(dvp->size);
3818 				cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
3819 
3820 				aac_free_dmamap(acp);
3821 				if (bp->b_flags & (B_PHYS | B_PAGEIO))
3822 					bp_mapin(bp);
3823 				bcopy(&cap16, bp->b_un.b_addr,
3824 				    min(bp->b_bcount, cap_len));
3825 				pkt->pkt_state |= STATE_XFERRED_DATA;
3826 			}
3827 			aac_soft_callback(softs, acp);
3828 		} else {
3829 			aac_unknown_scmd(softs, acp);
3830 		}
3831 		rval = TRAN_ACCEPT;
3832 		break;
3833 
3834 	case SCMD_READ_G4: /* read_16 */
3835 	case SCMD_WRITE_G4: /* write_16 */
3836 		if (softs->flags & AAC_FLAGS_RAW_IO) {
3837 			/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
3838 			acp->blkno = ((uint64_t) \
3839 			    GETG4ADDR(cdbp) << 32) | \
3840 			    (uint32_t)GETG4ADDRTL(cdbp);
3841 			goto do_io;
3842 		}
3843 		AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
3844 		aac_unknown_scmd(softs, acp);
3845 		rval = TRAN_ACCEPT;
3846 		break;
3847 
3848 	case SCMD_READ: /* read_6 */
3849 	case SCMD_WRITE: /* write_6 */
3850 		acp->blkno = GETG0ADDR(cdbp);
3851 		goto do_io;
3852 
3853 	case SCMD_READ_G1: /* read_10 */
3854 	case SCMD_WRITE_G1: /* write_10 */
3855 		acp->blkno = (uint32_t)GETG1ADDR(cdbp);
3856 do_io:
3857 		if (acp->flags & AAC_CMD_DMA_VALID) {
3858 			uint64_t cnt_size = dvp->size;
3859 
3860 			/*
3861 			 * If LBA > array size AND rawio, the
3862 			 * adapter may hang. So check it before
3863 			 * sending.
3864 			 * NOTE: (blkno + blkcnt) may overflow
3865 			 */
3866 			if ((acp->blkno < cnt_size) &&
3867 			    ((acp->blkno + acp->bcount /
3868 			    AAC_BLK_SIZE) <= cnt_size)) {
3869 				rval = aac_do_io(softs, acp);
3870 			} else {
3871 			/*
3872 			 * Request exceeds the capacity of disk,
3873 			 * set error block number to last LBA
3874 			 * + 1.
3875 			 */
3876 				aac_set_arq_data(pkt,
3877 				    KEY_ILLEGAL_REQUEST, 0x21,
3878 				    0x00, cnt_size);
3879 				aac_soft_callback(softs, acp);
3880 				rval = TRAN_ACCEPT;
3881 			}
3882 		} else if (acp->bcount == 0) {
3883 			/* For 0 length IO, just return ok */
3884 			aac_soft_callback(softs, acp);
3885 			rval = TRAN_ACCEPT;
3886 		} else {
3887 			rval = TRAN_BADPKT;
3888 		}
3889 		break;
3890 
3891 	case SCMD_MODE_SENSE: /* mode_sense_6 */
3892 	case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
3893 		int capacity;
3894 
3895 		aac_free_dmamap(acp);
3896 		if (dvp->size > 0xffffffffull)
3897 			capacity = 0xfffffffful; /* 64-bit LBA */
3898 		else
3899 			capacity = dvp->size;
3900 		aac_mode_sense(softs, pkt, cdbp, bp, capacity);
3901 		aac_soft_callback(softs, acp);
3902 		rval = TRAN_ACCEPT;
3903 		break;
3904 	}
3905 
3906 	case SCMD_TEST_UNIT_READY:
3907 	case SCMD_REQUEST_SENSE:
3908 	case SCMD_FORMAT:
3909 	case SCMD_START_STOP:
3910 		aac_free_dmamap(acp);
3911 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
3912 			if (acp->flags & AAC_CMD_BUF_READ) {
3913 				if (bp->b_flags & (B_PHYS|B_PAGEIO))
3914 					bp_mapin(bp);
3915 				bzero(bp->b_un.b_addr, bp->b_bcount);
3916 			}
3917 			pkt->pkt_state |= STATE_XFERRED_DATA;
3918 		}
3919 		aac_soft_callback(softs, acp);
3920 		rval = TRAN_ACCEPT;
3921 		break;
3922 
3923 	case SCMD_SYNCHRONIZE_CACHE:
3924 		acp->flags |= AAC_CMD_NTAG;
3925 		acp->aac_cmd_fib = aac_cmd_fib_sync;
3926 		acp->ac_comp = aac_synccache_complete;
3927 		rval = aac_do_io(softs, acp);
3928 		break;
3929 
3930 	case SCMD_DOORLOCK:
3931 		aac_free_dmamap(acp);
3932 		dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
3933 		aac_soft_callback(softs, acp);
3934 		rval = TRAN_ACCEPT;
3935 		break;
3936 
3937 	default: /* unknown command */
3938 		aac_unknown_scmd(softs, acp);
3939 		rval = TRAN_ACCEPT;
3940 		break;
3941 	}
3942 
3943 	return (rval);
3944 }
3945 
3946 static int
3947 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
3948 {
3949 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
3950 	struct aac_cmd *acp = PKT2AC(pkt);
3951 	struct aac_container *dvp = acp->dvp;
3952 	int rval;
3953 
3954 	DBCALLED(softs, 2);
3955 
3956 	/*
3957 	 * Reinitialize some fields of ac and pkt; the packet may
3958 	 * have been resubmitted
3959 	 */
3960 	acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
3961 	    AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
3962 	acp->timeout = acp->pkt->pkt_time;
3963 	if (pkt->pkt_flags & FLAG_NOINTR)
3964 		acp->flags |= AAC_CMD_NO_INTR;
3965 	pkt->pkt_reason = CMD_CMPLT;
3966 	pkt->pkt_state = 0;
3967 	pkt->pkt_statistics = 0;
3968 	*pkt->pkt_scbp = 0; /* clear arq scsi_status */
3969 
3970 	if (acp->flags & AAC_CMD_DMA_VALID) {
3971 		pkt->pkt_resid = acp->bcount;
3972 		/* Consistent packets need to be sync'ed first */
3973 		if ((acp->flags & AAC_CMD_CONSISTENT) &&
3974 		    (acp->flags & AAC_CMD_BUF_WRITE))
3975 			if (aac_dma_sync_ac(acp) != AACOK) {
3976 				ddi_fm_service_impact(softs->devinfo_p,
3977 				    DDI_SERVICE_UNAFFECTED);
3978 				return (TRAN_BADPKT);
3979 			}
3980 	} else {
3981 		pkt->pkt_resid = 0;
3982 	}
3983 
3984 	mutex_enter(&softs->io_lock);
3985 	AACDB_PRINT_SCMD(softs, acp);
3986 	if (dvp->valid && ap->a_lun == 0 && !(softs->state & AAC_STATE_DEAD)) {
3987 		rval = aac_tran_start_ld(softs, acp);
3988 	} else {
3989 		AACDB_PRINT(softs, CE_WARN,
3990 		    "Cannot send cmd to target t%dL%d: %s",
3991 		    ap->a_target, ap->a_lun,
3992 		    (softs->state & AAC_STATE_DEAD) ?
3993 		    "adapter dead" : "target invalid");
3994 		rval = TRAN_FATAL_ERROR;
3995 	}
3996 	mutex_exit(&softs->io_lock);
3997 	return (rval);
3998 }
3999 
4000 static int
4001 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4002 {
4003 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4004 	struct aac_container *dvp;
4005 	int rval;
4006 
4007 	DBCALLED(softs, 2);
4008 
4009 	/* We don't allow inquiring about capabilities for other targets */
4010 	if (cap == NULL || whom == 0) {
4011 		AACDB_PRINT(softs, CE_WARN,
4012 		    "GetCap> %s not supported: whom=%d", cap, whom);
4013 		return (-1);
4014 	}
4015 
4016 	mutex_enter(&softs->io_lock);
4017 	dvp = &softs->containers[ap->a_target];
4018 	if (!dvp->valid || (ap->a_lun != 0)) {
4019 		mutex_exit(&softs->io_lock);
4020 		AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to getcap",
4021 		    ap->a_target, ap->a_lun);
4022 		return (-1);
4023 	}
4024 
4025 	switch (scsi_hba_lookup_capstr(cap)) {
4026 	case SCSI_CAP_ARQ: /* auto request sense */
4027 		rval = 1;
4028 		break;
4029 	case SCSI_CAP_UNTAGGED_QING:
4030 	case SCSI_CAP_TAGGED_QING:
4031 		rval = 1;
4032 		break;
4033 	case SCSI_CAP_DMA_MAX:
4034 		rval = softs->buf_dma_attr.dma_attr_maxxfer;
4035 		break;
4036 	default:
4037 		rval = -1;
4038 		break;
4039 	}
4040 	mutex_exit(&softs->io_lock);
4041 
4042 	AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
4043 	    cap, ap->a_target, ap->a_lun, rval);
4044 	return (rval);
4045 }
4046 
4047 /*ARGSUSED*/
4048 static int
4049 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4050 {
4051 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4052 	struct aac_container *dvp;
4053 	int rval;
4054 
4055 	DBCALLED(softs, 2);
4056 
4057 	/* We don't allow inquiring about capabilities for other targets */
4058 	if (cap == NULL || whom == 0) {
4059 		AACDB_PRINT(softs, CE_WARN,
4060 		    "SetCap> %s not supported: whom=%d", cap, whom);
4061 		return (-1);
4062 	}
4063 
4064 	mutex_enter(&softs->io_lock);
4065 	dvp = &softs->containers[ap->a_target];
4066 	if (!dvp->valid || (ap->a_lun != 0)) {
4067 		mutex_exit(&softs->io_lock);
4068 		AACDB_PRINT(softs, CE_WARN, "Bad target t%dL%d to setcap",
4069 		    ap->a_target, ap->a_lun);
4070 		return (-1);
4071 	}
4072 
4073 	switch (scsi_hba_lookup_capstr(cap)) {
4074 	case SCSI_CAP_ARQ:
4075 		/* Force auto request sense */
4076 		rval = (value == 1) ? 1 : 0;
4077 		break;
4078 	case SCSI_CAP_UNTAGGED_QING:
4079 	case SCSI_CAP_TAGGED_QING:
4080 		rval = (value == 1) ? 1 : 0;
4081 		break;
4082 	default:
4083 		rval = -1;
4084 		break;
4085 	}
4086 	mutex_exit(&softs->io_lock);
4087 
4088 	AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
4089 	    cap, ap->a_target, ap->a_lun, value, rval);
4090 	return (rval);
4091 }
4092 
4093 static void
4094 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4095 {
4096 	struct aac_cmd *acp = PKT2AC(pkt);
4097 
4098 	DBCALLED(NULL, 2);
4099 
4100 	if (acp->sgt) {
4101 		kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4102 		    acp->left_cookien);
4103 	}
4104 	aac_free_dmamap(acp);
4105 	ASSERT(acp->slotp == NULL);
4106 	scsi_hba_pkt_free(ap, pkt);
4107 }
4108 
4109 int
4110 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
4111     struct buf *bp, int flags, int (*cb)(), caddr_t arg)
4112 {
4113 	int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
4114 	uint_t oldcookiec;
4115 	int bioerr;
4116 	int rval;
4117 
4118 	oldcookiec = acp->left_cookien;
4119 
4120 	/* Move window to build s/g map */
4121 	if (acp->total_nwin > 0) {
4122 		if (++acp->cur_win < acp->total_nwin) {
4123 			off_t off;
4124 			size_t len;
4125 
4126 			rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
4127 			    &off, &len, &acp->cookie, &acp->left_cookien);
4128 			if (rval == DDI_SUCCESS)
4129 				goto get_dma_cookies;
4130 			AACDB_PRINT(softs, CE_WARN,
4131 			    "ddi_dma_getwin() fail %d", rval);
4132 			return (NULL);
4133 		}
4134 		AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
4135 		return (NULL);
4136 	}
4137 
4138 	/* We need to transfer data, so we alloc DMA resources for this pkt */
4139 	if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
4140 		uint_t dma_flags = 0;
4141 		struct aac_sge *sge;
4142 
4143 		/*
4144 		 * We will still use this point to fake some
4145 		 * infomation in tran_start
4146 		 */
4147 		acp->bp = bp;
4148 
4149 		/* Set dma flags */
4150 		if (BUF_IS_READ(bp)) {
4151 			dma_flags |= DDI_DMA_READ;
4152 			acp->flags |= AAC_CMD_BUF_READ;
4153 		} else {
4154 			dma_flags |= DDI_DMA_WRITE;
4155 			acp->flags |= AAC_CMD_BUF_WRITE;
4156 		}
4157 		if (flags & PKT_CONSISTENT)
4158 			dma_flags |= DDI_DMA_CONSISTENT;
4159 		if (flags & PKT_DMA_PARTIAL)
4160 			dma_flags |= DDI_DMA_PARTIAL;
4161 
4162 		/* Alloc buf dma handle */
4163 		if (!acp->buf_dma_handle) {
4164 			rval = ddi_dma_alloc_handle(softs->devinfo_p,
4165 			    &softs->buf_dma_attr, cb, arg,
4166 			    &acp->buf_dma_handle);
4167 			if (rval != DDI_SUCCESS) {
4168 				AACDB_PRINT(softs, CE_WARN,
4169 				    "Can't allocate DMA handle, errno=%d",
4170 				    rval);
4171 				goto error_out;
4172 			}
4173 		}
4174 
4175 		/* Bind buf */
4176 		if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
4177 			rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
4178 			    bp, dma_flags, cb, arg, &acp->cookie,
4179 			    &acp->left_cookien);
4180 		} else {
4181 			size_t bufsz;
4182 
4183 			AACDB_PRINT_TRAN(softs,
4184 			    "non-aligned buffer: addr=0x%p, cnt=%lu",
4185 			    (void *)bp->b_un.b_addr, bp->b_bcount);
4186 			if (bp->b_flags & (B_PAGEIO|B_PHYS))
4187 				bp_mapin(bp);
4188 
4189 			rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
4190 			    AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
4191 			    &aac_acc_attr, DDI_DMA_STREAMING,
4192 			    cb, arg, &acp->abp, &bufsz, &acp->abh);
4193 
4194 			if (rval != DDI_SUCCESS) {
4195 				AACDB_PRINT(softs, CE_NOTE,
4196 				    "Cannot alloc DMA to non-aligned buf");
4197 				bioerr = 0;
4198 				goto error_out;
4199 			}
4200 
4201 			if (acp->flags & AAC_CMD_BUF_WRITE)
4202 				ddi_rep_put8(acp->abh,
4203 				    (uint8_t *)bp->b_un.b_addr,
4204 				    (uint8_t *)acp->abp, bp->b_bcount,
4205 				    DDI_DEV_AUTOINCR);
4206 
4207 			rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
4208 			    NULL, acp->abp, bufsz, dma_flags, cb, arg,
4209 			    &acp->cookie, &acp->left_cookien);
4210 		}
4211 
4212 		switch (rval) {
4213 		case DDI_DMA_PARTIAL_MAP:
4214 			if (ddi_dma_numwin(acp->buf_dma_handle,
4215 			    &acp->total_nwin) == DDI_FAILURE) {
4216 				AACDB_PRINT(softs, CE_WARN,
4217 				    "Cannot get number of DMA windows");
4218 				bioerr = 0;
4219 				goto error_out;
4220 			}
4221 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4222 			    acp->left_cookien);
4223 			acp->cur_win = 0;
4224 			break;
4225 
4226 		case DDI_DMA_MAPPED:
4227 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4228 			    acp->left_cookien);
4229 			acp->cur_win = 0;
4230 			acp->total_nwin = 1;
4231 			break;
4232 
4233 		case DDI_DMA_NORESOURCES:
4234 			bioerr = 0;
4235 			AACDB_PRINT(softs, CE_WARN,
4236 			    "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
4237 			goto error_out;
4238 		case DDI_DMA_BADATTR:
4239 		case DDI_DMA_NOMAPPING:
4240 			bioerr = EFAULT;
4241 			AACDB_PRINT(softs, CE_WARN,
4242 			    "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
4243 			goto error_out;
4244 		case DDI_DMA_TOOBIG:
4245 			bioerr = EINVAL;
4246 			AACDB_PRINT(softs, CE_WARN,
4247 			    "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
4248 			    bp->b_bcount);
4249 			goto error_out;
4250 		default:
4251 			bioerr = EINVAL;
4252 			AACDB_PRINT(softs, CE_WARN,
4253 			    "Cannot bind buf for DMA: %d", rval);
4254 			goto error_out;
4255 		}
4256 		acp->flags |= AAC_CMD_DMA_VALID;
4257 
4258 get_dma_cookies:
4259 		ASSERT(acp->left_cookien > 0);
4260 		if (acp->left_cookien > softs->aac_sg_tablesize) {
4261 			AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
4262 			    acp->left_cookien);
4263 			bioerr = EINVAL;
4264 			goto error_out;
4265 		}
4266 		if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
4267 			kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4268 			    oldcookiec);
4269 			acp->sgt = NULL;
4270 		}
4271 		if (acp->sgt == NULL) {
4272 			acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
4273 			    acp->left_cookien, kf);
4274 			if (acp->sgt == NULL) {
4275 				AACDB_PRINT(softs, CE_WARN,
4276 				    "sgt kmem_alloc fail");
4277 				bioerr = ENOMEM;
4278 				goto error_out;
4279 			}
4280 		}
4281 
4282 		sge = &acp->sgt[0];
4283 		sge->bcount = acp->cookie.dmac_size;
4284 		sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
4285 		sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
4286 		acp->bcount = acp->cookie.dmac_size;
4287 		for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
4288 			ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
4289 			sge->bcount = acp->cookie.dmac_size;
4290 			sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
4291 			sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
4292 			acp->bcount += acp->cookie.dmac_size;
4293 		}
4294 
4295 		/*
4296 		 * Note: The old DMA engine do not correctly handle
4297 		 * dma_attr_maxxfer attribute. So we have to ensure
4298 		 * it by ourself.
4299 		 */
4300 		if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
4301 			AACDB_PRINT(softs, CE_NOTE,
4302 			    "large xfer size received %d\n", acp->bcount);
4303 			bioerr = EINVAL;
4304 			goto error_out;
4305 		}
4306 
4307 		acp->total_xfer += acp->bcount;
4308 
4309 		if (acp->pkt) {
4310 			/* Return remaining byte count */
4311 			acp->pkt->pkt_resid = bp->b_bcount - acp->total_xfer;
4312 
4313 			AACDB_PRINT_TRAN(softs,
4314 			    "bp=0x%p, xfered=%d/%d, resid=%d",
4315 			    (void *)bp->b_un.b_addr, (int)acp->total_xfer,
4316 			    (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
4317 
4318 			ASSERT(acp->pkt->pkt_resid >= 0);
4319 		}
4320 	}
4321 	return (AACOK);
4322 
4323 error_out:
4324 	bioerror(bp, bioerr);
4325 	return (AACERR);
4326 }
4327 
4328 static struct scsi_pkt *
4329 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
4330     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
4331     int (*callback)(), caddr_t arg)
4332 {
4333 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4334 	struct aac_cmd *acp, *new_acp;
4335 
4336 	DBCALLED(softs, 2);
4337 
4338 	/* Allocate pkt */
4339 	if (pkt == NULL) {
4340 		int slen;
4341 
4342 		/* Force auto request sense */
4343 		slen = (statuslen > softs->slen) ? statuslen : softs->slen;
4344 		pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
4345 		    slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
4346 		if (pkt == NULL) {
4347 			AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
4348 			return (NULL);
4349 		}
4350 		acp = new_acp = PKT2AC(pkt);
4351 		acp->pkt = pkt;
4352 		acp->cmdlen = cmdlen;
4353 
4354 		acp->dvp = &softs->containers[ap->a_target];
4355 		acp->aac_cmd_fib = softs->aac_cmd_fib;
4356 		acp->ac_comp = aac_ld_complete;
4357 	} else {
4358 		acp = PKT2AC(pkt);
4359 		new_acp = NULL;
4360 	}
4361 
4362 	if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
4363 		return (pkt);
4364 
4365 	if (new_acp)
4366 		aac_tran_destroy_pkt(ap, pkt);
4367 	return (NULL);
4368 }
4369 
4370 /*
4371  * tran_sync_pkt(9E) - explicit DMA synchronization
4372  */
4373 /*ARGSUSED*/
4374 static void
4375 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4376 {
4377 	struct aac_cmd *acp = PKT2AC(pkt);
4378 
4379 	DBCALLED(NULL, 2);
4380 
4381 	if (aac_dma_sync_ac(acp) != AACOK)
4382 		ddi_fm_service_impact(
4383 		    (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
4384 		    DDI_SERVICE_UNAFFECTED);
4385 }
4386 
4387 /*
4388  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4389  */
4390 /*ARGSUSED*/
4391 static void
4392 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4393 {
4394 	struct aac_cmd *acp = PKT2AC(pkt);
4395 
4396 	DBCALLED(NULL, 2);
4397 
4398 	aac_free_dmamap(acp);
4399 }
4400 
4401 static int
4402 aac_do_quiesce(struct aac_softstate *softs)
4403 {
4404 	aac_hold_bus(softs, AAC_IOCMD_ASYNC);
4405 	if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
4406 		aac_start_drain(softs);
4407 		do {
4408 			if (cv_wait_sig(&softs->drain_cv,
4409 			    &softs->io_lock) == 0) {
4410 				/* Quiesce has been interrupted */
4411 				aac_stop_drain(softs);
4412 				aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
4413 				aac_start_waiting_io(softs);
4414 				return (AACERR);
4415 			}
4416 		} while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
4417 		aac_stop_drain(softs);
4418 	}
4419 
4420 	softs->state |= AAC_STATE_QUIESCED;
4421 	return (AACOK);
4422 }
4423 
4424 static int
4425 aac_tran_quiesce(dev_info_t *dip)
4426 {
4427 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
4428 	int rval;
4429 
4430 	DBCALLED(softs, 1);
4431 
4432 	mutex_enter(&softs->io_lock);
4433 	if (aac_do_quiesce(softs) == AACOK)
4434 		rval = 0;
4435 	else
4436 		rval = 1;
4437 	mutex_exit(&softs->io_lock);
4438 	return (rval);
4439 }
4440 
4441 static int
4442 aac_do_unquiesce(struct aac_softstate *softs)
4443 {
4444 	softs->state &= ~AAC_STATE_QUIESCED;
4445 	aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
4446 
4447 	aac_start_waiting_io(softs);
4448 	return (AACOK);
4449 }
4450 
4451 static int
4452 aac_tran_unquiesce(dev_info_t *dip)
4453 {
4454 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
4455 	int rval;
4456 
4457 	DBCALLED(softs, 1);
4458 
4459 	mutex_enter(&softs->io_lock);
4460 	if (aac_do_unquiesce(softs) == AACOK)
4461 		rval = 0;
4462 	else
4463 		rval = 1;
4464 	mutex_exit(&softs->io_lock);
4465 	return (rval);
4466 }
4467 
4468 static int
4469 aac_hba_setup(struct aac_softstate *softs)
4470 {
4471 	scsi_hba_tran_t *hba_tran;
4472 	int rval;
4473 
4474 	hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
4475 	if (hba_tran == NULL)
4476 		return (AACERR);
4477 	hba_tran->tran_hba_private = softs;
4478 	hba_tran->tran_tgt_init = aac_tran_tgt_init;
4479 	hba_tran->tran_tgt_probe = scsi_hba_probe;
4480 	hba_tran->tran_start = aac_tran_start;
4481 	hba_tran->tran_getcap = aac_tran_getcap;
4482 	hba_tran->tran_setcap = aac_tran_setcap;
4483 	hba_tran->tran_init_pkt = aac_tran_init_pkt;
4484 	hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
4485 	hba_tran->tran_reset = aac_tran_reset;
4486 	hba_tran->tran_abort = aac_tran_abort;
4487 	hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
4488 	hba_tran->tran_dmafree = aac_tran_dmafree;
4489 	hba_tran->tran_quiesce = aac_tran_quiesce;
4490 	hba_tran->tran_unquiesce = aac_tran_unquiesce;
4491 	rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
4492 	    hba_tran, 0);
4493 	if (rval != DDI_SUCCESS) {
4494 		scsi_hba_tran_free(hba_tran);
4495 		AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
4496 		return (AACERR);
4497 	}
4498 
4499 	return (AACOK);
4500 }
4501 
4502 /*
4503  * FIB setup operations
4504  */
4505 
4506 /*
4507  * Init FIB header
4508  */
4509 static void
4510 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp,
4511     uint16_t cmd, uint16_t fib_size)
4512 {
4513 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
4514 	struct aac_fib *fibp = slotp->fibp;
4515 	uint32_t xfer_state;
4516 
4517 	xfer_state =
4518 	    AAC_FIBSTATE_HOSTOWNED |
4519 	    AAC_FIBSTATE_INITIALISED |
4520 	    AAC_FIBSTATE_EMPTY |
4521 	    AAC_FIBSTATE_FROMHOST |
4522 	    AAC_FIBSTATE_REXPECTED |
4523 	    AAC_FIBSTATE_NORM;
4524 	if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) {
4525 		xfer_state |=
4526 		    AAC_FIBSTATE_ASYNC |
4527 		    AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */;
4528 		ddi_put16(acc, &fibp->Header.SenderSize,
4529 		    softs->aac_max_fib_size);
4530 	} else {
4531 		ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE);
4532 	}
4533 
4534 	ddi_put32(acc, &fibp->Header.XferState, xfer_state);
4535 	ddi_put16(acc, &fibp->Header.Command, cmd);
4536 	ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
4537 	ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
4538 	ddi_put16(acc, &fibp->Header.Size, fib_size);
4539 	ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
4540 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
4541 	ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
4542 }
4543 
4544 /*
4545  * Init FIB for raw IO command
4546  */
4547 static void
4548 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
4549 {
4550 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
4551 	struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
4552 	struct aac_sg_entryraw *sgp;
4553 	struct aac_sge *sge;
4554 
4555 	/* Calculate FIB size */
4556 	acp->fib_size = sizeof (struct aac_fib_header) + \
4557 	    sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
4558 	    sizeof (struct aac_sg_entryraw);
4559 
4560 	aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size);
4561 
4562 	ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
4563 	ddi_put16(acc, &io->BpTotal, 0);
4564 	ddi_put16(acc, &io->BpComplete, 0);
4565 
4566 	ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
4567 	ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
4568 	ddi_put16(acc, &io->ContainerId,
4569 	    ((struct aac_container *)acp->dvp)->cid);
4570 
4571 	/* Fill SG table */
4572 	ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
4573 	ddi_put32(acc, &io->ByteCount, acp->bcount);
4574 
4575 	for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
4576 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
4577 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
4578 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
4579 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
4580 		sgp->Next = 0;
4581 		sgp->Prev = 0;
4582 		sgp->Flags = 0;
4583 	}
4584 }
4585 
4586 /* Init FIB for 64-bit block IO command */
4587 static void
4588 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
4589 {
4590 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
4591 	struct aac_blockread64 *br = (struct aac_blockread64 *) \
4592 	    &acp->slotp->fibp->data[0];
4593 	struct aac_sg_entry64 *sgp;
4594 	struct aac_sge *sge;
4595 
4596 	acp->fib_size = sizeof (struct aac_fib_header) + \
4597 	    sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
4598 	    sizeof (struct aac_sg_entry64);
4599 
4600 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64,
4601 	    acp->fib_size);
4602 
4603 	/*
4604 	 * The definitions for aac_blockread64 and aac_blockwrite64
4605 	 * are the same.
4606 	 */
4607 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
4608 	ddi_put16(acc, &br->ContainerId,
4609 	    ((struct aac_container *)acp->dvp)->cid);
4610 	ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
4611 	    VM_CtHostRead64 : VM_CtHostWrite64);
4612 	ddi_put16(acc, &br->Pad, 0);
4613 	ddi_put16(acc, &br->Flags, 0);
4614 
4615 	/* Fill SG table */
4616 	ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
4617 	ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
4618 
4619 	for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
4620 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
4621 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
4622 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
4623 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
4624 	}
4625 }
4626 
4627 /* Init FIB for block IO command */
4628 static void
4629 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
4630 {
4631 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
4632 	struct aac_blockread *br = (struct aac_blockread *) \
4633 	    &acp->slotp->fibp->data[0];
4634 	struct aac_sg_entry *sgp;
4635 	struct aac_sge *sge = &acp->sgt[0];
4636 
4637 	if (acp->flags & AAC_CMD_BUF_READ) {
4638 		acp->fib_size = sizeof (struct aac_fib_header) + \
4639 		    sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
4640 		    sizeof (struct aac_sg_entry);
4641 
4642 		ddi_put32(acc, &br->Command, VM_CtBlockRead);
4643 		ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
4644 		sgp = &br->SgMap.SgEntry[0];
4645 	} else {
4646 		struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
4647 
4648 		acp->fib_size = sizeof (struct aac_fib_header) + \
4649 		    sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
4650 		    sizeof (struct aac_sg_entry);
4651 
4652 		ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
4653 		ddi_put32(acc, &bw->Stable, CUNSTABLE);
4654 		ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
4655 		sgp = &bw->SgMap.SgEntry[0];
4656 	}
4657 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size);
4658 
4659 	/*
4660 	 * aac_blockread and aac_blockwrite have the similar
4661 	 * structure head, so use br for bw here
4662 	 */
4663 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
4664 	ddi_put32(acc, &br->ContainerId,
4665 	    ((struct aac_container *)acp->dvp)->cid);
4666 	ddi_put32(acc, &br->ByteCount, acp->bcount);
4667 
4668 	/* Fill SG table */
4669 	for (sge = &acp->sgt[0];
4670 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
4671 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
4672 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
4673 	}
4674 }
4675 
4676 /*ARGSUSED*/
4677 void
4678 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
4679 {
4680 	struct aac_slot *slotp = acp->slotp;
4681 	struct aac_fib *fibp = slotp->fibp;
4682 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
4683 
4684 	ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
4685 	    acp->fib_size,   /* only copy data of needed length */
4686 	    DDI_DEV_AUTOINCR);
4687 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
4688 	ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
4689 }
4690 
4691 static void
4692 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
4693 {
4694 	struct aac_slot *slotp = acp->slotp;
4695 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
4696 	struct aac_synchronize_command *sync =
4697 	    (struct aac_synchronize_command *)&slotp->fibp->data[0];
4698 
4699 	acp->fib_size = sizeof (struct aac_fib_header) + \
4700 	    sizeof (struct aac_synchronize_command);
4701 
4702 	aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size);
4703 	ddi_put32(acc, &sync->Command, VM_ContainerConfig);
4704 	ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
4705 	ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
4706 	ddi_put32(acc, &sync->Count,
4707 	    sizeof (((struct aac_synchronize_reply *)0)->Data));
4708 }
4709 
4710 /*
4711  * Init FIB for pass-through SCMD
4712  */
4713 static void
4714 aac_cmd_fib_srb(struct aac_cmd *acp)
4715 {
4716 	struct aac_slot *slotp = acp->slotp;
4717 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
4718 	struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0];
4719 	struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
4720 
4721 	ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
4722 	ddi_put32(acc, &srb->retry_limit, 0);
4723 	ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
4724 	ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
4725 
4726 	ddi_put32(acc, &srb->flags, srb0->flags);
4727 	ddi_put32(acc, &srb->channel, srb0->channel);
4728 	ddi_put32(acc, &srb->id, srb0->id);
4729 	ddi_put32(acc, &srb->lun, srb0->lun);
4730 	ddi_rep_put8(acc, srb0->cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
4731 }
4732 
4733 static void
4734 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
4735 {
4736 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
4737 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
4738 	struct aac_sg_entry *sgp;
4739 	struct aac_sge *sge;
4740 
4741 	acp->fib_size = sizeof (struct aac_fib_header) + \
4742 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
4743 	    acp->left_cookien * sizeof (struct aac_sg_entry);
4744 
4745 	/* Fill FIB and SRB headers, and copy cdb */
4746 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size);
4747 	aac_cmd_fib_srb(acp);
4748 
4749 	/* Fill SG table */
4750 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
4751 	ddi_put32(acc, &srb->count, acp->bcount);
4752 
4753 	for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
4754 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
4755 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
4756 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
4757 	}
4758 }
4759 
4760 static void
4761 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
4762 {
4763 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
4764 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
4765 	struct aac_sg_entry64 *sgp;
4766 	struct aac_sge *sge;
4767 
4768 	acp->fib_size = sizeof (struct aac_fib_header) + \
4769 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
4770 	    acp->left_cookien * sizeof (struct aac_sg_entry64);
4771 
4772 	/* Fill FIB and SRB headers, and copy cdb */
4773 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64,
4774 	    acp->fib_size);
4775 	aac_cmd_fib_srb(acp);
4776 
4777 	/* Fill SG table */
4778 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
4779 	ddi_put32(acc, &srb->count, acp->bcount);
4780 
4781 	for (sge = &acp->sgt[0],
4782 	    sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
4783 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
4784 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
4785 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
4786 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
4787 	}
4788 }
4789 
4790 static int
4791 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
4792 {
4793 	struct aac_slot *slotp;
4794 
4795 	if (slotp = aac_get_slot(softs)) {
4796 		acp->slotp = slotp;
4797 		slotp->acp = acp;
4798 		acp->aac_cmd_fib(softs, acp);
4799 		(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
4800 		    DDI_DMA_SYNC_FORDEV);
4801 		return (AACOK);
4802 	}
4803 	return (AACERR);
4804 }
4805 
4806 static int
4807 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
4808 {
4809 	struct aac_container *dvp = acp->dvp;
4810 	int q = AAC_CMDQ(acp);
4811 
4812 	if (dvp) {
4813 		if (dvp->ncmds[q] < dvp->throttle[q]) {
4814 			if (!(acp->flags & AAC_CMD_NTAG) ||
4815 			    dvp->ncmds[q] == 0) {
4816 do_bind:
4817 				return (aac_cmd_slot_bind(softs, acp));
4818 			}
4819 			ASSERT(q == AAC_CMDQ_ASYNC);
4820 			aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
4821 			    AAC_THROTTLE_DRAIN);
4822 		}
4823 	} else {
4824 		if (softs->bus_ncmds[q] < softs->bus_throttle[q])
4825 			goto do_bind;
4826 	}
4827 	return (AACERR);
4828 }
4829 
4830 static void
4831 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
4832 {
4833 	struct aac_slot *slotp = acp->slotp;
4834 	int q = AAC_CMDQ(acp);
4835 	int rval;
4836 
4837 	/* Set ac and pkt */
4838 	if (acp->pkt) { /* ac from ioctl has no pkt */
4839 		acp->pkt->pkt_state |=
4840 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
4841 	}
4842 	if (acp->timeout) /* 0 indicates no timeout */
4843 		acp->timeout += aac_timebase + aac_tick;
4844 
4845 	if (acp->dvp)
4846 		acp->dvp->ncmds[q]++;
4847 	softs->bus_ncmds[q]++;
4848 	aac_cmd_enqueue(&softs->q_busy, acp);
4849 
4850 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
4851 		rval = aac_send_command(softs, slotp);
4852 	} else {
4853 		/*
4854 		 * If fib can not be enqueued, the adapter is in an abnormal
4855 		 * state, there will be no interrupt to us.
4856 		 */
4857 		rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
4858 		    slotp->fib_phyaddr, acp->fib_size);
4859 	}
4860 
4861 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
4862 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
4863 
4864 	/*
4865 	 * NOTE: We send command only when slots availabe, so should never
4866 	 * reach here.
4867 	 */
4868 	if (rval != AACOK) {
4869 		AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
4870 		if (acp->pkt) {
4871 			acp->pkt->pkt_state &= ~STATE_SENT_CMD;
4872 			aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
4873 		}
4874 		aac_end_io(softs, acp);
4875 		if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
4876 			ddi_trigger_softintr(softs->softint_id);
4877 	}
4878 }
4879 
4880 static void
4881 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
4882 {
4883 	struct aac_cmd *acp, *next_acp;
4884 
4885 	/* Serve as many waiting io's as possible */
4886 	for (acp = q->q_head; acp; acp = next_acp) {
4887 		next_acp = acp->next;
4888 		if (aac_bind_io(softs, acp) == AACOK) {
4889 			aac_cmd_delete(q, acp);
4890 			aac_start_io(softs, acp);
4891 		}
4892 		if (softs->free_io_slot_head == NULL)
4893 			break;
4894 	}
4895 }
4896 
4897 static void
4898 aac_start_waiting_io(struct aac_softstate *softs)
4899 {
4900 	/*
4901 	 * Sync FIB io is served before async FIB io so that io requests
4902 	 * sent by interactive userland commands get responded asap.
4903 	 */
4904 	if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
4905 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
4906 	if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
4907 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
4908 }
4909 
4910 static void
4911 aac_drain_comp_q(struct aac_softstate *softs)
4912 {
4913 	struct aac_cmd *acp;
4914 	struct scsi_pkt *pkt;
4915 
4916 	/*CONSTCOND*/
4917 	while (1) {
4918 		mutex_enter(&softs->q_comp_mutex);
4919 		acp = aac_cmd_dequeue(&softs->q_comp);
4920 		mutex_exit(&softs->q_comp_mutex);
4921 		if (acp != NULL) {
4922 			ASSERT(acp->pkt != NULL);
4923 			pkt = acp->pkt;
4924 
4925 			if (pkt->pkt_reason == CMD_CMPLT) {
4926 				/*
4927 				 * Consistent packets need to be sync'ed first
4928 				 */
4929 				if ((acp->flags & AAC_CMD_CONSISTENT) &&
4930 				    (acp->flags & AAC_CMD_BUF_READ)) {
4931 					if (aac_dma_sync_ac(acp) != AACOK) {
4932 						ddi_fm_service_impact(
4933 						    softs->devinfo_p,
4934 						    DDI_SERVICE_UNAFFECTED);
4935 						pkt->pkt_reason = CMD_TRAN_ERR;
4936 						pkt->pkt_statistics = 0;
4937 					}
4938 				}
4939 				if ((aac_check_acc_handle(softs-> \
4940 				    comm_space_acc_handle) != DDI_SUCCESS) ||
4941 				    (aac_check_acc_handle(softs-> \
4942 				    pci_mem_handle) != DDI_SUCCESS)) {
4943 					ddi_fm_service_impact(softs->devinfo_p,
4944 					    DDI_SERVICE_UNAFFECTED);
4945 					ddi_fm_acc_err_clear(softs-> \
4946 					    pci_mem_handle, DDI_FME_VER0);
4947 					pkt->pkt_reason = CMD_TRAN_ERR;
4948 					pkt->pkt_statistics = 0;
4949 				}
4950 				if (aac_check_dma_handle(softs-> \
4951 				    comm_space_dma_handle) != DDI_SUCCESS) {
4952 					ddi_fm_service_impact(softs->devinfo_p,
4953 					    DDI_SERVICE_UNAFFECTED);
4954 					pkt->pkt_reason = CMD_TRAN_ERR;
4955 					pkt->pkt_statistics = 0;
4956 				}
4957 			}
4958 			(*pkt->pkt_comp)(pkt);
4959 		} else {
4960 			break;
4961 		}
4962 	}
4963 }
4964 
4965 static int
4966 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
4967 {
4968 	size_t rlen;
4969 	ddi_dma_cookie_t cookie;
4970 	uint_t cookien;
4971 
4972 	/* Allocate FIB dma resource */
4973 	if (ddi_dma_alloc_handle(
4974 	    softs->devinfo_p,
4975 	    &softs->addr_dma_attr,
4976 	    DDI_DMA_SLEEP,
4977 	    NULL,
4978 	    &slotp->fib_dma_handle) != DDI_SUCCESS) {
4979 		AACDB_PRINT(softs, CE_WARN,
4980 		    "Cannot alloc dma handle for slot fib area");
4981 		goto error;
4982 	}
4983 	if (ddi_dma_mem_alloc(
4984 	    slotp->fib_dma_handle,
4985 	    softs->aac_max_fib_size,
4986 	    &aac_acc_attr,
4987 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4988 	    DDI_DMA_SLEEP,
4989 	    NULL,
4990 	    (caddr_t *)&slotp->fibp,
4991 	    &rlen,
4992 	    &slotp->fib_acc_handle) != DDI_SUCCESS) {
4993 		AACDB_PRINT(softs, CE_WARN,
4994 		    "Cannot alloc mem for slot fib area");
4995 		goto error;
4996 	}
4997 	if (ddi_dma_addr_bind_handle(
4998 	    slotp->fib_dma_handle,
4999 	    NULL,
5000 	    (caddr_t)slotp->fibp,
5001 	    softs->aac_max_fib_size,
5002 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5003 	    DDI_DMA_SLEEP,
5004 	    NULL,
5005 	    &cookie,
5006 	    &cookien) != DDI_DMA_MAPPED) {
5007 		AACDB_PRINT(softs, CE_WARN,
5008 		    "dma bind failed for slot fib area");
5009 		goto error;
5010 	}
5011 
5012 	/* Check dma handles allocated in fib attach */
5013 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
5014 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5015 		goto error;
5016 	}
5017 
5018 	/* Check acc handles allocated in fib attach */
5019 	if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
5020 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5021 		goto error;
5022 	}
5023 
5024 	slotp->fib_phyaddr = cookie.dmac_laddress;
5025 	return (AACOK);
5026 
5027 error:
5028 	if (slotp->fib_acc_handle) {
5029 		ddi_dma_mem_free(&slotp->fib_acc_handle);
5030 		slotp->fib_acc_handle = NULL;
5031 	}
5032 	if (slotp->fib_dma_handle) {
5033 		ddi_dma_free_handle(&slotp->fib_dma_handle);
5034 		slotp->fib_dma_handle = NULL;
5035 	}
5036 	return (AACERR);
5037 }
5038 
5039 static void
5040 aac_free_fib(struct aac_slot *slotp)
5041 {
5042 	(void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
5043 	ddi_dma_mem_free(&slotp->fib_acc_handle);
5044 	slotp->fib_acc_handle = NULL;
5045 	ddi_dma_free_handle(&slotp->fib_dma_handle);
5046 	slotp->fib_dma_handle = NULL;
5047 	slotp->fib_phyaddr = 0;
5048 }
5049 
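/*
 * Populate the slot array with FIBs, allocating until either every slot
 * has one or an allocation fails. Each successfully allocated slot is
 * placed on the free slot list and counted in total_fibs.
 */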
5050 static void
5051 aac_alloc_fibs(struct aac_softstate *softs)
5052 {
5053 	int i;
5054 	struct aac_slot *slotp;
5055 
5056 	for (i = 0; i < softs->total_slots &&
5057 	    softs->total_fibs < softs->total_slots; i++) {
5058 		slotp = &(softs->io_slot[i]);
5059 		if (slotp->fib_phyaddr)
5060 			continue;
5061 		if (aac_alloc_fib(softs, slotp) != AACOK)
5062 			break;
5063 
5064 		/* Insert the slot to the free slot list */
5065 		aac_release_slot(softs, slotp);
5066 		softs->total_fibs++;
5067 	}
5068 }
5069 
5070 static void
5071 aac_destroy_fibs(struct aac_softstate *softs)
5072 {
5073 	struct aac_slot *slotp;
5074 
5075 	while ((slotp = softs->free_io_slot_head) != NULL) {
5076 		ASSERT(slotp->fib_phyaddr);
5077 		softs->free_io_slot_head = slotp->next;
5078 		aac_free_fib(slotp);
5079 		ASSERT(slotp->index == (slotp - softs->io_slot));
5080 		softs->total_fibs--;
5081 	}
5082 	ASSERT(softs->total_fibs == 0);
5083 }
5084 
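/*
 * Allocate the I/O slot array, one slot per FIB the adapter supports
 * (aac_max_fibs). Slots are created without FIBs attached; the FIBs
 * themselves are allocated later by aac_alloc_fibs().
 */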
5085 static int
5086 aac_create_slots(struct aac_softstate *softs)
5087 {
5088 	int i;
5089 
5090 	softs->total_slots = softs->aac_max_fibs;
5091 	softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
5092 	    softs->total_slots, KM_SLEEP);
5093 	if (softs->io_slot == NULL) {
5094 		AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
5095 		return (AACERR);
5096 	}
5097 	for (i = 0; i < softs->total_slots; i++)
5098 		softs->io_slot[i].index = i;
5099 	softs->free_io_slot_head = NULL;
5100 	softs->total_fibs = 0;
5101 	return (AACOK);
5102 }
5103 
5104 static void
5105 aac_destroy_slots(struct aac_softstate *softs)
5106 {
5107 	ASSERT(softs->free_io_slot_head == NULL);
5108 
5109 	kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
5110 	    softs->total_slots);
5111 	softs->io_slot = NULL;
5112 	softs->total_slots = 0;
5113 }
5114 
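/*
 * Pop a slot off the head of the free slot list, or return NULL if none
 * is available. aac_release_slot() below returns a slot to the head of
 * the same list.
 */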
5115 struct aac_slot *
5116 aac_get_slot(struct aac_softstate *softs)
5117 {
5118 	struct aac_slot *slotp;
5119 
5120 	if ((slotp = softs->free_io_slot_head) != NULL) {
5121 		softs->free_io_slot_head = slotp->next;
5122 		slotp->next = NULL;
5123 	}
5124 	return (slotp);
5125 }
5126 
5127 static void
5128 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
5129 {
5130 	ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
5131 	ASSERT(slotp == &softs->io_slot[slotp->index]);
5132 
5133 	slotp->acp = NULL;
5134 	slotp->next = softs->free_io_slot_head;
5135 	softs->free_io_slot_head = slotp;
5136 }
5137 
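/*
 * Dispatch a command: bind it to a free slot and start it, or put it on
 * the wait queue if no slot is available. Interrupt-driven commands with
 * callbacks return immediately; otherwise the command is either polled
 * to completion (AAC_CMD_NO_INTR) or waited for on the softs event cv.
 */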
5138 int
5139 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
5140 {
5141 	if (aac_bind_io(softs, acp) == AACOK)
5142 		aac_start_io(softs, acp);
5143 	else
5144 		aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
5145 
5146 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
5147 		return (TRAN_ACCEPT);
5148 	/*
5149 	 * Because sync FIBs are always 512 bytes and reserved for critical
5150 	 * functions, async FIBs are used for polled I/O.
5151 	 */
5152 	if (acp->flags & AAC_CMD_NO_INTR) {
5153 		if (aac_do_poll_io(softs, acp) == AACOK)
5154 			return (TRAN_ACCEPT);
5155 	} else {
5156 		if (aac_do_sync_io(softs, acp) == AACOK)
5157 			return (TRAN_ACCEPT);
5158 	}
5159 	return (TRAN_BADPKT);
5160 }
5161 
5162 static int
5163 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
5164 {
5165 	int (*intr_handler)(struct aac_softstate *);
5166 
5167 	/*
5168 	 * Interrupts are disabled, so we have to poll the adapter ourselves.
5169 	 */
5170 	intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
5171 	    aac_process_intr_new : aac_process_intr_old;
5172 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
5173 		int i = AAC_POLL_TIME * 1000;
5174 
5175 		AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
5176 		if (i == 0)
5177 			aac_cmd_timeout(softs);
5178 	}
5179 
5180 	ddi_trigger_softintr(softs->softint_id);
5181 
5182 	if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
5183 		return (AACOK);
5184 	return (AACERR);
5185 }
5186 
5187 static int
5188 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
5189 {
5190 	ASSERT(softs && acp);
5191 
5192 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
5193 		cv_wait(&softs->event, &softs->io_lock);
5194 
5195 	if (acp->flags & AAC_CMD_CMPLT)
5196 		return (AACOK);
5197 	return (AACERR);
5198 }
5199 
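/*
 * Sync the command's buffer DMA for the device (writes) or for the CPU
 * (reads). When an intermediate buffer (acp->abp) is in use, data is
 * copied between it and the original buf(9S) buffer with
 * ddi_rep_put8()/ddi_rep_get8(). Read syncs are FMA-checked and AACERR
 * is returned on a faulted handle.
 */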
5200 static int
5201 aac_dma_sync_ac(struct aac_cmd *acp)
5202 {
5203 	if (acp->buf_dma_handle) {
5204 		if (acp->flags & AAC_CMD_BUF_WRITE) {
5205 			if (acp->abp != NULL)
5206 				ddi_rep_put8(acp->abh,
5207 				    (uint8_t *)acp->bp->b_un.b_addr,
5208 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
5209 				    DDI_DEV_AUTOINCR);
5210 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
5211 			    DDI_DMA_SYNC_FORDEV);
5212 		} else {
5213 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
5214 			    DDI_DMA_SYNC_FORCPU);
5215 			if (aac_check_dma_handle(acp->buf_dma_handle) !=
5216 			    DDI_SUCCESS)
5217 				return (AACERR);
5218 			if (acp->abp != NULL)
5219 				ddi_rep_get8(acp->abh,
5220 				    (uint8_t *)acp->bp->b_un.b_addr,
5221 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
5222 				    DDI_DEV_AUTOINCR);
5223 		}
5224 	}
5225 	return (AACOK);
5226 }
5227 
5228 /*
5229  * The following function comes from Adaptec:
5230  *
5231  * When the driver sees a particular event indicating that containers have
5232  * changed, it rescans the containers. However, a change may not be complete
5233  * until some other event is received. For example, creating or deleting an
5234  * array can incur as many as six AifEnConfigChange events, which would
5235  * trigger six container rescans. To reduce rescans, the driver sets a flag
5236  * to wait for another particular event, and only rescans when it arrives.
5237  */
5238 static int
5239 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp)
5240 {
5241 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
5242 	uint16_t fib_command;
5243 	struct aac_aif_command *aif;
5244 	int en_type;
5245 	int devcfg_needed;
5246 	int current, next;
5247 
5248 	fib_command = LE_16(fibp->Header.Command);
5249 	if (fib_command != AifRequest) {
5250 		cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x",
5251 		    fib_command);
5252 		return (AACERR);
5253 	}
5254 
5255 	/* Update internal container state */
5256 	aif = (struct aac_aif_command *)&fibp->data[0];
5257 
5258 	AACDB_PRINT_AIF(softs, aif);
5259 	devcfg_needed = 0;
5260 	en_type = LE_32((uint32_t)aif->data.EN.type);
5261 
5262 	switch (LE_32((uint32_t)aif->command)) {
5263 	case AifCmdDriverNotify: {
5264 		int cid = LE_32(aif->data.EN.data.ECC.container[0]);
5265 
5266 		switch (en_type) {
5267 		case AifDenMorphComplete:
5268 		case AifDenVolumeExtendComplete:
5269 			if (softs->containers[cid].valid)
5270 				softs->devcfg_wait_on = AifEnConfigChange;
5271 			break;
5272 		}
5273 		if (softs->devcfg_wait_on == en_type)
5274 			devcfg_needed = 1;
5275 		break;
5276 	}
5277 
5278 	case AifCmdEventNotify:
5279 		switch (en_type) {
5280 		case AifEnAddContainer:
5281 		case AifEnDeleteContainer:
5282 			softs->devcfg_wait_on = AifEnConfigChange;
5283 			break;
5284 		case AifEnContainerChange:
5285 			if (!softs->devcfg_wait_on)
5286 				softs->devcfg_wait_on = AifEnConfigChange;
5287 			break;
5288 		case AifEnContainerEvent:
5289 			if (ddi_get32(acc, &aif-> \
5290 			    data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
5291 				devcfg_needed = 1;
5292 			break;
5293 		}
5294 		if (softs->devcfg_wait_on == en_type)
5295 			devcfg_needed = 1;
5296 		break;
5297 
5298 	case AifCmdJobProgress:
5299 		if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
5300 			int pr_status;
5301 			uint32_t pr_ftick, pr_ctick;
5302 
5303 			pr_status = LE_32((uint32_t)aif->data.PR[0].status);
5304 			pr_ctick = LE_32(aif->data.PR[0].currentTick);
5305 			pr_ftick = LE_32(aif->data.PR[0].finalTick);
5306 
5307 			if ((pr_ctick == pr_ftick) ||
5308 			    (pr_status == AifJobStsSuccess))
5309 				softs->devcfg_wait_on = AifEnContainerChange;
5310 			else if ((pr_ctick == 0) &&
5311 			    (pr_status == AifJobStsRunning))
5312 				softs->devcfg_wait_on = AifEnContainerChange;
5313 		}
5314 		break;
5315 	}
5316 
5317 	if (devcfg_needed)
5318 		(void) aac_probe_containers(softs);
5319 
5320 	/* Modify AIF contexts */
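	/*
	 * The AIF queue is a fixed ring of AAC_AIFQ_LENGTH entries. On
	 * wrap-around, a fib context whose read index sits at the new
	 * write position is marked filled; one that was already filled
	 * at the old write position has its read index pushed forward
	 * and an AIF queue overrun is reported.
	 */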
5321 	current = softs->aifq_idx;
5322 	next = (current + 1) % AAC_AIFQ_LENGTH;
5323 	if (next == 0) {
5324 		struct aac_fib_context *ctx;
5325 
5326 		softs->aifq_wrap = 1;
5327 		for (ctx = softs->fibctx; ctx; ctx = ctx->next) {
5328 			if (next == ctx->ctx_idx) {
5329 				ctx->ctx_filled = 1;
5330 			} else if (current == ctx->ctx_idx && ctx->ctx_filled) {
5331 				ctx->ctx_idx = next;
5332 				AACDB_PRINT(softs, CE_NOTE,
5333 				    "-- AIF queue(%x) overrun", ctx->unique);
5334 			}
5335 		}
5336 	}
5337 	softs->aifq_idx = next;
5338 
5339 	/* Wakeup applications */
5340 	cv_broadcast(&softs->aifv);
5341 	return (AACOK);
5342 }
5343 
5344 /*
5345  * Timeout recovery
5346  */
5347 static void
5348 aac_cmd_timeout(struct aac_softstate *softs)
5349 {
5350 	/*
5351 	 * Besides firmware in an unhealthy state, an overloaded
5352 	 * adapter may also incur packet timeouts.
5353 	 * An adapter with a slower IOP may take longer than 60
5354 	 * seconds to process commands, for example while it is doing
5355 	 * a build on a RAID-5 container and is still required to
5356 	 * handle I/O, so longer completion times should be
5357 	 * tolerated.
5358 	 */
5359 	if (aac_do_reset(softs) == AACOK) {
5360 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL,
5361 		    CMD_RESET);
5362 		aac_start_waiting_io(softs);
5363 	} else {
5364 		/* Abort all waiting cmds when adapter is dead */
5365 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL,
5366 		    CMD_TIMEOUT);
5367 	}
5368 }
5369 
5370 /*
5371  * The following function comes from Adaptec:
5372  *
5373  * Time sync. command added to synchronize time with firmware every 30
5374  * minutes (required for correct AIF timestamps etc.)
5375  */
5376 static int
5377 aac_sync_tick(struct aac_softstate *softs)
5378 {
5379 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
5380 	struct aac_fib *fibp = softs->sync_slot.fibp;
5381 
5382 	ddi_put32(acc, (uint32_t *)&fibp->data[0], ddi_get_time());
5383 	return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)));
5384 }
5385 
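/*
 * Periodic housekeeping, driven by timeout(9F): advance the driver time
 * base, trigger timeout recovery if the first busy command with a
 * timeout has expired, resynchronize the firmware clock, and re-arm the
 * timeout while the controller is in the RUN state.
 */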
5386 static void
5387 aac_daemon(void *arg)
5388 {
5389 	struct aac_softstate *softs = (struct aac_softstate *)arg;
5390 	struct aac_cmd *acp;
5391 
5392 	DBCALLED(softs, 2);
5393 
5394 	mutex_enter(&softs->io_lock);
5395 	/* Check slot for timeout pkts */
5396 	aac_timebase += aac_tick;
5397 	for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
5398 		if (acp->timeout) {
5399 			if (acp->timeout <= aac_timebase) {
5400 				aac_cmd_timeout(softs);
5401 				ddi_trigger_softintr(softs->softint_id);
5402 			}
5403 			break;
5404 		}
5405 	}
5406 
5407 	/* Time sync. with firmware every AAC_SYNC_TICK */
5408 	if (aac_sync_time <= aac_timebase) {
5409 		aac_sync_time = aac_timebase;
5410 		if (aac_sync_tick(softs) != AACOK)
5411 			aac_sync_time += aac_tick << 1; /* retry shortly */
5412 		else
5413 			aac_sync_time += AAC_SYNC_TICK;
5414 	}
5415 
5416 	if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0))
5417 		softs->timeout_id = timeout(aac_daemon, (void *)softs,
5418 		    (aac_tick * drv_usectohz(1000000)));
5419 	mutex_exit(&softs->io_lock);
5420 }
5421 
5422 /*
5423  * Architecture dependent functions
5424  */
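/*
 * Firmware status and mailbox register accessors for the two bridge
 * register layouts handled by this driver (rx and rkt); they differ
 * only in the mailbox base offset (AAC_RX_MAILBOX vs. AAC_RKT_MAILBOX).
 */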
5425 static int
5426 aac_rx_get_fwstatus(struct aac_softstate *softs)
5427 {
5428 	return (PCI_MEM_GET32(softs, AAC_OMR0));
5429 }
5430 
5431 static int
5432 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
5433 {
5434 	return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
5435 }
5436 
5437 static void
5438 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
5439     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
5440 {
5441 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
5442 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
5443 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
5444 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
5445 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
5446 }
5447 
5448 static int
5449 aac_rkt_get_fwstatus(struct aac_softstate *softs)
5450 {
5451 	return (PCI_MEM_GET32(softs, AAC_OMR0));
5452 }
5453 
5454 static int
5455 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
5456 {
5457 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
5458 }
5459 
5460 static void
5461 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
5462     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
5463 {
5464 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
5465 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
5466 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
5467 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
5468 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
5469 }
5470 
5471 /*
5472  * cb_ops functions
5473  */
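/*
 * Minor numbers encode both the instance and the node type: SCSA
 * devctl/scsi nodes are passed straight through to the corresponding
 * scsi_hba_*() entry points, while the driver's own per-instance node
 * is handled locally (open/close validate the instance, ioctl goes to
 * aac_do_ioctl()).
 */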
5474 static int
5475 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
5476 {
5477 	struct aac_softstate *softs;
5478 	int minor0, minor;
5479 	int instance;
5480 
5481 	DBCALLED(NULL, 2);
5482 
5483 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
5484 		return (EINVAL);
5485 
5486 	minor0 = getminor(*devp);
5487 	minor = AAC_SCSA_MINOR(minor0);
5488 
5489 	if (AAC_IS_SCSA_NODE(minor))
5490 		return (scsi_hba_open(devp, flag, otyp, cred));
5491 
5492 	instance = MINOR2INST(minor0);
5493 	if (instance >= AAC_MAX_ADAPTERS)
5494 		return (ENXIO);
5495 
5496 	softs = ddi_get_soft_state(aac_softstatep, instance);
5497 	if (softs == NULL)
5498 		return (ENXIO);
5499 
5500 	return (0);
5501 }
5502 
5503 /*ARGSUSED*/
5504 static int
5505 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
5506 {
5507 	int minor0, minor;
5508 	int instance;
5509 
5510 	DBCALLED(NULL, 2);
5511 
5512 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
5513 		return (EINVAL);
5514 
5515 	minor0 = getminor(dev);
5516 	minor = AAC_SCSA_MINOR(minor0);
5517 
5518 	if (AAC_IS_SCSA_NODE(minor))
5519 		return (scsi_hba_close(dev, flag, otyp, cred));
5520 
5521 	instance = MINOR2INST(minor0);
5522 	if (instance >= AAC_MAX_ADAPTERS)
5523 		return (ENXIO);
5524 
5525 	return (0);
5526 }
5527 
5528 static int
5529 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
5530     int *rval_p)
5531 {
5532 	struct aac_softstate *softs;
5533 	int minor0, minor;
5534 	int instance;
5535 
5536 	DBCALLED(NULL, 2);
5537 
5538 	if (drv_priv(cred_p) != 0)
5539 		return (EPERM);
5540 
5541 	minor0 = getminor(dev);
5542 	minor = AAC_SCSA_MINOR(minor0);
5543 
5544 	if (AAC_IS_SCSA_NODE(minor))
5545 		return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
5546 
5547 	instance = MINOR2INST(minor0);
5548 	if (instance < AAC_MAX_ADAPTERS) {
5549 		softs = ddi_get_soft_state(aac_softstatep, instance);
5550 		return (aac_do_ioctl(softs, dev, cmd, arg, flag));
5551 	}
5552 	return (ENXIO);
5553 }
5554 
5555 /*
5556  * The IO fault service error handling callback function
5557  */
5558 /*ARGSUSED*/
5559 static int
5560 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5561 {
5562 	/*
5563 	 * As the driver can always deal with an error in any DMA or
5564 	 * access handle, we can just return the fme_status value.
5565 	 */
5566 	pci_ereport_post(dip, err, NULL);
5567 	return (err->fme_status);
5568 }
5569 
5570 /*
5571  * aac_fm_init - initialize fma capabilities and register with IO
5572  *               fault services.
5573  */
5574 static void
5575 aac_fm_init(struct aac_softstate *softs)
5576 {
5577 	/*
5578 	 * Need to change iblock to priority for new MSI intr
5579 	 */
5580 	ddi_iblock_cookie_t fm_ibc;
5581 
5582 	/* Only register with IO Fault Services if we have some capability */
5583 	if (softs->fm_capabilities) {
5584 		/* Adjust access and dma attributes for FMA */
5585 		aac_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5586 		softs->buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
5587 		softs->addr_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
5588 
5589 		/*
5590 		 * Register capabilities with IO Fault Services.
5591 		 * fm_capabilities will be updated to indicate
5592 		 * capabilities actually supported (not requested.)
5593 		 */
5594 		ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
5595 
5596 		/*
5597 		 * Initialize pci ereport capabilities if ereport
5598 		 * capable (should always be.)
5599 		 */
5600 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
5601 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
5602 			pci_ereport_setup(softs->devinfo_p);
5603 		}
5604 
5605 		/*
5606 		 * Register error callback if error callback capable.
5607 		 */
5608 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
5609 			ddi_fm_handler_register(softs->devinfo_p,
5610 			    aac_fm_error_cb, (void *) softs);
5611 		}
5612 	} else {
5613 		/* Clear FMA if no capabilities */
5614 		aac_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5615 		softs->buf_dma_attr.dma_attr_flags = 0;
5616 		softs->addr_dma_attr.dma_attr_flags = 0;
5617 	}
5618 }
5619 
5620 /*
5621  * aac_fm_fini - Releases fma capabilities and un-registers with IO
5622  *               fault services.
5623  */
5624 static void
5625 aac_fm_fini(struct aac_softstate *softs)
5626 {
5627 	/* Only unregister FMA capabilities if registered */
5628 	if (softs->fm_capabilities) {
5629 		/*
5630 		 * Un-register error callback if error callback capable.
5631 		 */
5632 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
5633 			ddi_fm_handler_unregister(softs->devinfo_p);
5634 		}
5635 
5636 		/*
5637 		 * Release any resources allocated by pci_ereport_setup()
5638 		 */
5639 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
5640 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
5641 			pci_ereport_teardown(softs->devinfo_p);
5642 		}
5643 
5644 		/* Unregister from IO Fault Services */
5645 		ddi_fm_fini(softs->devinfo_p);
5646 	}
5647 }
5648 
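/*
 * FMA helpers: fetch and return the last error status recorded on an
 * access or DMA handle. Callers treat anything other than DDI_SUCCESS
 * as a faulted handle.
 */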
5649 int
5650 aac_check_acc_handle(ddi_acc_handle_t handle)
5651 {
5652 	ddi_fm_error_t de;
5653 
5654 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5655 	return (de.fme_status);
5656 }
5657 
5658 int
5659 aac_check_dma_handle(ddi_dma_handle_t handle)
5660 {
5661 	ddi_fm_error_t de;
5662 
5663 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5664 	return (de.fme_status);
5665 }
5666 
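/*
 * Post a DDI_FM_DEVICE.<detail> ereport against the adapter when the
 * instance is ereport capable.
 */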
5667 void
5668 aac_fm_ereport(struct aac_softstate *softs, char *detail)
5669 {
5670 	uint64_t ena;
5671 	char buf[FM_MAX_CLASS];
5672 
5673 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5674 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5675 	if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
5676 		ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
5677 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
5678 	}
5679 }
5680 
5681 #ifdef DEBUG
5682 
5683 /* -------------------------debug aid functions-------------------------- */
5684 
5685 #define	AAC_FIB_CMD_KEY_STRINGS \
5686 	TestCommandResponse, "TestCommandResponse", \
5687 	TestAdapterCommand, "TestAdapterCommand", \
5688 	LastTestCommand, "LastTestCommand", \
5689 	ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
5690 	ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
5691 	ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
5692 	ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
5693 	ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
5694 	ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
5695 	ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
5696 	ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
5697 	InterfaceShutdown, "InterfaceShutdown", \
5698 	DmaCommandFib, "DmaCommandFib", \
5699 	StartProfile, "StartProfile", \
5700 	TermProfile, "TermProfile", \
5701 	SpeedTest, "SpeedTest", \
5702 	TakeABreakPt, "TakeABreakPt", \
5703 	RequestPerfData, "RequestPerfData", \
5704 	SetInterruptDefTimer, "SetInterruptDefTimer", \
5705 	SetInterruptDefCount, "SetInterruptDefCount", \
5706 	GetInterruptDefStatus, "GetInterruptDefStatus", \
5707 	LastCommCommand, "LastCommCommand", \
5708 	NuFileSystem, "NuFileSystem", \
5709 	UFS, "UFS", \
5710 	HostFileSystem, "HostFileSystem", \
5711 	LastFileSystemCommand, "LastFileSystemCommand", \
5712 	ContainerCommand, "ContainerCommand", \
5713 	ContainerCommand64, "ContainerCommand64", \
5714 	ClusterCommand, "ClusterCommand", \
5715 	ScsiPortCommand, "ScsiPortCommand", \
5716 	ScsiPortCommandU64, "ScsiPortCommandU64", \
5717 	AifRequest, "AifRequest", \
5718 	CheckRevision, "CheckRevision", \
5719 	FsaHostShutdown, "FsaHostShutdown", \
5720 	RequestAdapterInfo, "RequestAdapterInfo", \
5721 	IsAdapterPaused, "IsAdapterPaused", \
5722 	SendHostTime, "SendHostTime", \
5723 	LastMiscCommand, "LastMiscCommand"
5724 
5725 #define	AAC_CTVM_SUBCMD_KEY_STRINGS \
5726 	VM_Null, "VM_Null", \
5727 	VM_NameServe, "VM_NameServe", \
5728 	VM_ContainerConfig, "VM_ContainerConfig", \
5729 	VM_Ioctl, "VM_Ioctl", \
5730 	VM_FilesystemIoctl, "VM_FilesystemIoctl", \
5731 	VM_CloseAll, "VM_CloseAll", \
5732 	VM_CtBlockRead, "VM_CtBlockRead", \
5733 	VM_CtBlockWrite, "VM_CtBlockWrite", \
5734 	VM_SliceBlockRead, "VM_SliceBlockRead", \
5735 	VM_SliceBlockWrite, "VM_SliceBlockWrite", \
5736 	VM_DriveBlockRead, "VM_DriveBlockRead", \
5737 	VM_DriveBlockWrite, "VM_DriveBlockWrite", \
5738 	VM_EnclosureMgt, "VM_EnclosureMgt", \
5739 	VM_Unused, "VM_Unused", \
5740 	VM_CtBlockVerify, "VM_CtBlockVerify", \
5741 	VM_CtPerf, "VM_CtPerf", \
5742 	VM_CtBlockRead64, "VM_CtBlockRead64", \
5743 	VM_CtBlockWrite64, "VM_CtBlockWrite64", \
5744 	VM_CtBlockVerify64, "VM_CtBlockVerify64", \
5745 	VM_CtHostRead64, "VM_CtHostRead64", \
5746 	VM_CtHostWrite64, "VM_CtHostWrite64", \
5747 	VM_NameServe64, "VM_NameServe64"
5748 
5749 #define	AAC_CT_SUBCMD_KEY_STRINGS \
5750 	CT_Null, "CT_Null", \
5751 	CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
5752 	CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
5753 	CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
5754 	CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
5755 	CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
5756 	CT_WRITE_MBR, "CT_WRITE_MBR", \
5757 	CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
5758 	CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
5759 	CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
5760 	CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
5761 	CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
5762 	CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
5763 	CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
5764 	CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
5765 	CT_READ_MBR, "CT_READ_MBR", \
5766 	CT_READ_PARTITION, "CT_READ_PARTITION", \
5767 	CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
5768 	CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
5769 	CT_SLICE_SIZE, "CT_SLICE_SIZE", \
5770 	CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
5771 	CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
5772 	CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
5773 	CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
5774 	CT_UNMIRROR, "CT_UNMIRROR", \
5775 	CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
5776 	CT_GEN_MIRROR, "CT_GEN_MIRROR", \
5777 	CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
5778 	CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
5779 	CT_MOVE2, "CT_MOVE2", \
5780 	CT_SPLIT, "CT_SPLIT", \
5781 	CT_SPLIT2, "CT_SPLIT2", \
5782 	CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
5783 	CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
5784 	CT_RECONFIG, "CT_RECONFIG", \
5785 	CT_BREAK2, "CT_BREAK2", \
5786 	CT_BREAK, "CT_BREAK", \
5787 	CT_MERGE2, "CT_MERGE2", \
5788 	CT_MERGE, "CT_MERGE", \
5789 	CT_FORCE_ERROR, "CT_FORCE_ERROR", \
5790 	CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
5791 	CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
5792 	CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
5793 	CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
5794 	CT_VOLUME_ADD, "CT_VOLUME_ADD", \
5795 	CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
5796 	CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
5797 	CT_COPY_STATUS, "CT_COPY_STATUS", \
5798 	CT_COPY, "CT_COPY", \
5799 	CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
5800 	CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
5801 	CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
5802 	CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
5803 	CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
5804 	CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
5805 	CT_SET, "CT_SET", \
5806 	CT_GET, "CT_GET", \
5807 	CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
5808 	CT_GET_DELAY, "CT_GET_DELAY", \
5809 	CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
5810 	CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
5811 	CT_SCRUB, "CT_SCRUB", \
5812 	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
5813 	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
5814 	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
5815 	CT_PAUSE_IO, "CT_PAUSE_IO", \
5816 	CT_RELEASE_IO, "CT_RELEASE_IO", \
5817 	CT_SCRUB2, "CT_SCRUB2", \
5818 	CT_MCHECK, "CT_MCHECK", \
5819 	CT_CORRUPT, "CT_CORRUPT", \
5820 	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
5821 	CT_PROMOTE, "CT_PROMOTE", \
5822 	CT_SET_DEAD, "CT_SET_DEAD", \
5823 	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
5824 	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
5825 	CT_GET_PARAM, "CT_GET_PARAM", \
5826 	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
5827 	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
5828 	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
5829 	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
5830 	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
5831 	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
5832 	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
5833 	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
5834 	CT_STOP_DATA, "CT_STOP_DATA", \
5835 	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
5836 	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
5837 	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
5838 	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
5839 	CT_GET_TIME, "CT_GET_TIME", \
5840 	CT_READ_DATA, "CT_READ_DATA", \
5841 	CT_CTR, "CT_CTR", \
5842 	CT_CTL, "CT_CTL", \
5843 	CT_DRAINIO, "CT_DRAINIO", \
5844 	CT_RELEASEIO, "CT_RELEASEIO", \
5845 	CT_GET_NVRAM, "CT_GET_NVRAM", \
5846 	CT_GET_MEMORY, "CT_GET_MEMORY", \
5847 	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
5848 	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
5849 	CT_NV_ZERO, "CT_NV_ZERO", \
5850 	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
5851 	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
5852 	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
5853 	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
5854 	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
5855 	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
5856 	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
5857 	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
5858 	CT_MONITOR, "CT_MONITOR", \
5859 	CT_GEN_MORPH, "CT_GEN_MORPH", \
5860 	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
5861 	CT_CACHE_SET, "CT_CACHE_SET", \
5862 	CT_CACHE_STAT, "CT_CACHE_STAT", \
5863 	CT_TRACE_START, "CT_TRACE_START", \
5864 	CT_TRACE_STOP, "CT_TRACE_STOP", \
5865 	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
5866 	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
5867 	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
5868 	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
5869 	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
5870 	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
5871 	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
5872 	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
5873 	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
5874 	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
5875 	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
5876 	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
5877 	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
5878 	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
5879 	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
5880 	CT_READ_NAME, "CT_READ_NAME", \
5881 	CT_WRITE_NAME, "CT_WRITE_NAME", \
5882 	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
5883 	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
5884 	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
5885 	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
5886 	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
5887 	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
5888 	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
5889 	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
5890 	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
5891 	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
5892 	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
5893 	CT_FLUSH, "CT_FLUSH", \
5894 	CT_REBUILD, "CT_REBUILD", \
5895 	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
5896 	CT_RESTART, "CT_RESTART", \
5897 	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
5898 	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
5899 	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
5900 	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
5901 	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
5902 	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
5903 	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
5904 	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
5905 	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
5906 	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
5907 	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
5908 	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
5909 	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
5910 	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
5911 	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
5912 	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
5913 	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
5914 	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
5915 	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
5916 	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
5917 	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
5918 	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
5919 	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
5920 	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
5921 	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
5922 	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
5923 	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
5924 	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
5925 	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
5926 	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
5927 	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
5928 	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
5929 	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
5930 	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
5931 	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
5932 	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
5933 	CT_IS_CONTAINER_MEATADATA_STANDARD, \
5934 	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
5935 	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
5936 	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
5937 	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
5938 	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
5939 	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
5940 	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
5941 	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
5942 	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
5943 	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
5944 	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
5945 	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
5946 	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
5947 	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
5948 	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
5949 	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
5950 	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
5951 	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
5952 	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
5953 	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
5954 
5955 #define	AAC_CL_SUBCMD_KEY_STRINGS \
5956 	CL_NULL, "CL_NULL", \
5957 	DS_INIT, "DS_INIT", \
5958 	DS_RESCAN, "DS_RESCAN", \
5959 	DS_CREATE, "DS_CREATE", \
5960 	DS_DELETE, "DS_DELETE", \
5961 	DS_ADD_DISK, "DS_ADD_DISK", \
5962 	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
5963 	DS_MOVE_DISK, "DS_MOVE_DISK", \
5964 	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
5965 	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
5966 	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
5967 	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
5968 	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
5969 	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
5970 	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
5971 	DS_GET_DRIVES, "DS_GET_DRIVES", \
5972 	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
5973 	DS_ONLINE, "DS_ONLINE", \
5974 	DS_OFFLINE, "DS_OFFLINE", \
5975 	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
5976 	DS_FSAPRINT, "DS_FSAPRINT", \
5977 	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
5978 	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
5979 	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
5980 	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
5981 	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
5982 	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
5983 	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
5984 	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
5985 	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
5986 	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
5987 	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
5988 	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
5989 	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
5990 	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
5991 	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
5992 	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
5993 	CQ_QUORUM_OP, "CQ_QUORUM_OP"
5994 
5995 #define	AAC_AIF_SUBCMD_KEY_STRINGS \
5996 	AifCmdEventNotify, "AifCmdEventNotify", \
5997 	AifCmdJobProgress, "AifCmdJobProgress", \
5998 	AifCmdAPIReport, "AifCmdAPIReport", \
5999 	AifCmdDriverNotify, "AifCmdDriverNotify", \
6000 	AifReqJobList, "AifReqJobList", \
6001 	AifReqJobsForCtr, "AifReqJobsForCtr", \
6002 	AifReqJobsForScsi, "AifReqJobsForScsi", \
6003 	AifReqJobReport, "AifReqJobReport", \
6004 	AifReqTerminateJob, "AifReqTerminateJob", \
6005 	AifReqSuspendJob, "AifReqSuspendJob", \
6006 	AifReqResumeJob, "AifReqResumeJob", \
6007 	AifReqSendAPIReport, "AifReqSendAPIReport", \
6008 	AifReqAPIJobStart, "AifReqAPIJobStart", \
6009 	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
6010 	AifReqAPIJobFinish, "AifReqAPIJobFinish"
6011 
6012 #define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
6013 	Reserved_IOCTL, "Reserved_IOCTL", \
6014 	GetDeviceHandle, "GetDeviceHandle", \
6015 	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
6016 	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
6017 	RescanBus, "RescanBus", \
6018 	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
6019 	GetDeviceCapacity, "GetDeviceCapacity", \
6020 	GetContainerProbeInfo, "GetContainerProbeInfo", \
6021 	GetRequestedMemorySize, "GetRequestedMemorySize", \
6022 	GetBusInfo, "GetBusInfo", \
6023 	GetVendorSpecific, "GetVendorSpecific", \
6024 	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
6025 	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
6026 	SetupExtendedCounters, "SetupExtendedCounters", \
6027 	GetPerformanceCounters, "GetPerformanceCounters", \
6028 	ResetPerformanceCounters, "ResetPerformanceCounters", \
6029 	ReadModePage, "ReadModePage", \
6030 	WriteModePage, "WriteModePage", \
6031 	ReadDriveParameter, "ReadDriveParameter", \
6032 	WriteDriveParameter, "WriteDriveParameter", \
6033 	ResetAdapter, "ResetAdapter", \
6034 	ResetBus, "ResetBus", \
6035 	ResetBusDevice, "ResetBusDevice", \
6036 	ExecuteSrb, "ExecuteSrb", \
6037 	Create_IO_Task, "Create_IO_Task", \
6038 	Delete_IO_Task, "Delete_IO_Task", \
6039 	Get_IO_Task_Info, "Get_IO_Task_Info", \
6040 	Check_Task_Progress, "Check_Task_Progress", \
6041 	InjectError, "InjectError", \
6042 	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
6043 	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
6044 	GetDeviceStatus, "GetDeviceStatus", \
6045 	ClearDeviceStatus, "ClearDeviceStatus", \
6046 	DiskSpinControl, "DiskSpinControl", \
6047 	DiskSmartControl, "DiskSmartControl", \
6048 	WriteSame, "WriteSame", \
6049 	ReadWriteLong, "ReadWriteLong", \
6050 	FormatUnit, "FormatUnit", \
6051 	TargetDeviceControl, "TargetDeviceControl", \
6052 	TargetChannelControl, "TargetChannelControl", \
6053 	FlashNewCode, "FlashNewCode", \
6054 	DiskCheck, "DiskCheck", \
6055 	RequestSense, "RequestSense", \
6056 	DiskPERControl, "DiskPERControl", \
6057 	Read10, "Read10", \
6058 	Write10, "Write10"
6059 
6060 #define	AAC_AIFEN_KEY_STRINGS \
6061 	AifEnGeneric, "Generic", \
6062 	AifEnTaskComplete, "TaskComplete", \
6063 	AifEnConfigChange, "Config change", \
6064 	AifEnContainerChange, "Container change", \
6065 	AifEnDeviceFailure, "device failed", \
6066 	AifEnMirrorFailover, "Mirror failover", \
6067 	AifEnContainerEvent, "container event", \
6068 	AifEnFileSystemChange, "File system changed", \
6069 	AifEnConfigPause, "Container pause event", \
6070 	AifEnConfigResume, "Container resume event", \
6071 	AifEnFailoverChange, "Failover space assignment changed", \
6072 	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
6073 	AifEnEnclosureManagement, "Enclosure management event", \
6074 	AifEnBatteryEvent, "battery event", \
6075 	AifEnAddContainer, "Add container", \
6076 	AifEnDeleteContainer, "Delete container", \
6077 	AifEnSMARTEvent, "SMART Event", \
6078 	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
6079 	AifEnClusterEvent, "cluster event", \
6080 	AifEnDiskSetEvent, "disk set event occurred", \
6081 	AifDenMorphComplete, "morph operation completed", \
6082 	AifDenVolumeExtendComplete, "VolumeExtendComplete"
6083 
6084 struct aac_key_strings {
6085 	int key;
6086 	char *message;
6087 };
6088 
6089 extern struct scsi_key_strings scsi_cmds[];
6090 
6091 static struct aac_key_strings aac_fib_cmds[] = {
6092 	AAC_FIB_CMD_KEY_STRINGS,
6093 	-1,			NULL
6094 };
6095 
6096 static struct aac_key_strings aac_ctvm_subcmds[] = {
6097 	AAC_CTVM_SUBCMD_KEY_STRINGS,
6098 	-1,			NULL
6099 };
6100 
6101 static struct aac_key_strings aac_ct_subcmds[] = {
6102 	AAC_CT_SUBCMD_KEY_STRINGS,
6103 	-1,			NULL
6104 };
6105 
6106 static struct aac_key_strings aac_cl_subcmds[] = {
6107 	AAC_CL_SUBCMD_KEY_STRINGS,
6108 	-1,			NULL
6109 };
6110 
6111 static struct aac_key_strings aac_aif_subcmds[] = {
6112 	AAC_AIF_SUBCMD_KEY_STRINGS,
6113 	-1,			NULL
6114 };
6115 
6116 static struct aac_key_strings aac_ioctl_subcmds[] = {
6117 	AAC_IOCTL_SUBCMD_KEY_STRINGS,
6118 	-1,			NULL
6119 };
6120 
6121 static struct aac_key_strings aac_aifens[] = {
6122 	AAC_AIFEN_KEY_STRINGS,
6123 	-1,			NULL
6124 };
6125 
6126 /*
6127  * The following function comes from Adaptec:
6128  *
6129  * Get the firmware print buffer parameters from the firmware,
6130  * if the command was successful map in the address.
6131  */
6132 static int
6133 aac_get_fw_debug_buffer(struct aac_softstate *softs)
6134 {
6135 	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
6136 	    0, 0, 0, 0, NULL) == AACOK) {
6137 		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
6138 		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
6139 		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
6140 		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
6141 
6142 		if (mondrv_buf_size) {
6143 			uint32_t offset = mondrv_buf_paddrl - \
6144 			    softs->pci_mem_base_paddr;
6145 
6146 			/*
6147 			 * See if the address is already mapped in, and
6148 			 * if so set it up from the base address
6149 			 */
6150 			if ((mondrv_buf_paddrh == 0) &&
6151 			    (offset + mondrv_buf_size < softs->map_size)) {
6152 				mutex_enter(&aac_prt_mutex);
6153 				softs->debug_buf_offset = offset;
6154 				softs->debug_header_size = mondrv_hdr_size;
6155 				softs->debug_buf_size = mondrv_buf_size;
6156 				softs->debug_fw_flags = 0;
6157 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
6158 				mutex_exit(&aac_prt_mutex);
6159 
6160 				return (AACOK);
6161 			}
6162 		}
6163 	}
6164 	return (AACERR);
6165 }
6166 
6167 int
6168 aac_dbflag_on(struct aac_softstate *softs, int flag)
6169 {
6170 	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
6171 
6172 	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
6173 	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
6174 }
6175 
6176 static void
6177 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
6178 {
6179 	if (noheader) {
6180 		if (sl) {
6181 			aac_fmt[0] = sl;
6182 			cmn_err(lev, aac_fmt, aac_prt_buf);
6183 		} else {
6184 			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
6185 		}
6186 	} else {
6187 		if (sl) {
6188 			aac_fmt_header[0] = sl;
6189 			cmn_err(lev, aac_fmt_header,
6190 			    softs->vendor_name, softs->instance,
6191 			    aac_prt_buf);
6192 		} else {
6193 			cmn_err(lev, &aac_fmt_header[1],
6194 			    softs->vendor_name, softs->instance,
6195 			    aac_prt_buf);
6196 		}
6197 	}
6198 }
6199 
6200 /*
6201  * The following function comes from Adaptec:
6202  *
6203  * Format and print out the data passed in to UART or console
6204  * as specified by debug flags.
6205  */
6206 void
6207 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
6208 {
6209 	va_list args;
6210 	char sl; /* system log character */
6211 
6212 	mutex_enter(&aac_prt_mutex);
6213 	/* Set up parameters and call sprintf function to format the data */
6214 	if (strchr("^!?", fmt[0]) == NULL) {
6215 		sl = 0;
6216 	} else {
6217 		sl = fmt[0];
6218 		fmt++;
6219 	}
6220 	va_start(args, fmt);
6221 	(void) vsprintf(aac_prt_buf, fmt, args);
6222 	va_end(args);
6223 
6224 	/* Make sure the softs structure has been passed in for this section */
6225 	if (softs) {
6226 		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
6227 		    /* If we are set up for a Firmware print */
6228 		    (softs->debug_buf_size)) {
6229 			uint32_t count, i;
6230 
6231 			/* Make sure the string size is within boundaries */
6232 			count = strlen(aac_prt_buf);
6233 			if (count > softs->debug_buf_size)
6234 				count = (uint16_t)softs->debug_buf_size;
6235 
6236 			/*
6237 			 * Wait for no more than AAC_PRINT_TIMEOUT for the
6238 			 * previous message length to clear (the handshake).
6239 			 */
6240 			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
6241 				if (!PCI_MEM_GET32(softs,
6242 				    softs->debug_buf_offset + \
6243 				    AAC_FW_DBG_STRLEN_OFFSET))
6244 					break;
6245 
6246 				drv_usecwait(1000);
6247 			}
6248 
6249 			/*
6250 			 * If the length is clear, copy over the message, the
6251 			 * flags, and the length. Make sure the length is the
6252 			 * last because that is the signal for the Firmware to
6253 			 * pick it up.
6254 			 */
6255 			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
6256 			    AAC_FW_DBG_STRLEN_OFFSET)) {
6257 				PCI_MEM_REP_PUT8(softs,
6258 				    softs->debug_buf_offset + \
6259 				    softs->debug_header_size,
6260 				    aac_prt_buf, count);
6261 				PCI_MEM_PUT32(softs,
6262 				    softs->debug_buf_offset + \
6263 				    AAC_FW_DBG_FLAGS_OFFSET,
6264 				    softs->debug_fw_flags);
6265 				PCI_MEM_PUT32(softs,
6266 				    softs->debug_buf_offset + \
6267 				    AAC_FW_DBG_STRLEN_OFFSET, count);
6268 			} else {
6269 				cmn_err(CE_WARN, "UART output fail");
6270 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
6271 			}
6272 		}
6273 
6274 		/*
6275 		 * If the Kernel Debug Print flag is set, send it off
6276 		 * to the Kernel Debugger
6277 		 */
6278 		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
6279 			aac_cmn_err(softs, lev, sl,
6280 			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
6281 	} else {
6282 		/* Driver not initialized yet, no firmware or header output */
6283 		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
6284 			aac_cmn_err(softs, lev, sl, 1);
6285 	}
6286 	mutex_exit(&aac_prt_mutex);
6287 }
6288 
6289 /*
6290  * Translate command number to description string
6291  */
6292 static char *
6293 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
6294 {
6295 	int i;
6296 
6297 	for (i = 0; cmdlist[i].key != -1; i++) {
6298 		if (cmd == cmdlist[i].key)
6299 			return (cmdlist[i].message);
6300 	}
6301 	return (NULL);
6302 }
6303 
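/*
 * Debug helper: decode a SCSI packet's CDB and print the command name,
 * the LBA/count for READ/WRITE variants, the poll/intr mode, and the
 * c/t/L address it is bound for.
 */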
6304 static void
6305 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
6306 {
6307 	struct scsi_pkt *pkt = acp->pkt;
6308 	struct scsi_address *ap = &pkt->pkt_address;
6309 	int ctl = ddi_get_instance(softs->devinfo_p);
6310 	int tgt = ap->a_target;
6311 	int lun = ap->a_lun;
6312 	union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
6313 	uchar_t cmd = cdbp->scc_cmd;
6314 	char *desc;
6315 
6316 	if ((desc = aac_cmd_name(cmd,
6317 	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
6318 		aac_printf(softs, CE_NOTE,
6319 		    "SCMD> Unknown(0x%02x) --> c%dt%dL%d",
6320 		    cmd, ctl, tgt, lun);
6321 		return;
6322 	}
6323 
6324 	switch (cmd) {
6325 	case SCMD_READ:
6326 	case SCMD_WRITE:
6327 		aac_printf(softs, CE_NOTE,
6328 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d",
6329 		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
6330 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
6331 		    ctl, tgt, lun);
6332 		break;
6333 	case SCMD_READ_G1:
6334 	case SCMD_WRITE_G1:
6335 		aac_printf(softs, CE_NOTE,
6336 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d",
6337 		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
6338 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
6339 		    ctl, tgt, lun);
6340 		break;
6341 	case SCMD_READ_G4:
6342 	case SCMD_WRITE_G4:
6343 		aac_printf(softs, CE_NOTE,
6344 		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d",
6345 		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
6346 		    GETG4COUNT(cdbp),
6347 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
6348 		    ctl, tgt, lun);
6349 		break;
6350 	default:
6351 		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d",
6352 		    desc, ctl, tgt, lun);
6353 	}
6354 }
6355 
6356 void
6357 aac_print_fib(struct aac_softstate *softs, struct aac_fib *fibp)
6358 {
6359 	uint16_t fib_size;
6360 	int32_t fib_cmd, sub_cmd;
6361 	char *cmdstr, *subcmdstr;
6362 	struct aac_Container *pContainer;
6363 
6364 	fib_cmd = LE_16(fibp->Header.Command);
6365 	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
6366 	sub_cmd = -1;
6367 	subcmdstr = NULL;
6368 
6369 	switch (fib_cmd) {
6370 	case ContainerCommand:
6371 		pContainer = (struct aac_Container *)fibp->data;
6372 		sub_cmd = LE_32(pContainer->Command);
6373 		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
6374 		if (subcmdstr == NULL)
6375 			break;
6376 		fib_cmd = sub_cmd;
6377 		cmdstr = subcmdstr;
6378 		sub_cmd = -1;
6379 		subcmdstr = NULL;
6380 
6381 		switch (pContainer->Command) {
6382 		case VM_ContainerConfig:
6383 			sub_cmd = LE_32(pContainer->CTCommand.command);
6384 			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
6385 			if (subcmdstr == NULL)
6386 				break;
6387 			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
6388 			    subcmdstr,
6389 			    LE_32(pContainer->CTCommand.param[0]),
6390 			    LE_32(pContainer->CTCommand.param[1]),
6391 			    LE_32(pContainer->CTCommand.param[2]));
6392 			return;
6393 		case VM_Ioctl:
6394 			sub_cmd = LE_32(((int32_t *)pContainer)[4]);
6395 			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
6396 			break;
6397 		}
6398 		break;
6399 
6400 	case ClusterCommand:
6401 		sub_cmd = LE_32(((int32_t *)fibp->data)[0]);
6402 		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
6403 		break;
6404 
6405 	case AifRequest:
6406 		sub_cmd = LE_32(((int32_t *)fibp->data)[0]);
6407 		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
6408 		break;
6409 
6410 	default:
6411 		break;
6412 	}
6413 
6414 	fib_size = LE_16(fibp->Header.Size);
6415 	if (subcmdstr)
6416 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
6417 		    subcmdstr, fib_size);
6418 	else if (cmdstr && sub_cmd == -1)
6419 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
6420 		    cmdstr, fib_size);
6421 	else if (cmdstr)
6422 		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
6423 		    cmdstr, sub_cmd, fib_size);
6424 	else
6425 		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
6426 		    fib_cmd, fib_size);
6427 }
6428 
6429 static void
6430 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
6431 {
6432 	int aif_command;
6433 	uint32_t aif_seqnumber;
6434 	int aif_en_type;
6435 	char *str;
6436 
6437 	aif_command = LE_32(aif->command);
6438 	aif_seqnumber = LE_32(aif->seqNumber);
6439 	aif_en_type = LE_32(aif->data.EN.type);
6440 
6441 	switch (aif_command) {
6442 	case AifCmdEventNotify:
6443 		str = aac_cmd_name(aif_en_type, aac_aifens);
6444 		if (str)
6445 			aac_printf(softs, CE_NOTE, "AIF! %s", str);
6446 		else
6447 			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
6448 			    aif_en_type);
6449 		break;
6450 
6451 	case AifCmdJobProgress:
6452 		switch (LE_32(aif->data.PR[0].status)) {
6453 		case AifJobStsSuccess:
6454 			str = "success"; break;
6455 		case AifJobStsFinished:
6456 			str = "finished"; break;
6457 		case AifJobStsAborted:
6458 			str = "aborted"; break;
6459 		case AifJobStsFailed:
6460 			str = "failed"; break;
6461 		case AifJobStsSuspended:
6462 			str = "suspended"; break;
6463 		case AifJobStsRunning:
6464 			str = "running"; break;
6465 		default:
6466 			str = "unknown"; break;
6467 		}
6468 		aac_printf(softs, CE_NOTE,
6469 		    "AIF! JobProgress (%d) - %s (%d, %d)",
6470 		    aif_seqnumber, str,
6471 		    LE_32(aif->data.PR[0].currentTick),
6472 		    LE_32(aif->data.PR[0].finalTick));
6473 		break;
6474 
6475 	case AifCmdAPIReport:
6476 		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
6477 		    aif_seqnumber);
6478 		break;
6479 
6480 	case AifCmdDriverNotify:
6481 		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
6482 		    aif_seqnumber);
6483 		break;
6484 
6485 	default:
6486 		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
6487 		    aif_command, aif_seqnumber);
6488 		break;
6489 	}
6490 }
6491 
6492 #endif /* DEBUG */
6493