xref: /titanic_41/usr/src/uts/common/io/aac/aac.c (revision 92a0208178405fef708b0283ffcaa02fbc3468ff)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-08 Adaptec, Inc.
8  * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
49 
50 /*
51  * FMA header files
52  */
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
57 
58 /*
59  * For minor nodes created by the SCSA framework, minor numbers are
60  * formed by left-shifting the instance number by INST_MINOR_SHIFT and
61  * ORing in a number less than 64.
62  *
63  * To support cfgadm, we need to conform to the SCSA framework by creating
64  * devctl/scsi and driver-specific minor nodes in the SCSA format, and
65  * calling the scsi_hba_xxx() functions accordingly.
66  */
67 
68 #define	AAC_MINOR		32
69 #define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
71 #define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
72 
73 #define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
74 #define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define	AAC_PD(t)		((t) - AAC_MAX_LD)
79 #define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
80 				&(softs)->containers[(t)].dev : \
81 				((t) < AAC_MAX_DEV(softs)) ? \
82 				&(softs)->nondasds[AAC_PD(t)].dev : NULL)
83 #define	AAC_DEVCFG_BEGIN(softs, tgt) \
84 				aac_devcfg((softs), (tgt), 1)
85 #define	AAC_DEVCFG_END(softs, tgt) \
86 				aac_devcfg((softs), (tgt), 0)
87 #define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
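/*
 * Busy-wait up to `timeout' milliseconds for `cond' to become true, polling
 * in 100us steps. On return, `timeout' holds the remaining time in
 * milliseconds (rounded up), so callers can test it for zero to detect
 * expiration.
 */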
88 #define	AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
89 		if (!(cond)) { \
90 			int count = (timeout) * 10; \
91 			while (count) { \
92 				drv_usecwait(100); \
93 				if (cond) \
94 					break; \
95 				count--; \
96 			} \
97 			(timeout) = (count + 9) / 10; \
98 		} \
99 	}
100 
101 #define	AAC_SENSE_DATA_DESCR_LEN \
102 	(sizeof (struct scsi_descr_sense_hdr) + \
103 	sizeof (struct scsi_information_sense_descr))
104 #define	AAC_ARQ64_LENGTH \
105 	(sizeof (struct scsi_arq_status) + \
106 	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
107 
108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
109 #define	AAC_GETGXADDR(cmdlen, cdbp) \
110 	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
111 	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
112 	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
113 
114 #define	AAC_CDB_INQUIRY_CMDDT	0x02
115 #define	AAC_CDB_INQUIRY_EVPD	0x01
116 #define	AAC_VPD_PAGE_CODE	1
117 #define	AAC_VPD_PAGE_LENGTH	3
118 #define	AAC_VPD_PAGE_DATA	4
119 #define	AAC_VPD_ID_CODESET	0
120 #define	AAC_VPD_ID_TYPE		1
121 #define	AAC_VPD_ID_LENGTH	3
122 #define	AAC_VPD_ID_DATA		4
123 
124 #define	AAC_SCSI_RPTLUNS_HEAD_SIZE			0x08
125 #define	AAC_SCSI_RPTLUNS_ADDR_SIZE			0x08
126 #define	AAC_SCSI_RPTLUNS_ADDR_MASK			0xC0
127 /* 00b - peripheral device addressing method */
128 #define	AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL		0x00
129 /* 01b - flat space addressing method */
130 #define	AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE		0x40
131 /* 10b - logical unit addressing method */
132 #define	AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT		0x80
133 
134 /* Return the size of a FIB with a data part of type data_type */
135 #define	AAC_FIB_SIZEOF(data_type) \
136 	(sizeof (struct aac_fib_header) + sizeof (data_type))
137 /* Return the container size defined in mir */
138 #define	AAC_MIR_SIZE(softs, acc, mir) \
139 	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
140 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
141 	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
142 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
143 
144 /* The last entry of aac_cards[] is for unknown cards */
145 #define	AAC_UNKNOWN_CARD \
146 	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
147 #define	CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
148 #define	BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
149 #define	AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
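/* Wait/busy queue index for a command: 0 for sync commands, 1 for async */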
150 #define	AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))
151 
152 #define	PCI_MEM_GET32(softs, off) \
153 	ddi_get32((softs)->pci_mem_handle, \
154 	    (void *)((softs)->pci_mem_base_vaddr + (off)))
155 #define	PCI_MEM_PUT32(softs, off, val) \
156 	ddi_put32((softs)->pci_mem_handle, \
157 	    (void *)((softs)->pci_mem_base_vaddr + (off)), \
158 	    (uint32_t)(val))
159 #define	PCI_MEM_GET16(softs, off) \
160 	ddi_get16((softs)->pci_mem_handle, \
161 	(void *)((softs)->pci_mem_base_vaddr + (off)))
162 #define	PCI_MEM_PUT16(softs, off, val) \
163 	ddi_put16((softs)->pci_mem_handle, \
164 	(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
165 /* Write host data at valp to device mem[off] repeatedly count times */
166 #define	PCI_MEM_REP_PUT8(softs, off, valp, count) \
167 	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
168 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
169 	    count, DDI_DEV_AUTOINCR)
170 /* Read device data at mem[off] to host addr valp repeatedly count times */
171 #define	PCI_MEM_REP_GET8(softs, off, valp, count) \
172 	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
173 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
174 	    count, DDI_DEV_AUTOINCR)
175 #define	AAC_GET_FIELD8(acc, d, s, field) \
176 	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
177 #define	AAC_GET_FIELD32(acc, d, s, field) \
178 	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
179 #define	AAC_GET_FIELD64(acc, d, s, field) \
180 	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
181 #define	AAC_REP_GET_FIELD8(acc, d, s, field, r) \
182 	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
183 	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
184 #define	AAC_REP_GET_FIELD32(acc, d, s, field, r) \
185 	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
186 	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
187 
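/*
 * Enable adapter-to-host interrupts by unmasking the doorbell bits in the
 * outbound interrupt mask register; the New Comm. interface uses a different
 * interrupt bit than the old one.
 */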
188 #define	AAC_ENABLE_INTR(softs) { \
189 		if (softs->flags & AAC_FLAGS_NEW_COMM) \
190 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
191 		else \
192 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
193 	}
194 
195 #define	AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
196 #define	AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
197 #define	AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
198 #define	AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
199 #define	AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
200 #define	AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
201 #define	AAC_FWSTATUS_GET(softs)	\
202 	((softs)->aac_if.aif_get_fwstatus(softs))
203 #define	AAC_MAILBOX_GET(softs, mb) \
204 	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
205 #define	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
206 	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
207 	    (arg0), (arg1), (arg2), (arg3)))
208 
209 #define	AAC_THROTTLE_DRAIN	-1
210 
211 #define	AAC_QUIESCE_TICK	1	/* 1 second */
212 #define	AAC_QUIESCE_TIMEOUT	180	/* 180 seconds */
213 #define	AAC_DEFAULT_TICK	10	/* 10 seconds */
214 #define	AAC_SYNC_TICK		(30*60)	/* 30 minutes */
215 
216 /* Poll time for aac_do_poll_io() */
217 #define	AAC_POLL_TIME		60	/* 60 seconds */
218 
219 /* IOP reset */
220 #define	AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
221 #define	AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
222 #define	AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */
223 
224 /*
225  * Hardware access functions
226  */
227 static int aac_rx_get_fwstatus(struct aac_softstate *);
228 static int aac_rx_get_mailbox(struct aac_softstate *, int);
229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
230     uint32_t, uint32_t, uint32_t);
231 static int aac_rkt_get_fwstatus(struct aac_softstate *);
232 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234     uint32_t, uint32_t, uint32_t);
235 
236 /*
237  * SCSA function prototypes
238  */
239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
242 
243 /*
244  * Interrupt handler functions
245  */
246 static int aac_query_intrs(struct aac_softstate *, int);
247 static int aac_add_intrs(struct aac_softstate *);
248 static void aac_remove_intrs(struct aac_softstate *);
249 static uint_t aac_intr_old(caddr_t);
250 static uint_t aac_intr_new(caddr_t);
251 static uint_t aac_softintr(caddr_t);
252 
253 /*
254  * Internal functions in attach
255  */
256 static int aac_check_card_type(struct aac_softstate *);
257 static int aac_check_firmware(struct aac_softstate *);
258 static int aac_common_attach(struct aac_softstate *);
259 static void aac_common_detach(struct aac_softstate *);
260 static int aac_probe_containers(struct aac_softstate *);
261 static int aac_alloc_comm_space(struct aac_softstate *);
262 static int aac_setup_comm_space(struct aac_softstate *);
263 static void aac_free_comm_space(struct aac_softstate *);
264 static int aac_hba_setup(struct aac_softstate *);
265 
266 /*
267  * Sync FIB operation functions
268  */
269 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
270     uint32_t, uint32_t, uint32_t, uint32_t *);
271 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
272 
273 /*
274  * Command queue operation functions
275  */
276 static void aac_cmd_initq(struct aac_cmd_queue *);
277 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
278 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
279 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
280 
281 /*
282  * FIB queue operation functions
283  */
284 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
285 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
286 
287 /*
288  * Slot operation functions
289  */
290 static int aac_create_slots(struct aac_softstate *);
291 static void aac_destroy_slots(struct aac_softstate *);
292 static void aac_alloc_fibs(struct aac_softstate *);
293 static void aac_destroy_fibs(struct aac_softstate *);
294 static struct aac_slot *aac_get_slot(struct aac_softstate *);
295 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
296 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
297 static void aac_free_fib(struct aac_slot *);
298 
299 /*
300  * Internal functions
301  */
302 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
303     uint16_t, uint16_t);
304 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
305 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
306 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
307 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
308 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
309 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
310 static void aac_start_waiting_io(struct aac_softstate *);
311 static void aac_drain_comp_q(struct aac_softstate *);
312 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
313 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
314 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
315 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
316 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
317 static int aac_dma_sync_ac(struct aac_cmd *);
318 static int aac_shutdown(struct aac_softstate *);
319 static int aac_reset_adapter(struct aac_softstate *);
320 static int aac_do_quiesce(struct aac_softstate *softs);
321 static int aac_do_unquiesce(struct aac_softstate *softs);
322 static void aac_unhold_bus(struct aac_softstate *, int);
323 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
324     int, int);
325 
326 /*
327  * Adapter Initiated FIB handling function
328  */
329 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);
330 
331 /*
332  * Timeout handling thread function
333  */
334 static void aac_daemon(void *);
335 
336 /*
337  * IOCTL interface related functions
338  */
339 static int aac_open(dev_t *, int, int, cred_t *);
340 static int aac_close(dev_t, int, int, cred_t *);
341 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
342 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
343 
344 /*
345  * FMA Prototypes
346  */
347 static void aac_fm_init(struct aac_softstate *);
348 static void aac_fm_fini(struct aac_softstate *);
349 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
350 int aac_check_acc_handle(ddi_acc_handle_t);
351 int aac_check_dma_handle(ddi_dma_handle_t);
352 void aac_fm_ereport(struct aac_softstate *, char *);
353 
354 /*
355  * Auto enumeration functions
356  */
357 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
358 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
359     void *, dev_info_t **);
360 static int aac_dr_event(struct aac_softstate *, int, int, int);
361 
362 #ifdef DEBUG
363 /*
364  * UART	debug output support
365  */
366 
367 #define	AAC_PRINT_BUFFER_SIZE		512
368 #define	AAC_PRINT_TIMEOUT		250	/* 1/4 sec. = 250 msec. */
369 
370 #define	AAC_FW_DBG_STRLEN_OFFSET	0x00
371 #define	AAC_FW_DBG_FLAGS_OFFSET		0x04
372 #define	AAC_FW_DBG_BLED_OFFSET		0x08
373 
374 static int aac_get_fw_debug_buffer(struct aac_softstate *);
375 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
376 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
377 
378 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
379 static char aac_fmt[] = " %s";
380 static char aac_fmt_header[] = " %s.%d: %s";
381 static kmutex_t aac_prt_mutex;
382 
383 /*
384  * Debug flags to be put into the softstate flags field
385  * when initialized
386  */
387 uint32_t aac_debug_flags =
388 /*    AACDB_FLAGS_KERNEL_PRINT | */
389 /*    AACDB_FLAGS_FW_PRINT |	*/
390 /*    AACDB_FLAGS_MISC |	*/
391 /*    AACDB_FLAGS_FUNC1 |	*/
392 /*    AACDB_FLAGS_FUNC2 |	*/
393 /*    AACDB_FLAGS_SCMD |	*/
394 /*    AACDB_FLAGS_AIF |		*/
395 /*    AACDB_FLAGS_FIB |		*/
396 /*    AACDB_FLAGS_IOCTL |	*/
397 0;
398 uint32_t aac_debug_fib_flags =
399 /*    AACDB_FLAGS_FIB_RW |	*/
400 /*    AACDB_FLAGS_FIB_IOCTL |	*/
401 /*    AACDB_FLAGS_FIB_SRB |	*/
402 /*    AACDB_FLAGS_FIB_SYNC |	*/
403 /*    AACDB_FLAGS_FIB_HEADER |	*/
404 /*    AACDB_FLAGS_FIB_TIMEOUT |	*/
405 0;
406 
407 #endif /* DEBUG */
408 
409 static struct cb_ops aac_cb_ops = {
410 	aac_open,	/* open */
411 	aac_close,	/* close */
412 	nodev,		/* strategy */
413 	nodev,		/* print */
414 	nodev,		/* dump */
415 	nodev,		/* read */
416 	nodev,		/* write */
417 	aac_ioctl,	/* ioctl */
418 	nodev,		/* devmap */
419 	nodev,		/* mmap */
420 	nodev,		/* segmap */
421 	nochpoll,	/* poll */
422 	ddi_prop_op,	/* cb_prop_op */
423 	NULL,		/* streamtab */
424 	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
425 	CB_REV,		/* cb_rev */
426 	nodev,		/* async I/O read entry point */
427 	nodev		/* async I/O write entry point */
428 };
429 
430 static struct dev_ops aac_dev_ops = {
431 	DEVO_REV,
432 	0,
433 	nodev,
434 	nulldev,
435 	nulldev,
436 	aac_attach,
437 	aac_detach,
438 	aac_reset,
439 	&aac_cb_ops,
440 	NULL,
441 	NULL
442 };
443 
444 static struct modldrv aac_modldrv = {
445 	&mod_driverops,
446 	"AAC Driver " AAC_DRIVER_VERSION,
447 	&aac_dev_ops,
448 };
449 
450 static struct modlinkage aac_modlinkage = {
451 	MODREV_1,
452 	&aac_modldrv,
453 	NULL
454 };
455 
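/* Soft state anchor for all aac instances, initialized in _init() */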
456 static struct aac_softstate  *aac_softstatep;
457 
458 /*
459  * Supported card list
460  * ordered by vendor id, subvendor id, subdevice id, and device id
461  */
462 static struct aac_card_type aac_cards[] = {
463 	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
464 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
465 	    "Dell", "PERC 3/Di"},
466 	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
467 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
468 	    "Dell", "PERC 3/Di"},
469 	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
470 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
471 	    "Dell", "PERC 3/Si"},
472 	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
473 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
474 	    "Dell", "PERC 3/Di"},
475 	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
476 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
477 	    "Dell", "PERC 3/Si"},
478 	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
479 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
480 	    "Dell", "PERC 3/Di"},
481 	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
482 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
483 	    "Dell", "PERC 3/Di"},
484 	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
485 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
486 	    "Dell", "PERC 3/Di"},
487 	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
488 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
489 	    "Dell", "PERC 3/Di"},
490 	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
491 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
492 	    "Dell", "PERC 3/Di"},
493 	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
494 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
495 	    "Dell", "PERC 320/DC"},
496 	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
497 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
498 
499 	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
500 	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
501 	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
502 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
503 	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
504 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
505 
506 	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
507 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
508 	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
509 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
510 
511 	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
512 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
513 	    "Adaptec", "2200S"},
514 	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
515 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
516 	    "Adaptec", "2120S"},
517 	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
518 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
519 	    "Adaptec", "2200S"},
520 	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
521 	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
522 	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
523 	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
524 	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
525 	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
526 	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
527 	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
528 	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
529 	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
530 	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
531 	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
532 	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
533 	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
534 	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
535 	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
536 	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
537 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
538 	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
539 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
540 	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
541 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
542 	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
543 	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
544 	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
545 	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
546 	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
547 	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
548 	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
549 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
550 	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
551 	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
552 	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
553 	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
554 	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
555 	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
556 	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
557 	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
558 	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
559 	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
560 	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
561 	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
562 	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
563 	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
564 	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
565 	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
566 	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
567 	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
568 	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
569 	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
570 	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
571 	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
572 	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
573 	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
574 	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
575 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
576 	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
577 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
578 	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
579 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
580 	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
581 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
582 	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
583 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
584 	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
585 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
586 
587 	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
588 	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
589 };
590 
591 /*
592  * Hardware access functions for i960 based cards
593  */
594 static struct aac_interface aac_rx_interface = {
595 	aac_rx_get_fwstatus,
596 	aac_rx_get_mailbox,
597 	aac_rx_set_mailbox
598 };
599 
600 /*
601  * Hardware access functions for Rocket based cards
602  */
603 static struct aac_interface aac_rkt_interface = {
604 	aac_rkt_get_fwstatus,
605 	aac_rkt_get_mailbox,
606 	aac_rkt_set_mailbox
607 };
608 
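/* Default device access attributes: little-endian, strict ordering */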
609 ddi_device_acc_attr_t aac_acc_attr = {
610 	DDI_DEVICE_ATTR_V0,
611 	DDI_STRUCTURE_LE_ACC,
612 	DDI_STRICTORDER_ACC
613 };
614 
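/*
 * Sizes and notification doorbell bits of the communication queues
 * (host/adapter, normal/high priority, command/response)
 */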
615 static struct {
616 	int	size;
617 	int	notify;
618 } aac_qinfo[] = {
619 	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
620 	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
621 	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
622 	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
623 	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
624 	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
625 	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
626 	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
627 };
628 
629 /*
630  * Default aac dma attributes
631  */
632 static ddi_dma_attr_t aac_dma_attr = {
633 	DMA_ATTR_V0,
634 	0,		/* lowest usable address */
635 	0xffffffffull,	/* high DMA address range */
636 	0xffffffffull,	/* DMA counter register */
637 	AAC_DMA_ALIGN,	/* DMA address alignment */
638 	1,		/* DMA burstsizes */
639 	1,		/* min effective DMA size */
640 	0xffffffffull,	/* max DMA xfer size */
641 	0xffffffffull,	/* segment boundary */
642 	1,		/* s/g list length */
643 	AAC_BLK_SIZE,	/* granularity of device */
644 	0		/* DMA transfer flags */
645 };
646 
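/* Argument block for the taskq that handles DR (dynamic reconfiguration) events */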
647 struct aac_drinfo {
648 	struct aac_softstate *softs;
649 	int tgt;
650 	int lun;
651 	int event;
652 };
653 
654 static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
655 static uint32_t aac_timebase = 0;	/* internal timer in seconds */
656 static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */
657 
658 /*
659  * Warlock directives
660  *
661  * Different variables with the same types have to be protected by the
662  * same mutex; otherwise, warlock will complain with "variables don't
663  * seem to be protected consistently". For example,
664  * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and are
665  * protected by aac_softstate::{io_lock, q_comp_mutex} respectively. We have
666  * to declare them as explicitly protected at aac_cmd_dequeue().
667  */
668 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
669     scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
670     mode_format mode_geometry mode_header aac_cmd))
671 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
672     aac_sge))
673 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
674     aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
675     aac_sg_table aac_srb))
676 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
677 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
678 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo))
679 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
680 
681 int
682 _init(void)
683 {
684 	int rval = 0;
685 
686 #ifdef DEBUG
687 	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
688 #endif
689 	DBCALLED(NULL, 1);
690 
691 	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
692 	    sizeof (struct aac_softstate), 0)) != 0)
693 		goto error;
694 
695 	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
696 		ddi_soft_state_fini((void *)&aac_softstatep);
697 		goto error;
698 	}
699 
700 	if ((rval = mod_install(&aac_modlinkage)) != 0) {
701 		ddi_soft_state_fini((void *)&aac_softstatep);
702 		scsi_hba_fini(&aac_modlinkage);
703 		goto error;
704 	}
705 	return (rval);
706 
707 error:
708 	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
709 #ifdef DEBUG
710 	mutex_destroy(&aac_prt_mutex);
711 #endif
712 	return (rval);
713 }
714 
715 int
716 _info(struct modinfo *modinfop)
717 {
718 	DBCALLED(NULL, 1);
719 	return (mod_info(&aac_modlinkage, modinfop));
720 }
721 
722 /*
723  * An HBA driver cannot be unloaded without a reboot,
724  * so this function is of little use.
725  */
726 int
727 _fini(void)
728 {
729 	int rval;
730 
731 	DBCALLED(NULL, 1);
732 
733 	if ((rval = mod_remove(&aac_modlinkage)) != 0)
734 		goto error;
735 
736 	scsi_hba_fini(&aac_modlinkage);
737 	ddi_soft_state_fini((void *)&aac_softstatep);
738 #ifdef DEBUG
739 	mutex_destroy(&aac_prt_mutex);
740 #endif
741 	return (0);
742 
743 error:
744 	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
745 	return (rval);
746 }
747 
748 static int
749 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
750 {
751 	int instance, i;
752 	struct aac_softstate *softs = NULL;
753 	int attach_state = 0;
754 	char *data;
755 	int intr_types;
756 
757 	DBCALLED(NULL, 1);
758 
759 	switch (cmd) {
760 	case DDI_ATTACH:
761 		break;
762 	case DDI_RESUME:
763 		return (DDI_FAILURE);
764 	default:
765 		return (DDI_FAILURE);
766 	}
767 
768 	instance = ddi_get_instance(dip);
769 
770 	/* Get soft state */
771 	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
772 		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
773 		goto error;
774 	}
775 	softs = ddi_get_soft_state(aac_softstatep, instance);
776 	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
777 
778 	softs->instance = instance;
779 	softs->devinfo_p = dip;
780 	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
781 	softs->addr_dma_attr.dma_attr_granular = 1;
782 	softs->acc_attr = aac_acc_attr;
783 	softs->card = AAC_UNKNOWN_CARD;
784 #ifdef DEBUG
785 	softs->debug_flags = aac_debug_flags;
786 	softs->debug_fib_flags = aac_debug_fib_flags;
787 #endif
788 
789 	/* Initialize FMA */
790 	aac_fm_init(softs);
791 
792 	/* Check the card type */
793 	if (aac_check_card_type(softs) == AACERR) {
794 		AACDB_PRINT(softs, CE_WARN, "Card not supported");
795 		goto error;
796 	}
797 	/* We have found the right card and everything is OK */
798 	attach_state |= AAC_ATTACH_CARD_DETECTED;
799 
800 	/* Map PCI mem space */
801 	if (ddi_regs_map_setup(dip, 1,
802 	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
803 	    softs->map_size_min, &softs->acc_attr,
804 	    &softs->pci_mem_handle) != DDI_SUCCESS)
805 		goto error;
806 
807 	softs->map_size = softs->map_size_min;
808 	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
809 
810 	AAC_DISABLE_INTR(softs);
811 
812 	/* Get the supported types of device interrupts */
813 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
814 		AACDB_PRINT(softs, CE_WARN,
815 		    "ddi_intr_get_supported_types() failed");
816 		goto error;
817 	}
818 	AACDB_PRINT(softs, CE_NOTE,
819 	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
820 
821 	/* Query interrupts, and alloc/init all needed structures */
822 	if (intr_types & DDI_INTR_TYPE_MSI) {
823 		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
824 		    != DDI_SUCCESS) {
825 			AACDB_PRINT(softs, CE_WARN,
826 			    "MSI interrupt query failed");
827 			goto error;
828 		}
829 		softs->intr_type = DDI_INTR_TYPE_MSI;
830 	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
831 		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
832 		    != DDI_SUCCESS) {
833 			AACDB_PRINT(softs, CE_WARN,
834 			    "FIXED interrupt query failed");
835 			goto error;
836 		}
837 		softs->intr_type = DDI_INTR_TYPE_FIXED;
838 	} else {
839 		AACDB_PRINT(softs, CE_WARN,
840 		    "Device cannot suppport both FIXED and MSI interrupts");
841 		goto error;
842 	}
843 
844 	/* Init mutexes */
845 	mutex_init(&softs->q_comp_mutex, NULL,
846 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
847 	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
848 	mutex_init(&softs->aifq_mutex, NULL,
849 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
850 	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
851 	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
852 	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
853 	    DDI_INTR_PRI(softs->intr_pri));
854 	attach_state |= AAC_ATTACH_KMUTEX_INITED;
855 
856 	/* Check for legacy device naming support */
857 	softs->legacy = 1; /* default to use legacy name */
858 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
859 	    "legacy-name-enable", &data) == DDI_SUCCESS)) {
860 		if (strcmp(data, "no") == 0) {
861 			AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
862 			softs->legacy = 0;
863 		}
864 		ddi_prop_free(data);
865 	}
866 
867 	/*
868 	 * Everything has been set up so far,
869 	 * so now do the common attach work.
870 	 */
871 	if (aac_common_attach(softs) == AACERR)
872 		goto error;
873 	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
874 
875 	/* Init the cmd queues */
876 	for (i = 0; i < AAC_CMDQ_NUM; i++)
877 		aac_cmd_initq(&softs->q_wait[i]);
878 	aac_cmd_initq(&softs->q_busy);
879 	aac_cmd_initq(&softs->q_comp);
880 
881 	if (aac_hba_setup(softs) != AACOK)
882 		goto error;
883 	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
884 
885 	/* Connect interrupt handlers */
886 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
887 	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
888 		AACDB_PRINT(softs, CE_WARN,
889 		    "Can not setup soft interrupt handler!");
890 		goto error;
891 	}
892 	attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;
893 
894 	if (aac_add_intrs(softs) != DDI_SUCCESS) {
895 		AACDB_PRINT(softs, CE_WARN,
896 		    "Interrupt registration failed, intr type: %s",
897 		    softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
898 		goto error;
899 	}
900 	attach_state |= AAC_ATTACH_HARD_INTR_SETUP;
901 
902 	/* Create devctl/scsi nodes for cfgadm */
903 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
904 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
905 		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
906 		goto error;
907 	}
908 	attach_state |= AAC_ATTACH_CREATE_DEVCTL;
909 
910 	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
911 	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
912 		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
913 		goto error;
914 	}
915 	attach_state |= AAC_ATTACH_CREATE_SCSI;
916 
917 	/* Create the aac node for applications to issue ioctls */
918 	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
919 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
920 		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
921 		goto error;
922 	}
923 
924 	/* Create a taskq for dealing with dr events */
925 	if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1,
926 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
927 		AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed");
928 		goto error;
929 	}
930 
931 	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
932 	softs->state = AAC_STATE_RUN;
933 
934 	/* Schedule the timeout handler (aac_daemon) for command timeouts */
935 	softs->timeout_id = timeout(aac_daemon, (void *)softs,
936 	    (60 * drv_usectohz(1000000)));
937 
938 	/* Common attach is OK, so we are attached! */
939 	AAC_ENABLE_INTR(softs);
940 	ddi_report_dev(dip);
941 	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
942 	return (DDI_SUCCESS);
943 
944 error:
945 	if (softs && softs->taskq)
946 		ddi_taskq_destroy(softs->taskq);
947 	if (attach_state & AAC_ATTACH_CREATE_SCSI)
948 		ddi_remove_minor_node(dip, "scsi");
949 	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
950 		ddi_remove_minor_node(dip, "devctl");
951 	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
952 		aac_common_detach(softs);
953 	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
954 		(void) scsi_hba_detach(dip);
955 		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
956 	}
957 	if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
958 		aac_remove_intrs(softs);
959 	if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
960 		ddi_remove_softintr(softs->softint_id);
961 	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
962 		mutex_destroy(&softs->q_comp_mutex);
963 		cv_destroy(&softs->event);
964 		mutex_destroy(&softs->aifq_mutex);
965 		cv_destroy(&softs->aifv);
966 		cv_destroy(&softs->drain_cv);
967 		mutex_destroy(&softs->io_lock);
968 	}
969 	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
970 		ddi_regs_map_free(&softs->pci_mem_handle);
971 	aac_fm_fini(softs);
972 	if (attach_state & AAC_ATTACH_CARD_DETECTED)
973 		softs->card = AACERR;
974 	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
975 		ddi_soft_state_free(aac_softstatep, instance);
976 	return (DDI_FAILURE);
977 }
978 
979 static int
980 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
981 {
982 	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
983 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
984 
985 	DBCALLED(softs, 1);
986 
987 	switch (cmd) {
988 	case DDI_DETACH:
989 		break;
990 	case DDI_SUSPEND:
991 		return (DDI_FAILURE);
992 	default:
993 		return (DDI_FAILURE);
994 	}
995 
996 	mutex_enter(&softs->io_lock);
997 	AAC_DISABLE_INTR(softs);
998 	softs->state = AAC_STATE_STOPPED;
999 
1000 	mutex_exit(&softs->io_lock);
1001 	(void) untimeout(softs->timeout_id);
1002 	mutex_enter(&softs->io_lock);
1003 	softs->timeout_id = 0;
1004 
1005 	ddi_taskq_destroy(softs->taskq);
1006 
1007 	ddi_remove_minor_node(dip, "aac");
1008 	ddi_remove_minor_node(dip, "scsi");
1009 	ddi_remove_minor_node(dip, "devctl");
1010 
1011 	mutex_exit(&softs->io_lock);
1012 	aac_remove_intrs(softs);
1013 	ddi_remove_softintr(softs->softint_id);
1014 
1015 	aac_common_detach(softs);
1016 
1017 	(void) scsi_hba_detach(dip);
1018 	scsi_hba_tran_free(tran);
1019 
1020 	mutex_destroy(&softs->q_comp_mutex);
1021 	cv_destroy(&softs->event);
1022 	mutex_destroy(&softs->aifq_mutex);
1023 	cv_destroy(&softs->aifv);
1024 	cv_destroy(&softs->drain_cv);
1025 	mutex_destroy(&softs->io_lock);
1026 
1027 	ddi_regs_map_free(&softs->pci_mem_handle);
1028 	aac_fm_fini(softs);
1029 	softs->hwif = AAC_HWIF_UNKNOWN;
1030 	softs->card = AAC_UNKNOWN_CARD;
1031 	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1032 
1033 	return (DDI_SUCCESS);
1034 }
1035 
1036 /*ARGSUSED*/
1037 static int
1038 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1039 {
1040 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1041 
1042 	DBCALLED(softs, 1);
1043 
1044 	mutex_enter(&softs->io_lock);
1045 	(void) aac_shutdown(softs);
1046 	mutex_exit(&softs->io_lock);
1047 
1048 	return (DDI_SUCCESS);
1049 }
1050 
1051 /*
1052  * Bring the controller down to a dormant state and detach all child devices.
1053  * This function is called before detach or system shutdown.
1054  * Note: we can assume that the q_wait on the controller is empty, as we
1055  * won't allow shutdown if any device is open.
1056  */
1057 static int
1058 aac_shutdown(struct aac_softstate *softs)
1059 {
1060 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
1061 	struct aac_close_command *cc = (struct aac_close_command *) \
1062 	    &softs->sync_slot.fibp->data[0];
1063 	int rval;
1064 
1065 	ddi_put32(acc, &cc->Command, VM_CloseAll);
1066 	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1067 
1068 	/* Flush all caches, set FW to write through mode */
1069 	rval = aac_sync_fib(softs, ContainerCommand,
1070 	    AAC_FIB_SIZEOF(struct aac_close_command));
1071 
1072 	AACDB_PRINT(softs, CE_NOTE,
1073 	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1074 	return (rval);
1075 }
1076 
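/*
 * Soft interrupt handler: drain the completion queue that the hard interrupt
 * handlers have filled
 */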
1077 static uint_t
1078 aac_softintr(caddr_t arg)
1079 {
1080 	struct aac_softstate *softs = (void *)arg;
1081 
1082 	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1083 		aac_drain_comp_q(softs);
1084 		return (DDI_INTR_CLAIMED);
1085 	} else {
1086 		return (DDI_INTR_UNCLAIMED);
1087 	}
1088 }
1089 
1090 /*
1091  * Setup auto sense data for pkt
1092  */
1093 static void
1094 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1095     uchar_t add_code, uchar_t qual_code, uint64_t info)
1096 {
1097 	struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1098 
1099 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1100 	pkt->pkt_state |= STATE_ARQ_DONE;
1101 
1102 	*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1103 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
1104 	arqstat->sts_rqpkt_resid = 0;
1105 	arqstat->sts_rqpkt_state =
1106 	    STATE_GOT_BUS |
1107 	    STATE_GOT_TARGET |
1108 	    STATE_SENT_CMD |
1109 	    STATE_XFERRED_DATA;
1110 	arqstat->sts_rqpkt_statistics = 0;
1111 
1112 	if (info <= 0xfffffffful) {
1113 		arqstat->sts_sensedata.es_valid = 1;
1114 		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1115 		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1116 		arqstat->sts_sensedata.es_key = key;
1117 		arqstat->sts_sensedata.es_add_code = add_code;
1118 		arqstat->sts_sensedata.es_qual_code = qual_code;
1119 
1120 		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1121 		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1122 		arqstat->sts_sensedata.es_info_3 = (info >>  8) & 0xFF;
1123 		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1124 	} else { /* 64-bit LBA */
1125 		struct scsi_descr_sense_hdr *dsp;
1126 		struct scsi_information_sense_descr *isd;
1127 
1128 		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1129 		dsp->ds_class = CLASS_EXTENDED_SENSE;
1130 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1131 		dsp->ds_key = key;
1132 		dsp->ds_add_code = add_code;
1133 		dsp->ds_qual_code = qual_code;
1134 		dsp->ds_addl_sense_length =
1135 		    sizeof (struct scsi_information_sense_descr);
1136 
1137 		isd = (struct scsi_information_sense_descr *)(dsp+1);
1138 		isd->isd_descr_type = DESCR_INFORMATION;
1139 		isd->isd_valid = 1;
1140 		isd->isd_information[0] = (info >> 56) & 0xFF;
1141 		isd->isd_information[1] = (info >> 48) & 0xFF;
1142 		isd->isd_information[2] = (info >> 40) & 0xFF;
1143 		isd->isd_information[3] = (info >> 32) & 0xFF;
1144 		isd->isd_information[4] = (info >> 24) & 0xFF;
1145 		isd->isd_information[5] = (info >> 16) & 0xFF;
1146 		isd->isd_information[6] = (info >>  8) & 0xFF;
1147 		isd->isd_information[7] = (info) & 0xFF;
1148 	}
1149 }
1150 
1151 /*
1152  * Setup auto sense data for HARDWARE ERROR
1153  */
1154 static void
1155 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1156 {
1157 	union scsi_cdb *cdbp;
1158 	uint64_t err_blkno;
1159 
1160 	cdbp = (void *)acp->pkt->pkt_cdbp;
1161 	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1162 	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1163 }
1164 
1165 /*
1166  * Setup auto sense data for UNIT ATTENTION
1167  */
1168 /*ARGSUSED*/
1169 static void
1170 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
1171 {
1172 	struct aac_container *dvp = (struct aac_container *)acp->dvp;
1173 
1174 	ASSERT(dvp->dev.type == AAC_DEV_LD);
1175 
1176 	if (dvp->reset) {
1177 		dvp->reset = 0;
1178 		aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
1179 	}
1180 }
1181 
1182 /*
1183  * Send a command to the adapter via the New Comm. interface
1184  */
1185 static int
1186 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1187 {
1188 	uint32_t index, device;
1189 
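	/*
	 * Fetch a free entry offset from the inbound queue; 0xffffffff means
	 * the queue is temporarily full, so retry once before giving up.
	 */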
1190 	index = PCI_MEM_GET32(softs, AAC_IQUE);
1191 	if (index == 0xffffffffUL) {
1192 		index = PCI_MEM_GET32(softs, AAC_IQUE);
1193 		if (index == 0xffffffffUL)
1194 			return (AACERR);
1195 	}
1196 
1197 	device = index;
1198 	PCI_MEM_PUT32(softs, device,
1199 	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1200 	device += 4;
1201 	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1202 	device += 4;
1203 	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1204 	PCI_MEM_PUT32(softs, AAC_IQUE, index);
1205 	return (AACOK);
1206 }
1207 
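/*
 * Finish an I/O: release the FIB slot (if any), update the per-device and
 * per-bus outstanding command counts, remove the command from the busy or
 * wait queue, and then either queue it for completion callbacks or wake up
 * any sync waiter.
 */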
1208 static void
1209 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1210 {
1211 	struct aac_device *dvp = acp->dvp;
1212 	int q = AAC_CMDQ(acp);
1213 
1214 	if (acp->slotp) { /* outstanding cmd */
1215 		aac_release_slot(softs, acp->slotp);
1216 		acp->slotp = NULL;
1217 		if (dvp) {
1218 			dvp->ncmds[q]--;
1219 			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1220 			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1221 				aac_set_throttle(softs, dvp, q,
1222 				    softs->total_slots);
1223 		}
1224 		softs->bus_ncmds[q]--;
1225 		(void) aac_cmd_delete(&softs->q_busy, acp);
1226 	} else { /* cmd in waiting queue */
1227 		aac_cmd_delete(&softs->q_wait[q], acp);
1228 	}
1229 
1230 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1231 		mutex_enter(&softs->q_comp_mutex);
1232 		aac_cmd_enqueue(&softs->q_comp, acp);
1233 		mutex_exit(&softs->q_comp_mutex);
1234 	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1235 		cv_broadcast(&softs->event);
1236 	}
1237 }
1238 
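/*
 * Handle a completed FIB reported by the firmware: look up the I/O slot from
 * the returned index and complete the associated command.
 */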
1239 static void
1240 aac_handle_io(struct aac_softstate *softs, int index)
1241 {
1242 	struct aac_slot *slotp;
1243 	struct aac_cmd *acp;
1244 	uint32_t fast;
1245 
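	/*
	 * The low bits of the index reported by the firmware carry flags
	 * (e.g. the fast-response indicator); the remaining bits are the
	 * slot index.
	 */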
1246 	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1247 	index >>= 2;
1248 
1249 	/* Make sure firmware reported index is valid */
1250 	ASSERT(index >= 0 && index < softs->total_slots);
1251 	slotp = &softs->io_slot[index];
1252 	ASSERT(slotp->index == index);
1253 	acp = slotp->acp;
1254 
1255 	if (acp == NULL || acp->slotp != slotp) {
1256 		cmn_err(CE_WARN,
1257 		    "Firmware error: invalid slot index received from FW");
1258 		return;
1259 	}
1260 
1261 	acp->flags |= AAC_CMD_CMPLT;
1262 	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1263 
1264 	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1265 		/*
1266 		 * For fast response IO, the firmware does not return any FIB
1267 		 * data, so we need to fill in the FIB status and state so that
1268 		 * FIB users can handle it correctly.
1269 		 */
1270 		if (fast) {
1271 			uint32_t state;
1272 
1273 			state = ddi_get32(slotp->fib_acc_handle,
1274 			    &slotp->fibp->Header.XferState);
1275 			/*
1276 			 * Update state for CPU not for device, no DMA sync
1277 			 * needed
1278 			 */
1279 			ddi_put32(slotp->fib_acc_handle,
1280 			    &slotp->fibp->Header.XferState,
1281 			    state | AAC_FIBSTATE_DONEADAP);
1282 			ddi_put32(slotp->fib_acc_handle,
1283 			    (void *)&slotp->fibp->data[0], ST_OK);
1284 		}
1285 
1286 		/* Handle completed ac */
1287 		acp->ac_comp(softs, acp);
1288 	} else {
1289 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1290 		acp->flags |= AAC_CMD_ERR;
1291 		if (acp->pkt) {
1292 			acp->pkt->pkt_reason = CMD_TRAN_ERR;
1293 			acp->pkt->pkt_statistics = 0;
1294 		}
1295 	}
1296 	aac_end_io(softs, acp);
1297 }
1298 
1299 /*
1300  * Interrupt handler for the New Comm. interface
1301  * The New Comm. interface uses a different interrupt mechanism: there are no
1302  * explicit message queues, and the driver only needs to access the mapped PCI
1303  * mem space to find the completed FIB or AIF.
1304  */
1305 static int
1306 aac_process_intr_new(struct aac_softstate *softs)
1307 {
1308 	uint32_t index;
1309 
1310 	index = AAC_OUTB_GET(softs);
1311 	if (index == 0xfffffffful)
1312 		index = AAC_OUTB_GET(softs);
1313 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1314 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1315 		return (0);
1316 	}
1317 	if (index != 0xfffffffful) {
1318 		do {
1319 			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1320 				aac_handle_io(softs, index);
1321 			} else if (index != 0xfffffffeul) {
1322 				struct aac_fib *fibp;	/* FIB in AIF queue */
1323 				uint16_t fib_size, fib_size0;
1324 
1325 				/*
1326 				 * 0xfffffffe means that the controller wants
1327 				 * more work; ignore it for now. Otherwise an
1328 				 * AIF has been received.
1329 				 */
1330 				index &= ~2;
1331 
1332 				mutex_enter(&softs->aifq_mutex);
1333 				/*
1334 				 * Copy AIF from adapter to the empty AIF slot
1335 				 */
1336 				fibp = &softs->aifq[softs->aifq_idx].d;
1337 				fib_size0 = PCI_MEM_GET16(softs, index + \
1338 				    offsetof(struct aac_fib, Header.Size));
1339 				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1340 				    AAC_FIB_SIZE : fib_size0;
1341 				PCI_MEM_REP_GET8(softs, index, fibp,
1342 				    fib_size);
1343 
1344 				if (aac_check_acc_handle(softs-> \
1345 				    pci_mem_handle) == DDI_SUCCESS)
1346 					(void) aac_handle_aif(softs, fibp);
1347 				else
1348 					ddi_fm_service_impact(softs->devinfo_p,
1349 					    DDI_SERVICE_UNAFFECTED);
1350 				mutex_exit(&softs->aifq_mutex);
1351 
1352 				/*
1353 				 * AIF memory is owned by the adapter, so let it
1354 				 * know that we are done with it.
1355 				 */
1356 				AAC_OUTB_SET(softs, index);
1357 				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1358 			}
1359 
1360 			index = AAC_OUTB_GET(softs);
1361 		} while (index != 0xfffffffful);
1362 
1363 		/*
1364 		 * Process waiting cmds before starting new ones to
1365 		 * ensure earlier IOs are serviced first.
1366 		 */
1367 		aac_start_waiting_io(softs);
1368 		return (AAC_DB_COMMAND_READY);
1369 	} else {
1370 		return (0);
1371 	}
1372 }
1373 
1374 static uint_t
1375 aac_intr_new(caddr_t arg)
1376 {
1377 	struct aac_softstate *softs = (void *)arg;
1378 	uint_t rval;
1379 
1380 	mutex_enter(&softs->io_lock);
1381 	if (aac_process_intr_new(softs))
1382 		rval = DDI_INTR_CLAIMED;
1383 	else
1384 		rval = DDI_INTR_UNCLAIMED;
1385 	mutex_exit(&softs->io_lock);
1386 
1387 	aac_drain_comp_q(softs);
1388 	return (rval);
1389 }
1390 
1391 /*
1392  * Interrupt handler for the old interface
1393  * Explicit message queues are used to send FIBs to and get completed FIBs from
1394  * the adapter. The driver and adapter maintain the queues in a producer/consumer
1395  * manner. The driver has to query the queues to find the completed FIBs.
1396  */
1397 static int
1398 aac_process_intr_old(struct aac_softstate *softs)
1399 {
1400 	uint16_t status;
1401 
1402 	status = AAC_STATUS_GET(softs);
1403 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1404 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1405 		return (DDI_INTR_UNCLAIMED);
1406 	}
1407 	if (status & AAC_DB_RESPONSE_READY) {
1408 		int slot_idx;
1409 
1410 		/* ACK the intr */
1411 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1412 		(void) AAC_STATUS_GET(softs);
1413 		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1414 		    &slot_idx) == AACOK)
1415 			aac_handle_io(softs, slot_idx);
1416 
1417 		/*
1418 		 * Process waiting cmds before starting new ones to
1419 		 * ensure earlier IOs are serviced first.
1420 		 */
1421 		aac_start_waiting_io(softs);
1422 		return (AAC_DB_RESPONSE_READY);
1423 	} else if (status & AAC_DB_COMMAND_READY) {
1424 		int aif_idx;
1425 
1426 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1427 		(void) AAC_STATUS_GET(softs);
1428 		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1429 		    AACOK) {
1430 			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1431 			struct aac_fib *fibp;	/* FIB in AIF queue */
1432 			struct aac_fib *fibp0;	/* FIB in communication space */
1433 			uint16_t fib_size, fib_size0;
1434 			uint32_t fib_xfer_state;
1435 			uint32_t addr, size;
1436 
1437 			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1438 
1439 #define	AAC_SYNC_AIF(softs, aif_idx, type) \
1440 	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1441 	    offsetof(struct aac_comm_space, \
1442 	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1443 	    (type)); }
1444 
1445 			mutex_enter(&softs->aifq_mutex);
1446 			/* Copy AIF from adapter to the empty AIF slot */
1447 			fibp = &softs->aifq[softs->aifq_idx].d;
1448 			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1449 			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1450 			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1451 			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1452 			    AAC_FIB_SIZE : fib_size0;
1453 			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
1454 			    fib_size, DDI_DEV_AUTOINCR);
1455 
1456 			(void) aac_handle_aif(softs, fibp);
1457 			mutex_exit(&softs->aifq_mutex);
1458 
1459 			/* Complete AIF back to adapter with good status */
1460 			fib_xfer_state = LE_32(fibp->Header.XferState);
1461 			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1462 				ddi_put32(acc, &fibp0->Header.XferState,
1463 				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1464 				ddi_put32(acc, (void *)&fibp0->data[0], ST_OK);
1465 				if (fib_size0 > AAC_FIB_SIZE)
1466 					ddi_put16(acc, &fibp0->Header.Size,
1467 					    AAC_FIB_SIZE);
1468 				AAC_SYNC_AIF(softs, aif_idx,
1469 				    DDI_DMA_SYNC_FORDEV);
1470 			}
1471 
1472 			/* Put the AIF response on the response queue */
1473 			addr = ddi_get32(acc,
1474 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1475 			    Header.SenderFibAddress);
1476 			size = (uint32_t)ddi_get16(acc,
1477 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1478 			    Header.Size);
1479 			ddi_put32(acc,
1480 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1481 			    Header.ReceiverFibAddress, addr);
1482 			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1483 			    addr, size) == AACERR)
1484 				cmn_err(CE_NOTE, "!AIF ack failed");
1485 		}
1486 		return (AAC_DB_COMMAND_READY);
1487 	} else if (status & AAC_DB_PRINTF_READY) {
1488 		/* ACK the intr */
1489 		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1490 		(void) AAC_STATUS_GET(softs);
1491 		(void) ddi_dma_sync(softs->comm_space_dma_handle,
1492 		    offsetof(struct aac_comm_space, adapter_print_buf),
1493 		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1494 		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1495 		    DDI_SUCCESS)
1496 			cmn_err(CE_NOTE, "MSG From Adapter: %s",
1497 			    softs->comm_space->adapter_print_buf);
1498 		else
1499 			ddi_fm_service_impact(softs->devinfo_p,
1500 			    DDI_SERVICE_UNAFFECTED);
1501 		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1502 		return (AAC_DB_PRINTF_READY);
1503 	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
1504 		/*
1505 		 * Without these two condition statements, the OS could hang
1506 		 * after a while, especially if there are a lot of AIFs to
1507 		 * handle, for instance if a drive is pulled from an array
1508 		 * under heavy load.
1509 		 */
1510 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1511 		return (AAC_DB_COMMAND_NOT_FULL);
1512 	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1513 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1514 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1515 		return (AAC_DB_RESPONSE_NOT_FULL);
1516 	} else {
1517 		return (0);
1518 	}
1519 }
1520 
1521 static uint_t
1522 aac_intr_old(caddr_t arg)
1523 {
1524 	struct aac_softstate *softs = (void *)arg;
1525 	int rval;
1526 
1527 	mutex_enter(&softs->io_lock);
1528 	if (aac_process_intr_old(softs))
1529 		rval = DDI_INTR_CLAIMED;
1530 	else
1531 		rval = DDI_INTR_UNCLAIMED;
1532 	mutex_exit(&softs->io_lock);
1533 
1534 	aac_drain_comp_q(softs);
1535 	return (rval);
1536 }
1537 
1538 /*
1539  * Query FIXED or MSI interrupts
1540  */
1541 static int
1542 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1543 {
1544 	dev_info_t *dip = softs->devinfo_p;
1545 	int avail, actual, intr_size, count;
1546 	int i, flag, ret;
1547 
1548 	AACDB_PRINT(softs, CE_NOTE,
1549 	    "aac_query_intrs:interrupt type 0x%x", intr_type);
1550 
1551 	/* Get number of interrupts */
1552 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1553 	if ((ret != DDI_SUCCESS) || (count == 0)) {
1554 		AACDB_PRINT(softs, CE_WARN,
1555 		    "ddi_intr_get_nintrs() failed, ret %d count %d",
1556 		    ret, count);
1557 		return (DDI_FAILURE);
1558 	}
1559 
1560 	/* Get number of available interrupts */
1561 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
1562 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
1563 		AACDB_PRINT(softs, CE_WARN,
1564 		    "ddi_intr_get_navail() failed, ret %d avail %d",
1565 		    ret, avail);
1566 		return (DDI_FAILURE);
1567 	}
1568 
1569 	AACDB_PRINT(softs, CE_NOTE,
1570 	    "ddi_intr_get_nvail returned %d, navail() returned %d",
1571 	    count, avail);
1572 
1573 	/* Allocate an array of interrupt handles */
1574 	intr_size = count * sizeof (ddi_intr_handle_t);
1575 	softs->htable = kmem_alloc(intr_size, KM_SLEEP);
1576 
1577 	if (intr_type == DDI_INTR_TYPE_MSI) {
1578 		count = 1; /* only one vector needed for now */
1579 		flag = DDI_INTR_ALLOC_STRICT;
1580 	} else { /* must be DDI_INTR_TYPE_FIXED */
1581 		flag = DDI_INTR_ALLOC_NORMAL;
1582 	}
1583 
1584 	/* Call ddi_intr_alloc() */
1585 	ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1586 	    count, &actual, flag);
1587 
1588 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
1589 		AACDB_PRINT(softs, CE_WARN,
1590 		    "ddi_intr_alloc() failed, ret = %d", ret);
1591 		actual = 0;
1592 		goto error;
1593 	}
1594 
1595 	if (actual < count) {
1596 		AACDB_PRINT(softs, CE_NOTE,
1597 		    "Requested: %d, Received: %d", count, actual);
1598 		goto error;
1599 	}
1600 
1601 	softs->intr_cnt = actual;
1602 
1603 	/* Get the priority of the first vector; assume the rest are the same */
1604 	if ((ret = ddi_intr_get_pri(softs->htable[0],
1605 	    &softs->intr_pri)) != DDI_SUCCESS) {
1606 		AACDB_PRINT(softs, CE_WARN,
1607 		    "ddi_intr_get_pri() failed, ret = %d", ret);
1608 		goto error;
1609 	}
1610 
1611 	/* Test for high level mutex */
1612 	if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1613 		AACDB_PRINT(softs, CE_WARN,
1614 		    "aac_query_intrs: Hi level interrupt not supported");
1615 		goto error;
1616 	}
1617 
1618 	return (DDI_SUCCESS);
1619 
1620 error:
1621 	/* Free already allocated intr */
1622 	for (i = 0; i < actual; i++)
1623 		(void) ddi_intr_free(softs->htable[i]);
1624 
1625 	kmem_free(softs->htable, intr_size);
1626 	return (DDI_FAILURE);
1627 }
1628 
1629 /*
1630  * Register FIXED or MSI interrupts, and enable them
1631  */
1632 static int
1633 aac_add_intrs(struct aac_softstate *softs)
1634 {
1635 	int i, ret;
1636 	int intr_size, actual;
1637 	ddi_intr_handler_t *aac_intr;
1638 
1639 	actual = softs->intr_cnt;
1640 	intr_size = actual * sizeof (ddi_intr_handle_t);
1641 	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1642 	    aac_intr_new : aac_intr_old);
1643 
1644 	/* Call ddi_intr_add_handler() */
1645 	for (i = 0; i < actual; i++) {
1646 		if ((ret = ddi_intr_add_handler(softs->htable[i],
1647 		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1648 			cmn_err(CE_WARN,
1649 			    "ddi_intr_add_handler() failed ret = %d", ret);
1650 
1651 			/* Free already allocated intr */
1652 			for (i = 0; i < actual; i++)
1653 				(void) ddi_intr_free(softs->htable[i]);
1654 
1655 			kmem_free(softs->htable, intr_size);
1656 			return (DDI_FAILURE);
1657 		}
1658 	}
1659 
1660 	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1661 	    != DDI_SUCCESS) {
1662 		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1663 
1664 		/* Free already allocated intr */
1665 		for (i = 0; i < actual; i++)
1666 			(void) ddi_intr_free(softs->htable[i]);
1667 
1668 		kmem_free(softs->htable, intr_size);
1669 		return (DDI_FAILURE);
1670 	}
1671 
1672 	/* Enable interrupts */
1673 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1674 		/* for MSI block enable */
1675 		(void) ddi_intr_block_enable(softs->htable, softs->intr_cnt);
1676 	} else {
1677 		/* Call ddi_intr_enable() for legacy/MSI non block enable */
1678 		for (i = 0; i < softs->intr_cnt; i++)
1679 			(void) ddi_intr_enable(softs->htable[i]);
1680 	}
1681 
1682 	return (DDI_SUCCESS);
1683 }
1684 
1685 /*
1686  * Unregister FIXED or MSI interrupts
1687  */
1688 static void
1689 aac_remove_intrs(struct aac_softstate *softs)
1690 {
1691 	int i;
1692 
1693 	/* Disable all interrupts */
1694 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1695 		/* Call ddi_intr_block_disable() */
1696 		(void) ddi_intr_block_disable(softs->htable, softs->intr_cnt);
1697 	} else {
1698 		for (i = 0; i < softs->intr_cnt; i++)
1699 			(void) ddi_intr_disable(softs->htable[i]);
1700 	}
1701 
1702 	/* Call ddi_intr_remove_handler() */
1703 	for (i = 0; i < softs->intr_cnt; i++) {
1704 		(void) ddi_intr_remove_handler(softs->htable[i]);
1705 		(void) ddi_intr_free(softs->htable[i]);
1706 	}
1707 
1708 	kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t));
1709 }
1710 
1711 /*
1712  * Set pkt_reason and OR in pkt_statistics flag
1713  */
1714 static void
1715 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1716     uchar_t reason, uint_t stat)
1717 {
1718 #ifndef __lock_lint
1719 	_NOTE(ARGUNUSED(softs))
1720 #endif
1721 	if (acp->pkt->pkt_reason == CMD_CMPLT)
1722 		acp->pkt->pkt_reason = reason;
1723 	acp->pkt->pkt_statistics |= stat;
1724 }
1725 
1726 /*
1727  * Handle a finished pkt of soft SCMD
1728  */
1729 static void
1730 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1731 {
1732 	ASSERT(acp->pkt);
1733 
1734 	acp->flags |= AAC_CMD_CMPLT;
1735 
1736 	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1737 	    STATE_SENT_CMD | STATE_GOT_STATUS;
1738 	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1739 		acp->pkt->pkt_resid = 0;
1740 
1741 	/* AAC_CMD_NO_INTR means no completion callback */
1742 	if (!(acp->flags & AAC_CMD_NO_INTR)) {
1743 		mutex_enter(&softs->q_comp_mutex);
1744 		aac_cmd_enqueue(&softs->q_comp, acp);
1745 		mutex_exit(&softs->q_comp_mutex);
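		/* Kick the soft interrupt to drain the completion queue */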
1746 		ddi_trigger_softintr(softs->softint_id);
1747 	}
1748 }
1749 
1750 /*
1751  * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1752  */
1753 
1754 /*
1755  * Handle completed logical device IO command
1756  */
1757 /*ARGSUSED*/
1758 static void
1759 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1760 {
1761 	struct aac_slot *slotp = acp->slotp;
1762 	struct aac_blockread_response *resp;
1763 	uint32_t status;
1764 
1765 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1766 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1767 
1768 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
1769 
1770 	/*
1771 	 * The block_read and block_write responses share the same header
1772 	 * layout, so the blockread response structure is used for both.
1773 	 */
1774 	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1775 	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1776 	if (status == ST_OK) {
1777 		acp->pkt->pkt_resid = 0;
1778 		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1779 	} else {
1780 		aac_set_arq_data_hwerr(acp);
1781 	}
1782 }
1783 
1784 /*
1785  * Handle completed phys. device IO command
1786  */
1787 static void
1788 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1789 {
1790 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1791 	struct aac_fib *fibp = acp->slotp->fibp;
1792 	struct scsi_pkt *pkt = acp->pkt;
1793 	struct aac_srb_reply *resp;
1794 	uint32_t resp_status;
1795 
1796 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1797 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1798 
1799 	resp = (struct aac_srb_reply *)&fibp->data[0];
1800 	resp_status = ddi_get32(acc, &resp->status);
1801 
1802 	/* First check FIB status */
1803 	if (resp_status == ST_OK) {
1804 		uint32_t scsi_status;
1805 		uint32_t srb_status;
1806 		uint32_t data_xfer_length;
1807 
1808 		scsi_status = ddi_get32(acc, &resp->scsi_status);
1809 		srb_status = ddi_get32(acc, &resp->srb_status);
1810 		data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1811 
1812 		*pkt->pkt_scbp = (uint8_t)scsi_status;
1813 		pkt->pkt_state |= STATE_GOT_STATUS;
1814 		if (scsi_status == STATUS_GOOD) {
1815 			uchar_t cmd = ((union scsi_cdb *)(void *)
1816 			    (pkt->pkt_cdbp))->scc_cmd;
1817 
1818 			/* Next check SRB status */
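			/* Only the low 6 bits of srb_status carry the status code */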
1819 			switch (srb_status & 0x3f) {
1820 			case SRB_STATUS_DATA_OVERRUN:
1821 				AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1822 				    "scmd=%d, xfer=%d, buflen=%d",
1823 				    (uint32_t)cmd, data_xfer_length,
1824 				    acp->bcount);
1825 
1826 				switch (cmd) {
1827 				case SCMD_READ:
1828 				case SCMD_WRITE:
1829 				case SCMD_READ_G1:
1830 				case SCMD_WRITE_G1:
1831 				case SCMD_READ_G4:
1832 				case SCMD_WRITE_G4:
1833 				case SCMD_READ_G5:
1834 				case SCMD_WRITE_G5:
1835 					aac_set_pkt_reason(softs, acp,
1836 					    CMD_DATA_OVR, 0);
1837 					break;
1838 				}
1839 				/*FALLTHRU*/
1840 			case SRB_STATUS_ERROR_RECOVERY:
1841 			case SRB_STATUS_PENDING:
1842 			case SRB_STATUS_SUCCESS:
1843 				/*
1844 				 * pkt_resid should only be calculated if the
1845 				 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1846 				 * OVERRUN/UNDERRUN
1847 				 */
1848 				if (data_xfer_length) {
1849 					pkt->pkt_state |= STATE_XFERRED_DATA;
1850 					pkt->pkt_resid = acp->bcount - \
1851 					    data_xfer_length;
1852 					ASSERT(pkt->pkt_resid >= 0);
1853 				}
1854 				break;
1855 			case SRB_STATUS_ABORTED:
1856 				AACDB_PRINT(softs, CE_NOTE,
1857 				    "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1858 				    data_xfer_length, pkt->pkt_resid);
1859 				aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1860 				    STAT_ABORTED);
1861 				break;
1862 			case SRB_STATUS_ABORT_FAILED:
1863 				AACDB_PRINT(softs, CE_NOTE,
1864 				    "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1865 				    "resid=%d", data_xfer_length,
1866 				    pkt->pkt_resid);
1867 				aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1868 				    0);
1869 				break;
1870 			case SRB_STATUS_PARITY_ERROR:
1871 				AACDB_PRINT(softs, CE_NOTE,
1872 				    "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1873 				    "resid=%d", data_xfer_length,
1874 				    pkt->pkt_resid);
1875 				aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1876 				break;
1877 			case SRB_STATUS_NO_DEVICE:
1878 			case SRB_STATUS_INVALID_PATH_ID:
1879 			case SRB_STATUS_INVALID_TARGET_ID:
1880 			case SRB_STATUS_INVALID_LUN:
1881 			case SRB_STATUS_SELECTION_TIMEOUT:
1882 #ifdef DEBUG
1883 				if (AAC_DEV_IS_VALID(acp->dvp)) {
1884 					AACDB_PRINT(softs, CE_NOTE,
1885 					    "SRB_STATUS_NO_DEVICE(%d), " \
1886 					    "xfer=%d, resid=%d ",
1887 					    srb_status & 0x3f,
1888 					    data_xfer_length, pkt->pkt_resid);
1889 				}
1890 #endif
1891 				aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1892 				break;
1893 			case SRB_STATUS_COMMAND_TIMEOUT:
1894 			case SRB_STATUS_TIMEOUT:
1895 				AACDB_PRINT(softs, CE_NOTE,
1896 				    "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1897 				    "resid=%d", data_xfer_length,
1898 				    pkt->pkt_resid);
1899 				aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1900 				    STAT_TIMEOUT);
1901 				break;
1902 			case SRB_STATUS_BUS_RESET:
1903 				AACDB_PRINT(softs, CE_NOTE,
1904 				    "SRB_STATUS_BUS_RESET, xfer=%d, " \
1905 				    "resid=%d", data_xfer_length,
1906 				    pkt->pkt_resid);
1907 				aac_set_pkt_reason(softs, acp, CMD_RESET,
1908 				    STAT_BUS_RESET);
1909 				break;
1910 			default:
1911 				AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1912 				    "xfer=%d, resid=%d", srb_status & 0x3f,
1913 				    data_xfer_length, pkt->pkt_resid);
1914 				aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1915 				break;
1916 			}
1917 		} else if (scsi_status == STATUS_CHECK) {
1918 			/* CHECK CONDITION */
1919 			struct scsi_arq_status *arqstat =
1920 			    (void *)(pkt->pkt_scbp);
1921 			uint32_t sense_data_size;
1922 
1923 			pkt->pkt_state |= STATE_ARQ_DONE;
1924 
1925 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1926 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
1927 			arqstat->sts_rqpkt_resid = 0;
1928 			arqstat->sts_rqpkt_state =
1929 			    STATE_GOT_BUS |
1930 			    STATE_GOT_TARGET |
1931 			    STATE_SENT_CMD |
1932 			    STATE_XFERRED_DATA;
1933 			arqstat->sts_rqpkt_statistics = 0;
1934 
1935 			sense_data_size = ddi_get32(acc,
1936 			    &resp->sense_data_size);
1937 			ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1938 			AACDB_PRINT(softs, CE_NOTE,
1939 			    "CHECK CONDITION: sense len=%d, xfer len=%d",
1940 			    sense_data_size, data_xfer_length);
1941 
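			/* Truncate the sense data to what fits in sts_sensedata */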
1942 			if (sense_data_size > SENSE_LENGTH)
1943 				sense_data_size = SENSE_LENGTH;
1944 			ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1945 			    (uint8_t *)resp->sense_data, sense_data_size,
1946 			    DDI_DEV_AUTOINCR);
1947 		} else {
1948 			AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
1949 			    "scsi_status=%d, srb_status=%d",
1950 			    scsi_status, srb_status);
1951 			aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1952 		}
1953 	} else {
1954 		AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
1955 		    resp_status);
1956 		aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1957 	}
1958 }
1959 
1960 /*
1961  * Handle completed IOCTL command
1962  */
1963 /*ARGSUSED*/
1964 void
1965 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1966 {
1967 	struct aac_slot *slotp = acp->slotp;
1968 
1969 	/*
1970 	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
1971 	 * may wait on softs->event, so use cv_broadcast() instead
1972 	 * of cv_signal().
1973 	 */
1974 	ASSERT(acp->flags & AAC_CMD_SYNC);
1975 	ASSERT(acp->flags & AAC_CMD_NO_CB);
1976 
1977 	/* Get the size of the response FIB from its FIB.Header.Size field */
1978 	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
1979 	    &slotp->fibp->Header.Size);
1980 
1981 	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
1982 	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
1983 	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
1984 }
1985 
1986 /*
1987  * Handle completed Flush command
1988  */
1989 /*ARGSUSED*/
1990 static void
1991 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1992 {
1993 	struct aac_slot *slotp = acp->slotp;
1994 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
1995 	struct aac_synchronize_reply *resp;
1996 	uint32_t status;
1997 
1998 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1999 
2000 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
2001 
2002 	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2003 	status = ddi_get32(acc, &resp->Status);
2004 	if (status != CT_OK)
2005 		aac_set_arq_data_hwerr(acp);
2006 }
2007 
2008 /*
2009  * Access PCI space to see if the driver can support the card
2010  */
2011 static int
2012 aac_check_card_type(struct aac_softstate *softs)
2013 {
2014 	ddi_acc_handle_t pci_config_handle;
2015 	int card_index;
2016 	uint32_t pci_cmd;
2017 
2018 	/* Map pci configuration space */
2019 	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2020 	    DDI_SUCCESS) {
2021 		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2022 		return (AACERR);
2023 	}
2024 
2025 	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2026 	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2027 	softs->subvendid = pci_config_get16(pci_config_handle,
2028 	    PCI_CONF_SUBVENID);
2029 	softs->subsysid = pci_config_get16(pci_config_handle,
2030 	    PCI_CONF_SUBSYSID);
2031 
2032 	card_index = 0;
2033 	while (!CARD_IS_UNKNOWN(card_index)) {
2034 		if ((aac_cards[card_index].vendor == softs->vendid) &&
2035 		    (aac_cards[card_index].device == softs->devid) &&
2036 		    (aac_cards[card_index].subvendor == softs->subvendid) &&
2037 		    (aac_cards[card_index].subsys == softs->subsysid)) {
2038 			break;
2039 		}
2040 		card_index++;
2041 	}
2042 
2043 	softs->card = card_index;
2044 	softs->hwif = aac_cards[card_index].hwif;
2045 
2046 	/*
2047 	 * Unknown aac card:
2048 	 * do a generic match based on the VendorID and DeviceID to
2049 	 * support new cards in the aac family.
2050 	 */
2051 	if (CARD_IS_UNKNOWN(card_index)) {
2052 		if (softs->vendid != 0x9005) {
2053 			AACDB_PRINT(softs, CE_WARN,
2054 			    "Unknown vendor 0x%x", softs->vendid);
2055 			goto error;
2056 		}
2057 		switch (softs->devid) {
2058 		case 0x285:
2059 			softs->hwif = AAC_HWIF_I960RX;
2060 			break;
2061 		case 0x286:
2062 			softs->hwif = AAC_HWIF_RKT;
2063 			break;
2064 		default:
2065 			AACDB_PRINT(softs, CE_WARN,
2066 			    "Unknown device \"pci9005,%x\"", softs->devid);
2067 			goto error;
2068 		}
2069 	}
2070 
2071 	/* Set hardware dependent interface */
2072 	switch (softs->hwif) {
2073 	case AAC_HWIF_I960RX:
2074 		softs->aac_if = aac_rx_interface;
2075 		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2076 		break;
2077 	case AAC_HWIF_RKT:
2078 		softs->aac_if = aac_rkt_interface;
2079 		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2080 		break;
2081 	default:
2082 		AACDB_PRINT(softs, CE_WARN,
2083 		    "Unknown hardware interface %d", softs->hwif);
2084 		goto error;
2085 	}
2086 
2087 	/* Set card names */
2088 	(void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
2089 	    AAC_VENDOR_LEN);
2090 	(void) strncpy(softs->product_name, aac_cards[card_index].desc,
2091 	    AAC_PRODUCT_LEN);
2092 
2093 	/* Set up quirks */
2094 	softs->flags = aac_cards[card_index].quirks;
2095 
2096 	/* Force the busmaster enable bit on */
2097 	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2098 	if ((pci_cmd & PCI_COMM_ME) == 0) {
2099 		pci_cmd |= PCI_COMM_ME;
2100 		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2101 		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2102 		if ((pci_cmd & PCI_COMM_ME) == 0) {
2103 			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2104 			goto error;
2105 		}
2106 	}
2107 
2108 	/* Set memory base to map (mask off the low BAR flag bits) */
2109 	softs->pci_mem_base_paddr = 0xfffffff0UL & \
2110 	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2111 
2112 	pci_config_teardown(&pci_config_handle);
2113 
2114 	return (AACOK); /* card type detected */
2115 error:
2116 	pci_config_teardown(&pci_config_handle);
2117 	return (AACERR); /* no matched card found */
2118 }
2119 
2120 /*
2121  * Check the firmware to determine the features to support and the FIB
2122  * parameters to use.
2123  */
2124 static int
2125 aac_check_firmware(struct aac_softstate *softs)
2126 {
2127 	uint32_t options;
2128 	uint32_t atu_size;
2129 	ddi_acc_handle_t pci_handle;
2130 	uint8_t *data;
2131 	uint32_t max_fibs;
2132 	uint32_t max_fib_size;
2133 	uint32_t sg_tablesize;
2134 	uint32_t max_sectors;
2135 	uint32_t status;
2136 
2137 	/* Get supported options */
2138 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2139 	    &status)) != AACOK) {
2140 		if (status != SRB_STATUS_INVALID_REQUEST) {
2141 			cmn_err(CE_CONT,
2142 			    "?Fatal error: request adapter info error");
2143 			return (AACERR);
2144 		}
2145 		options = 0;
2146 		atu_size = 0;
2147 	} else {
2148 		options = AAC_MAILBOX_GET(softs, 1);
2149 		atu_size = AAC_MAILBOX_GET(softs, 2);
2150 	}
2151 
2152 	if (softs->state & AAC_STATE_RESET) {
2153 		if ((softs->support_opt == options) &&
2154 		    (softs->atu_size == atu_size))
2155 			return (AACOK);
2156 
2157 		cmn_err(CE_WARN,
2158 		    "?Fatal error: firmware changed, system needs reboot");
2159 		return (AACERR);
2160 	}
2161 
2162 	/*
2163 	 * The following critical settings are initialized only once during
2164 	 * driver attachment.
2165 	 */
2166 	softs->support_opt = options;
2167 	softs->atu_size = atu_size;
2168 
2169 	/* Process supported options */
2170 	if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2171 	    (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2172 		AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2173 		softs->flags |= AAC_FLAGS_4GB_WINDOW;
2174 	} else {
2175 		/*
2176 		 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2177 		 * only. IO is handled by the DMA engine which does not suffer
2178 		 * from the ATU window programming workarounds necessary for
2179 		 * CPU copy operations.
2180 		 */
2181 		softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2182 		softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2183 	}
2184 
2185 	if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2186 		AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2187 		softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2188 		softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2189 		softs->flags |= AAC_FLAGS_SG_64BIT;
2190 	}
2191 
2192 	if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2193 		softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2194 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2195 	}
2196 
2197 	if (options & AAC_SUPPORTED_NONDASD) {
2198 		if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2199 		    "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2200 			if (strcmp((char *)data, "yes") == 0) {
2201 				AACDB_PRINT(softs, CE_NOTE,
2202 				    "!Enable Non-DASD access");
2203 				softs->flags |= AAC_FLAGS_NONDASD;
2204 			}
2205 			ddi_prop_free(data);
2206 		}
2207 	}
2208 
2209 	/* Read preferred settings */
2210 	max_fib_size = 0;
2211 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2212 	    0, 0, 0, 0, NULL)) == AACOK) {
2213 		options = AAC_MAILBOX_GET(softs, 1);
2214 		max_fib_size = (options & 0xffff);
2215 		max_sectors = (options >> 16) << 1;
2216 		options = AAC_MAILBOX_GET(softs, 2);
2217 		sg_tablesize = (options >> 16);
2218 		options = AAC_MAILBOX_GET(softs, 3);
2219 		max_fibs = (options & 0xffff);
2220 	}
2221 
2222 	/* Enable new comm. and rawio at the same time */
2223 	if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2224 	    (max_fib_size != 0)) {
2225 		/* re-map the PCI memory BAR with the full ATU size */
2226 		if ((atu_size > softs->map_size) &&
2227 		    (ddi_regs_map_setup(softs->devinfo_p, 1,
2228 		    (caddr_t *)&data, 0, atu_size, &softs->acc_attr,
2229 		    &pci_handle) == DDI_SUCCESS)) {
2230 			ddi_regs_map_free(&softs->pci_mem_handle);
2231 			softs->pci_mem_handle = pci_handle;
2232 			softs->pci_mem_base_vaddr = data;
2233 			softs->map_size = atu_size;
2234 		}
2235 		if (atu_size == softs->map_size) {
2236 			softs->flags |= AAC_FLAGS_NEW_COMM;
2237 			AACDB_PRINT(softs, CE_NOTE,
2238 			    "!Enable New Comm. interface");
2239 		}
2240 	}
2241 
2242 	/* Set FIB parameters */
2243 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
2244 		softs->aac_max_fibs = max_fibs;
2245 		softs->aac_max_fib_size = max_fib_size;
2246 		softs->aac_max_sectors = max_sectors;
2247 		softs->aac_sg_tablesize = sg_tablesize;
2248 
2249 		softs->flags |= AAC_FLAGS_RAW_IO;
2250 		AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2251 	} else {
2252 		softs->aac_max_fibs =
2253 		    (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2254 		softs->aac_max_fib_size = AAC_FIB_SIZE;
2255 		softs->aac_max_sectors = 128;	/* 64K */
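		/*
		 * The blockwrite FIB structure already accounts for one SG
		 * entry in its SG map, hence one entry size is added back to
		 * the FIB data area before dividing by the entry size.
		 */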
2256 		if (softs->flags & AAC_FLAGS_17SG)
2257 			softs->aac_sg_tablesize = 17;
2258 		else if (softs->flags & AAC_FLAGS_34SG)
2259 			softs->aac_sg_tablesize = 34;
2260 		else if (softs->flags & AAC_FLAGS_SG_64BIT)
2261 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2262 			    sizeof (struct aac_blockwrite64) +
2263 			    sizeof (struct aac_sg_entry64)) /
2264 			    sizeof (struct aac_sg_entry64);
2265 		else
2266 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2267 			    sizeof (struct aac_blockwrite) +
2268 			    sizeof (struct aac_sg_entry)) /
2269 			    sizeof (struct aac_sg_entry);
2270 	}
2271 
2272 	if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2273 	    (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2274 		softs->flags |= AAC_FLAGS_LBA_64BIT;
2275 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2276 	}
2277 	softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2278 	softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2279 	/*
2280 	 * 64K maximum segment size in scatter gather list is controlled by
2281 	 * the NEW_COMM bit in the adapter information. If not set, the card
2282 	 * can only accept a maximum of 64K. It is not recommended to permit
2283 	 * more than 128KB of total transfer size to the adapters because
2284 	 * performance is negatively impacted.
2285 	 *
2286 	 * For new comm, segment size equals max xfer size. For old comm,
2287 	 * we use 64K for both.
2288 	 */
2289 	softs->buf_dma_attr.dma_attr_count_max =
2290 	    softs->buf_dma_attr.dma_attr_maxxfer - 1;
2291 
2292 	/* Setup FIB operations */
2293 	if (softs->flags & AAC_FLAGS_RAW_IO)
2294 		softs->aac_cmd_fib = aac_cmd_fib_rawio;
2295 	else if (softs->flags & AAC_FLAGS_SG_64BIT)
2296 		softs->aac_cmd_fib = aac_cmd_fib_brw64;
2297 	else
2298 		softs->aac_cmd_fib = aac_cmd_fib_brw;
2299 	softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2300 	    aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2301 
2302 	/* 64-bit LBA needs descriptor format sense data */
2303 	softs->slen = sizeof (struct scsi_arq_status);
2304 	if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2305 	    softs->slen < AAC_ARQ64_LENGTH)
2306 		softs->slen = AAC_ARQ64_LENGTH;
2307 
2308 	AACDB_PRINT(softs, CE_NOTE,
2309 	    "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2310 	    softs->aac_max_fibs, softs->aac_max_fib_size,
2311 	    softs->aac_max_sectors, softs->aac_sg_tablesize);
2312 
2313 	return (AACOK);
2314 }
2315 
2316 static void
2317 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2318     struct FsaRev *fsarev1)
2319 {
2320 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2321 
2322 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2323 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2324 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2325 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2326 	AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2327 }
2328 
2329 /*
2330  * The following function comes from Adaptec:
2331  *
2332  * Query adapter information and supplement adapter information
2333  */
2334 static int
2335 aac_get_adapter_info(struct aac_softstate *softs,
2336     struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2337 {
2338 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2339 	struct aac_fib *fibp = softs->sync_slot.fibp;
2340 	struct aac_adapter_info *ainfp;
2341 	struct aac_supplement_adapter_info *sinfp;
2342 
2343 	ddi_put8(acc, &fibp->data[0], 0);
2344 	if (aac_sync_fib(softs, RequestAdapterInfo,
2345 	    sizeof (struct aac_fib_header)) != AACOK) {
2346 		AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2347 		return (AACERR);
2348 	}
2349 	ainfp = (struct aac_adapter_info *)fibp->data;
2350 	if (ainfr) {
2351 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2352 		AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2353 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2354 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2355 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2356 		AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2357 		AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2358 		AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2359 		aac_fsa_rev(softs, &ainfp->KernelRevision,
2360 		    &ainfr->KernelRevision);
2361 		aac_fsa_rev(softs, &ainfp->MonitorRevision,
2362 		    &ainfr->MonitorRevision);
2363 		aac_fsa_rev(softs, &ainfp->HardwareRevision,
2364 		    &ainfr->HardwareRevision);
2365 		aac_fsa_rev(softs, &ainfp->BIOSRevision,
2366 		    &ainfr->BIOSRevision);
2367 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2368 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2369 		AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2370 		AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2371 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2372 		AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2373 	}
2374 	if (sinfr) {
2375 		if (!(softs->support_opt &
2376 		    AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2377 			AACDB_PRINT(softs, CE_WARN,
2378 			    "SupplementAdapterInfo not supported");
2379 			return (AACERR);
2380 		}
2381 		ddi_put8(acc, &fibp->data[0], 0);
2382 		if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2383 		    sizeof (struct aac_fib_header)) != AACOK) {
2384 			AACDB_PRINT(softs, CE_WARN,
2385 			    "RequestSupplementAdapterInfo failed");
2386 			return (AACERR);
2387 		}
2388 		sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2389 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2390 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2391 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2392 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2393 		AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2394 		AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2395 		AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2396 		AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2397 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2398 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2399 		AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2400 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2401 		    sizeof (struct vpd_info));
2402 		aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2403 		    &sinfr->FlashFirmwareRevision);
2404 		AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2405 		aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2406 		    &sinfr->FlashFirmwareBootRevision);
2407 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2408 		    MFG_PCBA_SERIAL_NUMBER_WIDTH);
2409 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2410 		    MFG_WWN_WIDTH);
2411 		AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2);
2412 	}
2413 	return (AACOK);
2414 }
2415 
2416 static int
2417 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2418     uint32_t *tgt_max)
2419 {
2420 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2421 	struct aac_fib *fibp = softs->sync_slot.fibp;
2422 	struct aac_ctcfg *c_cmd;
2423 	struct aac_ctcfg_resp *c_resp;
2424 	uint32_t scsi_method_id;
2425 	struct aac_bus_info *cmd;
2426 	struct aac_bus_info_response *resp;
2427 	int rval;
2428 
2429 	/* Detect MethodId */
2430 	c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2431 	ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2432 	ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2433 	ddi_put32(acc, &c_cmd->param, 0);
2434 	rval = aac_sync_fib(softs, ContainerCommand,
2435 	    AAC_FIB_SIZEOF(struct aac_ctcfg));
2436 	c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2437 	if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2438 		AACDB_PRINT(softs, CE_WARN,
2439 		    "VM_ContainerConfig command fail");
2440 		return (AACERR);
2441 	}
2442 	scsi_method_id = ddi_get32(acc, &c_resp->param);
2443 
2444 	/* Detect phys. bus count and max. target id first */
2445 	cmd = (struct aac_bus_info *)&fibp->data[0];
2446 	ddi_put32(acc, &cmd->Command, VM_Ioctl);
2447 	ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2448 	ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2449 	ddi_put32(acc, &cmd->ObjectId, 0);
2450 	ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2451 	/*
2452 	 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2453 	 * driver as the size of the response to return. Therefore the driver
2454 	 * has to use sizeof (struct aac_bus_info_response), which is greater
2455 	 * than sizeof (struct aac_bus_info).
2456 	 */
2457 	rval = aac_sync_fib(softs, ContainerCommand,
2458 	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
2459 	resp = (struct aac_bus_info_response *)cmd;
2460 
2461 	/* Scan all coordinates with INQUIRY */
2462 	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2463 		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2464 		return (AACERR);
2465 	}
2466 	*bus_max = ddi_get32(acc, &resp->BusCount);
2467 	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2468 	return (AACOK);
2469 }
2470 
2471 /*
2472  * The following function comes from Adaptec:
2473  *
2474  * Routine to be called during initialization of communications with
2475  * the adapter to handle possible adapter configuration issues. When
2476  * the adapter first boots up, it examines attached drives, etc, and
2477  * potentially comes up with a new or revised configuration (relative to
2478  * what's stored in its NVRAM). Additionally, it may discover problems
2479  * that make the current physical configuration unworkable (currently
2480  * applicable only to cluster configuration issues).
2481  *
2482  * If there are no configuration issues or the issues are considered
2483  * trivial by the adapter, it will set its configuration status to
2484  * "FSACT_CONTINUE" and execute the "commit configuration" action
2485  * automatically on its own.
2486  *
2487  * However, if there are non-trivial issues, the adapter will set its
2488  * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2489  * and wait for some agent on the host to issue the "\ContainerCommand
2490  * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2491  * adapter to commit the new/updated configuration and enable
2492  * un-inhibited operation.  The host agent should first issue the
2493  * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2494  * command to obtain information about config issues detected by
2495  * the adapter.
2496  *
2497  * Normally the adapter's PC BIOS will execute on the host following
2498  * adapter poweron and reset and will be responsible for querying the
2499  * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2500  * command if appropriate.
2501  *
2502  * However, with the introduction of IOP reset support, the adapter may
2503  * boot up without the benefit of the adapter's PC BIOS host agent.
2504  * This routine is intended to take care of these issues in situations
2505  * where BIOS doesn't execute following adapter poweron or reset.  The
2506  * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2507  * there is no harm in doing this when it's already been done.
2508  */
2509 static int
2510 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2511 {
2512 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2513 	struct aac_fib *fibp = softs->sync_slot.fibp;
2514 	struct aac_Container *cmd;
2515 	struct aac_Container_resp *resp;
2516 	struct aac_cf_status_header *cfg_sts_hdr;
2517 	uint32_t resp_status;
2518 	uint32_t ct_status;
2519 	uint32_t cfg_stat_action;
2520 	int rval;
2521 
2522 	/* Get adapter config status */
2523 	cmd = (struct aac_Container *)&fibp->data[0];
2524 
2525 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2526 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2527 	ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2528 	ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2529 	    sizeof (struct aac_cf_status_header));
2530 	rval = aac_sync_fib(softs, ContainerCommand,
2531 	    AAC_FIB_SIZEOF(struct aac_Container));
2532 	resp = (struct aac_Container_resp *)cmd;
2533 	cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2534 
2535 	resp_status = ddi_get32(acc, &resp->Status);
2536 	ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2537 	if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2538 		cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2539 
2540 		/* Commit configuration if it's reasonable to do so. */
2541 		if (cfg_stat_action <= CFACT_PAUSE) {
2542 			bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2543 			ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2544 			ddi_put32(acc, &cmd->CTCommand.command,
2545 			    CT_COMMIT_CONFIG);
2546 			rval = aac_sync_fib(softs, ContainerCommand,
2547 			    AAC_FIB_SIZEOF(struct aac_Container));
2548 
2549 			resp_status = ddi_get32(acc, &resp->Status);
2550 			ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2551 			if ((rval == AACOK) && (resp_status == 0) &&
2552 			    (ct_status == CT_OK))
2553 				/* Successful completion */
2554 				rval = AACMPE_OK;
2555 			else
2556 				/* Auto-commit aborted due to error(s). */
2557 				rval = AACMPE_COMMIT_CONFIG;
2558 		} else {
2559 			/*
2560 			 * Auto-commit aborted due to adapter indicating
2561 			 * configuration issue(s) too dangerous to auto-commit.
2562 			 */
2563 			rval = AACMPE_CONFIG_STATUS;
2564 		}
2565 	} else {
2566 		cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2567 		rval = AACMPE_CONFIG_STATUS;
2568 	}
2569 	return (rval);
2570 }
2571 
2572 /*
2573  * Hardware initialization and resource allocation
2574  */
2575 static int
2576 aac_common_attach(struct aac_softstate *softs)
2577 {
2578 	uint32_t status;
2579 	int i;
2580 
2581 	DBCALLED(softs, 1);
2582 
2583 	/*
2584 	 * Do a little check here to make sure there aren't any outstanding
2585 	 * FIBs in the message queue. At this point there should not be any;
2586 	 * if there are, they are probably left over from another instance of
2587 	 * the driver, e.g. when the system crashes and the crash dump driver
2588 	 * gets loaded.
2589 	 */
2590 	while (AAC_OUTB_GET(softs) != 0xfffffffful)
2591 		;
2592 
2593 	/*
2594 	 * Wait for the card to complete booting up before doing anything that
2595 	 * attempts to communicate with it.
2596 	 */
2597 	status = AAC_FWSTATUS_GET(softs);
2598 	if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2599 		goto error;
2600 	i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2601 	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2602 	if (i == 0) {
2603 		cmn_err(CE_CONT, "?Fatal error: controller not ready");
2604 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2605 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2606 		goto error;
2607 	}
2608 
2609 	/* Read and set card supported options and settings */
2610 	if (aac_check_firmware(softs) == AACERR) {
2611 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2612 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2613 		goto error;
2614 	}
2615 
2616 	/* Clear out all interrupts */
2617 	AAC_STATUS_CLR(softs, ~0);
2618 
2619 	/* Setup communication space with the card */
2620 	if (softs->comm_space_dma_handle == NULL) {
2621 		if (aac_alloc_comm_space(softs) != AACOK)
2622 			goto error;
2623 	}
2624 	if (aac_setup_comm_space(softs) != AACOK) {
2625 		cmn_err(CE_CONT, "?Setup communication space failed");
2626 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2627 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2628 		goto error;
2629 	}
2630 
2631 #ifdef DEBUG
2632 	if (aac_get_fw_debug_buffer(softs) != AACOK)
2633 		cmn_err(CE_CONT, "?firmware UART trace not supported");
2634 #endif
2635 
2636 	/* Allocate slots */
2637 	if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2638 		cmn_err(CE_CONT, "?Fatal error: slot allocation failed");
2639 		goto error;
2640 	}
2641 	AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2642 
2643 	/* Allocate FIBs */
2644 	if (softs->total_fibs < softs->total_slots) {
2645 		aac_alloc_fibs(softs);
2646 		if (softs->total_fibs == 0)
2647 			goto error;
2648 		AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2649 		    softs->total_fibs);
2650 	}
2651 
2652 	/* Get adapter names */
2653 	if (CARD_IS_UNKNOWN(softs->card)) {
2654 		struct aac_supplement_adapter_info sinf;
2655 
2656 		if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) {
2657 			cmn_err(CE_CONT, "?Query adapter information failed");
2658 		} else {
2659 			char *p, *p0, *p1;
2660 
2661 			/*
2662 			 * Now find the controller name in supp_adapter_info->
2663 			 * AdapterTypeText. Use the first word as the vendor
2664 			 * and the other words as the product name.
2665 			 */
2666 			AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2667 			    "\"%s\"", sinf.AdapterTypeText);
2668 			p = sinf.AdapterTypeText;
2669 			p0 = p1 = NULL;
2670 			/* Skip leading spaces */
2671 			while (*p && (*p == ' ' || *p == '\t'))
2672 				p++;
2673 			p0 = p;
2674 			while (*p && (*p != ' ' && *p != '\t'))
2675 				p++;
2676 			/* Remove middle spaces */
2677 			while (*p && (*p == ' ' || *p == '\t'))
2678 				*p++ = 0;
2679 			p1 = p;
2680 			/* Remove trailing spaces */
2681 			p = p1 + strlen(p1) - 1;
2682 			while (p > p1 && (*p == ' ' || *p == '\t'))
2683 				*p-- = 0;
2684 			if (*p0 && *p1) {
2685 				(void) strncpy(softs->vendor_name, p0,
2686 				    AAC_VENDOR_LEN);
2687 				(void) strncpy(softs->product_name, p1,
2688 				    AAC_PRODUCT_LEN);
2689 			} else {
2690 				cmn_err(CE_WARN,
2691 				    "?adapter name mis-formatted\n");
2692 				if (*p0)
2693 					(void) strncpy(softs->product_name,
2694 					    p0, AAC_PRODUCT_LEN);
2695 			}
2696 		}
2697 	}
2698 
2699 	cmn_err(CE_NOTE,
2700 	    "!aac driver %d.%02d.%02d-%d, found card: " \
2701 	    "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2702 	    AAC_DRIVER_MAJOR_VERSION,
2703 	    AAC_DRIVER_MINOR_VERSION,
2704 	    AAC_DRIVER_BUGFIX_LEVEL,
2705 	    AAC_DRIVER_BUILD,
2706 	    softs->vendor_name, softs->product_name,
2707 	    softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2708 	    softs->pci_mem_base_paddr);
2709 
2710 	/* Perform acceptance of adapter-detected config changes if possible */
2711 	if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2712 		cmn_err(CE_CONT, "?Handle adapter config issues failed");
2713 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2714 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2715 		goto error;
2716 	}
2717 
2718 	/* Setup containers (logical devices) */
2719 	if (aac_probe_containers(softs) != AACOK) {
2720 		cmn_err(CE_CONT, "?Fatal error: get container info error");
2721 		goto error;
2722 	}
2723 
2724 	/* Setup phys. devices */
2725 	if (softs->flags & AAC_FLAGS_NONDASD) {
2726 		uint32_t bus_max, tgt_max;
2727 		uint32_t bus, tgt;
2728 		int index;
2729 
2730 		if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2731 			cmn_err(CE_CONT, "?Fatal error: get bus info error");
2732 			goto error;
2733 		}
2734 		AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2735 		    bus_max, tgt_max);
2736 		if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2737 			if (softs->state & AAC_STATE_RESET) {
2738 				cmn_err(CE_WARN,
2739 				    "?Fatal error: bus map changed");
2740 				goto error;
2741 			}
2742 			softs->bus_max = bus_max;
2743 			softs->tgt_max = tgt_max;
2744 			if (softs->nondasds) {
2745 				kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2746 				    sizeof (struct aac_nondasd));
2747 			}
2748 			softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2749 			    sizeof (struct aac_nondasd), KM_SLEEP);
2750 
2751 			index = 0;
2752 			for (bus = 0; bus < softs->bus_max; bus++) {
2753 				for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2754 					struct aac_nondasd *dvp =
2755 					    &softs->nondasds[index++];
2756 					dvp->dev.type = AAC_DEV_PD;
2757 					dvp->bus = bus;
2758 					dvp->tid = tgt;
2759 				}
2760 			}
2761 		}
2762 	}
2763 
2764 	/* Check dma & acc handles allocated in attach */
2765 	if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2766 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2767 		goto error;
2768 	}
2769 
2770 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2771 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2772 		goto error;
2773 	}
2774 
2775 	for (i = 0; i < softs->total_slots; i++) {
2776 		if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2777 		    DDI_SUCCESS) {
2778 			ddi_fm_service_impact(softs->devinfo_p,
2779 			    DDI_SERVICE_LOST);
2780 			goto error;
2781 		}
2782 	}
2783 
2784 	return (AACOK);
2785 error:
2786 	if (softs->state & AAC_STATE_RESET)
2787 		return (AACERR);
2788 	if (softs->nondasds) {
2789 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2790 		    sizeof (struct aac_nondasd));
2791 		softs->nondasds = NULL;
2792 	}
2793 	if (softs->total_fibs > 0)
2794 		aac_destroy_fibs(softs);
2795 	if (softs->total_slots > 0)
2796 		aac_destroy_slots(softs);
2797 	if (softs->comm_space_dma_handle)
2798 		aac_free_comm_space(softs);
2799 	return (AACERR);
2800 }
2801 
2802 /*
2803  * Hardware shutdown and resource release
2804  */
2805 static void
2806 aac_common_detach(struct aac_softstate *softs)
2807 {
2808 	DBCALLED(softs, 1);
2809 
2810 	(void) aac_shutdown(softs);
2811 
2812 	if (softs->nondasds) {
2813 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2814 		    sizeof (struct aac_nondasd));
2815 		softs->nondasds = NULL;
2816 	}
2817 	aac_destroy_fibs(softs);
2818 	aac_destroy_slots(softs);
2819 	aac_free_comm_space(softs);
2820 }
2821 
2822 /*
2823  * Send a synchronous command to the controller and wait for a result.
2824  * Indicate if the controller completed the command with an error status.
2825  */
2826 int
2827 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
2828     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
2829     uint32_t *statusp)
2830 {
2831 	int timeout;
2832 	uint32_t status;
2833 
2834 	if (statusp != NULL)
2835 		*statusp = SRB_STATUS_SUCCESS;
2836 
2837 	/* Fill in mailbox */
2838 	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
2839 
2840 	/* Ensure the sync command doorbell flag is cleared */
2841 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2842 
2843 	/* Then set it to signal the adapter */
2844 	AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
2845 
2846 	/* Spin waiting for the command to complete */
2847 	timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
2848 	AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
2849 	if (!timeout) {
2850 		AACDB_PRINT(softs, CE_WARN,
2851 		    "Sync command timed out after %d seconds (0x%x)!",
2852 		    AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
2853 		return (AACERR);
2854 	}
2855 
2856 	/* Clear the completion flag */
2857 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2858 
2859 	/* Get the command status */
2860 	status = AAC_MAILBOX_GET(softs, 0);
2861 	if (statusp != NULL)
2862 		*statusp = status;
2863 	if (status != SRB_STATUS_SUCCESS) {
2864 		AACDB_PRINT(softs, CE_WARN,
2865 		    "Sync command fail: status = 0x%x", status);
2866 		return (AACERR);
2867 	}
2868 
2869 	return (AACOK);
2870 }
2871 
2872 /*
2873  * Send a synchronous FIB to the adapter and wait for its completion
2874  */
2875 static int
2876 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
2877 {
2878 	struct aac_slot *slotp = &softs->sync_slot;
2879 	ddi_dma_handle_t dma = slotp->fib_dma_handle;
2880 	uint32_t status;
2881 	int rval;
2882 
2883 	/* Sync fib only supports 512 bytes */
2884 	if (fibsize > AAC_FIB_SIZE)
2885 		return (AACERR);
2886 
2887 	/*
2888 	 * Setup sync fib
2889 	 * The FIB header need not be reinitialized if it has already been
2890 	 * filled in on behalf of an aac_cmd (e.g. by aac_cmd_fib_scsi).
2891 	 */
2892 	if (slotp->acp == NULL)
2893 		aac_cmd_fib_header(softs, slotp, cmd, fibsize);
2894 
2895 	AACDB_PRINT_FIB(softs, &softs->sync_slot);
2896 
2897 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2898 	    fibsize, DDI_DMA_SYNC_FORDEV);
2899 
2900 	/* Give the FIB to the controller, wait for a response. */
2901 	rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB,
2902 	    slotp->fib_phyaddr, 0, 0, 0, &status);
2903 	if (rval == AACERR) {
2904 		AACDB_PRINT(softs, CE_WARN,
2905 		    "Send sync fib to controller failed");
2906 		return (AACERR);
2907 	}
2908 
2909 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2910 	    AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU);
2911 
2912 	if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) ||
2913 	    (aac_check_dma_handle(dma) != DDI_SUCCESS)) {
2914 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2915 		return (AACERR);
2916 	}
2917 
2918 	return (AACOK);
2919 }
2920 
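/*
 * Driver-internal cmd queues.
 *
 * An empty queue has q_tail pointing at &q_head, which acts as a pseudo
 * aac_cmd whose `next' pointer overlays q_head (`next' is the first member
 * of struct aac_cmd), so aac_cmd_enqueue() needs no empty-queue special case.
 */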
2921 static void
2922 aac_cmd_initq(struct aac_cmd_queue *q)
2923 {
2924 	q->q_head = NULL;
2925 	q->q_tail = (struct aac_cmd *)&q->q_head;
2926 }
2927 
2928 /*
2929  * Remove a cmd from the head of q
2930  */
2931 static struct aac_cmd *
2932 aac_cmd_dequeue(struct aac_cmd_queue *q)
2933 {
2934 	struct aac_cmd *acp;
2935 
2936 	_NOTE(ASSUMING_PROTECTED(*q))
2937 
2938 	if ((acp = q->q_head) != NULL) {
2939 		if ((q->q_head = acp->next) != NULL)
2940 			acp->next = NULL;
2941 		else
2942 			q->q_tail = (struct aac_cmd *)&q->q_head;
2943 		acp->prev = NULL;
2944 	}
2945 	return (acp);
2946 }
2947 
2948 /*
2949  * Add a cmd to the tail of q
2950  */
2951 static void
2952 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
2953 {
2954 	ASSERT(acp->next == NULL);
2955 	acp->prev = q->q_tail;
2956 	q->q_tail->next = acp;
2957 	q->q_tail = acp;
2958 }
2959 
2960 /*
2961  * Remove the cmd ac from q
2962  */
2963 static void
2964 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
2965 {
2966 	if (acp->prev) {
2967 		if ((acp->prev->next = acp->next) != NULL) {
2968 			acp->next->prev = acp->prev;
2969 			acp->next = NULL;
2970 		} else {
2971 			q->q_tail = acp->prev;
2972 		}
2973 		acp->prev = NULL;
2974 	}
2975 	/* otherwise acp is not in the queue and there is nothing to do */
2976 }
2977 
2978 /*
2979  * Atomically insert an entry into the nominated queue, returns 0 on success or
2980  * AACERR if the queue is full.
2981  *
2982  * Note: it would be more efficient to defer notifying the controller in
2983  *	 the case where we may be inserting several entries in rapid succession,
2984  *	 but implementing this usefully may be difficult (it would involve a
2985  *	 separate queue/notify interface).
2986  */
2987 static int
2988 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
2989     uint32_t fib_size)
2990 {
2991 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
2992 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2993 	uint32_t pi, ci;
2994 
2995 	DBCALLED(softs, 2);
2996 
2997 	ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
2998 
2999 	/* Get the producer/consumer indices */
3000 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3001 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3002 	    DDI_DMA_SYNC_FORCPU);
3003 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3004 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3005 		return (AACERR);
3006 	}
3007 
3008 	pi = ddi_get32(acc,
3009 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3010 	ci = ddi_get32(acc,
3011 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3012 
3013 	/*
3014 	 * Wrap the producer index first, before checking whether
3015 	 * the queue is full
3016 	 */
3017 	if (pi >= aac_qinfo[queue].size)
3018 		pi = 0;
3019 
3020 	/* XXX queue full: inserting would make the producer catch the consumer */
3021 	if ((pi + 1) == ci)
3022 		return (AACERR);
3023 
3024 	/* Fill in queue entry */
3025 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3026 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3027 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3028 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3029 	    DDI_DMA_SYNC_FORDEV);
3030 
3031 	/* Update producer index */
3032 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3033 	    pi + 1);
3034 	(void) ddi_dma_sync(dma,
3035 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3036 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3037 	    DDI_DMA_SYNC_FORDEV);
3038 
3039 	if (aac_qinfo[queue].notify != 0)
3040 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3041 	return (AACOK);
3042 }
3043 
3044 /*
3045  * Atomically remove one entry from the nominated queue, returns 0 on
3046  * success or AACERR if the queue is empty.
3047  */
3048 static int
3049 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3050 {
3051 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3052 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3053 	uint32_t pi, ci;
3054 	int unfull = 0;
3055 
3056 	DBCALLED(softs, 2);
3057 
3058 	ASSERT(idxp);
3059 
3060 	/* Get the producer/consumer indices */
3061 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3062 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3063 	    DDI_DMA_SYNC_FORCPU);
3064 	pi = ddi_get32(acc,
3065 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3066 	ci = ddi_get32(acc,
3067 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3068 
3069 	/* Check for queue empty */
3070 	if (ci == pi)
3071 		return (AACERR);
3072 
3073 	if (pi >= aac_qinfo[queue].size)
3074 		pi = 0;
3075 
3076 	/* Remember if the queue was full so the adapter can be notified below */
3077 	if (ci == pi + 1)
3078 		unfull = 1;
3079 
3080 	/*
3081 	 * The controller does not wrap the queue indices,
3082 	 * so we have to do it ourselves
3083 	 */
3084 	if (ci >= aac_qinfo[queue].size)
3085 		ci = 0;
3086 
3087 	/* Fetch the entry */
3088 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3089 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3090 	    DDI_DMA_SYNC_FORCPU);
3091 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3092 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3093 		return (AACERR);
3094 	}
3095 
3096 	switch (queue) {
3097 	case AAC_HOST_NORM_RESP_Q:
3098 	case AAC_HOST_HIGH_RESP_Q:
3099 		*idxp = ddi_get32(acc,
3100 		    &(softs->qentries[queue] + ci)->aq_fib_addr);
3101 		break;
3102 
3103 	case AAC_HOST_NORM_CMD_Q:
3104 	case AAC_HOST_HIGH_CMD_Q:
3105 		*idxp = ddi_get32(acc,
3106 		    &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3107 		break;
3108 
3109 	default:
3110 		cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3111 		return (AACERR);
3112 	}
3113 
3114 	/* Update consumer index */
3115 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3116 	    ci + 1);
3117 	(void) ddi_dma_sync(dma,
3118 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3119 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3120 	    DDI_DMA_SYNC_FORDEV);
3121 
3122 	if (unfull && aac_qinfo[queue].notify != 0)
3123 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3124 	return (AACOK);
3125 }
3126 
3127 /*
3128  * Request information of the container cid
3129  */
3130 static struct aac_mntinforesp *
3131 aac_get_container_info(struct aac_softstate *softs, int cid)
3132 {
3133 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3134 	struct aac_fib *fibp = softs->sync_slot.fibp;
3135 	struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3136 	struct aac_mntinforesp *mir;
3137 
3138 	ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3139 	    (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3140 	    VM_NameServe64 : VM_NameServe);
3141 	ddi_put32(acc, &mi->MntType, FT_FILESYS);
3142 	ddi_put32(acc, &mi->MntCount, cid);
3143 
3144 	if (aac_sync_fib(softs, ContainerCommand,
3145 	    AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3146 		AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid);
3147 		return (NULL);
3148 	}
3149 
3150 	mir = (struct aac_mntinforesp *)&fibp->data[0];
3151 	if (ddi_get32(acc, &mir->Status) == ST_OK)
3152 		return (mir);
3153 	return (NULL);
3154 }
3155 
3156 static int
3157 aac_get_container_count(struct aac_softstate *softs, int *count)
3158 {
3159 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3160 	struct aac_mntinforesp *mir;
3161 
3162 	if ((mir = aac_get_container_info(softs, 0)) == NULL)
3163 		return (AACERR);
3164 	*count = ddi_get32(acc, &mir->MntRespCount);
3165 	if (*count > AAC_MAX_LD) {
3166 		AACDB_PRINT(softs, CE_CONT,
3167 		    "container count(%d) > AAC_MAX_LD", *count);
3168 		return (AACERR);
3169 	}
3170 	return (AACOK);
3171 }
3172 
3173 static int
3174 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3175 {
3176 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3177 	struct aac_Container *ct = (struct aac_Container *) \
3178 	    &softs->sync_slot.fibp->data[0];
3179 
3180 	bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3181 	ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3182 	ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3183 	ddi_put32(acc, &ct->CTCommand.param[0], cid);
3184 
3185 	if (aac_sync_fib(softs, ContainerCommand,
3186 	    AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3187 		return (AACERR);
3188 	if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3189 		return (AACERR);
3190 
3191 	*uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3192 	return (AACOK);
3193 }
3194 
3195 static int
3196 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3197 {
3198 	struct aac_container *dvp = &softs->containers[cid];
3199 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3200 	struct aac_mntinforesp *mir;
3201 	uint64_t size;
3202 	uint32_t uid;
3203 
3204 	/* Get container basic info */
3205 	if ((mir = aac_get_container_info(softs, cid)) == NULL)
3206 		return (AACERR);
3207 
3208 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3209 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3210 			AACDB_PRINT(softs, CE_NOTE,
3211 			    ">>> Container %d deleted", cid);
3212 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3213 			(void) aac_dr_event(softs, dvp->cid, -1,
3214 			    AAC_EVT_OFFLINE);
3215 		}
3216 	} else {
3217 		size = AAC_MIR_SIZE(softs, acc, mir);
3218 
3219 		/* Get container UID */
3220 		if (aac_get_container_uid(softs, cid, &uid) == AACERR) {
3221 			AACDB_PRINT(softs, CE_CONT,
3222 			    "query container %d uid failed", cid);
3223 			return (AACERR);
3224 		}
3225 		AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid);
3226 
3227 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3228 			if (dvp->uid != uid) {
3229 				AACDB_PRINT(softs, CE_WARN,
3230 				    ">>> Container %u uid changed to %d",
3231 				    cid, uid);
3232 				dvp->uid = uid;
3233 			}
3234 			if (dvp->size != size) {
3235 				AACDB_PRINT(softs, CE_NOTE,
3236 				    ">>> Container %u size changed to %"PRIu64,
3237 				    cid, size);
3238 				dvp->size = size;
3239 			}
3240 		} else { /* Init new container */
3241 			AACDB_PRINT(softs, CE_NOTE,
3242 			    ">>> Container %d added: " \
3243 			    "size=0x%x.%08x, type=%d, name=%s",
3244 			    cid,
3245 			    ddi_get32(acc, &mir->MntObj.CapacityHigh),
3246 			    ddi_get32(acc, &mir->MntObj.Capacity),
3247 			    ddi_get32(acc, &mir->MntObj.VolType),
3248 			    mir->MntObj.FileSystemName);
3249 			dvp->dev.flags |= AAC_DFLAG_VALID;
3250 			dvp->dev.type = AAC_DEV_LD;
3251 
3252 			dvp->cid = cid;
3253 			dvp->uid = uid;
3254 			dvp->size = size;
3255 			dvp->locked = 0;
3256 			dvp->deleted = 0;
3257 			(void) aac_dr_event(softs, dvp->cid, -1,
3258 			    AAC_EVT_ONLINE);
3259 		}
3260 	}
3261 	return (AACOK);
3262 }
3263 
3264 /*
3265  * Do a rescan of all the possible containers and update the container list
3266  * with newly online/offline containers, and prepare for autoconfiguration.
3267  */
3268 static int
3269 aac_probe_containers(struct aac_softstate *softs)
3270 {
3271 	int i, count, total;
3272 
3273 	/* Loop over possible containers */
3274 	count = softs->container_count;
3275 	if (aac_get_container_count(softs, &count) == AACERR)
3276 		return (AACERR);
3277 	for (i = total = 0; i < count; i++) {
3278 		if (aac_probe_container(softs, i) == AACOK)
3279 			total++;
3280 	}
3281 	if (count < softs->container_count) {
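		/*
		 * The reported count shrank: containers at or beyond the new
		 * count are gone, so invalidate them and post offline events.
		 */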
3282 		struct aac_container *dvp;
3283 
3284 		for (dvp = &softs->containers[count];
3285 		    dvp < &softs->containers[softs->container_count]; dvp++) {
3286 			if (!AAC_DEV_IS_VALID(&dvp->dev))
3287 				continue;
3288 			AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3289 			    dvp->cid);
3290 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3291 			(void) aac_dr_event(softs, dvp->cid, -1,
3292 			    AAC_EVT_OFFLINE);
3293 		}
3294 	}
3295 	softs->container_count = count;
3296 	AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3297 	return (AACOK);
3298 }
3299 
3300 static int
3301 aac_alloc_comm_space(struct aac_softstate *softs)
3302 {
3303 	size_t rlen;
3304 	ddi_dma_cookie_t cookie;
3305 	uint_t cookien;
3306 
3307 	/* Allocate DMA for comm. space */
3308 	if (ddi_dma_alloc_handle(
3309 	    softs->devinfo_p,
3310 	    &softs->addr_dma_attr,
3311 	    DDI_DMA_SLEEP,
3312 	    NULL,
3313 	    &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3314 		AACDB_PRINT(softs, CE_WARN,
3315 		    "Cannot alloc dma handle for communication area");
3316 		goto error;
3317 	}
3318 	if (ddi_dma_mem_alloc(
3319 	    softs->comm_space_dma_handle,
3320 	    sizeof (struct aac_comm_space),
3321 	    &softs->acc_attr,
3322 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3323 	    DDI_DMA_SLEEP,
3324 	    NULL,
3325 	    (caddr_t *)&softs->comm_space,
3326 	    &rlen,
3327 	    &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3328 		AACDB_PRINT(softs, CE_WARN,
3329 		    "Cannot alloc mem for communication area");
3330 		goto error;
3331 	}
3332 	if (ddi_dma_addr_bind_handle(
3333 	    softs->comm_space_dma_handle,
3334 	    NULL,
3335 	    (caddr_t)softs->comm_space,
3336 	    sizeof (struct aac_comm_space),
3337 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3338 	    DDI_DMA_SLEEP,
3339 	    NULL,
3340 	    &cookie,
3341 	    &cookien) != DDI_DMA_MAPPED) {
3342 		AACDB_PRINT(softs, CE_WARN,
3343 		    "DMA bind failed for communication area");
3344 		goto error;
3345 	}
3346 	softs->comm_space_phyaddr = cookie.dmac_address;
3347 
3348 	/* Setup sync FIB space */
3349 	softs->sync_slot.fibp = &softs->comm_space->sync_fib;
3350 	softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \
3351 	    offsetof(struct aac_comm_space, sync_fib);
3352 	softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle;
3353 	softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle;
3354 
3355 	return (AACOK);
3356 error:
3357 	if (softs->comm_space_acc_handle) {
3358 		ddi_dma_mem_free(&softs->comm_space_acc_handle);
3359 		softs->comm_space_acc_handle = NULL;
3360 	}
3361 	if (softs->comm_space_dma_handle) {
3362 		ddi_dma_free_handle(&softs->comm_space_dma_handle);
3363 		softs->comm_space_dma_handle = NULL;
3364 	}
3365 	return (AACERR);
3366 }
3367 
3368 static void
3369 aac_free_comm_space(struct aac_softstate *softs)
3370 {
3371 	softs->sync_slot.fibp = NULL;
3372 	softs->sync_slot.fib_phyaddr = NULL;
3373 	softs->sync_slot.fib_acc_handle = NULL;
3374 	softs->sync_slot.fib_dma_handle = NULL;
3375 
3376 	(void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3377 	ddi_dma_mem_free(&softs->comm_space_acc_handle);
3378 	softs->comm_space_acc_handle = NULL;
3379 	ddi_dma_free_handle(&softs->comm_space_dma_handle);
3380 	softs->comm_space_dma_handle = NULL;
3381 	softs->comm_space_phyaddr = NULL;
3382 }
3383 
3384 /*
3385  * Initialize the data structures that are required for the communication
3386  * interface to operate
3387  */
3388 static int
3389 aac_setup_comm_space(struct aac_softstate *softs)
3390 {
3391 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3392 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3393 	uint32_t comm_space_phyaddr;
3394 	struct aac_adapter_init *initp;
3395 	int qoffset;
3396 
3397 	comm_space_phyaddr = softs->comm_space_phyaddr;
3398 
3399 	/* Setup adapter init struct */
3400 	initp = &softs->comm_space->init_data;
3401 	bzero(initp, sizeof (struct aac_adapter_init));
3402 
3403 	ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3404 	ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3405 
3406 	/* Setup new/old comm. specific data */
3407 	if (softs->flags & AAC_FLAGS_RAW_IO) {
3408 		ddi_put32(acc, &initp->InitStructRevision,
3409 		    AAC_INIT_STRUCT_REVISION_4);
3410 		ddi_put32(acc, &initp->InitFlags,
3411 		    (softs->flags & AAC_FLAGS_NEW_COMM) ?
3412 		    AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0);
3413 		/* Setup the preferred settings */
3414 		ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3415 		ddi_put32(acc, &initp->MaxIoSize,
3416 		    (softs->aac_max_sectors << 9));
3417 		ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3418 	} else {
3419 		/*
3420 		 * Tells the adapter about the physical location of various
3421 		 * important shared data structures
3422 		 */
3423 		ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3424 		    comm_space_phyaddr + \
3425 		    offsetof(struct aac_comm_space, adapter_fibs));
3426 		ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3427 		ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3428 		ddi_put32(acc, &initp->AdapterFibsSize,
3429 		    AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3430 		ddi_put32(acc, &initp->PrintfBufferAddress,
3431 		    comm_space_phyaddr + \
3432 		    offsetof(struct aac_comm_space, adapter_print_buf));
3433 		ddi_put32(acc, &initp->PrintfBufferSize,
3434 		    AAC_ADAPTER_PRINT_BUFSIZE);
3435 		ddi_put32(acc, &initp->MiniPortRevision,
3436 		    AAC_INIT_STRUCT_MINIPORT_REVISION);
3437 		ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3438 
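		/*
		 * The queue table must start on an AAC_QUEUE_ALIGN boundary;
		 * compute the padding needed within comm_space and apply it to
		 * both the host pointer and the address given to the adapter.
		 */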
3439 		qoffset = (comm_space_phyaddr + \
3440 		    offsetof(struct aac_comm_space, qtable)) % \
3441 		    AAC_QUEUE_ALIGN;
3442 		if (qoffset)
3443 			qoffset = AAC_QUEUE_ALIGN - qoffset;
3444 		softs->qtablep = (struct aac_queue_table *) \
3445 		    ((char *)&softs->comm_space->qtable + qoffset);
3446 		ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3447 		    offsetof(struct aac_comm_space, qtable) + qoffset);
3448 
3449 		/* Init queue table */
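		/*
		 * Each queue starts out empty: its producer and consumer
		 * indexes are both initialized to the queue's entry count.
		 */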
3450 		ddi_put32(acc, &softs->qtablep-> \
3451 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3452 		    AAC_HOST_NORM_CMD_ENTRIES);
3453 		ddi_put32(acc, &softs->qtablep-> \
3454 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3455 		    AAC_HOST_NORM_CMD_ENTRIES);
3456 		ddi_put32(acc, &softs->qtablep-> \
3457 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3458 		    AAC_HOST_HIGH_CMD_ENTRIES);
3459 		ddi_put32(acc, &softs->qtablep-> \
3460 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3461 		    AAC_HOST_HIGH_CMD_ENTRIES);
3462 		ddi_put32(acc, &softs->qtablep-> \
3463 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3464 		    AAC_ADAP_NORM_CMD_ENTRIES);
3465 		ddi_put32(acc, &softs->qtablep-> \
3466 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3467 		    AAC_ADAP_NORM_CMD_ENTRIES);
3468 		ddi_put32(acc, &softs->qtablep-> \
3469 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3470 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3471 		ddi_put32(acc, &softs->qtablep-> \
3472 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3473 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3474 		ddi_put32(acc, &softs->qtablep-> \
3475 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3476 		    AAC_HOST_NORM_RESP_ENTRIES);
3477 		ddi_put32(acc, &softs->qtablep-> \
3478 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3479 		    AAC_HOST_NORM_RESP_ENTRIES);
3480 		ddi_put32(acc, &softs->qtablep-> \
3481 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3482 		    AAC_HOST_HIGH_RESP_ENTRIES);
3483 		ddi_put32(acc, &softs->qtablep-> \
3484 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3485 		    AAC_HOST_HIGH_RESP_ENTRIES);
3486 		ddi_put32(acc, &softs->qtablep-> \
3487 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3488 		    AAC_ADAP_NORM_RESP_ENTRIES);
3489 		ddi_put32(acc, &softs->qtablep-> \
3490 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3491 		    AAC_ADAP_NORM_RESP_ENTRIES);
3492 		ddi_put32(acc, &softs->qtablep-> \
3493 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3494 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3495 		ddi_put32(acc, &softs->qtablep-> \
3496 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3497 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3498 
3499 		/* Init queue entries */
3500 		softs->qentries[AAC_HOST_NORM_CMD_Q] =
3501 		    &softs->qtablep->qt_HostNormCmdQueue[0];
3502 		softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3503 		    &softs->qtablep->qt_HostHighCmdQueue[0];
3504 		softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3505 		    &softs->qtablep->qt_AdapNormCmdQueue[0];
3506 		softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3507 		    &softs->qtablep->qt_AdapHighCmdQueue[0];
3508 		softs->qentries[AAC_HOST_NORM_RESP_Q] =
3509 		    &softs->qtablep->qt_HostNormRespQueue[0];
3510 		softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3511 		    &softs->qtablep->qt_HostHighRespQueue[0];
3512 		softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3513 		    &softs->qtablep->qt_AdapNormRespQueue[0];
3514 		softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3515 		    &softs->qtablep->qt_AdapHighRespQueue[0];
3516 	}
3517 	(void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3518 
3519 	/* Send init structure to the card */
3520 	if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3521 	    comm_space_phyaddr + \
3522 	    offsetof(struct aac_comm_space, init_data),
3523 	    0, 0, 0, NULL) == AACERR) {
3524 		AACDB_PRINT(softs, CE_WARN,
3525 		    "Cannot send init structure to adapter");
3526 		return (AACERR);
3527 	}
3528 
3529 	return (AACOK);
3530 }
3531 
3532 static uchar_t *
3533 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3534 {
3535 	(void) memset(buf, ' ', AAC_VENDOR_LEN);
3536 	bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3537 	return (buf + AAC_VENDOR_LEN);
3538 }
3539 
3540 static uchar_t *
3541 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3542 {
3543 	(void) memset(buf, ' ', AAC_PRODUCT_LEN);
3544 	bcopy(softs->product_name, buf, strlen(softs->product_name));
3545 	return (buf + AAC_PRODUCT_LEN);
3546 }
3547 
3548 /*
3549  * Construct unit serial number from container uid
3550  */
3551 static uchar_t *
3552 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3553 {
3554 	int i, d;
3555 	uint32_t uid;
3556 
3557 	ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3558 
3559 	uid = softs->containers[tgt].uid;
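	/* Render the 32-bit uid as 8 upper-case hex digits, MSB first */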
3560 	for (i = 7; i >= 0; i--) {
3561 		d = uid & 0xf;
3562 		buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3563 		uid >>= 4;
3564 	}
3565 	return (buf + 8);
3566 }
3567 
3568 /*
3569  * SPC-3 7.5 INQUIRY command implementation
3570  */
3571 static void
3572 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3573     union scsi_cdb *cdbp, struct buf *bp)
3574 {
3575 	int tgt = pkt->pkt_address.a_target;
3576 	char *b_addr = NULL;
3577 	uchar_t page = cdbp->cdb_opaque[2];
3578 
3579 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3580 		/* Command Support Data is not supported */
3581 		aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3582 		return;
3583 	}
3584 
3585 	if (bp && bp->b_un.b_addr && bp->b_bcount) {
3586 		if (bp->b_flags & (B_PHYS | B_PAGEIO))
3587 			bp_mapin(bp);
3588 		b_addr = bp->b_un.b_addr;
3589 	}
3590 
3591 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3592 		uchar_t *vpdp = (uchar_t *)b_addr;
3593 		uchar_t *idp, *sp;
3594 
3595 		/* SPC-3 8.4 Vital product data parameters */
3596 		switch (page) {
3597 		case 0x00:
3598 			/* Supported VPD pages */
3599 			if (vpdp == NULL ||
3600 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3601 				return;
3602 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3603 			vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3604 			vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3605 
3606 			vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3607 			vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3608 			vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3609 
3610 			pkt->pkt_state |= STATE_XFERRED_DATA;
3611 			break;
3612 
3613 		case 0x80:
3614 			/* Unit serial number page */
3615 			if (vpdp == NULL ||
3616 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3617 				return;
3618 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3619 			vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3620 			vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3621 
3622 			sp = &vpdp[AAC_VPD_PAGE_DATA];
3623 			(void) aac_lun_serialno(softs, tgt, sp);
3624 
3625 			pkt->pkt_state |= STATE_XFERRED_DATA;
3626 			break;
3627 
3628 		case 0x83:
3629 			/* Device identification page */
3630 			if (vpdp == NULL ||
3631 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3632 				return;
3633 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3634 			vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3635 
3636 			idp = &vpdp[AAC_VPD_PAGE_DATA];
3637 			bzero(idp, AAC_VPD_ID_LENGTH);
3638 			idp[AAC_VPD_ID_CODESET] = 0x02;
3639 			idp[AAC_VPD_ID_TYPE] = 0x01;
3640 
3641 			/*
3642 			 * SPC-3 Table 111 - Identifier type
3643 			 * One recommended method of constructing the remainder
3644 			 * of the identifier field is to concatenate the product
3645 			 * identification field from the standard INQUIRY data
3646 			 * field and the product serial number field from the
3647 			 * unit serial number page.
3648 			 */
3649 			sp = &idp[AAC_VPD_ID_DATA];
3650 			sp = aac_vendor_id(softs, sp);
3651 			sp = aac_product_id(softs, sp);
3652 			sp = aac_lun_serialno(softs, tgt, sp);
3653 			idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3654 			    (uintptr_t)&idp[AAC_VPD_ID_DATA];
3655 
3656 			vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3657 			    (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3658 			pkt->pkt_state |= STATE_XFERRED_DATA;
3659 			break;
3660 
3661 		default:
3662 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3663 			    0x24, 0x00, 0);
3664 			break;
3665 		}
3666 	} else {
3667 		struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3668 		size_t len = sizeof (struct scsi_inquiry);
3669 
3670 		if (page != 0) {
3671 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3672 			    0x24, 0x00, 0);
3673 			return;
3674 		}
3675 		if (inqp == NULL || bp->b_bcount < len)
3676 			return;
3677 
3678 		bzero(inqp, len);
3679 		inqp->inq_len = AAC_ADDITIONAL_LEN;
3680 		inqp->inq_ansi = AAC_ANSI_VER;
3681 		inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3682 		(void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3683 		(void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3684 		bcopy("V1.0", inqp->inq_revision, 4);
3685 		inqp->inq_cmdque = 1; /* enable tagged-queuing */
3686 		/*
3687 		 * Set for the "sd-max-xfer-size" property, which may impact
3688 		 * performance as the number of IO threads increases.
3689 		 */
3690 		inqp->inq_wbus32 = 1;
3691 
3692 		pkt->pkt_state |= STATE_XFERRED_DATA;
3693 	}
3694 }
3695 
3696 /*
3697  * SPC-3 7.10 MODE SENSE command implementation
3698  */
3699 static void
3700 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3701     union scsi_cdb *cdbp, struct buf *bp, int capacity)
3702 {
3703 	uchar_t pagecode;
3704 	struct mode_header *headerp;
3705 	struct mode_header_g1 *g1_headerp;
3706 	unsigned int ncyl;
3707 	caddr_t sense_data;
3708 	caddr_t next_page;
3709 	size_t sdata_size;
3710 	size_t pages_size;
3711 	int unsupport_page = 0;
3712 
3713 	ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3714 	    cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3715 
3716 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3717 		return;
3718 
3719 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
3720 		bp_mapin(bp);
3721 	pkt->pkt_state |= STATE_XFERRED_DATA;
3722 	pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3723 
3724 	/* calculate the size of the needed buffer */
3725 	if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3726 		sdata_size = MODE_HEADER_LENGTH;
3727 	else /* must be SCMD_MODE_SENSE_G1 */
3728 		sdata_size = MODE_HEADER_LENGTH_G1;
3729 
3730 	pages_size = 0;
3731 	switch (pagecode) {
3732 	case SD_MODE_SENSE_PAGE3_CODE:
3733 		pages_size += sizeof (struct mode_format);
3734 		break;
3735 
3736 	case SD_MODE_SENSE_PAGE4_CODE:
3737 		pages_size += sizeof (struct mode_geometry);
3738 		break;
3739 
3740 	case MODEPAGE_CTRL_MODE:
3741 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3742 			pages_size += sizeof (struct mode_control_scsi3);
3743 		} else {
3744 			unsupport_page = 1;
3745 		}
3746 		break;
3747 
3748 	case MODEPAGE_ALLPAGES:
3749 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3750 			pages_size += sizeof (struct mode_format) +
3751 			    sizeof (struct mode_geometry) +
3752 			    sizeof (struct mode_control_scsi3);
3753 		} else {
3754 			pages_size += sizeof (struct mode_format) +
3755 			    sizeof (struct mode_geometry);
3756 		}
3757 		break;
3758 
3759 	default:
3760 		/* unsupported pages */
3761 		unsupport_page = 1;
3762 	}
3763 
3764 	/* allocate a buffer to build the mode sense data */
3765 	sdata_size += pages_size;
3766 	sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
3767 
3768 	if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
3769 		headerp = (struct mode_header *)sense_data;
3770 		headerp->length = MODE_HEADER_LENGTH + pages_size -
3771 		    sizeof (headerp->length);
3772 		headerp->bdesc_length = 0;
3773 		next_page = sense_data + sizeof (struct mode_header);
3774 	} else {
3775 		g1_headerp = (void *)sense_data;
3776 		g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
3777 		    sizeof (g1_headerp->length));
3778 		g1_headerp->bdesc_length = 0;
3779 		next_page = sense_data + sizeof (struct mode_header_g1);
3780 	}
3781 
3782 	if (unsupport_page)
3783 		goto finish;
3784 
3785 	if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
3786 	    pagecode == MODEPAGE_ALLPAGES) {
3787 		/* SBC-3 7.1.3.3 Format device page */
3788 		struct mode_format *page3p;
3789 
3790 		page3p = (void *)next_page;
3791 		page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
3792 		page3p->mode_page.length = sizeof (struct mode_format);
3793 		page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
3794 		page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
3795 
3796 		next_page += sizeof (struct mode_format);
3797 	}
3798 
3799 	if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
3800 	    pagecode == MODEPAGE_ALLPAGES) {
3801 		/* SBC-3 7.1.3.8 Rigid disk device geometry page */
3802 		struct mode_geometry *page4p;
3803 
3804 		page4p = (void *)next_page;
3805 		page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
3806 		page4p->mode_page.length = sizeof (struct mode_geometry);
3807 		page4p->heads = AAC_NUMBER_OF_HEADS;
3808 		page4p->rpm = BE_16(AAC_ROTATION_SPEED);
3809 		ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
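		/* Split the cylinder count across the low/middle/upper bytes */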
3810 		page4p->cyl_lb = ncyl & 0xff;
3811 		page4p->cyl_mb = (ncyl >> 8) & 0xff;
3812 		page4p->cyl_ub = (ncyl >> 16) & 0xff;
3813 
3814 		next_page += sizeof (struct mode_geometry);
3815 	}
3816 
3817 	if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
3818 	    softs->flags & AAC_FLAGS_LBA_64BIT) {
3819 		/* 64-bit LBA needs large sense data */
3820 		struct mode_control_scsi3 *mctl;
3821 
3822 		mctl = (void *)next_page;
3823 		mctl->mode_page.code = MODEPAGE_CTRL_MODE;
3824 		mctl->mode_page.length =
3825 		    sizeof (struct mode_control_scsi3) -
3826 		    sizeof (struct mode_page);
3827 		mctl->d_sense = 1;
3828 	}
3829 
3830 finish:
3831 	/* copyout the valid data. */
3832 	bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
3833 	kmem_free(sense_data, sdata_size);
3834 }
3835 
3836 static int
3837 aac_name_node(dev_info_t *dip, char *name, int len)
3838 {
3839 	int tgt, lun;
3840 
3841 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3842 	    DDI_PROP_DONTPASS, "target", -1);
3843 	if (tgt == -1)
3844 		return (DDI_FAILURE);
3845 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3846 	    DDI_PROP_DONTPASS, "lun", -1);
3847 	if (lun == -1)
3848 		return (DDI_FAILURE);
3849 
3850 	(void) snprintf(name, len, "%x,%x", tgt, lun);
3851 	return (DDI_SUCCESS);
3852 }
3853 
3854 /*ARGSUSED*/
3855 static int
3856 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3857     scsi_hba_tran_t *tran, struct scsi_device *sd)
3858 {
3859 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
3860 #if defined(DEBUG) || defined(__lock_lint)
3861 	int ctl = ddi_get_instance(softs->devinfo_p);
3862 #endif
3863 	uint16_t tgt = sd->sd_address.a_target;
3864 	uint8_t lun = sd->sd_address.a_lun;
3865 	struct aac_device *dvp;
3866 
3867 	DBCALLED(softs, 2);
3868 
3869 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3870 		/*
3871 		 * If no persistent node exists, we don't allow a .conf node
3872 		 * to be created.
3873 		 */
3874 		if (aac_find_child(softs, tgt, lun) != NULL) {
3875 			if (ndi_merge_node(tgt_dip, aac_name_node) !=
3876 			    DDI_SUCCESS)
3877 				/* Create this .conf node */
3878 				return (DDI_SUCCESS);
3879 		}
3880 		return (DDI_FAILURE);
3881 	}
3882 
3883 	/*
3884 	 * Only support containers/physical devices that have been
3885 	 * detected and are valid
3886 	 */
3887 	mutex_enter(&softs->io_lock);
3888 	if (tgt >= AAC_MAX_DEV(softs)) {
3889 		AACDB_PRINT_TRAN(softs,
3890 		    "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
3891 		mutex_exit(&softs->io_lock);
3892 		return (DDI_FAILURE);
3893 	}
3894 
3895 	if (tgt < AAC_MAX_LD) {
3896 		dvp = (struct aac_device *)&softs->containers[tgt];
3897 		if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
3898 			AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
3899 			    ctl, tgt, lun);
3900 			mutex_exit(&softs->io_lock);
3901 			return (DDI_FAILURE);
3902 		}
3903 		/*
3904 		 * Save the tgt_dip for the given target if one doesn't exist
3905 		 * already. Dips for non-existent tgts will be cleared in
3906 		 * tgt_free.
3907 		 */
3908 		if (softs->containers[tgt].dev.dip == NULL &&
3909 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
3910 			softs->containers[tgt].dev.dip = tgt_dip;
3911 	} else {
3912 		dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
3913 	}
3914 
3915 	AACDB_PRINT(softs, CE_NOTE,
3916 	    "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
3917 	    (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
3918 	mutex_exit(&softs->io_lock);
3919 	return (DDI_SUCCESS);
3920 }
3921 
3922 static void
3923 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3924     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3925 {
3926 #ifndef __lock_lint
3927 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
3928 #endif
3929 
3930 	struct aac_softstate *softs = SD2AAC(sd);
3931 	int tgt = sd->sd_address.a_target;
3932 
3933 	mutex_enter(&softs->io_lock);
3934 	if (tgt < AAC_MAX_LD) {
3935 		if (softs->containers[tgt].dev.dip == tgt_dip)
3936 			softs->containers[tgt].dev.dip = NULL;
3937 	} else {
3938 		softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
3939 	}
3940 	mutex_exit(&softs->io_lock);
3941 }
3942 
3943 /*
3944  * Check if the firmware is Up And Running. If it is in the Kernel Panic
3945  * state, (BlinkLED code + 1) is returned.
3946  *    0 -- firmware up and running
3947  *   -1 -- firmware dead
3948  *   >0 -- firmware kernel panic
3949  */
3950 static int
3951 aac_check_adapter_health(struct aac_softstate *softs)
3952 {
3953 	int rval;
3954 
3955 	rval = PCI_MEM_GET32(softs, AAC_OMR0);
3956 
3957 	if (rval & AAC_KERNEL_UP_AND_RUNNING) {
3958 		rval = 0;
3959 	} else if (rval & AAC_KERNEL_PANIC) {
3960 		cmn_err(CE_WARN, "firmware panic");
3961 		rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
3962 	} else {
3963 		cmn_err(CE_WARN, "firmware dead");
3964 		rval = -1;
3965 	}
3966 	return (rval);
3967 }
3968 
3969 static void
3970 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
3971     uchar_t reason)
3972 {
3973 	acp->flags |= AAC_CMD_ABORT;
3974 
3975 	if (acp->pkt) {
3976 		/*
3977 		 * Each lun should generate a unit attention
3978 		 * condition when reset.
3979 		 * Phys. drives are treated as logical ones
3980 		 * during error recovery.
3981 		 */
3982 		if (acp->slotp) { /* outstanding cmd */
3983 			acp->pkt->pkt_state |= STATE_GOT_STATUS;
3984 			aac_set_arq_data_reset(softs, acp);
3985 		}
3986 
3987 		switch (reason) {
3988 		case CMD_TIMEOUT:
3989 			AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
3990 			    acp);
3991 			aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
3992 			    STAT_TIMEOUT | STAT_BUS_RESET);
3993 			break;
3994 		case CMD_RESET:
3995 			/* aac supports only RESET_ALL */
3996 			AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
3997 			aac_set_pkt_reason(softs, acp, CMD_RESET,
3998 			    STAT_BUS_RESET);
3999 			break;
4000 		case CMD_ABORTED:
4001 			AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4002 			    acp);
4003 			aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4004 			    STAT_ABORTED);
4005 			break;
4006 		}
4007 	}
4008 	aac_end_io(softs, acp);
4009 }
4010 
4011 /*
4012  * Abort all the pending commands of type iocmd or just the command pkt
4013  * corresponding to pkt
4014  */
4015 static void
4016 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4017     int reason)
4018 {
4019 	struct aac_cmd *ac_arg, *acp;
4020 	int i;
4021 
4022 	if (pkt == NULL) {
4023 		ac_arg = NULL;
4024 	} else {
4025 		ac_arg = PKT2AC(pkt);
4026 		iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4027 		    AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4028 	}
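	/*
	 * With a specific pkt only that command is aborted; its queue is
	 * derived from the AAC_CMD_SYNC flag above.
	 */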
4029 
4030 	/*
4031 	 * a) outstanding commands on the controller
4032 	 * Note: outstanding commands should be aborted only after an
4033 	 * IOP reset has been done.
4034 	 */
4035 	if (iocmd & AAC_IOCMD_OUTSTANDING) {
4036 		struct aac_cmd *acp;
4037 
4038 		for (i = 0; i < AAC_MAX_LD; i++) {
4039 			if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4040 				softs->containers[i].reset = 1;
4041 		}
4042 		while ((acp = softs->q_busy.q_head) != NULL)
4043 			aac_abort_iocmd(softs, acp, reason);
4044 	}
4045 
4046 	/* b) commands in the waiting queues */
4047 	for (i = 0; i < AAC_CMDQ_NUM; i++) {
4048 		if (iocmd & (1 << i)) {
4049 			if (ac_arg) {
4050 				aac_abort_iocmd(softs, ac_arg, reason);
4051 			} else {
4052 				while ((acp = softs->q_wait[i].q_head) != NULL)
4053 					aac_abort_iocmd(softs, acp, reason);
4054 			}
4055 		}
4056 	}
4057 }
4058 
4059 /*
4060  * The draining thread is shared among quiesce threads. It terminates
4061  * when the adapter is quiesced or stopped by aac_stop_drain().
4062  */
4063 static void
4064 aac_check_drain(void *arg)
4065 {
4066 	struct aac_softstate *softs = arg;
4067 
4068 	mutex_enter(&softs->io_lock);
4069 	if (softs->ndrains) {
4070 		softs->drain_timeid = 0;
4071 		/*
4072 		 * If both ASYNC and SYNC bus throttle are held,
4073 		 * wake up threads only when both are drained out.
4074 		 */
4075 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4076 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4077 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4078 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4079 			cv_broadcast(&softs->drain_cv);
4080 		else
4081 			softs->drain_timeid = timeout(aac_check_drain, softs,
4082 			    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4083 	}
4084 	mutex_exit(&softs->io_lock);
4085 }
4086 
4087 /*
4088  * If a drain of the outstanding cmds is not already in progress, start
4089  * one. Otherwise, only update ndrains.
4090  */
4091 static void
4092 aac_start_drain(struct aac_softstate *softs)
4093 {
4094 	if (softs->ndrains == 0) {
4095 		ASSERT(softs->drain_timeid == 0);
4096 		softs->drain_timeid = timeout(aac_check_drain, softs,
4097 		    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4098 	}
4099 	softs->ndrains++;
4100 }
4101 
4102 /*
4103  * Stop the draining thread when no other threads use it any longer.
4104  * Side effect: io_lock may be released in the middle.
4105  */
4106 static void
4107 aac_stop_drain(struct aac_softstate *softs)
4108 {
4109 	softs->ndrains--;
4110 	if (softs->ndrains == 0) {
4111 		if (softs->drain_timeid != 0) {
4112 			timeout_id_t tid = softs->drain_timeid;
4113 
4114 			softs->drain_timeid = 0;
4115 			mutex_exit(&softs->io_lock);
4116 			(void) untimeout(tid);
4117 			mutex_enter(&softs->io_lock);
4118 		}
4119 	}
4120 }
4121 
4122 /*
4123  * The following function comes from Adaptec:
4124  *
4125  * Once an IOP reset is done, the driver basically has to re-initialize the
4126  * card as if up from a cold boot, and it is responsible for any IO that
4127  * is outstanding to the adapter at the time of the IOP RESET. Prepare
4128  * for the IOP RESET by making the init code modular, with the ability to
4129  * call it from multiple places.
4130  */
4131 static int
4132 aac_reset_adapter(struct aac_softstate *softs)
4133 {
4134 	int health;
4135 	uint32_t status;
4136 	int rval = AAC_IOP_RESET_FAILED;
4137 
4138 	DBCALLED(softs, 1);
4139 
4140 	ASSERT(softs->state & AAC_STATE_RESET);
4141 
4142 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4143 	/* Disable interrupt */
4144 	AAC_DISABLE_INTR(softs);
4145 
4146 	health = aac_check_adapter_health(softs);
4147 	if (health == -1) {
4148 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4149 		goto finish;
4150 	}
4151 	if (health == 0) /* flush drives if possible */
4152 		(void) aac_shutdown(softs);
4153 
4154 	/* Execute IOP reset */
4155 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4156 	    &status)) != AACOK) {
4157 		ddi_acc_handle_t acc = softs->comm_space_acc_handle;
4158 		struct aac_fib *fibp;
4159 		struct aac_pause_command *pc;
4160 
4161 		if ((status & 0xf) == 0xf) {
4162 			uint32_t wait_count;
4163 
4164 			/*
4165 			 * Sunrise Lake has dual cores and we must drag the
4166 			 * other core with us to reset simultaneously. There
4167 			 * are 2 bits in the Inbound Reset Control and Status
4168 			 * Register (offset 0x38) of the Sunrise Lake to reset
4169 			 * the chip without clearing out the PCI configuration
4170 			 * info (COMMAND & BARS).
4171 			 */
4172 			PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4173 
4174 			/*
4175 			 * We need to wait for 5 seconds before accessing the MU
4176 			 * again: 5 * 10000 iterations of 100us = 5,000,000us = 5s
4177 			 */
4178 			wait_count = 5 * 10000;
4179 			while (wait_count) {
4180 				drv_usecwait(100); /* delay 100 microseconds */
4181 				wait_count--;
4182 			}
4183 		} else {
4184 			if (status == SRB_STATUS_INVALID_REQUEST)
4185 				cmn_err(CE_WARN, "!IOP_RESET not supported");
4186 			else /* probably timeout */
4187 				cmn_err(CE_WARN, "!IOP_RESET failed");
4188 
4189 			/* Unwind aac_shutdown() */
4190 			fibp = softs->sync_slot.fibp;
4191 			pc = (struct aac_pause_command *)&fibp->data[0];
4192 
4193 			bzero(pc, sizeof (*pc));
4194 			ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4195 			ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4196 			ddi_put32(acc, &pc->Timeout, 1);
4197 			ddi_put32(acc, &pc->Min, 1);
4198 			ddi_put32(acc, &pc->NoRescan, 1);
4199 
4200 			(void) aac_sync_fib(softs, ContainerCommand,
4201 			    AAC_FIB_SIZEOF(struct aac_pause_command));
4202 
4203 			if (aac_check_adapter_health(softs) != 0)
4204 				ddi_fm_service_impact(softs->devinfo_p,
4205 				    DDI_SERVICE_LOST);
4206 			else
4207 				/*
4208 				 * IOP reset not supported or IOP not reset
4209 				 */
4210 				rval = AAC_IOP_RESET_ABNORMAL;
4211 			goto finish;
4212 		}
4213 	}
4214 
4215 	/*
4216 	 * Re-read and renegotiate the FIB parameters, as one of the actions
4217 	 * that can result from an IOP reset is the running of a new firmware
4218 	 * image.
4219 	 */
4220 	if (aac_common_attach(softs) != AACOK)
4221 		goto finish;
4222 
4223 	rval = AAC_IOP_RESET_SUCCEED;
4224 
4225 finish:
4226 	AAC_ENABLE_INTR(softs);
4227 	return (rval);
4228 }
4229 
4230 static void
4231 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4232     int throttle)
4233 {
4234 	/*
4235 	 * If the bus is draining/quiesced, no changes to the throttles
4236 	 * are allowed. All throttles should have been set to 0.
4237 	 */
4238 	if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4239 		return;
4240 	dvp->throttle[q] = throttle;
4241 }
4242 
4243 static void
4244 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4245 {
4246 	int i, q;
4247 
4248 	/* Hold bus by holding every device on the bus */
4249 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4250 		if (iocmds & (1 << q)) {
4251 			softs->bus_throttle[q] = 0;
4252 			for (i = 0; i < AAC_MAX_LD; i++)
4253 				aac_set_throttle(softs,
4254 				    &softs->containers[i].dev, q, 0);
4255 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4256 				aac_set_throttle(softs,
4257 				    &softs->nondasds[i].dev, q, 0);
4258 		}
4259 	}
4260 }
4261 
4262 static void
4263 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4264 {
4265 	int i, q;
4266 
4267 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4268 		if (iocmds & (1 << q)) {
4269 			/*
4270 			 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been
4271 			 * quiesced or is being drained by quiesce
4272 			 * threads.
4273 			 */
4274 			if (q == AAC_CMDQ_ASYNC && ((softs->state &
4275 			    AAC_STATE_QUIESCED) || softs->ndrains))
4276 				continue;
4277 			softs->bus_throttle[q] = softs->total_slots;
4278 			for (i = 0; i < AAC_MAX_LD; i++)
4279 				aac_set_throttle(softs,
4280 				    &softs->containers[i].dev,
4281 				    q, softs->total_slots);
4282 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4283 				aac_set_throttle(softs, &softs->nondasds[i].dev,
4284 				    q, softs->total_slots);
4285 		}
4286 	}
4287 }
4288 
4289 static int
4290 aac_do_reset(struct aac_softstate *softs)
4291 {
4292 	int health;
4293 	int rval;
4294 
4295 	softs->state |= AAC_STATE_RESET;
4296 	health = aac_check_adapter_health(softs);
4297 
4298 	/*
4299 	 * Hold off new io commands and wait for all outstanding io
4300 	 * commands to complete.
4301 	 */
4302 	if (health == 0) {
4303 		int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4304 		int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4305 
4306 		if (sync_cmds == 0 && async_cmds == 0) {
4307 			rval = AAC_IOP_RESET_SUCCEED;
4308 			goto finish;
4309 		}
4310 		/*
4311 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4312 		 * to complete the outstanding io commands
4313 		 */
4314 		int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
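		/* Each poll below waits 100us, i.e. 1000 * 10 polls per second */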
4315 		int (*intr_handler)(struct aac_softstate *);
4316 
4317 		aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4318 		/*
4319 		 * Poll the adapter ourselves in case interrupts are disabled,
4320 		 * and to avoid releasing the io_lock.
4321 		 */
4322 		intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4323 		    aac_process_intr_new : aac_process_intr_old;
4324 		while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4325 		    softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4326 			drv_usecwait(100);
4327 			(void) intr_handler(softs);
4328 			timeout--;
4329 		}
4330 		aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4331 
4332 		if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4333 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4334 			/* Cmds drained out */
4335 			rval = AAC_IOP_RESET_SUCCEED;
4336 			goto finish;
4337 		} else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4338 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4339 			/* Cmds not drained out, adapter overloaded */
4340 			rval = AAC_IOP_RESET_ABNORMAL;
4341 			goto finish;
4342 		}
4343 	}
4344 
4345 	/*
4346 	 * If a longer wait still can't drain the outstanding io
4347 	 * commands, do an IOP reset.
4348 	 */
4349 	if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4350 		softs->state |= AAC_STATE_DEAD;
4351 
4352 finish:
4353 	softs->state &= ~AAC_STATE_RESET;
4354 	return (rval);
4355 }
4356 
4357 static int
4358 aac_tran_reset(struct scsi_address *ap, int level)
4359 {
4360 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4361 	int rval;
4362 
4363 	DBCALLED(softs, 1);
4364 
4365 	if (level != RESET_ALL) {
4366 		cmn_err(CE_NOTE, "!reset target/lun not supported");
4367 		return (0);
4368 	}
4369 
4370 	mutex_enter(&softs->io_lock);
4371 	switch (rval = aac_do_reset(softs)) {
4372 	case AAC_IOP_RESET_SUCCEED:
4373 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4374 		    NULL, CMD_RESET);
4375 		aac_start_waiting_io(softs);
4376 		break;
4377 	case AAC_IOP_RESET_FAILED:
4378 		/* Abort IOCTL cmds when adapter is dead */
4379 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4380 		break;
4381 	case AAC_IOP_RESET_ABNORMAL:
4382 		aac_start_waiting_io(softs);
4383 	}
4384 	mutex_exit(&softs->io_lock);
4385 
4386 	aac_drain_comp_q(softs);
4387 	return (rval == 0);
4388 }
4389 
4390 static int
4391 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4392 {
4393 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4394 
4395 	DBCALLED(softs, 1);
4396 
4397 	mutex_enter(&softs->io_lock);
4398 	aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4399 	mutex_exit(&softs->io_lock);
4400 
4401 	aac_drain_comp_q(softs);
4402 	return (1);
4403 }
4404 
4405 void
4406 aac_free_dmamap(struct aac_cmd *acp)
4407 {
4408 	/* Free dma mapping */
4409 	if (acp->flags & AAC_CMD_DMA_VALID) {
4410 		ASSERT(acp->buf_dma_handle);
4411 		(void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4412 		acp->flags &= ~AAC_CMD_DMA_VALID;
4413 	}
4414 
4415 	if (acp->abp != NULL) { /* free non-aligned buf DMA */
4416 		ASSERT(acp->buf_dma_handle);
4417 		if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4418 			ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4419 			    (uint8_t *)acp->abp, acp->bp->b_bcount,
4420 			    DDI_DEV_AUTOINCR);
4421 		ddi_dma_mem_free(&acp->abh);
4422 		acp->abp = NULL;
4423 	}
4424 
4425 	if (acp->buf_dma_handle) {
4426 		ddi_dma_free_handle(&acp->buf_dma_handle);
4427 		acp->buf_dma_handle = NULL;
4428 	}
4429 }
4430 
4431 static void
4432 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4433 {
4434 	AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4435 	    ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4436 	aac_free_dmamap(acp);
4437 	aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4438 	aac_soft_callback(softs, acp);
4439 }
4440 
4441 /*
4442  * Handle command to logical device
4443  */
4444 static int
4445 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4446 {
4447 	struct aac_container *dvp;
4448 	struct scsi_pkt *pkt;
4449 	union scsi_cdb *cdbp;
4450 	struct buf *bp;
4451 	int rval;
4452 
4453 	dvp = (struct aac_container *)acp->dvp;
4454 	pkt = acp->pkt;
4455 	cdbp = (void *)pkt->pkt_cdbp;
4456 	bp = acp->bp;
4457 
4458 	switch (cdbp->scc_cmd) {
4459 	case SCMD_INQUIRY: /* inquiry */
4460 		aac_free_dmamap(acp);
4461 		aac_inquiry(softs, pkt, cdbp, bp);
4462 		aac_soft_callback(softs, acp);
4463 		rval = TRAN_ACCEPT;
4464 		break;
4465 
4466 	case SCMD_READ_CAPACITY: /* read capacity */
4467 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4468 			struct scsi_capacity cap;
4469 			uint64_t last_lba;
4470 
4471 			/* check 64-bit LBA */
4472 			last_lba = dvp->size - 1;
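			/*
			 * Capacities beyond a 32-bit LBA are reported as
			 * 0xffffffff so that the initiator falls back to
			 * READ CAPACITY(16).
			 */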
4473 			if (last_lba > 0xffffffffull) {
4474 				cap.capacity = 0xfffffffful;
4475 			} else {
4476 				cap.capacity = BE_32(last_lba);
4477 			}
4478 			cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4479 
4480 			aac_free_dmamap(acp);
4481 			if (bp->b_flags & (B_PHYS|B_PAGEIO))
4482 				bp_mapin(bp);
4483 			bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4484 			pkt->pkt_state |= STATE_XFERRED_DATA;
4485 		}
4486 		aac_soft_callback(softs, acp);
4487 		rval = TRAN_ACCEPT;
4488 		break;
4489 
4490 	case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4491 		/* Check if containers need 64-bit LBA support */
4492 		if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4493 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
4494 				struct scsi_capacity_16 cap16;
4495 				int cap_len = sizeof (struct scsi_capacity_16);
4496 
4497 				bzero(&cap16, cap_len);
4498 				cap16.sc_capacity = BE_64(dvp->size - 1);
4499 				cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4500 
4501 				aac_free_dmamap(acp);
4502 				if (bp->b_flags & (B_PHYS | B_PAGEIO))
4503 					bp_mapin(bp);
4504 				bcopy(&cap16, bp->b_un.b_addr,
4505 				    min(bp->b_bcount, cap_len));
4506 				pkt->pkt_state |= STATE_XFERRED_DATA;
4507 			}
4508 			aac_soft_callback(softs, acp);
4509 		} else {
4510 			aac_unknown_scmd(softs, acp);
4511 		}
4512 		rval = TRAN_ACCEPT;
4513 		break;
4514 
4515 	case SCMD_READ_G4: /* read_16 */
4516 	case SCMD_WRITE_G4: /* write_16 */
4517 		if (softs->flags & AAC_FLAGS_RAW_IO) {
4518 			/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4519 			acp->blkno = ((uint64_t) \
4520 			    GETG4ADDR(cdbp) << 32) | \
4521 			    (uint32_t)GETG4ADDRTL(cdbp);
4522 			goto do_io;
4523 		}
4524 		AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4525 		aac_unknown_scmd(softs, acp);
4526 		rval = TRAN_ACCEPT;
4527 		break;
4528 
4529 	case SCMD_READ: /* read_6 */
4530 	case SCMD_WRITE: /* write_6 */
4531 		acp->blkno = GETG0ADDR(cdbp);
4532 		goto do_io;
4533 
4534 	case SCMD_READ_G5: /* read_12 */
4535 	case SCMD_WRITE_G5: /* write_12 */
4536 		acp->blkno = GETG5ADDR(cdbp);
4537 		goto do_io;
4538 
4539 	case SCMD_READ_G1: /* read_10 */
4540 	case SCMD_WRITE_G1: /* write_10 */
4541 		acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4542 do_io:
4543 		if (acp->flags & AAC_CMD_DMA_VALID) {
4544 			uint64_t cnt_size = dvp->size;
4545 
4546 			/*
4547 			 * If LBA > array size AND rawio, the
4548 			 * adapter may hang. So check it before
4549 			 * sending.
4550 			 * NOTE: (blkno + blkcnt) may overflow
4551 			 */
4552 			if ((acp->blkno < cnt_size) &&
4553 			    ((acp->blkno + acp->bcount /
4554 			    AAC_BLK_SIZE) <= cnt_size)) {
4555 				rval = aac_do_io(softs, acp);
4556 			} else {
4557 				/*
4558 				 * Request exceeds the capacity of the disk;
4559 				 * set the error block number to the last LBA
4560 				 * + 1.
4561 				 */
4562 				aac_set_arq_data(pkt,
4563 				    KEY_ILLEGAL_REQUEST, 0x21,
4564 				    0x00, cnt_size);
4565 				aac_soft_callback(softs, acp);
4566 				rval = TRAN_ACCEPT;
4567 			}
4568 		} else if (acp->bcount == 0) {
4569 			/* For 0 length IO, just return ok */
4570 			aac_soft_callback(softs, acp);
4571 			rval = TRAN_ACCEPT;
4572 		} else {
4573 			rval = TRAN_BADPKT;
4574 		}
4575 		break;
4576 
4577 	case SCMD_MODE_SENSE: /* mode_sense_6 */
4578 	case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4579 		int capacity;
4580 
4581 		aac_free_dmamap(acp);
4582 		if (dvp->size > 0xffffffffull)
4583 			capacity = 0xfffffffful; /* 64-bit LBA */
4584 		else
4585 			capacity = dvp->size;
4586 		aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4587 		aac_soft_callback(softs, acp);
4588 		rval = TRAN_ACCEPT;
4589 		break;
4590 	}
4591 
4592 	case SCMD_TEST_UNIT_READY:
4593 	case SCMD_REQUEST_SENSE:
4594 	case SCMD_FORMAT:
4595 	case SCMD_START_STOP:
4596 		aac_free_dmamap(acp);
4597 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4598 			if (acp->flags & AAC_CMD_BUF_READ) {
4599 				if (bp->b_flags & (B_PHYS|B_PAGEIO))
4600 					bp_mapin(bp);
4601 				bzero(bp->b_un.b_addr, bp->b_bcount);
4602 			}
4603 			pkt->pkt_state |= STATE_XFERRED_DATA;
4604 		}
4605 		aac_soft_callback(softs, acp);
4606 		rval = TRAN_ACCEPT;
4607 		break;
4608 
4609 	case SCMD_SYNCHRONIZE_CACHE:
4610 		acp->flags |= AAC_CMD_NTAG;
4611 		acp->aac_cmd_fib = aac_cmd_fib_sync;
4612 		acp->ac_comp = aac_synccache_complete;
4613 		rval = aac_do_io(softs, acp);
4614 		break;
4615 
4616 	case SCMD_DOORLOCK:
4617 		aac_free_dmamap(acp);
4618 		dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4619 		aac_soft_callback(softs, acp);
4620 		rval = TRAN_ACCEPT;
4621 		break;
4622 
4623 	default: /* unknown command */
4624 		aac_unknown_scmd(softs, acp);
4625 		rval = TRAN_ACCEPT;
4626 		break;
4627 	}
4628 
4629 	return (rval);
4630 }
4631 
4632 static int
4633 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4634 {
4635 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4636 	struct aac_cmd *acp = PKT2AC(pkt);
4637 	struct aac_device *dvp = acp->dvp;
4638 	int rval;
4639 
4640 	DBCALLED(softs, 2);
4641 
4642 	/*
4643 	 * Reinitialize some fields of ac and pkt; the packet may
4644 	 * have been resubmitted
4645 	 */
4646 	acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4647 	    AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4648 	acp->timeout = acp->pkt->pkt_time;
4649 	if (pkt->pkt_flags & FLAG_NOINTR)
4650 		acp->flags |= AAC_CMD_NO_INTR;
4651 #ifdef DEBUG
4652 	acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4653 #endif
4654 	pkt->pkt_reason = CMD_CMPLT;
4655 	pkt->pkt_state = 0;
4656 	pkt->pkt_statistics = 0;
4657 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4658 
4659 	if (acp->flags & AAC_CMD_DMA_VALID) {
4660 		pkt->pkt_resid = acp->bcount;
4661 		/* Consistent packets need to be sync'ed first */
4662 		if ((acp->flags & AAC_CMD_CONSISTENT) &&
4663 		    (acp->flags & AAC_CMD_BUF_WRITE))
4664 			if (aac_dma_sync_ac(acp) != AACOK) {
4665 				ddi_fm_service_impact(softs->devinfo_p,
4666 				    DDI_SERVICE_UNAFFECTED);
4667 				return (TRAN_BADPKT);
4668 			}
4669 	} else {
4670 		pkt->pkt_resid = 0;
4671 	}
4672 
4673 	mutex_enter(&softs->io_lock);
4674 	AACDB_PRINT_SCMD(softs, acp);
4675 	if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4676 	    !(softs->state & AAC_STATE_DEAD)) {
4677 		if (dvp->type == AAC_DEV_LD) {
4678 			if (ap->a_lun == 0)
4679 				rval = aac_tran_start_ld(softs, acp);
4680 			else
4681 				goto error;
4682 		} else {
4683 			rval = aac_do_io(softs, acp);
4684 		}
4685 	} else {
4686 error:
4687 #ifdef DEBUG
4688 		if (!(softs->state & AAC_STATE_DEAD)) {
4689 			AACDB_PRINT_TRAN(softs,
4690 			    "Cannot send cmd to target t%dL%d: %s",
4691 			    ap->a_target, ap->a_lun,
4692 			    "target invalid");
4693 		} else {
4694 			AACDB_PRINT(softs, CE_WARN,
4695 			    "Cannot send cmd to target t%dL%d: %s",
4696 			    ap->a_target, ap->a_lun,
4697 			    "adapter dead");
4698 		}
4699 #endif
4700 		rval = TRAN_FATAL_ERROR;
4701 	}
4702 	mutex_exit(&softs->io_lock);
4703 	return (rval);
4704 }
4705 
4706 static int
4707 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4708 {
4709 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4710 	struct aac_device *dvp;
4711 	int rval;
4712 
4713 	DBCALLED(softs, 2);
4714 
4715 	/* We don't allow inquiring about capabilities for other targets */
4716 	if (cap == NULL || whom == 0) {
4717 		AACDB_PRINT(softs, CE_WARN,
4718 		    "GetCap> %s not supported: whom=%d", cap, whom);
4719 		return (-1);
4720 	}
4721 
4722 	mutex_enter(&softs->io_lock);
4723 	dvp = AAC_DEV(softs, ap->a_target);
4724 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4725 		mutex_exit(&softs->io_lock);
4726 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
4727 		    ap->a_target, ap->a_lun);
4728 		return (-1);
4729 	}
4730 
4731 	switch (scsi_hba_lookup_capstr(cap)) {
4732 	case SCSI_CAP_ARQ: /* auto request sense */
4733 		rval = 1;
4734 		break;
4735 	case SCSI_CAP_UNTAGGED_QING:
4736 	case SCSI_CAP_TAGGED_QING:
4737 		rval = 1;
4738 		break;
4739 	case SCSI_CAP_DMA_MAX:
4740 		rval = softs->buf_dma_attr.dma_attr_maxxfer;
4741 		break;
4742 	default:
4743 		rval = -1;
4744 		break;
4745 	}
4746 	mutex_exit(&softs->io_lock);
4747 
4748 	AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
4749 	    cap, ap->a_target, ap->a_lun, rval);
4750 	return (rval);
4751 }
4752 
4753 /*ARGSUSED*/
4754 static int
4755 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4756 {
4757 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4758 	struct aac_device *dvp;
4759 	int rval;
4760 
4761 	DBCALLED(softs, 2);
4762 
4763 	/* We don't allow setting capabilities for other targets */
4764 	if (cap == NULL || whom == 0) {
4765 		AACDB_PRINT(softs, CE_WARN,
4766 		    "SetCap> %s not supported: whom=%d", cap, whom);
4767 		return (-1);
4768 	}
4769 
4770 	mutex_enter(&softs->io_lock);
4771 	dvp = AAC_DEV(softs, ap->a_target);
4772 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4773 		mutex_exit(&softs->io_lock);
4774 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
4775 		    ap->a_target, ap->a_lun);
4776 		return (-1);
4777 	}
4778 
4779 	switch (scsi_hba_lookup_capstr(cap)) {
4780 	case SCSI_CAP_ARQ:
4781 		/* Force auto request sense */
4782 		rval = (value == 1) ? 1 : 0;
4783 		break;
4784 	case SCSI_CAP_UNTAGGED_QING:
4785 	case SCSI_CAP_TAGGED_QING:
4786 		rval = (value == 1) ? 1 : 0;
4787 		break;
4788 	default:
4789 		rval = -1;
4790 		break;
4791 	}
4792 	mutex_exit(&softs->io_lock);
4793 
4794 	AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
4795 	    cap, ap->a_target, ap->a_lun, value, rval);
4796 	return (rval);
4797 }
4798 
4799 static void
4800 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4801 {
4802 	struct aac_cmd *acp = PKT2AC(pkt);
4803 
4804 	DBCALLED(NULL, 2);
4805 
4806 	if (acp->sgt) {
4807 		kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4808 		    acp->left_cookien);
4809 	}
4810 	aac_free_dmamap(acp);
4811 	ASSERT(acp->slotp == NULL);
4812 	scsi_hba_pkt_free(ap, pkt);
4813 }
4814 
4815 int
4816 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
4817     struct buf *bp, int flags, int (*cb)(), caddr_t arg)
4818 {
4819 	int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
4820 	uint_t oldcookiec;
4821 	int bioerr;
4822 	int rval;
4823 
4824 	oldcookiec = acp->left_cookien;
4825 
4826 	/* Move window to build s/g map */
4827 	if (acp->total_nwin > 0) {
4828 		if (++acp->cur_win < acp->total_nwin) {
4829 			off_t off;
4830 			size_t len;
4831 
4832 			rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
4833 			    &off, &len, &acp->cookie, &acp->left_cookien);
4834 			if (rval == DDI_SUCCESS)
4835 				goto get_dma_cookies;
4836 			AACDB_PRINT(softs, CE_WARN,
4837 			    "ddi_dma_getwin() fail %d", rval);
4838 			return (AACERR);
4839 		}
4840 		AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
4841 		return (AACERR);
4842 	}
4843 
4844 	/* We need to transfer data, so we allocate DMA resources for this pkt */
4845 	if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
4846 		uint_t dma_flags = 0;
4847 		struct aac_sge *sge;
4848 
4849 		/*
4850 		 * We will still use this point to fake some
4851 		 * information in tran_start
4852 		 */
4853 		acp->bp = bp;
4854 
4855 		/* Set dma flags */
4856 		if (BUF_IS_READ(bp)) {
4857 			dma_flags |= DDI_DMA_READ;
4858 			acp->flags |= AAC_CMD_BUF_READ;
4859 		} else {
4860 			dma_flags |= DDI_DMA_WRITE;
4861 			acp->flags |= AAC_CMD_BUF_WRITE;
4862 		}
4863 		if (flags & PKT_CONSISTENT)
4864 			dma_flags |= DDI_DMA_CONSISTENT;
4865 		if (flags & PKT_DMA_PARTIAL)
4866 			dma_flags |= DDI_DMA_PARTIAL;
4867 
4868 		/* Alloc buf dma handle */
4869 		if (!acp->buf_dma_handle) {
4870 			rval = ddi_dma_alloc_handle(softs->devinfo_p,
4871 			    &softs->buf_dma_attr, cb, arg,
4872 			    &acp->buf_dma_handle);
4873 			if (rval != DDI_SUCCESS) {
4874 				AACDB_PRINT(softs, CE_WARN,
4875 				    "Can't allocate DMA handle, errno=%d",
4876 				    rval);
4877 				goto error_out;
4878 			}
4879 		}
4880 
4881 		/* Bind buf */
4882 		if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
4883 			rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
4884 			    bp, dma_flags, cb, arg, &acp->cookie,
4885 			    &acp->left_cookien);
4886 		} else {
4887 			size_t bufsz;
4888 
4889 			AACDB_PRINT_TRAN(softs,
4890 			    "non-aligned buffer: addr=0x%p, cnt=%lu",
4891 			    (void *)bp->b_un.b_addr, bp->b_bcount);
4892 			if (bp->b_flags & (B_PAGEIO|B_PHYS))
4893 				bp_mapin(bp);
4894 
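			/*
			 * Bounce through an aligned DMA buffer: write data is
			 * copied in below, and read data is copied back out in
			 * aac_free_dmamap().
			 */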
4895 			rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
4896 			    AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
4897 			    &softs->acc_attr, DDI_DMA_STREAMING,
4898 			    cb, arg, &acp->abp, &bufsz, &acp->abh);
4899 
4900 			if (rval != DDI_SUCCESS) {
4901 				AACDB_PRINT(softs, CE_NOTE,
4902 				    "Cannot alloc DMA to non-aligned buf");
4903 				bioerr = 0;
4904 				goto error_out;
4905 			}
4906 
4907 			if (acp->flags & AAC_CMD_BUF_WRITE)
4908 				ddi_rep_put8(acp->abh,
4909 				    (uint8_t *)bp->b_un.b_addr,
4910 				    (uint8_t *)acp->abp, bp->b_bcount,
4911 				    DDI_DEV_AUTOINCR);
4912 
4913 			rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
4914 			    NULL, acp->abp, bufsz, dma_flags, cb, arg,
4915 			    &acp->cookie, &acp->left_cookien);
4916 		}
4917 
4918 		switch (rval) {
4919 		case DDI_DMA_PARTIAL_MAP:
4920 			if (ddi_dma_numwin(acp->buf_dma_handle,
4921 			    &acp->total_nwin) == DDI_FAILURE) {
4922 				AACDB_PRINT(softs, CE_WARN,
4923 				    "Cannot get number of DMA windows");
4924 				bioerr = 0;
4925 				goto error_out;
4926 			}
4927 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4928 			    acp->left_cookien);
4929 			acp->cur_win = 0;
4930 			break;
4931 
4932 		case DDI_DMA_MAPPED:
4933 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4934 			    acp->left_cookien);
4935 			acp->cur_win = 0;
4936 			acp->total_nwin = 1;
4937 			break;
4938 
4939 		case DDI_DMA_NORESOURCES:
4940 			bioerr = 0;
4941 			AACDB_PRINT(softs, CE_WARN,
4942 			    "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
4943 			goto error_out;
4944 		case DDI_DMA_BADATTR:
4945 		case DDI_DMA_NOMAPPING:
4946 			bioerr = EFAULT;
4947 			AACDB_PRINT(softs, CE_WARN,
4948 			    "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
4949 			goto error_out;
4950 		case DDI_DMA_TOOBIG:
4951 			bioerr = EINVAL;
4952 			AACDB_PRINT(softs, CE_WARN,
4953 			    "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
4954 			    bp->b_bcount);
4955 			goto error_out;
4956 		default:
4957 			bioerr = EINVAL;
4958 			AACDB_PRINT(softs, CE_WARN,
4959 			    "Cannot bind buf for DMA: %d", rval);
4960 			goto error_out;
4961 		}
4962 		acp->flags |= AAC_CMD_DMA_VALID;
4963 
4964 get_dma_cookies:
4965 		ASSERT(acp->left_cookien > 0);
4966 		if (acp->left_cookien > softs->aac_sg_tablesize) {
4967 			AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
4968 			    acp->left_cookien);
4969 			bioerr = EINVAL;
4970 			goto error_out;
4971 		}
4972 		if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
4973 			kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4974 			    oldcookiec);
4975 			acp->sgt = NULL;
4976 		}
4977 		if (acp->sgt == NULL) {
4978 			acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
4979 			    acp->left_cookien, kf);
4980 			if (acp->sgt == NULL) {
4981 				AACDB_PRINT(softs, CE_WARN,
4982 				    "sgt kmem_alloc fail");
4983 				bioerr = ENOMEM;
4984 				goto error_out;
4985 			}
4986 		}
4987 
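		/*
		 * The first cookie came back from the bind/getwin call; walk
		 * the rest with ddi_dma_nextcookie() and accumulate bcount.
		 */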
4988 		sge = &acp->sgt[0];
4989 		sge->bcount = acp->cookie.dmac_size;
4990 		sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
4991 		sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
4992 		acp->bcount = acp->cookie.dmac_size;
4993 		for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
4994 			ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
4995 			sge->bcount = acp->cookie.dmac_size;
4996 			sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
4997 			sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
4998 			acp->bcount += acp->cookie.dmac_size;
4999 		}
5000 
5001 		/*
5002 		 * Note: The old DMA engine does not correctly handle the
5003 		 * dma_attr_maxxfer attribute, so we have to enforce
5004 		 * it ourselves.
5005 		 */
5006 		if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5007 			AACDB_PRINT(softs, CE_NOTE,
5008 			    "large xfer size received %d\n", acp->bcount);
5009 			bioerr = EINVAL;
5010 			goto error_out;
5011 		}
5012 
5013 		acp->total_xfer += acp->bcount;
5014 
5015 		if (acp->pkt) {
5016 			/* Return remaining byte count */
5017 			if (acp->total_xfer <= bp->b_bcount) {
5018 				acp->pkt->pkt_resid = bp->b_bcount - \
5019 				    acp->total_xfer;
5020 			} else {
5021 				/*
5022 				 * The allocated DMA size is greater than the
5023 				 * buf size of bp. This happens with devices
5024 				 * such as tape: extra bytes are allocated, but
5025 				 * the packet residual has to stay correct.
5026 				 */
5027 				acp->pkt->pkt_resid = 0;
5028 			}
5029 			AACDB_PRINT_TRAN(softs,
5030 			    "bp=0x%p, xfered=%d/%d, resid=%d",
5031 			    (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5032 			    (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5033 		}
5034 	}
5035 	return (AACOK);
5036 
5037 error_out:
5038 	bioerror(bp, bioerr);
5039 	return (AACERR);
5040 }
5041 
5042 static struct scsi_pkt *
5043 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5044     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5045     int (*callback)(), caddr_t arg)
5046 {
5047 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5048 	struct aac_cmd *acp, *new_acp;
5049 
5050 	DBCALLED(softs, 2);
5051 
5052 	/* Allocate pkt */
5053 	if (pkt == NULL) {
5054 		int slen;
5055 
5056 		/* Force auto request sense */
5057 		slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5058 		pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5059 		    slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5060 		if (pkt == NULL) {
5061 			AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5062 			return (NULL);
5063 		}
5064 		acp = new_acp = PKT2AC(pkt);
5065 		acp->pkt = pkt;
5066 		acp->cmdlen = cmdlen;
5067 
5068 		if (ap->a_target < AAC_MAX_LD) {
5069 			acp->dvp = &softs->containers[ap->a_target].dev;
5070 			acp->aac_cmd_fib = softs->aac_cmd_fib;
5071 			acp->ac_comp = aac_ld_complete;
5072 		} else {
5073 			_NOTE(ASSUMING_PROTECTED(softs->nondasds))
5074 
5075 			acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5076 			acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5077 			acp->ac_comp = aac_pd_complete;
5078 		}
5079 	} else {
5080 		acp = PKT2AC(pkt);
5081 		new_acp = NULL;
5082 	}
5083 
5084 	if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5085 		return (pkt);
5086 
5087 	if (new_acp)
5088 		aac_tran_destroy_pkt(ap, pkt);
5089 	return (NULL);
5090 }
5091 
5092 /*
5093  * tran_sync_pkt(9E) - explicit DMA synchronization
5094  */
5095 /*ARGSUSED*/
5096 static void
5097 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5098 {
5099 	struct aac_cmd *acp = PKT2AC(pkt);
5100 
5101 	DBCALLED(NULL, 2);
5102 
5103 	if (aac_dma_sync_ac(acp) != AACOK)
5104 		ddi_fm_service_impact(
5105 		    (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5106 		    DDI_SERVICE_UNAFFECTED);
5107 }
5108 
5109 /*
5110  * tran_dmafree(9E) - deallocate DMA resources allocated for command
5111  */
5112 /*ARGSUSED*/
5113 static void
5114 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5115 {
5116 	struct aac_cmd *acp = PKT2AC(pkt);
5117 
5118 	DBCALLED(NULL, 2);
5119 
5120 	aac_free_dmamap(acp);
5121 }
5122 
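/*
 * Quiesce the bus: hold off new async commands and, if any are still
 * outstanding, drain them.  Returns AACERR if the wait is interrupted
 * by a signal, in which case the hold is dropped and waiting I/O is
 * restarted.
 */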
5123 static int
5124 aac_do_quiesce(struct aac_softstate *softs)
5125 {
5126 	aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5127 	if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5128 		aac_start_drain(softs);
5129 		do {
5130 			if (cv_wait_sig(&softs->drain_cv,
5131 			    &softs->io_lock) == 0) {
5132 				/* Quiesce has been interrupted */
5133 				aac_stop_drain(softs);
5134 				aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5135 				aac_start_waiting_io(softs);
5136 				return (AACERR);
5137 			}
5138 		} while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5139 		aac_stop_drain(softs);
5140 	}
5141 
5142 	softs->state |= AAC_STATE_QUIESCED;
5143 	return (AACOK);
5144 }
5145 
5146 static int
5147 aac_tran_quiesce(dev_info_t *dip)
5148 {
5149 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5150 	int rval;
5151 
5152 	DBCALLED(softs, 1);
5153 
5154 	mutex_enter(&softs->io_lock);
5155 	if (aac_do_quiesce(softs) == AACOK)
5156 		rval = 0;
5157 	else
5158 		rval = 1;
5159 	mutex_exit(&softs->io_lock);
5160 	return (rval);
5161 }
5162 
5163 static int
5164 aac_do_unquiesce(struct aac_softstate *softs)
5165 {
5166 	softs->state &= ~AAC_STATE_QUIESCED;
5167 	aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5168 
5169 	aac_start_waiting_io(softs);
5170 	return (AACOK);
5171 }
5172 
5173 static int
5174 aac_tran_unquiesce(dev_info_t *dip)
5175 {
5176 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5177 	int rval;
5178 
5179 	DBCALLED(softs, 1);
5180 
5181 	mutex_enter(&softs->io_lock);
5182 	if (aac_do_unquiesce(softs) == AACOK)
5183 		rval = 0;
5184 	else
5185 		rval = 1;
5186 	mutex_exit(&softs->io_lock);
5187 	return (rval);
5188 }
5189 
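/*
 * Allocate and register the SCSA HBA transport structure, wiring up the
 * tran_xxx(9E) entry points implemented above and attaching with
 * scsi_hba_attach_setup(9F).
 */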
5190 static int
5191 aac_hba_setup(struct aac_softstate *softs)
5192 {
5193 	scsi_hba_tran_t *hba_tran;
5194 	int rval;
5195 
5196 	hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5197 	if (hba_tran == NULL)
5198 		return (AACERR);
5199 	hba_tran->tran_hba_private = softs;
5200 	hba_tran->tran_tgt_init = aac_tran_tgt_init;
5201 	hba_tran->tran_tgt_free = aac_tran_tgt_free;
5202 	hba_tran->tran_tgt_probe = scsi_hba_probe;
5203 	hba_tran->tran_start = aac_tran_start;
5204 	hba_tran->tran_getcap = aac_tran_getcap;
5205 	hba_tran->tran_setcap = aac_tran_setcap;
5206 	hba_tran->tran_init_pkt = aac_tran_init_pkt;
5207 	hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5208 	hba_tran->tran_reset = aac_tran_reset;
5209 	hba_tran->tran_abort = aac_tran_abort;
5210 	hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5211 	hba_tran->tran_dmafree = aac_tran_dmafree;
5212 	hba_tran->tran_quiesce = aac_tran_quiesce;
5213 	hba_tran->tran_unquiesce = aac_tran_unquiesce;
5214 	hba_tran->tran_bus_config = aac_tran_bus_config;
5215 	rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5216 	    hba_tran, 0);
5217 	if (rval != DDI_SUCCESS) {
5218 		scsi_hba_tran_free(hba_tran);
5219 		AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5220 		return (AACERR);
5221 	}
5222 
5223 	softs->hba_tran = hba_tran;
5224 	return (AACOK);
5225 }
5226 
5227 /*
5228  * FIB setup operations
5229  */
5230 
5231 /*
5232  * Init FIB header
5233  */
5234 static void
5235 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp,
5236     uint16_t cmd, uint16_t fib_size)
5237 {
5238 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5239 	struct aac_fib *fibp = slotp->fibp;
5240 	uint32_t xfer_state;
5241 
5242 	xfer_state =
5243 	    AAC_FIBSTATE_HOSTOWNED |
5244 	    AAC_FIBSTATE_INITIALISED |
5245 	    AAC_FIBSTATE_EMPTY |
5246 	    AAC_FIBSTATE_FROMHOST |
5247 	    AAC_FIBSTATE_REXPECTED |
5248 	    AAC_FIBSTATE_NORM;
5249 	if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) {
5250 		xfer_state |=
5251 		    AAC_FIBSTATE_ASYNC |
5252 		    AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */;
5253 		ddi_put16(acc, &fibp->Header.SenderSize,
5254 		    softs->aac_max_fib_size);
5255 	} else {
5256 		ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE);
5257 	}
5258 
5259 	ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5260 	ddi_put16(acc, &fibp->Header.Command, cmd);
5261 	ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5262 	ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5263 	ddi_put16(acc, &fibp->Header.Size, fib_size);
5264 	ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5265 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5266 	ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5267 }
5268 
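/*
 * The FIB builders below all follow the same rough pattern (a sketch of
 * the flow, not additional functionality):
 *
 *	1. compute acp->fib_size from the payload and SG entry count;
 *	2. call aac_cmd_fib_header() to fill the common FIB header;
 *	3. fill the command-specific payload and scatter/gather table
 *	   through the slot's DDI access handle (ddi_put32() etc.).
 */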
5269 /*
5270  * Init FIB for raw IO command
5271  */
5272 static void
5273 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5274 {
5275 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5276 	struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5277 	struct aac_sg_entryraw *sgp;
5278 	struct aac_sge *sge;
5279 
5280 	/* Calculate FIB size */
5281 	acp->fib_size = sizeof (struct aac_fib_header) + \
5282 	    sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5283 	    sizeof (struct aac_sg_entryraw);
5284 
5285 	aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size);
5286 
5287 	ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5288 	ddi_put16(acc, &io->BpTotal, 0);
5289 	ddi_put16(acc, &io->BpComplete, 0);
5290 
5291 	ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5292 	ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5293 	ddi_put16(acc, &io->ContainerId,
5294 	    ((struct aac_container *)acp->dvp)->cid);
5295 
5296 	/* Fill SG table */
5297 	ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5298 	ddi_put32(acc, &io->ByteCount, acp->bcount);
5299 
5300 	for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5301 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5302 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5303 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5304 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5305 		sgp->Next = 0;
5306 		sgp->Prev = 0;
5307 		sgp->Flags = 0;
5308 	}
5309 }
5310 
5311 /* Init FIB for 64-bit block IO command */
5312 static void
5313 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5314 {
5315 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5316 	struct aac_blockread64 *br = (struct aac_blockread64 *) \
5317 	    &acp->slotp->fibp->data[0];
5318 	struct aac_sg_entry64 *sgp;
5319 	struct aac_sge *sge;
5320 
5321 	acp->fib_size = sizeof (struct aac_fib_header) + \
5322 	    sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5323 	    sizeof (struct aac_sg_entry64);
5324 
5325 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64,
5326 	    acp->fib_size);
5327 
5328 	/*
5329 	 * The definitions for aac_blockread64 and aac_blockwrite64
5330 	 * are the same.
5331 	 */
5332 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5333 	ddi_put16(acc, &br->ContainerId,
5334 	    ((struct aac_container *)acp->dvp)->cid);
5335 	ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5336 	    VM_CtHostRead64 : VM_CtHostWrite64);
5337 	ddi_put16(acc, &br->Pad, 0);
5338 	ddi_put16(acc, &br->Flags, 0);
5339 
5340 	/* Fill SG table */
5341 	ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5342 	ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5343 
5344 	for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5345 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5346 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5347 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5348 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5349 	}
5350 }
5351 
5352 /* Init FIB for block IO command */
5353 static void
5354 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5355 {
5356 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5357 	struct aac_blockread *br = (struct aac_blockread *) \
5358 	    &acp->slotp->fibp->data[0];
5359 	struct aac_sg_entry *sgp;
5360 	struct aac_sge *sge = &acp->sgt[0];
5361 
5362 	if (acp->flags & AAC_CMD_BUF_READ) {
5363 		acp->fib_size = sizeof (struct aac_fib_header) + \
5364 		    sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5365 		    sizeof (struct aac_sg_entry);
5366 
5367 		ddi_put32(acc, &br->Command, VM_CtBlockRead);
5368 		ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5369 		sgp = &br->SgMap.SgEntry[0];
5370 	} else {
5371 		struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5372 
5373 		acp->fib_size = sizeof (struct aac_fib_header) + \
5374 		    sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5375 		    sizeof (struct aac_sg_entry);
5376 
5377 		ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5378 		ddi_put32(acc, &bw->Stable, CUNSTABLE);
5379 		ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5380 		sgp = &bw->SgMap.SgEntry[0];
5381 	}
5382 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size);
5383 
5384 	/*
5385 	 * aac_blockread and aac_blockwrite share the same structure
5386 	 * header layout, so br is used for both reads and writes here.
5387 	 */
5388 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5389 	ddi_put32(acc, &br->ContainerId,
5390 	    ((struct aac_container *)acp->dvp)->cid);
5391 	ddi_put32(acc, &br->ByteCount, acp->bcount);
5392 
5393 	/* Fill SG table */
5394 	for (sge = &acp->sgt[0];
5395 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5396 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5397 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5398 	}
5399 }
5400 
5401 /*ARGSUSED*/
5402 void
5403 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5404 {
5405 	struct aac_slot *slotp = acp->slotp;
5406 	struct aac_fib *fibp = slotp->fibp;
5407 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5408 
5409 	ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5410 	    acp->fib_size,   /* only copy data of needed length */
5411 	    DDI_DEV_AUTOINCR);
5412 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5413 	ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5414 }
5415 
5416 static void
5417 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5418 {
5419 	struct aac_slot *slotp = acp->slotp;
5420 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5421 	struct aac_synchronize_command *sync =
5422 	    (struct aac_synchronize_command *)&slotp->fibp->data[0];
5423 
5424 	acp->fib_size = sizeof (struct aac_fib_header) + \
5425 	    sizeof (struct aac_synchronize_command);
5426 
5427 	aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size);
5428 	ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5429 	ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5430 	ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5431 	ddi_put32(acc, &sync->Count,
5432 	    sizeof (((struct aac_synchronize_reply *)0)->Data));
5433 }
5434 
5435 /*
5436  * Init FIB for pass-through SCMD
5437  */
5438 static void
5439 aac_cmd_fib_srb(struct aac_cmd *acp)
5440 {
5441 	struct aac_slot *slotp = acp->slotp;
5442 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5443 	struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0];
5444 	uint8_t *cdb;
5445 
5446 	ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5447 	ddi_put32(acc, &srb->retry_limit, 0);
5448 	ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5449 	ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5450 	if (acp->fibp == NULL) {
5451 		if (acp->flags & AAC_CMD_BUF_READ)
5452 			ddi_put32(acc, &srb->flags, SRB_DataIn);
5453 		else if (acp->flags & AAC_CMD_BUF_WRITE)
5454 			ddi_put32(acc, &srb->flags, SRB_DataOut);
5455 		ddi_put32(acc, &srb->channel,
5456 		    ((struct aac_nondasd *)acp->dvp)->bus);
5457 		ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5458 		ddi_put32(acc, &srb->lun, 0);
5459 		cdb = acp->pkt->pkt_cdbp;
5460 	} else {
5461 		struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5462 
5463 		ddi_put32(acc, &srb->flags, srb0->flags);
5464 		ddi_put32(acc, &srb->channel, srb0->channel);
5465 		ddi_put32(acc, &srb->id, srb0->id);
5466 		ddi_put32(acc, &srb->lun, srb0->lun);
5467 		cdb = srb0->cdb;
5468 	}
5469 	ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5470 }
5471 
5472 static void
5473 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5474 {
5475 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5476 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5477 	struct aac_sg_entry *sgp;
5478 	struct aac_sge *sge;
5479 
5480 	acp->fib_size = sizeof (struct aac_fib_header) + \
5481 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5482 	    acp->left_cookien * sizeof (struct aac_sg_entry);
5483 
5484 	/* Fill FIB and SRB headers, and copy cdb */
5485 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size);
5486 	aac_cmd_fib_srb(acp);
5487 
5488 	/* Fill SG table */
5489 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5490 	ddi_put32(acc, &srb->count, acp->bcount);
5491 
5492 	for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5493 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5494 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5495 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5496 	}
5497 }
5498 
5499 static void
5500 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5501 {
5502 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5503 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5504 	struct aac_sg_entry64 *sgp;
5505 	struct aac_sge *sge;
5506 
5507 	acp->fib_size = sizeof (struct aac_fib_header) + \
5508 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5509 	    acp->left_cookien * sizeof (struct aac_sg_entry64);
5510 
5511 	/* Fill FIB and SRB headers, and copy cdb */
5512 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64,
5513 	    acp->fib_size);
5514 	aac_cmd_fib_srb(acp);
5515 
5516 	/* Fill SG table */
5517 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5518 	ddi_put32(acc, &srb->count, acp->bcount);
5519 
5520 	for (sge = &acp->sgt[0],
5521 	    sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5522 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5523 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5524 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5525 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5526 	}
5527 }
5528 
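/*
 * Bind a command to a free FIB slot: build the FIB via the command's
 * aac_cmd_fib callback and sync the slot's FIB DMA memory for the
 * device.  Returns AACERR when no free slot is available.
 */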
5529 static int
5530 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5531 {
5532 	struct aac_slot *slotp;
5533 
5534 	if (slotp = aac_get_slot(softs)) {
5535 		acp->slotp = slotp;
5536 		slotp->acp = acp;
5537 		acp->aac_cmd_fib(softs, acp);
5538 		(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5539 		    DDI_DMA_SYNC_FORDEV);
5540 		return (AACOK);
5541 	}
5542 	return (AACERR);
5543 }
5544 
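/*
 * Throttle check before binding: a command is bound to a slot only when
 * the per-device (or, for commands without a device, the per-bus)
 * outstanding count is below its throttle.  Non-tagged commands are
 * bound only when the device queue is empty; otherwise the async queue
 * is put into drain mode.
 */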
5545 static int
5546 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5547 {
5548 	struct aac_device *dvp = acp->dvp;
5549 	int q = AAC_CMDQ(acp);
5550 
5551 	if (dvp) {
5552 		if (dvp->ncmds[q] < dvp->throttle[q]) {
5553 			if (!(acp->flags & AAC_CMD_NTAG) ||
5554 			    dvp->ncmds[q] == 0) {
5555 do_bind:
5556 				return (aac_cmd_slot_bind(softs, acp));
5557 			}
5558 			ASSERT(q == AAC_CMDQ_ASYNC);
5559 			aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5560 			    AAC_THROTTLE_DRAIN);
5561 		}
5562 	} else {
5563 		if (softs->bus_ncmds[q] < softs->bus_throttle[q])
5564 			goto do_bind;
5565 	}
5566 	return (AACERR);
5567 }
5568 
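/*
 * Send a bound command to the adapter: account it on the busy queue,
 * then deliver the FIB either through the new comm. interface
 * (aac_send_command) or the legacy adapter command queue.  On delivery
 * failure the command is completed through the soft interrupt with
 * CMD_INCOMPLETE.
 */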
5569 static void
5570 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5571 {
5572 	struct aac_slot *slotp = acp->slotp;
5573 	int q = AAC_CMDQ(acp);
5574 	int rval;
5575 
5576 	/* Set ac and pkt */
5577 	if (acp->pkt) { /* ac from ioctl has no pkt */
5578 		acp->pkt->pkt_state |=
5579 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5580 	}
5581 	if (acp->timeout) /* 0 indicates no timeout */
5582 		acp->timeout += aac_timebase + aac_tick;
5583 
5584 	if (acp->dvp)
5585 		acp->dvp->ncmds[q]++;
5586 	softs->bus_ncmds[q]++;
5587 	aac_cmd_enqueue(&softs->q_busy, acp);
5588 
5589 	AACDB_PRINT_FIB(softs, slotp);
5590 
5591 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
5592 		rval = aac_send_command(softs, slotp);
5593 	} else {
5594 		/*
5595 		 * If the FIB cannot be enqueued, the adapter is in an
5596 		 * abnormal state and will not interrupt us.
5597 		 */
5598 		rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5599 		    slotp->fib_phyaddr, acp->fib_size);
5600 	}
5601 
5602 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5603 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5604 
5605 	/*
5606 	 * NOTE: Commands are sent only when slots are available, so we
5607 	 * should never reach here.
5608 	 */
5609 	if (rval != AACOK) {
5610 		AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5611 		if (acp->pkt) {
5612 			acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5613 			aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5614 		}
5615 		aac_end_io(softs, acp);
5616 		if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5617 			ddi_trigger_softintr(softs->softint_id);
5618 	}
5619 }
5620 
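/*
 * Start as many commands from a wait queue as slots and throttle limits
 * allow; stop early when the free slot list is exhausted.
 */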
5621 static void
5622 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5623 {
5624 	struct aac_cmd *acp, *next_acp;
5625 
5626 	/* Serve as many waiting io's as possible */
5627 	for (acp = q->q_head; acp; acp = next_acp) {
5628 		next_acp = acp->next;
5629 		if (aac_bind_io(softs, acp) == AACOK) {
5630 			aac_cmd_delete(q, acp);
5631 			aac_start_io(softs, acp);
5632 		}
5633 		if (softs->free_io_slot_head == NULL)
5634 			break;
5635 	}
5636 }
5637 
5638 static void
5639 aac_start_waiting_io(struct aac_softstate *softs)
5640 {
5641 	/*
5642 	 * Sync FIB I/O is served before async FIB I/O so that I/O requests
5643 	 * issued by interactive userland commands are answered promptly.
5644 	 */
5645 	if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5646 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5647 	if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5648 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
5649 }
5650 
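/*
 * Drain the completion queue: for each completed command, sync
 * consistent read buffers back to the caller, check the FMA access/DMA
 * handles, and finally invoke the packet completion callback.
 */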
5651 static void
5652 aac_drain_comp_q(struct aac_softstate *softs)
5653 {
5654 	struct aac_cmd *acp;
5655 	struct scsi_pkt *pkt;
5656 
5657 	/*CONSTCOND*/
5658 	while (1) {
5659 		mutex_enter(&softs->q_comp_mutex);
5660 		acp = aac_cmd_dequeue(&softs->q_comp);
5661 		mutex_exit(&softs->q_comp_mutex);
5662 		if (acp != NULL) {
5663 			ASSERT(acp->pkt != NULL);
5664 			pkt = acp->pkt;
5665 
5666 			if (pkt->pkt_reason == CMD_CMPLT) {
5667 				/*
5668 				 * Consistent packets need to be sync'ed first
5669 				 */
5670 				if ((acp->flags & AAC_CMD_CONSISTENT) &&
5671 				    (acp->flags & AAC_CMD_BUF_READ)) {
5672 					if (aac_dma_sync_ac(acp) != AACOK) {
5673 						ddi_fm_service_impact(
5674 						    softs->devinfo_p,
5675 						    DDI_SERVICE_UNAFFECTED);
5676 						pkt->pkt_reason = CMD_TRAN_ERR;
5677 						pkt->pkt_statistics = 0;
5678 					}
5679 				}
5680 				if ((aac_check_acc_handle(softs-> \
5681 				    comm_space_acc_handle) != DDI_SUCCESS) ||
5682 				    (aac_check_acc_handle(softs-> \
5683 				    pci_mem_handle) != DDI_SUCCESS)) {
5684 					ddi_fm_service_impact(softs->devinfo_p,
5685 					    DDI_SERVICE_UNAFFECTED);
5686 					ddi_fm_acc_err_clear(softs-> \
5687 					    pci_mem_handle, DDI_FME_VER0);
5688 					pkt->pkt_reason = CMD_TRAN_ERR;
5689 					pkt->pkt_statistics = 0;
5690 				}
5691 				if (aac_check_dma_handle(softs-> \
5692 				    comm_space_dma_handle) != DDI_SUCCESS) {
5693 					ddi_fm_service_impact(softs->devinfo_p,
5694 					    DDI_SERVICE_UNAFFECTED);
5695 					pkt->pkt_reason = CMD_TRAN_ERR;
5696 					pkt->pkt_statistics = 0;
5697 				}
5698 			}
5699 			(*pkt->pkt_comp)(pkt);
5700 		} else {
5701 			break;
5702 		}
5703 	}
5704 }
5705 
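/*
 * Allocate and DMA-bind the FIB memory for one I/O slot.  The physical
 * address of the FIB is recorded in slotp->fib_phyaddr for use by the
 * adapter.
 */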
5706 static int
5707 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
5708 {
5709 	size_t rlen;
5710 	ddi_dma_cookie_t cookie;
5711 	uint_t cookien;
5712 
5713 	/* Allocate FIB dma resource */
5714 	if (ddi_dma_alloc_handle(
5715 	    softs->devinfo_p,
5716 	    &softs->addr_dma_attr,
5717 	    DDI_DMA_SLEEP,
5718 	    NULL,
5719 	    &slotp->fib_dma_handle) != DDI_SUCCESS) {
5720 		AACDB_PRINT(softs, CE_WARN,
5721 		    "Cannot alloc dma handle for slot fib area");
5722 		goto error;
5723 	}
5724 	if (ddi_dma_mem_alloc(
5725 	    slotp->fib_dma_handle,
5726 	    softs->aac_max_fib_size,
5727 	    &softs->acc_attr,
5728 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5729 	    DDI_DMA_SLEEP,
5730 	    NULL,
5731 	    (caddr_t *)&slotp->fibp,
5732 	    &rlen,
5733 	    &slotp->fib_acc_handle) != DDI_SUCCESS) {
5734 		AACDB_PRINT(softs, CE_WARN,
5735 		    "Cannot alloc mem for slot fib area");
5736 		goto error;
5737 	}
5738 	if (ddi_dma_addr_bind_handle(
5739 	    slotp->fib_dma_handle,
5740 	    NULL,
5741 	    (caddr_t)slotp->fibp,
5742 	    softs->aac_max_fib_size,
5743 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5744 	    DDI_DMA_SLEEP,
5745 	    NULL,
5746 	    &cookie,
5747 	    &cookien) != DDI_DMA_MAPPED) {
5748 		AACDB_PRINT(softs, CE_WARN,
5749 		    "dma bind failed for slot fib area");
5750 		goto error;
5751 	}
5752 
5753 	/* Check dma handles allocated in fib attach */
5754 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
5755 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5756 		goto error;
5757 	}
5758 
5759 	/* Check acc handles allocated in fib attach */
5760 	if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
5761 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5762 		goto error;
5763 	}
5764 
5765 	slotp->fib_phyaddr = cookie.dmac_laddress;
5766 	return (AACOK);
5767 
5768 error:
5769 	if (slotp->fib_acc_handle) {
5770 		ddi_dma_mem_free(&slotp->fib_acc_handle);
5771 		slotp->fib_acc_handle = NULL;
5772 	}
5773 	if (slotp->fib_dma_handle) {
5774 		ddi_dma_free_handle(&slotp->fib_dma_handle);
5775 		slotp->fib_dma_handle = NULL;
5776 	}
5777 	return (AACERR);
5778 }
5779 
5780 static void
5781 aac_free_fib(struct aac_slot *slotp)
5782 {
5783 	(void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
5784 	ddi_dma_mem_free(&slotp->fib_acc_handle);
5785 	slotp->fib_acc_handle = NULL;
5786 	ddi_dma_free_handle(&slotp->fib_dma_handle);
5787 	slotp->fib_dma_handle = NULL;
5788 	slotp->fib_phyaddr = 0;
5789 }
5790 
5791 static void
5792 aac_alloc_fibs(struct aac_softstate *softs)
5793 {
5794 	int i;
5795 	struct aac_slot *slotp;
5796 
5797 	for (i = 0; i < softs->total_slots &&
5798 	    softs->total_fibs < softs->total_slots; i++) {
5799 		slotp = &(softs->io_slot[i]);
5800 		if (slotp->fib_phyaddr)
5801 			continue;
5802 		if (aac_alloc_fib(softs, slotp) != AACOK)
5803 			break;
5804 
5805 		/* Insert the slot to the free slot list */
5806 		aac_release_slot(softs, slotp);
5807 		softs->total_fibs++;
5808 	}
5809 }
5810 
5811 static void
5812 aac_destroy_fibs(struct aac_softstate *softs)
5813 {
5814 	struct aac_slot *slotp;
5815 
5816 	while ((slotp = softs->free_io_slot_head) != NULL) {
5817 		ASSERT(slotp->fib_phyaddr);
5818 		softs->free_io_slot_head = slotp->next;
5819 		aac_free_fib(slotp);
5820 		ASSERT(slotp->index == (slotp - softs->io_slot));
5821 		softs->total_fibs--;
5822 	}
5823 	ASSERT(softs->total_fibs == 0);
5824 }
5825 
5826 static int
5827 aac_create_slots(struct aac_softstate *softs)
5828 {
5829 	int i;
5830 
5831 	softs->total_slots = softs->aac_max_fibs;
5832 	softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
5833 	    softs->total_slots, KM_SLEEP);
5834 	if (softs->io_slot == NULL) {
5835 		AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
5836 		return (AACERR);
5837 	}
5838 	for (i = 0; i < softs->total_slots; i++)
5839 		softs->io_slot[i].index = i;
5840 	softs->free_io_slot_head = NULL;
5841 	softs->total_fibs = 0;
5842 	return (AACOK);
5843 }
5844 
5845 static void
5846 aac_destroy_slots(struct aac_softstate *softs)
5847 {
5848 	ASSERT(softs->free_io_slot_head == NULL);
5849 
5850 	kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
5851 	    softs->total_slots);
5852 	softs->io_slot = NULL;
5853 	softs->total_slots = 0;
5854 }
5855 
5856 struct aac_slot *
5857 aac_get_slot(struct aac_softstate *softs)
5858 {
5859 	struct aac_slot *slotp;
5860 
5861 	if ((slotp = softs->free_io_slot_head) != NULL) {
5862 		softs->free_io_slot_head = slotp->next;
5863 		slotp->next = NULL;
5864 	}
5865 	return (slotp);
5866 }
5867 
5868 static void
5869 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
5870 {
5871 	ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
5872 	ASSERT(slotp == &softs->io_slot[slotp->index]);
5873 
5874 	slotp->acp = NULL;
5875 	slotp->next = softs->free_io_slot_head;
5876 	softs->free_io_slot_head = slotp;
5877 }
5878 
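/*
 * Dispatch a command: bind and start it immediately if a slot and
 * throttle headroom are available, otherwise put it on the wait queue.
 * Polled and sync commands are then waited upon here before returning
 * to the caller.
 */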
5879 int
5880 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
5881 {
5882 	if (aac_bind_io(softs, acp) == AACOK)
5883 		aac_start_io(softs, acp);
5884 	else
5885 		aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
5886 
5887 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
5888 		return (TRAN_ACCEPT);
5889 	/*
5890 	 * Because sync FIBs are always 512 bytes and reserved for critical
5891 	 * functions, async FIBs are used for polled I/O.
5892 	 */
5893 	if (acp->flags & AAC_CMD_NO_INTR) {
5894 		if (aac_do_poll_io(softs, acp) == AACOK)
5895 			return (TRAN_ACCEPT);
5896 	} else {
5897 		if (aac_do_sync_io(softs, acp) == AACOK)
5898 			return (TRAN_ACCEPT);
5899 	}
5900 	return (TRAN_BADPKT);
5901 }
5902 
5903 static int
5904 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
5905 {
5906 	int (*intr_handler)(struct aac_softstate *);
5907 
5908 	/*
5909 	 * Interrupts are disabled, so we have to poll the adapter ourselves.
5910 	 */
5911 	intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
5912 	    aac_process_intr_new : aac_process_intr_old;
5913 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
5914 		int i = AAC_POLL_TIME * 1000;
5915 
5916 		AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
5917 		if (i == 0)
5918 			aac_cmd_timeout(softs, acp);
5919 	}
5920 
5921 	ddi_trigger_softintr(softs->softint_id);
5922 
5923 	if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
5924 		return (AACOK);
5925 	return (AACERR);
5926 }
5927 
5928 static int
5929 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
5930 {
5931 	ASSERT(softs && acp);
5932 
5933 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
5934 		cv_wait(&softs->event, &softs->io_lock);
5935 
5936 	if (acp->flags & AAC_CMD_CMPLT)
5937 		return (AACOK);
5938 	return (AACERR);
5939 }
5940 
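/*
 * Sync the data buffer of a command: for writes, copy any bounce buffer
 * contents out and sync for the device; for reads, sync for the CPU,
 * verify the DMA handle, and copy any bounce buffer contents back.
 */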
5941 static int
5942 aac_dma_sync_ac(struct aac_cmd *acp)
5943 {
5944 	if (acp->buf_dma_handle) {
5945 		if (acp->flags & AAC_CMD_BUF_WRITE) {
5946 			if (acp->abp != NULL)
5947 				ddi_rep_put8(acp->abh,
5948 				    (uint8_t *)acp->bp->b_un.b_addr,
5949 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
5950 				    DDI_DEV_AUTOINCR);
5951 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
5952 			    DDI_DMA_SYNC_FORDEV);
5953 		} else {
5954 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
5955 			    DDI_DMA_SYNC_FORCPU);
5956 			if (aac_check_dma_handle(acp->buf_dma_handle) !=
5957 			    DDI_SUCCESS)
5958 				return (AACERR);
5959 			if (acp->abp != NULL)
5960 				ddi_rep_get8(acp->abh,
5961 				    (uint8_t *)acp->bp->b_un.b_addr,
5962 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
5963 				    DDI_DEV_AUTOINCR);
5964 		}
5965 	}
5966 	return (AACOK);
5967 }
5968 
5969 /*
5970  * The following function comes from Adaptec:
5971  *
5972  * When the driver sees an event that indicates the containers have
5973  * changed, it rescans them. However, a change may not be complete until
5974  * some other event is received. For example, creating or deleting an
5975  * array can incur as many as six AifEnConfigChange events, which would
5976  * generate six container rescans. To reduce rescans, the driver sets a
5977  * flag and waits for that follow-up event; it then performs the rescan.
5978  */
5979 static int
5980 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp)
5981 {
5982 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
5983 	uint16_t fib_command;
5984 	struct aac_aif_command *aif;
5985 	int en_type;
5986 	int devcfg_needed;
5987 	int current, next;
5988 
5989 	fib_command = LE_16(fibp->Header.Command);
5990 	if (fib_command != AifRequest) {
5991 		cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x",
5992 		    fib_command);
5993 		return (AACERR);
5994 	}
5995 
5996 	/* Update internal container state */
5997 	aif = (struct aac_aif_command *)&fibp->data[0];
5998 
5999 	AACDB_PRINT_AIF(softs, aif);
6000 	devcfg_needed = 0;
6001 	en_type = LE_32((uint32_t)aif->data.EN.type);
6002 
6003 	switch (LE_32((uint32_t)aif->command)) {
6004 	case AifCmdDriverNotify: {
6005 		int cid = LE_32(aif->data.EN.data.ECC.container[0]);
6006 
6007 		switch (en_type) {
6008 		case AifDenMorphComplete:
6009 		case AifDenVolumeExtendComplete:
6010 			if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6011 				softs->devcfg_wait_on = AifEnConfigChange;
6012 			break;
6013 		}
6014 		if (softs->devcfg_wait_on == en_type)
6015 			devcfg_needed = 1;
6016 		break;
6017 	}
6018 
6019 	case AifCmdEventNotify:
6020 		switch (en_type) {
6021 		case AifEnAddContainer:
6022 		case AifEnDeleteContainer:
6023 			softs->devcfg_wait_on = AifEnConfigChange;
6024 			break;
6025 		case AifEnContainerChange:
6026 			if (!softs->devcfg_wait_on)
6027 				softs->devcfg_wait_on = AifEnConfigChange;
6028 			break;
6029 		case AifEnContainerEvent:
6030 			if (ddi_get32(acc, &aif-> \
6031 			    data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6032 				devcfg_needed = 1;
6033 			break;
6034 		}
6035 		if (softs->devcfg_wait_on == en_type)
6036 			devcfg_needed = 1;
6037 		break;
6038 
6039 	case AifCmdJobProgress:
6040 		if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6041 			int pr_status;
6042 			uint32_t pr_ftick, pr_ctick;
6043 
6044 			pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6045 			pr_ctick = LE_32(aif->data.PR[0].currentTick);
6046 			pr_ftick = LE_32(aif->data.PR[0].finalTick);
6047 
6048 			if ((pr_ctick == pr_ftick) ||
6049 			    (pr_status == AifJobStsSuccess))
6050 				softs->devcfg_wait_on = AifEnContainerChange;
6051 			else if ((pr_ctick == 0) &&
6052 			    (pr_status == AifJobStsRunning))
6053 				softs->devcfg_wait_on = AifEnContainerChange;
6054 		}
6055 		break;
6056 	}
6057 
6058 	if (devcfg_needed) {
6059 		softs->devcfg_wait_on = 0;
6060 		(void) aac_probe_containers(softs);
6061 	}
6062 
6063 	/* Modify AIF contexts */
6064 	current = softs->aifq_idx;
6065 	next = (current + 1) % AAC_AIFQ_LENGTH;
6066 	if (next == 0) {
6067 		struct aac_fib_context *ctx;
6068 
6069 		softs->aifq_wrap = 1;
6070 		for (ctx = softs->fibctx; ctx; ctx = ctx->next) {
6071 			if (next == ctx->ctx_idx) {
6072 				ctx->ctx_filled = 1;
6073 			} else if (current == ctx->ctx_idx && ctx->ctx_filled) {
6074 				ctx->ctx_idx = next;
6075 				AACDB_PRINT(softs, CE_NOTE,
6076 				    "-- AIF queue(%x) overrun", ctx->unique);
6077 			}
6078 		}
6079 	}
6080 	softs->aifq_idx = next;
6081 
6082 	/* Wakeup applications */
6083 	cv_broadcast(&softs->aifv);
6084 	return (AACOK);
6085 }
6086 
6087 /*
6088  * Timeout recovery
6089  */
6090 /*ARGSUSED*/
6091 static void
6092 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6093 {
6094 #ifdef DEBUG
6095 	acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6096 	AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6097 	AACDB_PRINT_FIB(softs, acp->slotp);
6098 #endif
6099 
6100 	/*
6101 	 * Besides firmware in an unhealthy state, an overloaded
6102 	 * adapter may also incur packet timeouts.
6103 	 * An adapter with a slower IOP can take longer than 60
6104 	 * seconds to process commands, for example when it is
6105 	 * building a RAID-5 volume while also servicing I/O
6106 	 * requests, so longer completion times should be
6107 	 * tolerated in that case.
6108 	 */
6109 	switch (aac_do_reset(softs)) {
6110 	case AAC_IOP_RESET_SUCCEED:
6111 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6112 		aac_start_waiting_io(softs);
6113 		break;
6114 	case AAC_IOP_RESET_FAILED:
6115 		/* Abort all waiting cmds when adapter is dead */
6116 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6117 		break;
6118 	case AAC_IOP_RESET_ABNORMAL:
6119 		aac_start_waiting_io(softs);
6120 	}
6121 }
6122 
6123 /*
6124  * The following function comes from Adaptec:
6125  *
6126  * A time sync command is sent to synchronize time with the firmware
6127  * every 30 minutes (required for correct AIF timestamps etc.).
6128  */
6129 static int
6130 aac_sync_tick(struct aac_softstate *softs)
6131 {
6132 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
6133 	struct aac_fib *fibp = softs->sync_slot.fibp;
6134 
6135 	ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time());
6136 	return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)));
6137 }
6138 
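/*
 * Periodic timeout(9F) handler: advances the driver timebase, checks
 * the busy queue for timed-out commands, resynchronizes the adapter
 * clock when AAC_SYNC_TICK has elapsed, and reschedules itself while
 * the driver is running.
 */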
6139 static void
6140 aac_daemon(void *arg)
6141 {
6142 	struct aac_softstate *softs = (struct aac_softstate *)arg;
6143 	struct aac_cmd *acp;
6144 
6145 	DBCALLED(softs, 2);
6146 
6147 	mutex_enter(&softs->io_lock);
6148 	/* Check slot for timeout pkts */
6149 	aac_timebase += aac_tick;
6150 	for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6151 		if (acp->timeout) {
6152 			if (acp->timeout <= aac_timebase) {
6153 				aac_cmd_timeout(softs, acp);
6154 				ddi_trigger_softintr(softs->softint_id);
6155 			}
6156 			break;
6157 		}
6158 	}
6159 
6160 	/* Time sync. with firmware every AAC_SYNC_TICK */
6161 	if (aac_sync_time <= aac_timebase) {
6162 		aac_sync_time = aac_timebase;
6163 		if (aac_sync_tick(softs) != AACOK)
6164 			aac_sync_time += aac_tick << 1; /* retry shortly */
6165 		else
6166 			aac_sync_time += AAC_SYNC_TICK;
6167 	}
6168 
6169 	if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0))
6170 		softs->timeout_id = timeout(aac_daemon, (void *)softs,
6171 		    (aac_tick * drv_usectohz(1000000)));
6172 	mutex_exit(&softs->io_lock);
6173 }
6174 
6175 /*
6176  * Architecture dependent functions
6177  */
6178 static int
6179 aac_rx_get_fwstatus(struct aac_softstate *softs)
6180 {
6181 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6182 }
6183 
6184 static int
6185 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6186 {
6187 	return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6188 }
6189 
6190 static void
6191 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6192     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6193 {
6194 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6195 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6196 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6197 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6198 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6199 }
6200 
6201 static int
6202 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6203 {
6204 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6205 }
6206 
6207 static int
6208 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6209 {
6210 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
6211 }
6212 
6213 static void
6214 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6215     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6216 {
6217 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6218 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6219 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6220 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6221 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6222 }
6223 
6224 /*
6225  * cb_ops functions
6226  */
6227 static int
6228 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6229 {
6230 	struct aac_softstate *softs;
6231 	int minor0, minor;
6232 	int instance;
6233 
6234 	DBCALLED(NULL, 2);
6235 
6236 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6237 		return (EINVAL);
6238 
6239 	minor0 = getminor(*devp);
6240 	minor = AAC_SCSA_MINOR(minor0);
6241 
6242 	if (AAC_IS_SCSA_NODE(minor))
6243 		return (scsi_hba_open(devp, flag, otyp, cred));
6244 
6245 	instance = MINOR2INST(minor0);
6246 	if (instance >= AAC_MAX_ADAPTERS)
6247 		return (ENXIO);
6248 
6249 	softs = ddi_get_soft_state(aac_softstatep, instance);
6250 	if (softs == NULL)
6251 		return (ENXIO);
6252 
6253 	return (0);
6254 }
6255 
6256 /*ARGSUSED*/
6257 static int
6258 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6259 {
6260 	int minor0, minor;
6261 	int instance;
6262 
6263 	DBCALLED(NULL, 2);
6264 
6265 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6266 		return (EINVAL);
6267 
6268 	minor0 = getminor(dev);
6269 	minor = AAC_SCSA_MINOR(minor0);
6270 
6271 	if (AAC_IS_SCSA_NODE(minor))
6272 		return (scsi_hba_close(dev, flag, otyp, cred));
6273 
6274 	instance = MINOR2INST(minor0);
6275 	if (instance >= AAC_MAX_ADAPTERS)
6276 		return (ENXIO);
6277 
6278 	return (0);
6279 }
6280 
6281 static int
6282 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6283     int *rval_p)
6284 {
6285 	struct aac_softstate *softs;
6286 	int minor0, minor;
6287 	int instance;
6288 
6289 	DBCALLED(NULL, 2);
6290 
6291 	if (drv_priv(cred_p) != 0)
6292 		return (EPERM);
6293 
6294 	minor0 = getminor(dev);
6295 	minor = AAC_SCSA_MINOR(minor0);
6296 
6297 	if (AAC_IS_SCSA_NODE(minor))
6298 		return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6299 
6300 	instance = MINOR2INST(minor0);
6301 	if (instance < AAC_MAX_ADAPTERS) {
6302 		softs = ddi_get_soft_state(aac_softstatep, instance);
6303 		return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6304 	}
6305 	return (ENXIO);
6306 }
6307 
6308 /*
6309  * The IO fault service error handling callback function
6310  */
6311 /*ARGSUSED*/
6312 static int
6313 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6314 {
6315 	/*
6316 	 * As the driver can always deal with an error in any DMA or
6317 	 * access handle, we can just return the fme_status value.
6318 	 */
6319 	pci_ereport_post(dip, err, NULL);
6320 	return (err->fme_status);
6321 }
6322 
6323 /*
6324  * aac_fm_init - initialize fma capabilities and register with IO
6325  *               fault services.
6326  */
6327 static void
6328 aac_fm_init(struct aac_softstate *softs)
6329 {
6330 	/*
6331 	 * Need to change iblock to priority for new MSI intr
6332 	 */
6333 	ddi_iblock_cookie_t fm_ibc;
6334 
6335 	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6336 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6337 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6338 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6339 
6340 	/* Only register with IO Fault Services if we have some capability */
6341 	if (softs->fm_capabilities) {
6342 		/* Adjust access and dma attributes for FMA */
6343 		softs->acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
6344 		softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6345 		softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6346 
6347 		/*
6348 		 * Register capabilities with IO Fault Services.
6349 		 * fm_capabilities will be updated to indicate
6350 		 * capabilities actually supported (not requested.)
6351 		 */
6352 		ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6353 
6354 		/*
6355 		 * Initialize pci ereport capabilities if ereport
6356 		 * capable (should always be.)
6357 		 */
6358 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6359 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6360 			pci_ereport_setup(softs->devinfo_p);
6361 		}
6362 
6363 		/*
6364 		 * Register error callback if error callback capable.
6365 		 */
6366 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6367 			ddi_fm_handler_register(softs->devinfo_p,
6368 			    aac_fm_error_cb, (void *) softs);
6369 		}
6370 	}
6371 }
6372 
6373 /*
6374  * aac_fm_fini - Releases fma capabilities and un-registers with IO
6375  *               fault services.
6376  */
6377 static void
6378 aac_fm_fini(struct aac_softstate *softs)
6379 {
6380 	/* Only unregister FMA capabilities if registered */
6381 	if (softs->fm_capabilities) {
6382 		/*
6383 		 * Un-register error callback if error callback capable.
6384 		 */
6385 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6386 			ddi_fm_handler_unregister(softs->devinfo_p);
6387 		}
6388 
6389 		/*
6390 		 * Release any resources allocated by pci_ereport_setup()
6391 		 */
6392 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6393 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6394 			pci_ereport_teardown(softs->devinfo_p);
6395 		}
6396 
6397 		/* Unregister from IO Fault Services */
6398 		ddi_fm_fini(softs->devinfo_p);
6399 
6400 		/* Adjust access and dma attributes for FMA */
6401 		softs->acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
6402 		softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6403 		softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6404 	}
6405 }
6406 
6407 int
6408 aac_check_acc_handle(ddi_acc_handle_t handle)
6409 {
6410 	ddi_fm_error_t de;
6411 
6412 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6413 	return (de.fme_status);
6414 }
6415 
6416 int
6417 aac_check_dma_handle(ddi_dma_handle_t handle)
6418 {
6419 	ddi_fm_error_t de;
6420 
6421 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6422 	return (de.fme_status);
6423 }
6424 
6425 void
6426 aac_fm_ereport(struct aac_softstate *softs, char *detail)
6427 {
6428 	uint64_t ena;
6429 	char buf[FM_MAX_CLASS];
6430 
6431 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6432 	ena = fm_ena_generate(0, FM_ENA_FMT1);
6433 	if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
6434 		ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
6435 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
6436 	}
6437 }
6438 
6439 /*
6440  * Autoconfiguration support
6441  */
6442 static int
6443 aac_parse_devname(char *devnm, int *tgt, int *lun)
6444 {
6445 	char devbuf[SCSI_MAXNAMELEN];
6446 	char *addr;
6447 	char *p,  *tp, *lp;
6448 	long num;
6449 
6450 	/* Parse dev name and address */
6451 	(void) strcpy(devbuf, devnm);
6452 	addr = "";
6453 	for (p = devbuf; *p != '\0'; p++) {
6454 		if (*p == '@') {
6455 			addr = p + 1;
6456 			*p = '\0';
6457 		} else if (*p == ':') {
6458 			*p = '\0';
6459 			break;
6460 		}
6461 	}
6462 
6463 	/* Parse target and lun */
6464 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
6465 		if (*p == ',') {
6466 			lp = p + 1;
6467 			*p = '\0';
6468 			break;
6469 		}
6470 	}
6471 	if (tgt && tp) {
6472 		if (ddi_strtol(tp, NULL, 0x10, &num))
6473 			return (AACERR);
6474 		*tgt = (int)num;
6475 	}
6476 	if (lun && lp) {
6477 		if (ddi_strtol(lp, NULL, 0x10, &num))
6478 			return (AACERR);
6479 		*lun = (int)num;
6480 	}
6481 	return (AACOK);
6482 }
6483 
6484 static dev_info_t *
6485 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
6486 {
6487 	dev_info_t *child = NULL;
6488 	char addr[SCSI_MAXNAMELEN];
6489 	char tmp[MAXNAMELEN];
6490 
6491 	if (tgt < AAC_MAX_LD) {
6492 		if (lun == 0) {
6493 			struct aac_device *dvp = &softs->containers[tgt].dev;
6494 
6495 			child = dvp->dip;
6496 		}
6497 	} else {
6498 		(void) sprintf(addr, "%x,%x", tgt, lun);
6499 		for (child = ddi_get_child(softs->devinfo_p);
6500 		    child; child = ddi_get_next_sibling(child)) {
6501 			/* We don't care about non-persistent node */
6502 			/* We don't care about non-persistent nodes */
6503 				continue;
6504 
6505 			if (aac_name_node(child, tmp, MAXNAMELEN) !=
6506 			    DDI_SUCCESS)
6507 				continue;
6508 			if (strcmp(addr, tmp) == 0)
6509 				break;
6510 		}
6511 	}
6512 	return (child);
6513 }
6514 
6515 static int
6516 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
6517     dev_info_t **dipp)
6518 {
6519 	char *nodename = NULL;
6520 	char **compatible = NULL;
6521 	int ncompatible = 0;
6522 	char *childname;
6523 	dev_info_t *ldip = NULL;
6524 	int tgt = sd->sd_address.a_target;
6525 	int lun = sd->sd_address.a_lun;
6526 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6527 	int rval;
6528 
6529 	DBCALLED(softs, 2);
6530 
6531 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
6532 	    NULL, &nodename, &compatible, &ncompatible);
6533 	if (nodename == NULL) {
6534 		AACDB_PRINT(softs, CE_WARN,
6535 		    "found no compatible driver for t%dL%d", tgt, lun);
6536 		rval = NDI_FAILURE;
6537 		goto finish;
6538 	}
6539 	childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
6540 
6541 	/* Create dev node */
6542 	rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
6543 	    &ldip);
6544 	if (rval == NDI_SUCCESS) {
6545 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
6546 		    != DDI_PROP_SUCCESS) {
6547 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6548 			    "property for t%dL%d (target)", tgt, lun);
6549 			rval = NDI_FAILURE;
6550 			goto finish;
6551 		}
6552 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
6553 		    != DDI_PROP_SUCCESS) {
6554 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6555 			    "property for t%dL%d (lun)", tgt, lun);
6556 			rval = NDI_FAILURE;
6557 			goto finish;
6558 		}
6559 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
6560 		    "compatible", compatible, ncompatible)
6561 		    != DDI_PROP_SUCCESS) {
6562 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6563 			    "property for t%dL%d (compatible)", tgt, lun);
6564 			rval = NDI_FAILURE;
6565 			goto finish;
6566 		}
6567 
6568 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
6569 		if (rval != NDI_SUCCESS) {
6570 			AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
6571 			    tgt, lun);
6572 			ndi_prop_remove_all(ldip);
6573 			(void) ndi_devi_free(ldip);
6574 		}
6575 	}
6576 finish:
6577 	if (dipp)
6578 		*dipp = ldip;
6579 
6580 	scsi_hba_nodename_compatible_free(nodename, compatible);
6581 	return (rval);
6582 }
6583 
6584 /*ARGSUSED*/
6585 static int
6586 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
6587 {
6588 	int tgt = sd->sd_address.a_target;
6589 	int lun = sd->sd_address.a_lun;
6590 
6591 	DBCALLED(softs, 2);
6592 
6593 	if (tgt < AAC_MAX_LD) {
6594 		int rval;
6595 
6596 		if (lun == 0) {
6597 			mutex_enter(&softs->io_lock);
6598 			rval = aac_probe_container(softs, tgt);
6599 			mutex_exit(&softs->io_lock);
6600 			if (rval == AACOK) {
6601 				if (scsi_hba_probe(sd, NULL) ==
6602 				    SCSIPROBE_EXISTS)
6603 					return (NDI_SUCCESS);
6604 			}
6605 		}
6606 		return (NDI_FAILURE);
6607 	} else {
6608 		int dtype;
6609 
6610 		if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
6611 			return (NDI_FAILURE);
6612 
6613 		dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6614 
6615 		AACDB_PRINT(softs, CE_NOTE,
6616 		    "Phys. device found: tgt %d dtype %d: %s",
6617 		    tgt, dtype, sd->sd_inq->inq_vid);
6618 
6619 		/* Only non-DASD exposed */
6620 		if (dtype != DTYPE_RODIRECT /* CDROM */ &&
6621 		    dtype != DTYPE_SEQUENTIAL /* TAPE */ &&
6622 		    dtype != DTYPE_ESI /* SES */)
6623 			return (NDI_FAILURE);
6624 
6625 		AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
6626 		mutex_enter(&softs->io_lock);
6627 		softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
6628 		mutex_exit(&softs->io_lock);
6629 		return (NDI_SUCCESS);
6630 	}
6631 }
6632 
6633 static int
6634 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
6635     dev_info_t **ldip)
6636 {
6637 	struct scsi_device sd;
6638 	dev_info_t *child;
6639 	int rval;
6640 
6641 	DBCALLED(softs, 2);
6642 
6643 	if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
6644 		if (ldip)
6645 			*ldip = child;
6646 		return (NDI_SUCCESS);
6647 	}
6648 
6649 	bzero(&sd, sizeof (struct scsi_device));
6650 	sd.sd_address.a_hba_tran = softs->hba_tran;
6651 	sd.sd_address.a_target = (uint16_t)tgt;
6652 	sd.sd_address.a_lun = (uint8_t)lun;
6653 	if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
6654 		rval = aac_config_child(softs, &sd, ldip);
6655 	scsi_unprobe(&sd);
6656 	return (rval);
6657 }
6658 
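/*
 * Enumerate the LUNs of a physical target with SCMD_REPORT_LUNS
 * (retrying once with a larger buffer if the first response is
 * truncated) and configure each reported LUN; targets that do not
 * support REPORT LUNS fall back to configuring LUN 0 only.  Returns
 * the number of LUNs configured.
 */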
6659 static int
6660 aac_config_tgt(struct aac_softstate *softs, int tgt)
6661 {
6662 	struct scsi_address ap;
6663 	struct buf *bp = NULL;
6664 	int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
6665 	int list_len = 0;
6666 	int lun_total = 0;
6667 	dev_info_t *ldip;
6668 	int i;
6669 
6670 	ap.a_hba_tran = softs->hba_tran;
6671 	ap.a_target = (uint16_t)tgt;
6672 	ap.a_lun = 0;
6673 
6674 	for (i = 0; i < 2; i++) {
6675 		struct scsi_pkt *pkt;
6676 		uchar_t *cdb;
6677 		uchar_t *p;
6678 		uint32_t data;
6679 
6680 		if (bp == NULL) {
6681 			if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
6682 			    buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
6683 				return (AACERR);
6684 		}
6685 		if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
6686 		    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
6687 		    NULL, NULL)) == NULL) {
6688 			scsi_free_consistent_buf(bp);
6689 			return (AACERR);
6690 		}
6691 		cdb = pkt->pkt_cdbp;
6692 		bzero(cdb, CDB_GROUP5);
6693 		cdb[0] = SCMD_REPORT_LUNS;
6694 
6695 		/* Store the allocation length in CDB bytes 6-9 (big-endian) */
6696 		data = buf_len;
6697 		for (p = &cdb[9]; p > &cdb[5]; p--) {
6698 			*p = data & 0xff;
6699 			data >>= 8;
6700 		}
6701 
6702 		if (scsi_poll(pkt) < 0 ||
6703 		    ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
6704 			scsi_destroy_pkt(pkt);
6705 			break;
6706 		}
6707 
6708 		/* Decode the big-endian LUN list length from the response */
6709 		for (p = (uchar_t *)bp->b_un.b_addr;
6710 		    p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
6711 			data <<= 8;
6712 			data |= *p;
6713 		}
6714 		list_len = data;
6715 		if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
6716 			scsi_free_consistent_buf(bp);
6717 			bp = NULL;
6718 			buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
6719 		}
6720 		scsi_destroy_pkt(pkt);
6721 	}
6722 	if (i >= 2) {
6723 		uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
6724 		    AAC_SCSI_RPTLUNS_HEAD_SIZE);
6725 
6726 		for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
6727 			uint16_t lun;
6728 
6729 			/* Determine report luns addressing type */
6730 			switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
6731 			/*
6732 			 * Vendors in the field have been found to be
6733 			 * concatenating bus/target/lun to equal the
6734 			 * complete lun value instead of switching to
6735 			 * flat space addressing
6736 			 */
6737 			case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
6738 			case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
6739 			case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
6740 				lun = ((buf[0] & 0x3f) << 8) | buf[1];
6741 				if (lun > UINT8_MAX) {
6742 					AACDB_PRINT(softs, CE_WARN,
6743 					    "abnormal lun number: %d", lun);
6744 					break;
6745 				}
6746 				if (aac_config_lun(softs, tgt, lun, &ldip) ==
6747 				    NDI_SUCCESS)
6748 					lun_total++;
6749 				break;
6750 			}
6751 
6752 			buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
6753 		}
6754 	} else {
6755 		/* The target may not support SCMD_REPORT_LUNS. */
6756 		if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
6757 			lun_total++;
6758 	}
6759 	scsi_free_consistent_buf(bp);
6760 	return (lun_total);
6761 }
6762 
6763 static void
6764 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
6765 {
6766 	struct aac_device *dvp;
6767 
6768 	mutex_enter(&softs->io_lock);
6769 	dvp = AAC_DEV(softs, tgt);
6770 	if (en)
6771 		dvp->flags |= AAC_DFLAG_CONFIGURING;
6772 	else
6773 		dvp->flags &= ~AAC_DFLAG_CONFIGURING;
6774 	mutex_exit(&softs->io_lock);
6775 }
6776 
6777 static int
6778 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
6779     void *arg, dev_info_t **childp)
6780 {
6781 	struct aac_softstate *softs;
6782 	int circ = 0;
6783 	int rval;
6784 
6785 	if ((softs = ddi_get_soft_state(aac_softstatep,
6786 	    ddi_get_instance(parent))) == NULL)
6787 		return (NDI_FAILURE);
6788 
6789 	/* Commands for bus config should be blocked as the bus is quiesced */
6790 	mutex_enter(&softs->io_lock);
6791 	if (softs->state & AAC_STATE_QUIESCED) {
6792 		AACDB_PRINT(softs, CE_NOTE,
6793 		    "bus_config aborted because bus is quiesced");
6794 		mutex_exit(&softs->io_lock);
6795 		return (NDI_FAILURE);
6796 	}
6797 	mutex_exit(&softs->io_lock);
6798 
6799 	DBCALLED(softs, 1);
6800 
6801 	/* Hold the nexus across the bus_config */
6802 	ndi_devi_enter(parent, &circ);
6803 	switch (op) {
6804 	case BUS_CONFIG_ONE: {
6805 		int tgt, lun;
6806 
6807 		if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
6808 			rval = NDI_FAILURE;
6809 			break;
6810 		}
6811 
6812 		AAC_DEVCFG_BEGIN(softs, tgt);
6813 		rval = aac_config_lun(softs, tgt, lun, childp);
6814 		AAC_DEVCFG_END(softs, tgt);
6815 		break;
6816 	}
6817 
6818 	case BUS_CONFIG_DRIVER:
6819 	case BUS_CONFIG_ALL: {
6820 		uint32_t bus, tgt;
6821 		int index, total;
6822 
6823 		for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
6824 			AAC_DEVCFG_BEGIN(softs, tgt);
6825 			(void) aac_config_lun(softs, tgt, 0, NULL);
6826 			AAC_DEVCFG_END(softs, tgt);
6827 		}
6828 
6829 		/* Config the non-DASD devices connected to the card */
6830 		total = 0;
6831 		index = AAC_MAX_LD;
6832 		for (bus = 0; bus < softs->bus_max; bus++) {
6833 			AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
6834 			for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
6835 				AAC_DEVCFG_BEGIN(softs, index);
6836 				if (aac_config_tgt(softs, index))
6837 					total++;
6838 				AAC_DEVCFG_END(softs, index);
6839 			}
6840 		}
6841 		AACDB_PRINT(softs, CE_CONT,
6842 		    "?Total %d phys. device(s) found", total);
6843 		rval = NDI_SUCCESS;
6844 		break;
6845 	}
6846 	}
6847 
6848 	if (rval == NDI_SUCCESS)
6849 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
6850 	ndi_devi_exit(parent, circ);
6851 	return (rval);
6852 }
6853 
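/*
 * Taskq handler for a queued DR event: online the target's device node
 * if the device has become valid, or reset the adapter and offline the
 * node if the device is no longer valid, then free the event record.
 */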
6854 static void
6855 aac_handle_dr(struct aac_drinfo *drp)
6856 {
6857 	struct aac_softstate *softs = drp->softs;
6858 	struct aac_device *dvp;
6859 	dev_info_t *dip;
6860 	int valid;
6861 	int circ1 = 0;
6862 
6863 	DBCALLED(softs, 1);
6864 
6865 	/* Take a snapshot of the device state under io_lock */
6866 	mutex_enter(&softs->io_lock);
6867 	dvp = AAC_DEV(softs, drp->tgt);
6868 	valid = AAC_DEV_IS_VALID(dvp);
6869 	dip = dvp->dip;
6870 	mutex_exit(&softs->io_lock);
6871 
6872 	switch (drp->event) {
6873 	case AAC_EVT_ONLINE:
6874 	case AAC_EVT_OFFLINE:
6875 		/* Device onlined */
6876 		if (dip == NULL && valid) {
6877 			ndi_devi_enter(softs->devinfo_p, &circ1);
6878 			(void) aac_config_lun(softs, drp->tgt, 0, NULL);
6879 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
6880 			    softs->instance, drp->tgt, drp->lun);
6881 			ndi_devi_exit(softs->devinfo_p, circ1);
6882 		}
6883 		/* Device offlined */
6884 		if (dip && !valid) {
6885 			mutex_enter(&softs->io_lock);
6886 			(void) aac_do_reset(softs);
6887 			mutex_exit(&softs->io_lock);
6888 
6889 			(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
6890 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
6891 			    softs->instance, drp->tgt, drp->lun);
6892 		}
6893 		break;
6894 	}
6895 	kmem_free(drp, sizeof (struct aac_drinfo));
6896 }
6897 
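/*
 * Allocate a DR event record and dispatch it to the driver taskq for
 * asynchronous handling by aac_handle_dr(). Returns AACERR if no taskq
 * is available, the allocation fails, or the dispatch fails.
 */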
6898 static int
6899 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event)
6900 {
6901 	struct aac_drinfo *drp;
6902 
6903 	DBCALLED(softs, 1);
6904 
6905 	if (softs->taskq == NULL ||
6906 	    (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL)
6907 		return (AACERR);
6908 
6909 	drp->softs = softs;
6910 	drp->tgt = tgt;
6911 	drp->lun = lun;
6912 	drp->event = event;
6913 	if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr,
6914 	    drp, DDI_NOSLEEP)) != DDI_SUCCESS) {
6915 		AACDB_PRINT(softs, CE_WARN, "DR task start failed");
6916 		kmem_free(drp, sizeof (struct aac_drinfo));
6917 		return (AACERR);
6918 	}
6919 	return (AACOK);
6920 }
6921 
6922 #ifdef DEBUG
6923 
6924 /* -------------------------debug aid functions-------------------------- */
6925 
6926 #define	AAC_FIB_CMD_KEY_STRINGS \
6927 	TestCommandResponse, "TestCommandResponse", \
6928 	TestAdapterCommand, "TestAdapterCommand", \
6929 	LastTestCommand, "LastTestCommand", \
6930 	ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
6931 	ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
6932 	ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
6933 	ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
6934 	ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
6935 	ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
6936 	ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
6937 	ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
6938 	InterfaceShutdown, "InterfaceShutdown", \
6939 	DmaCommandFib, "DmaCommandFib", \
6940 	StartProfile, "StartProfile", \
6941 	TermProfile, "TermProfile", \
6942 	SpeedTest, "SpeedTest", \
6943 	TakeABreakPt, "TakeABreakPt", \
6944 	RequestPerfData, "RequestPerfData", \
6945 	SetInterruptDefTimer, "SetInterruptDefTimer", \
6946 	SetInterruptDefCount, "SetInterruptDefCount", \
6947 	GetInterruptDefStatus, "GetInterruptDefStatus", \
6948 	LastCommCommand, "LastCommCommand", \
6949 	NuFileSystem, "NuFileSystem", \
6950 	UFS, "UFS", \
6951 	HostFileSystem, "HostFileSystem", \
6952 	LastFileSystemCommand, "LastFileSystemCommand", \
6953 	ContainerCommand, "ContainerCommand", \
6954 	ContainerCommand64, "ContainerCommand64", \
6955 	ClusterCommand, "ClusterCommand", \
6956 	ScsiPortCommand, "ScsiPortCommand", \
6957 	ScsiPortCommandU64, "ScsiPortCommandU64", \
6958 	AifRequest, "AifRequest", \
6959 	CheckRevision, "CheckRevision", \
6960 	FsaHostShutdown, "FsaHostShutdown", \
6961 	RequestAdapterInfo, "RequestAdapterInfo", \
6962 	IsAdapterPaused, "IsAdapterPaused", \
6963 	SendHostTime, "SendHostTime", \
6964 	LastMiscCommand, "LastMiscCommand"
6965 
6966 #define	AAC_CTVM_SUBCMD_KEY_STRINGS \
6967 	VM_Null, "VM_Null", \
6968 	VM_NameServe, "VM_NameServe", \
6969 	VM_ContainerConfig, "VM_ContainerConfig", \
6970 	VM_Ioctl, "VM_Ioctl", \
6971 	VM_FilesystemIoctl, "VM_FilesystemIoctl", \
6972 	VM_CloseAll, "VM_CloseAll", \
6973 	VM_CtBlockRead, "VM_CtBlockRead", \
6974 	VM_CtBlockWrite, "VM_CtBlockWrite", \
6975 	VM_SliceBlockRead, "VM_SliceBlockRead", \
6976 	VM_SliceBlockWrite, "VM_SliceBlockWrite", \
6977 	VM_DriveBlockRead, "VM_DriveBlockRead", \
6978 	VM_DriveBlockWrite, "VM_DriveBlockWrite", \
6979 	VM_EnclosureMgt, "VM_EnclosureMgt", \
6980 	VM_Unused, "VM_Unused", \
6981 	VM_CtBlockVerify, "VM_CtBlockVerify", \
6982 	VM_CtPerf, "VM_CtPerf", \
6983 	VM_CtBlockRead64, "VM_CtBlockRead64", \
6984 	VM_CtBlockWrite64, "VM_CtBlockWrite64", \
6985 	VM_CtBlockVerify64, "VM_CtBlockVerify64", \
6986 	VM_CtHostRead64, "VM_CtHostRead64", \
6987 	VM_CtHostWrite64, "VM_CtHostWrite64", \
6988 	VM_NameServe64, "VM_NameServe64"
6989 
6990 #define	AAC_CT_SUBCMD_KEY_STRINGS \
6991 	CT_Null, "CT_Null", \
6992 	CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
6993 	CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
6994 	CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
6995 	CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
6996 	CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
6997 	CT_WRITE_MBR, "CT_WRITE_MBR", \
6998 	CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
6999 	CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7000 	CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7001 	CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7002 	CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7003 	CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7004 	CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7005 	CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7006 	CT_READ_MBR, "CT_READ_MBR", \
7007 	CT_READ_PARTITION, "CT_READ_PARTITION", \
7008 	CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7009 	CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7010 	CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7011 	CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7012 	CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7013 	CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7014 	CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7015 	CT_UNMIRROR, "CT_UNMIRROR", \
7016 	CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7017 	CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7018 	CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7019 	CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7020 	CT_MOVE2, "CT_MOVE2", \
7021 	CT_SPLIT, "CT_SPLIT", \
7022 	CT_SPLIT2, "CT_SPLIT2", \
7023 	CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7024 	CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7025 	CT_RECONFIG, "CT_RECONFIG", \
7026 	CT_BREAK2, "CT_BREAK2", \
7027 	CT_BREAK, "CT_BREAK", \
7028 	CT_MERGE2, "CT_MERGE2", \
7029 	CT_MERGE, "CT_MERGE", \
7030 	CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7031 	CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7032 	CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7033 	CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7034 	CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7035 	CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7036 	CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7037 	CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7038 	CT_COPY_STATUS, "CT_COPY_STATUS", \
7039 	CT_COPY, "CT_COPY", \
7040 	CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7041 	CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7042 	CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7043 	CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7044 	CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7045 	CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7046 	CT_SET, "CT_SET", \
7047 	CT_GET, "CT_GET", \
7048 	CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7049 	CT_GET_DELAY, "CT_GET_DELAY", \
7050 	CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7051 	CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7052 	CT_SCRUB, "CT_SCRUB", \
7053 	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7054 	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7055 	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7056 	CT_PAUSE_IO, "CT_PAUSE_IO", \
7057 	CT_RELEASE_IO, "CT_RELEASE_IO", \
7058 	CT_SCRUB2, "CT_SCRUB2", \
7059 	CT_MCHECK, "CT_MCHECK", \
7060 	CT_CORRUPT, "CT_CORRUPT", \
7061 	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7062 	CT_PROMOTE, "CT_PROMOTE", \
7063 	CT_SET_DEAD, "CT_SET_DEAD", \
7064 	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7065 	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7066 	CT_GET_PARAM, "CT_GET_PARAM", \
7067 	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7068 	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7069 	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7070 	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7071 	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7072 	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7073 	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7074 	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7075 	CT_STOP_DATA, "CT_STOP_DATA", \
7076 	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7077 	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7078 	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7079 	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7080 	CT_GET_TIME, "CT_GET_TIME", \
7081 	CT_READ_DATA, "CT_READ_DATA", \
7082 	CT_CTR, "CT_CTR", \
7083 	CT_CTL, "CT_CTL", \
7084 	CT_DRAINIO, "CT_DRAINIO", \
7085 	CT_RELEASEIO, "CT_RELEASEIO", \
7086 	CT_GET_NVRAM, "CT_GET_NVRAM", \
7087 	CT_GET_MEMORY, "CT_GET_MEMORY", \
7088 	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7089 	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7090 	CT_NV_ZERO, "CT_NV_ZERO", \
7091 	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7092 	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7093 	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7094 	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7095 	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7096 	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7097 	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7098 	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7099 	CT_MONITOR, "CT_MONITOR", \
7100 	CT_GEN_MORPH, "CT_GEN_MORPH", \
7101 	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7102 	CT_CACHE_SET, "CT_CACHE_SET", \
7103 	CT_CACHE_STAT, "CT_CACHE_STAT", \
7104 	CT_TRACE_START, "CT_TRACE_START", \
7105 	CT_TRACE_STOP, "CT_TRACE_STOP", \
7106 	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7107 	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7108 	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7109 	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7110 	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7111 	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7112 	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7113 	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7114 	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7115 	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7116 	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7117 	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7118 	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7119 	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7120 	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7121 	CT_READ_NAME, "CT_READ_NAME", \
7122 	CT_WRITE_NAME, "CT_WRITE_NAME", \
7123 	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7124 	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7125 	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7126 	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7127 	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7128 	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7129 	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7130 	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7131 	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7132 	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7133 	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7134 	CT_FLUSH, "CT_FLUSH", \
7135 	CT_REBUILD, "CT_REBUILD", \
7136 	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7137 	CT_RESTART, "CT_RESTART", \
7138 	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7139 	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7140 	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7141 	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7142 	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7143 	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7144 	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7145 	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7146 	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7147 	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7148 	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7149 	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7150 	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7151 	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7152 	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7153 	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7154 	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7155 	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7156 	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7157 	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7158 	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7159 	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7160 	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7161 	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7162 	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7163 	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7164 	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7165 	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7166 	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7167 	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7168 	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7169 	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7170 	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7171 	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7172 	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7173 	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7174 	CT_IS_CONTAINER_MEATADATA_STANDARD, \
7175 	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7176 	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7177 	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7178 	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7179 	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7180 	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7181 	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7182 	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7183 	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7184 	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7185 	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7186 	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7187 	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7188 	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7189 	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7190 	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7191 	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7192 	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7193 	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7194 	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7195 
7196 #define	AAC_CL_SUBCMD_KEY_STRINGS \
7197 	CL_NULL, "CL_NULL", \
7198 	DS_INIT, "DS_INIT", \
7199 	DS_RESCAN, "DS_RESCAN", \
7200 	DS_CREATE, "DS_CREATE", \
7201 	DS_DELETE, "DS_DELETE", \
7202 	DS_ADD_DISK, "DS_ADD_DISK", \
7203 	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7204 	DS_MOVE_DISK, "DS_MOVE_DISK", \
7205 	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7206 	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7207 	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7208 	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7209 	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7210 	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7211 	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7212 	DS_GET_DRIVES, "DS_GET_DRIVES", \
7213 	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7214 	DS_ONLINE, "DS_ONLINE", \
7215 	DS_OFFLINE, "DS_OFFLINE", \
7216 	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7217 	DS_FSAPRINT, "DS_FSAPRINT", \
7218 	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7219 	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7220 	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7221 	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7222 	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7223 	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7224 	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7225 	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7226 	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7227 	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7228 	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7229 	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7230 	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7231 	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7232 	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7233 	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7234 	CQ_QUORUM_OP, "CQ_QUORUM_OP"
7235 
7236 #define	AAC_AIF_SUBCMD_KEY_STRINGS \
7237 	AifCmdEventNotify, "AifCmdEventNotify", \
7238 	AifCmdJobProgress, "AifCmdJobProgress", \
7239 	AifCmdAPIReport, "AifCmdAPIReport", \
7240 	AifCmdDriverNotify, "AifCmdDriverNotify", \
7241 	AifReqJobList, "AifReqJobList", \
7242 	AifReqJobsForCtr, "AifReqJobsForCtr", \
7243 	AifReqJobsForScsi, "AifReqJobsForScsi", \
7244 	AifReqJobReport, "AifReqJobReport", \
7245 	AifReqTerminateJob, "AifReqTerminateJob", \
7246 	AifReqSuspendJob, "AifReqSuspendJob", \
7247 	AifReqResumeJob, "AifReqResumeJob", \
7248 	AifReqSendAPIReport, "AifReqSendAPIReport", \
7249 	AifReqAPIJobStart, "AifReqAPIJobStart", \
7250 	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7251 	AifReqAPIJobFinish, "AifReqAPIJobFinish"
7252 
7253 #define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
7254 	Reserved_IOCTL, "Reserved_IOCTL", \
7255 	GetDeviceHandle, "GetDeviceHandle", \
7256 	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7257 	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7258 	RescanBus, "RescanBus", \
7259 	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7260 	GetDeviceCapacity, "GetDeviceCapacity", \
7261 	GetContainerProbeInfo, "GetContainerProbeInfo", \
7262 	GetRequestedMemorySize, "GetRequestedMemorySize", \
7263 	GetBusInfo, "GetBusInfo", \
7264 	GetVendorSpecific, "GetVendorSpecific", \
7265 	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7266 	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7267 	SetupExtendedCounters, "SetupExtendedCounters", \
7268 	GetPerformanceCounters, "GetPerformanceCounters", \
7269 	ResetPerformanceCounters, "ResetPerformanceCounters", \
7270 	ReadModePage, "ReadModePage", \
7271 	WriteModePage, "WriteModePage", \
7272 	ReadDriveParameter, "ReadDriveParameter", \
7273 	WriteDriveParameter, "WriteDriveParameter", \
7274 	ResetAdapter, "ResetAdapter", \
7275 	ResetBus, "ResetBus", \
7276 	ResetBusDevice, "ResetBusDevice", \
7277 	ExecuteSrb, "ExecuteSrb", \
7278 	Create_IO_Task, "Create_IO_Task", \
7279 	Delete_IO_Task, "Delete_IO_Task", \
7280 	Get_IO_Task_Info, "Get_IO_Task_Info", \
7281 	Check_Task_Progress, "Check_Task_Progress", \
7282 	InjectError, "InjectError", \
7283 	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7284 	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7285 	GetDeviceStatus, "GetDeviceStatus", \
7286 	ClearDeviceStatus, "ClearDeviceStatus", \
7287 	DiskSpinControl, "DiskSpinControl", \
7288 	DiskSmartControl, "DiskSmartControl", \
7289 	WriteSame, "WriteSame", \
7290 	ReadWriteLong, "ReadWriteLong", \
7291 	FormatUnit, "FormatUnit", \
7292 	TargetDeviceControl, "TargetDeviceControl", \
7293 	TargetChannelControl, "TargetChannelControl", \
7294 	FlashNewCode, "FlashNewCode", \
7295 	DiskCheck, "DiskCheck", \
7296 	RequestSense, "RequestSense", \
7297 	DiskPERControl, "DiskPERControl", \
7298 	Read10, "Read10", \
7299 	Write10, "Write10"
7300 
7301 #define	AAC_AIFEN_KEY_STRINGS \
7302 	AifEnGeneric, "Generic", \
7303 	AifEnTaskComplete, "TaskComplete", \
7304 	AifEnConfigChange, "Config change", \
7305 	AifEnContainerChange, "Container change", \
7306 	AifEnDeviceFailure, "device failed", \
7307 	AifEnMirrorFailover, "Mirror failover", \
7308 	AifEnContainerEvent, "container event", \
7309 	AifEnFileSystemChange, "File system changed", \
7310 	AifEnConfigPause, "Container pause event", \
7311 	AifEnConfigResume, "Container resume event", \
7312 	AifEnFailoverChange, "Failover space assignment changed", \
7313 	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7314 	AifEnEnclosureManagement, "Enclosure management event", \
7315 	AifEnBatteryEvent, "battery event", \
7316 	AifEnAddContainer, "Add container", \
7317 	AifEnDeleteContainer, "Delete container", \
7318 	AifEnSMARTEvent, "SMART Event", \
7319 	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7320 	AifEnClusterEvent, "cluster event", \
7321 	AifEnDiskSetEvent, "disk set event occurred", \
7322 	AifDenMorphComplete, "morph operation completed", \
7323 	AifDenVolumeExtendComplete, "VolumeExtendComplete"
7324 
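/* Map numeric command/event codes to printable names for debug output */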
7325 struct aac_key_strings {
7326 	int key;
7327 	char *message;
7328 };
7329 
7330 extern struct scsi_key_strings scsi_cmds[];
7331 
7332 static struct aac_key_strings aac_fib_cmds[] = {
7333 	AAC_FIB_CMD_KEY_STRINGS,
7334 	-1,			NULL
7335 };
7336 
7337 static struct aac_key_strings aac_ctvm_subcmds[] = {
7338 	AAC_CTVM_SUBCMD_KEY_STRINGS,
7339 	-1,			NULL
7340 };
7341 
7342 static struct aac_key_strings aac_ct_subcmds[] = {
7343 	AAC_CT_SUBCMD_KEY_STRINGS,
7344 	-1,			NULL
7345 };
7346 
7347 static struct aac_key_strings aac_cl_subcmds[] = {
7348 	AAC_CL_SUBCMD_KEY_STRINGS,
7349 	-1,			NULL
7350 };
7351 
7352 static struct aac_key_strings aac_aif_subcmds[] = {
7353 	AAC_AIF_SUBCMD_KEY_STRINGS,
7354 	-1,			NULL
7355 };
7356 
7357 static struct aac_key_strings aac_ioctl_subcmds[] = {
7358 	AAC_IOCTL_SUBCMD_KEY_STRINGS,
7359 	-1,			NULL
7360 };
7361 
7362 static struct aac_key_strings aac_aifens[] = {
7363 	AAC_AIFEN_KEY_STRINGS,
7364 	-1,			NULL
7365 };
7366 
7367 /*
7368  * The following function comes from Adaptec:
7369  *
7370  * Get the firmware print buffer parameters from the firmware,
7371  * if the command was successful map in the address.
7372  */
7373 static int
7374 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7375 {
7376 	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7377 	    0, 0, 0, 0, NULL) == AACOK) {
7378 		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7379 		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7380 		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7381 		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7382 
7383 		if (mondrv_buf_size) {
7384 			uint32_t offset = mondrv_buf_paddrl - \
7385 			    softs->pci_mem_base_paddr;
7386 
7387 			/*
7388 			 * See if the address is already mapped in, and
7389 			 * if so set it up from the base address
7390 			 */
7391 			if ((mondrv_buf_paddrh == 0) &&
7392 			    (offset + mondrv_buf_size < softs->map_size)) {
7393 				mutex_enter(&aac_prt_mutex);
7394 				softs->debug_buf_offset = offset;
7395 				softs->debug_header_size = mondrv_hdr_size;
7396 				softs->debug_buf_size = mondrv_buf_size;
7397 				softs->debug_fw_flags = 0;
7398 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7399 				mutex_exit(&aac_prt_mutex);
7400 
7401 				return (AACOK);
7402 			}
7403 		}
7404 	}
7405 	return (AACERR);
7406 }
7407 
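/*
 * Return non-zero if the given debug flag is set and at least one of
 * the firmware or kernel print destinations is enabled.
 */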
7408 int
7409 aac_dbflag_on(struct aac_softstate *softs, int flag)
7410 {
7411 	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
7412 
7413 	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
7414 	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
7415 }
7416 
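/*
 * Send the formatted message in aac_prt_buf to cmn_err(), optionally
 * prefixed with the adapter header and/or a system log level character.
 */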
7417 static void
7418 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
7419 {
7420 	if (noheader) {
7421 		if (sl) {
7422 			aac_fmt[0] = sl;
7423 			cmn_err(lev, aac_fmt, aac_prt_buf);
7424 		} else {
7425 			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
7426 		}
7427 	} else {
7428 		if (sl) {
7429 			aac_fmt_header[0] = sl;
7430 			cmn_err(lev, aac_fmt_header,
7431 			    softs->vendor_name, softs->instance,
7432 			    aac_prt_buf);
7433 		} else {
7434 			cmn_err(lev, &aac_fmt_header[1],
7435 			    softs->vendor_name, softs->instance,
7436 			    aac_prt_buf);
7437 		}
7438 	}
7439 }
7440 
7441 /*
7442  * The following function comes from Adaptec:
7443  *
7444  * Format and print out the data passed in to UART or console
7445  * as specified by debug flags.
7446  */
7447 void
7448 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
7449 {
7450 	va_list args;
7451 	char sl; /* system log character */
7452 
7453 	mutex_enter(&aac_prt_mutex);
7454 	/* Set up parameters and call sprintf function to format the data */
7455 	if (strchr("^!?", fmt[0]) == NULL) {
7456 		sl = 0;
7457 	} else {
7458 		sl = fmt[0];
7459 		fmt++;
7460 	}
7461 	va_start(args, fmt);
7462 	(void) vsprintf(aac_prt_buf, fmt, args);
7463 	va_end(args);
7464 
7465 	/* Make sure the softs structure has been passed in for this section */
7466 	if (softs) {
7467 		/* If we are set up for a Firmware print */
7468 		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
7469 		    (softs->debug_buf_size)) {
7470 			uint32_t count, i;
7471 
7472 			/* Make sure the string size is within boundaries */
7473 			count = strlen(aac_prt_buf);
7474 			if (count > softs->debug_buf_size)
7475 				count = (uint16_t)softs->debug_buf_size;
7476 
7477 			/*
7478 			 * Wait for no more than AAC_PRINT_TIMEOUT for the
7479 			 * previous message length to clear (the handshake).
7480 			 */
7481 			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
7482 				if (!PCI_MEM_GET32(softs,
7483 				    softs->debug_buf_offset + \
7484 				    AAC_FW_DBG_STRLEN_OFFSET))
7485 					break;
7486 
7487 				drv_usecwait(1000);
7488 			}
7489 
7490 			/*
7491 			 * If the length is clear, copy over the message, the
7492 			 * flags, and the length. Make sure the length is the
7493 			 * last because that is the signal for the Firmware to
7494 			 * pick it up.
7495 			 */
7496 			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
7497 			    AAC_FW_DBG_STRLEN_OFFSET)) {
7498 				PCI_MEM_REP_PUT8(softs,
7499 				    softs->debug_buf_offset + \
7500 				    softs->debug_header_size,
7501 				    aac_prt_buf, count);
7502 				PCI_MEM_PUT32(softs,
7503 				    softs->debug_buf_offset + \
7504 				    AAC_FW_DBG_FLAGS_OFFSET,
7505 				    softs->debug_fw_flags);
7506 				PCI_MEM_PUT32(softs,
7507 				    softs->debug_buf_offset + \
7508 				    AAC_FW_DBG_STRLEN_OFFSET, count);
7509 			} else {
7510 				cmn_err(CE_WARN, "UART output fail");
7511 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7512 			}
7513 		}
7514 
7515 		/*
7516 		 * If the Kernel Debug Print flag is set, send it off
7517 		 * to the Kernel Debugger
7518 		 */
7519 		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7520 			aac_cmn_err(softs, lev, sl,
7521 			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
7522 	} else {
7523 		/* Driver not initialized yet, no firmware or header output */
7524 		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7525 			aac_cmn_err(softs, lev, sl, 1);
7526 	}
7527 	mutex_exit(&aac_prt_mutex);
7528 }
7529 
7530 /*
7531  * Translate command number to description string
7532  */
7533 static char *
7534 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
7535 {
7536 	int i;
7537 
7538 	for (i = 0; cmdlist[i].key != -1; i++) {
7539 		if (cmd == cmdlist[i].key)
7540 			return (cmdlist[i].message);
7541 	}
7542 	return (NULL);
7543 }
7544 
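/*
 * Decode and print a SCSI command for debugging, including the LBA and
 * block count for READ/WRITE CDBs of each command group.
 */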
7545 static void
7546 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
7547 {
7548 	struct scsi_pkt *pkt = acp->pkt;
7549 	struct scsi_address *ap = &pkt->pkt_address;
7550 	int is_pd = 0;
7551 	int ctl = ddi_get_instance(softs->devinfo_p);
7552 	int tgt = ap->a_target;
7553 	int lun = ap->a_lun;
7554 	union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
7555 	uchar_t cmd = cdbp->scc_cmd;
7556 	char *desc;
7557 
7558 	if (tgt >= AAC_MAX_LD) {
7559 		is_pd = 1;
7560 		ctl = ((struct aac_nondasd *)acp->dvp)->bus;
7561 		tgt = ((struct aac_nondasd *)acp->dvp)->tid;
7562 		lun = 0;
7563 	}
7564 
7565 	if ((desc = aac_cmd_name(cmd,
7566 	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
7567 		aac_printf(softs, CE_NOTE,
7568 		    "SCMD> Unknown(0x%02x) --> c%dt%dL%d %s",
7569 		    cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
7570 		return;
7571 	}
7572 
7573 	switch (cmd) {
7574 	case SCMD_READ:
7575 	case SCMD_WRITE:
7576 		aac_printf(softs, CE_NOTE,
7577 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7578 		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
7579 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7580 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7581 		break;
7582 	case SCMD_READ_G1:
7583 	case SCMD_WRITE_G1:
7584 		aac_printf(softs, CE_NOTE,
7585 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7586 		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
7587 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7588 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7589 		break;
7590 	case SCMD_READ_G4:
7591 	case SCMD_WRITE_G4:
7592 		aac_printf(softs, CE_NOTE,
7593 		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
7594 		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
7595 		    GETG4COUNT(cdbp),
7596 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7597 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7598 		break;
7599 	case SCMD_READ_G5:
7600 	case SCMD_WRITE_G5:
7601 		aac_printf(softs, CE_NOTE,
7602 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7603 		    desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
7604 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7605 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7606 		break;
7607 	default:
7608 		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
7609 		    desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
7610 	}
7611 }
7612 
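/*
 * Dump the header and command-specific payload of a FIB for debugging,
 * using the key-string tables above to translate command codes.
 */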
7613 void
7614 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
7615 {
7616 	struct aac_cmd *acp = slotp->acp;
7617 	struct aac_fib *fibp = slotp->fibp;
7618 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
7619 	uint16_t fib_size;
7620 	uint32_t fib_cmd, sub_cmd;
7621 	char *cmdstr, *subcmdstr;
7622 	char *caller;
7623 	int i;
7624 
7625 	if (acp) {
7626 		if (!(softs->debug_fib_flags & acp->fib_flags))
7627 			return;
7628 		if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
7629 			caller = "SCMD";
7630 		else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
7631 			caller = "IOCTL";
7632 		else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
7633 			caller = "SRB";
7634 		else
7635 			return;
7636 	} else {
7637 		if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
7638 			return;
7639 		caller = "SYNC";
7640 	}
7641 
7642 	fib_cmd = ddi_get16(acc, &fibp->Header.Command);
7643 	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
7644 	sub_cmd = (uint32_t)-1;
7645 	subcmdstr = NULL;
7646 
7647 	/* Print FIB header */
7648 	if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
7649 		aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
7650 		aac_printf(softs, CE_NOTE, "     XferState  %d",
7651 		    ddi_get32(acc, &fibp->Header.XferState));
7652 		aac_printf(softs, CE_NOTE, "     Command    %d",
7653 		    ddi_get16(acc, &fibp->Header.Command));
7654 		aac_printf(softs, CE_NOTE, "     StructType %d",
7655 		    ddi_get8(acc, &fibp->Header.StructType));
7656 		aac_printf(softs, CE_NOTE, "     Flags      0x%x",
7657 		    ddi_get8(acc, &fibp->Header.Flags));
7658 		aac_printf(softs, CE_NOTE, "     Size       %d",
7659 		    ddi_get16(acc, &fibp->Header.Size));
7660 		aac_printf(softs, CE_NOTE, "     SenderSize %d",
7661 		    ddi_get16(acc, &fibp->Header.SenderSize));
7662 		aac_printf(softs, CE_NOTE, "     SenderAddr 0x%x",
7663 		    ddi_get32(acc, &fibp->Header.SenderFibAddress));
7664 		aac_printf(softs, CE_NOTE, "     RcvrAddr   0x%x",
7665 		    ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
7666 		aac_printf(softs, CE_NOTE, "     SenderData 0x%x",
7667 		    ddi_get32(acc, &fibp->Header.SenderData));
7668 	}
7669 
7670 	/* Print FIB data */
7671 	switch (fib_cmd) {
7672 	case ContainerCommand:
7673 		sub_cmd = ddi_get32(acc,
7674 		    (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
7675 		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
7676 		if (subcmdstr == NULL)
7677 			break;
7678 
7679 		switch (sub_cmd) {
7680 		case VM_ContainerConfig: {
7681 			struct aac_Container *pContainer =
7682 			    (struct aac_Container *)fibp->data;
7683 
7684 			fib_cmd = sub_cmd;
7685 			cmdstr = subcmdstr;
7686 			sub_cmd = (uint32_t)-1;
7687 			subcmdstr = NULL;
7688 
7689 			sub_cmd = ddi_get32(acc,
7690 			    &pContainer->CTCommand.command);
7691 			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
7692 			if (subcmdstr == NULL)
7693 				break;
7694 			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
7695 			    subcmdstr,
7696 			    ddi_get32(acc, &pContainer->CTCommand.param[0]),
7697 			    ddi_get32(acc, &pContainer->CTCommand.param[1]),
7698 			    ddi_get32(acc, &pContainer->CTCommand.param[2]));
7699 			return;
7700 		}
7701 
7702 		case VM_Ioctl:
7703 			fib_cmd = sub_cmd;
7704 			cmdstr = subcmdstr;
7705 			sub_cmd = (uint32_t)-1;
7706 			subcmdstr = NULL;
7707 
7708 			sub_cmd = ddi_get32(acc,
7709 			    (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
7710 			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
7711 			break;
7712 
7713 		case VM_CtBlockRead:
7714 		case VM_CtBlockWrite: {
7715 			struct aac_blockread *br =
7716 			    (struct aac_blockread *)fibp->data;
7717 			struct aac_sg_table *sg = &br->SgMap;
7718 			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7719 
7720 			aac_printf(softs, CE_NOTE,
7721 			    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7722 			    ddi_get32(acc, &br->ContainerId),
7723 			    ddi_get32(acc, &br->BlockNumber),
7724 			    ddi_get32(acc, &br->ByteCount));
7725 			for (i = 0; i < sgcount; i++)
7726 				aac_printf(softs, CE_NOTE,
7727 				    "     %d: 0x%08x/%d", i,
7728 				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
7729 				    ddi_get32(acc, &sg->SgEntry[i]. \
7730 				    SgByteCount));
7731 			return;
7732 		}
7733 		}
7734 		break;
7735 
7736 	case ContainerCommand64: {
7737 		struct aac_blockread64 *br =
7738 		    (struct aac_blockread64 *)fibp->data;
7739 		struct aac_sg_table64 *sg = &br->SgMap64;
7740 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7741 		uint64_t sgaddr;
7742 
7743 		sub_cmd = ddi_get32(acc, &br->Command);
7744 		subcmdstr = NULL;
7745 		if (sub_cmd == VM_CtHostRead64)
7746 			subcmdstr = "VM_CtHostRead64";
7747 		else if (sub_cmd == VM_CtHostWrite64)
7748 			subcmdstr = "VM_CtHostWrite64";
7749 		else
7750 			break;
7751 
7752 		aac_printf(softs, CE_NOTE,
7753 		    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7754 		    ddi_get16(acc, &br->ContainerId),
7755 		    ddi_get32(acc, &br->BlockNumber),
7756 		    ddi_get16(acc, &br->SectorCount));
7757 		for (i = 0; i < sgcount; i++) {
7758 			sgaddr = ddi_get64(acc,
7759 			    &sg->SgEntry64[i].SgAddress);
7760 			aac_printf(softs, CE_NOTE,
7761 			    "     %d: 0x%08x.%08x/%d", i,
7762 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7763 			    ddi_get32(acc, &sg->SgEntry64[i]. \
7764 			    SgByteCount));
7765 		}
7766 		return;
7767 	}
7768 
7769 	case RawIo: {
7770 		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
7771 		struct aac_sg_tableraw *sg = &io->SgMapRaw;
7772 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7773 		uint64_t sgaddr;
7774 
7775 		aac_printf(softs, CE_NOTE,
7776 		    "FIB> RawIo Container %d  0x%llx/%d 0x%x",
7777 		    ddi_get16(acc, &io->ContainerId),
7778 		    ddi_get64(acc, &io->BlockNumber),
7779 		    ddi_get32(acc, &io->ByteCount),
7780 		    ddi_get16(acc, &io->Flags));
7781 		for (i = 0; i < sgcount; i++) {
7782 			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
7783 			aac_printf(softs, CE_NOTE, "     %d: 0x%08x.%08x/%d", i,
7784 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7785 			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
7786 		}
7787 		return;
7788 	}
7789 
7790 	case ClusterCommand:
7791 		sub_cmd = ddi_get32(acc,
7792 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7793 		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
7794 		break;
7795 
7796 	case AifRequest:
7797 		sub_cmd = ddi_get32(acc,
7798 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7799 		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
7800 		break;
7801 
7802 	default:
7803 		break;
7804 	}
7805 
7806 	fib_size = ddi_get16(acc, &(fibp->Header.Size));
7807 	if (subcmdstr)
7808 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7809 		    subcmdstr, fib_size);
7810 	else if (cmdstr && sub_cmd == (uint32_t)-1)
7811 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7812 		    cmdstr, fib_size);
7813 	else if (cmdstr)
7814 		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
7815 		    cmdstr, sub_cmd, fib_size);
7816 	else
7817 		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
7818 		    fib_cmd, fib_size);
7819 }
7820 
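/*
 * Decode and print an AIF (adapter initiated FIB) event for debugging.
 */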
7821 static void
7822 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
7823 {
7824 	int aif_command;
7825 	uint32_t aif_seqnumber;
7826 	int aif_en_type;
7827 	char *str;
7828 
7829 	aif_command = LE_32(aif->command);
7830 	aif_seqnumber = LE_32(aif->seqNumber);
7831 	aif_en_type = LE_32(aif->data.EN.type);
7832 
7833 	switch (aif_command) {
7834 	case AifCmdEventNotify:
7835 		str = aac_cmd_name(aif_en_type, aac_aifens);
7836 		if (str)
7837 			aac_printf(softs, CE_NOTE, "AIF! %s", str);
7838 		else
7839 			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
7840 			    aif_en_type);
7841 		break;
7842 
7843 	case AifCmdJobProgress:
7844 		switch (LE_32(aif->data.PR[0].status)) {
7845 		case AifJobStsSuccess:
7846 			str = "success"; break;
7847 		case AifJobStsFinished:
7848 			str = "finished"; break;
7849 		case AifJobStsAborted:
7850 			str = "aborted"; break;
7851 		case AifJobStsFailed:
7852 			str = "failed"; break;
7853 		case AifJobStsSuspended:
7854 			str = "suspended"; break;
7855 		case AifJobStsRunning:
7856 			str = "running"; break;
7857 		default:
7858 			str = "unknown"; break;
7859 		}
7860 		aac_printf(softs, CE_NOTE,
7861 		    "AIF! JobProgress (%d) - %s (%d, %d)",
7862 		    aif_seqnumber, str,
7863 		    LE_32(aif->data.PR[0].currentTick),
7864 		    LE_32(aif->data.PR[0].finalTick));
7865 		break;
7866 
7867 	case AifCmdAPIReport:
7868 		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
7869 		    aif_seqnumber);
7870 		break;
7871 
7872 	case AifCmdDriverNotify:
7873 		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
7874 		    aif_seqnumber);
7875 		break;
7876 
7877 	default:
7878 		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
7879 		    aif_command, aif_seqnumber);
7880 		break;
7881 	}
7882 }
7883 
7884 #endif /* DEBUG */
7885