xref: /titanic_41/usr/src/uts/common/io/aac/aac.c (revision fc0105de770cee6a3fd91de949b9927fba6e894b)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-08 Adaptec, Inc.
8  * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
49 
50 /*
51  * FMA header files
52  */
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
57 
58 /*
59  * For minor nodes created by the SCSA framework, minor numbers are
60  * formed by left-shifting instance by INST_MINOR_SHIFT and OR in a
61  * number less than 64.
62  *
63  * To support cfgadm, need to confirm the SCSA framework by creating
64  * devctl/scsi and driver specific minor nodes under SCSA format,
65  * and calling scsi_hba_xxx() functions aacordingly.
66  */
67 
68 #define	AAC_MINOR		32
69 #define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
71 #define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
72 
73 #define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
74 #define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define	AAC_PD(t)		((t) - AAC_MAX_LD)
79 #define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
80 				&(softs)->containers[(t)].dev : \
81 				((t) < AAC_MAX_DEV(softs)) ? \
82 				&(softs)->nondasds[AAC_PD(t)].dev : NULL)
83 #define	AAC_DEVCFG_BEGIN(softs, tgt) \
84 				aac_devcfg((softs), (tgt), 1)
85 #define	AAC_DEVCFG_END(softs, tgt) \
86 				aac_devcfg((softs), (tgt), 0)
87 #define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
88 #define	AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
89 		if (!(cond)) { \
90 			int count = (timeout) * 10; \
91 			while (count) { \
92 				drv_usecwait(100); \
93 				if (cond) \
94 					break; \
95 				count--; \
96 			} \
97 			(timeout) = (count + 9) / 10; \
98 		} \
99 	}
100 
101 #define	AAC_SENSE_DATA_DESCR_LEN \
102 	(sizeof (struct scsi_descr_sense_hdr) + \
103 	sizeof (struct scsi_information_sense_descr))
104 #define	AAC_ARQ64_LENGTH \
105 	(sizeof (struct scsi_arq_status) + \
106 	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
107 
108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
109 #define	AAC_GETGXADDR(cmdlen, cdbp) \
110 	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
111 	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
112 	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
113 
114 #define	AAC_CDB_INQUIRY_CMDDT	0x02
115 #define	AAC_CDB_INQUIRY_EVPD	0x01
116 #define	AAC_VPD_PAGE_CODE	1
117 #define	AAC_VPD_PAGE_LENGTH	3
118 #define	AAC_VPD_PAGE_DATA	4
119 #define	AAC_VPD_ID_CODESET	0
120 #define	AAC_VPD_ID_TYPE		1
121 #define	AAC_VPD_ID_LENGTH	3
122 #define	AAC_VPD_ID_DATA		4
123 
124 #define	AAC_SCSI_RPTLUNS_HEAD_SIZE			0x08
125 #define	AAC_SCSI_RPTLUNS_ADDR_SIZE			0x08
126 #define	AAC_SCSI_RPTLUNS_ADDR_MASK			0xC0
127 /* 00b - peripheral device addressing method */
128 #define	AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL		0x00
129 /* 01b - flat space addressing method */
130 #define	AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE		0x40
131 /* 10b - logical unit addressing method */
132 #define	AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT		0x80
133 
134 /* Return the size of FIB with data part type data_type */
135 #define	AAC_FIB_SIZEOF(data_type) \
136 	(sizeof (struct aac_fib_header) + sizeof (data_type))
137 /* Return the container size defined in mir */
138 #define	AAC_MIR_SIZE(softs, acc, mir) \
139 	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
140 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
141 	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
142 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
143 
144 /* The last entry of aac_cards[] is for unknown cards */
145 #define	AAC_UNKNOWN_CARD \
146 	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
147 #define	CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
148 #define	BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
149 #define	AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
150 #define	AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))
151 
152 #define	PCI_MEM_GET32(softs, off) \
153 	ddi_get32((softs)->pci_mem_handle, \
154 	    (void *)((softs)->pci_mem_base_vaddr + (off)))
155 #define	PCI_MEM_PUT32(softs, off, val) \
156 	ddi_put32((softs)->pci_mem_handle, \
157 	    (void *)((softs)->pci_mem_base_vaddr + (off)), \
158 	    (uint32_t)(val))
159 #define	PCI_MEM_GET16(softs, off) \
160 	ddi_get16((softs)->pci_mem_handle, \
161 	(void *)((softs)->pci_mem_base_vaddr + (off)))
162 #define	PCI_MEM_PUT16(softs, off, val) \
163 	ddi_put16((softs)->pci_mem_handle, \
164 	(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
165 /* Write host data at valp to device mem[off] repeatedly count times */
166 #define	PCI_MEM_REP_PUT8(softs, off, valp, count) \
167 	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
168 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
169 	    count, DDI_DEV_AUTOINCR)
170 /* Read device data at mem[off] to host addr valp repeatedly count times */
171 #define	PCI_MEM_REP_GET8(softs, off, valp, count) \
172 	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
173 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
174 	    count, DDI_DEV_AUTOINCR)
175 #define	AAC_GET_FIELD8(acc, d, s, field) \
176 	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
177 #define	AAC_GET_FIELD32(acc, d, s, field) \
178 	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
179 #define	AAC_GET_FIELD64(acc, d, s, field) \
180 	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
181 #define	AAC_REP_GET_FIELD8(acc, d, s, field, r) \
182 	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
183 	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
184 #define	AAC_REP_GET_FIELD32(acc, d, s, field, r) \
185 	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
186 	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
187 
188 #define	AAC_ENABLE_INTR(softs) { \
189 		if (softs->flags & AAC_FLAGS_NEW_COMM) \
190 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
191 		else \
192 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
193 	}
194 
195 #define	AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
196 #define	AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
197 #define	AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
198 #define	AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
199 #define	AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
200 #define	AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
201 #define	AAC_FWSTATUS_GET(softs)	\
202 	((softs)->aac_if.aif_get_fwstatus(softs))
203 #define	AAC_MAILBOX_GET(softs, mb) \
204 	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
205 #define	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
206 	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
207 	    (arg0), (arg1), (arg2), (arg3)))
208 
209 #define	AAC_THROTTLE_DRAIN	-1
210 
211 #define	AAC_QUIESCE_TICK	1	/* 1 second */
212 #define	AAC_QUIESCE_TIMEOUT	180	/* 180 seconds */
213 #define	AAC_DEFAULT_TICK	10	/* 10 seconds */
214 #define	AAC_SYNC_TICK		(30*60)	/* 30 minutes */
215 
216 /* Poll time for aac_do_poll_io() */
217 #define	AAC_POLL_TIME		60	/* 60 seconds */
218 
219 /* IOP reset */
220 #define	AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeed */
221 #define	AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
222 #define	AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */
223 
224 /*
225  * Hardware access functions
226  */
227 static int aac_rx_get_fwstatus(struct aac_softstate *);
228 static int aac_rx_get_mailbox(struct aac_softstate *, int);
229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
230     uint32_t, uint32_t, uint32_t);
231 static int aac_rkt_get_fwstatus(struct aac_softstate *);
232 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234     uint32_t, uint32_t, uint32_t);
235 
236 /*
237  * SCSA function prototypes
238  */
239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
242 static int aac_quiesce(dev_info_t *);
243 
244 /*
245  * Interrupt handler functions
246  */
247 static int aac_query_intrs(struct aac_softstate *, int);
248 static int aac_add_intrs(struct aac_softstate *);
249 static void aac_remove_intrs(struct aac_softstate *);
250 static uint_t aac_intr_old(caddr_t);
251 static uint_t aac_intr_new(caddr_t);
252 static uint_t aac_softintr(caddr_t);
253 
254 /*
255  * Internal functions in attach
256  */
257 static int aac_check_card_type(struct aac_softstate *);
258 static int aac_check_firmware(struct aac_softstate *);
259 static int aac_common_attach(struct aac_softstate *);
260 static void aac_common_detach(struct aac_softstate *);
261 static int aac_probe_containers(struct aac_softstate *);
262 static int aac_alloc_comm_space(struct aac_softstate *);
263 static int aac_setup_comm_space(struct aac_softstate *);
264 static void aac_free_comm_space(struct aac_softstate *);
265 static int aac_hba_setup(struct aac_softstate *);
266 
267 /*
268  * Sync FIB operation functions
269  */
270 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
271     uint32_t, uint32_t, uint32_t, uint32_t *);
272 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
273 
274 /*
275  * Command queue operation functions
276  */
277 static void aac_cmd_initq(struct aac_cmd_queue *);
278 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
279 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
280 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
281 
282 /*
283  * FIB queue operation functions
284  */
285 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
286 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
287 
288 /*
289  * Slot operation functions
290  */
291 static int aac_create_slots(struct aac_softstate *);
292 static void aac_destroy_slots(struct aac_softstate *);
293 static void aac_alloc_fibs(struct aac_softstate *);
294 static void aac_destroy_fibs(struct aac_softstate *);
295 static struct aac_slot *aac_get_slot(struct aac_softstate *);
296 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
297 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
298 static void aac_free_fib(struct aac_slot *);
299 
300 /*
301  * Internal functions
302  */
303 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
304     uint16_t, uint16_t);
305 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
306 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
307 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
308 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
309 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
310 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
311 static void aac_start_waiting_io(struct aac_softstate *);
312 static void aac_drain_comp_q(struct aac_softstate *);
313 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
314 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
315 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
316 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
317 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
318 static int aac_dma_sync_ac(struct aac_cmd *);
319 static int aac_shutdown(struct aac_softstate *);
320 static int aac_reset_adapter(struct aac_softstate *);
321 static int aac_do_quiesce(struct aac_softstate *softs);
322 static int aac_do_unquiesce(struct aac_softstate *softs);
323 static void aac_unhold_bus(struct aac_softstate *, int);
324 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
325     int, int);
326 
327 /*
328  * Adapter Initiated FIB handling function
329  */
330 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);
331 
332 /*
333  * Timeout handling thread function
334  */
335 static void aac_daemon(void *);
336 
337 /*
338  * IOCTL interface related functions
339  */
340 static int aac_open(dev_t *, int, int, cred_t *);
341 static int aac_close(dev_t, int, int, cred_t *);
342 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
343 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
344 
345 /*
346  * FMA Prototypes
347  */
348 static void aac_fm_init(struct aac_softstate *);
349 static void aac_fm_fini(struct aac_softstate *);
350 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
351 int aac_check_acc_handle(ddi_acc_handle_t);
352 int aac_check_dma_handle(ddi_dma_handle_t);
353 void aac_fm_ereport(struct aac_softstate *, char *);
354 
355 /*
356  * Auto enumeration functions
357  */
358 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
359 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
360     void *, dev_info_t **);
361 static int aac_dr_event(struct aac_softstate *, int, int, int);
362 
363 #ifdef DEBUG
364 /*
365  * UART	debug output support
366  */
367 
368 #define	AAC_PRINT_BUFFER_SIZE		512
369 #define	AAC_PRINT_TIMEOUT		250	/* 1/4 sec. = 250 msec. */
370 
371 #define	AAC_FW_DBG_STRLEN_OFFSET	0x00
372 #define	AAC_FW_DBG_FLAGS_OFFSET		0x04
373 #define	AAC_FW_DBG_BLED_OFFSET		0x08
374 
375 static int aac_get_fw_debug_buffer(struct aac_softstate *);
376 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
377 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
378 
379 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
380 static char aac_fmt[] = " %s";
381 static char aac_fmt_header[] = " %s.%d: %s";
382 static kmutex_t aac_prt_mutex;
383 
384 /*
385  * Debug flags to be put into the softstate flags field
386  * when initialized
387  */
388 uint32_t aac_debug_flags =
389 /*    AACDB_FLAGS_KERNEL_PRINT | */
390 /*    AACDB_FLAGS_FW_PRINT |	*/
391 /*    AACDB_FLAGS_MISC |	*/
392 /*    AACDB_FLAGS_FUNC1 |	*/
393 /*    AACDB_FLAGS_FUNC2 |	*/
394 /*    AACDB_FLAGS_SCMD |	*/
395 /*    AACDB_FLAGS_AIF |		*/
396 /*    AACDB_FLAGS_FIB |		*/
397 /*    AACDB_FLAGS_IOCTL |	*/
398 0;
399 uint32_t aac_debug_fib_flags =
400 /*    AACDB_FLAGS_FIB_RW |	*/
401 /*    AACDB_FLAGS_FIB_IOCTL |	*/
402 /*    AACDB_FLAGS_FIB_SRB |	*/
403 /*    AACDB_FLAGS_FIB_SYNC |	*/
404 /*    AACDB_FLAGS_FIB_HEADER |	*/
405 /*    AACDB_FLAGS_FIB_TIMEOUT |	*/
406 0;
407 
408 #endif /* DEBUG */
409 
410 static struct cb_ops aac_cb_ops = {
411 	aac_open,	/* open */
412 	aac_close,	/* close */
413 	nodev,		/* strategy */
414 	nodev,		/* print */
415 	nodev,		/* dump */
416 	nodev,		/* read */
417 	nodev,		/* write */
418 	aac_ioctl,	/* ioctl */
419 	nodev,		/* devmap */
420 	nodev,		/* mmap */
421 	nodev,		/* segmap */
422 	nochpoll,	/* poll */
423 	ddi_prop_op,	/* cb_prop_op */
424 	NULL,		/* streamtab */
425 	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
426 	CB_REV,		/* cb_rev */
427 	nodev,		/* async I/O read entry point */
428 	nodev		/* async I/O write entry point */
429 };
430 
431 static struct dev_ops aac_dev_ops = {
432 	DEVO_REV,
433 	0,
434 	nodev,
435 	nulldev,
436 	nulldev,
437 	aac_attach,
438 	aac_detach,
439 	aac_reset,
440 	&aac_cb_ops,
441 	NULL,
442 	NULL,
443 	aac_quiesce,
444 };
445 
446 static struct modldrv aac_modldrv = {
447 	&mod_driverops,
448 	"AAC Driver " AAC_DRIVER_VERSION,
449 	&aac_dev_ops,
450 };
451 
452 static struct modlinkage aac_modlinkage = {
453 	MODREV_1,
454 	&aac_modldrv,
455 	NULL
456 };
457 
458 static struct aac_softstate  *aac_softstatep;
459 
460 /*
461  * Supported card list
462  * ordered in vendor id, subvendor id, subdevice id, and device id
463  */
464 static struct aac_card_type aac_cards[] = {
465 	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
466 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
467 	    "Dell", "PERC 3/Di"},
468 	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
469 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
470 	    "Dell", "PERC 3/Di"},
471 	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
472 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
473 	    "Dell", "PERC 3/Si"},
474 	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
475 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
476 	    "Dell", "PERC 3/Di"},
477 	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
478 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
479 	    "Dell", "PERC 3/Si"},
480 	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
481 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
482 	    "Dell", "PERC 3/Di"},
483 	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
484 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
485 	    "Dell", "PERC 3/Di"},
486 	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
487 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
488 	    "Dell", "PERC 3/Di"},
489 	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
490 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
491 	    "Dell", "PERC 3/Di"},
492 	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
493 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
494 	    "Dell", "PERC 3/Di"},
495 	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
496 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
497 	    "Dell", "PERC 320/DC"},
498 	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
499 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
500 
501 	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
502 	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
503 	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
504 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
505 	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
506 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
507 
508 	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
509 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
510 	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
511 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
512 
513 	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
514 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
515 	    "Adaptec", "2200S"},
516 	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
517 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
518 	    "Adaptec", "2120S"},
519 	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
520 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
521 	    "Adaptec", "2200S"},
522 	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
523 	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
524 	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
525 	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
526 	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
527 	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
528 	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
529 	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
530 	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
531 	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
532 	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
533 	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
534 	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
535 	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
536 	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
537 	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
538 	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
539 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
540 	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
541 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
542 	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
543 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
544 	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
545 	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
546 	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
547 	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
548 	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
549 	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
550 	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
551 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
552 	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
553 	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
554 	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
555 	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
556 	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
557 	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
558 	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
559 	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
560 	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
561 	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
562 	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
563 	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
564 	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
565 	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
566 	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
567 	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
568 	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
569 	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
570 	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
571 	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
572 	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
573 	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
574 	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
575 	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
576 	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
577 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
578 	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
579 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
580 	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
581 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
582 	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
583 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
584 	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
585 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
586 	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
587 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
588 
589 	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
590 	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
591 };
592 
593 /*
594  * Hardware access functions for i960 based cards
595  */
596 static struct aac_interface aac_rx_interface = {
597 	aac_rx_get_fwstatus,
598 	aac_rx_get_mailbox,
599 	aac_rx_set_mailbox
600 };
601 
602 /*
603  * Hardware access functions for Rocket based cards
604  */
605 static struct aac_interface aac_rkt_interface = {
606 	aac_rkt_get_fwstatus,
607 	aac_rkt_get_mailbox,
608 	aac_rkt_set_mailbox
609 };
610 
611 ddi_device_acc_attr_t aac_acc_attr = {
612 	DDI_DEVICE_ATTR_V0,
613 	DDI_STRUCTURE_LE_ACC,
614 	DDI_STRICTORDER_ACC
615 };
616 
617 static struct {
618 	int	size;
619 	int	notify;
620 } aac_qinfo[] = {
621 	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
622 	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
623 	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
624 	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
625 	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
626 	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
627 	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
628 	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
629 };
630 
631 /*
632  * Default aac dma attributes
633  */
634 static ddi_dma_attr_t aac_dma_attr = {
635 	DMA_ATTR_V0,
636 	0,		/* lowest usable address */
637 	0xffffffffull,	/* high DMA address range */
638 	0xffffffffull,	/* DMA counter register */
639 	AAC_DMA_ALIGN,	/* DMA address alignment */
640 	1,		/* DMA burstsizes */
641 	1,		/* min effective DMA size */
642 	0xffffffffull,	/* max DMA xfer size */
643 	0xffffffffull,	/* segment boundary */
644 	1,		/* s/g list length */
645 	AAC_BLK_SIZE,	/* granularity of device */
646 	0		/* DMA transfer flags */
647 };
648 
649 struct aac_drinfo {
650 	struct aac_softstate *softs;
651 	int tgt;
652 	int lun;
653 	int event;
654 };
655 
656 static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
657 static uint32_t aac_timebase = 0;	/* internal timer in seconds */
658 static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */
659 
660 /*
661  * Warlock directives
662  *
663  * Different variables with the same types have to be protected by the
664  * same mutex; otherwise, warlock will complain with "variables don't
665  * seem to be protected consistently". For example,
666  * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected
667  * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
668  * declare them as protected explictly at aac_cmd_dequeue().
669  */
670 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
671     scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
672     mode_format mode_geometry mode_header aac_cmd))
673 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
674     aac_sge))
675 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
676     aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
677     aac_sg_table aac_srb))
678 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
679 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
680 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo))
681 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
682 
683 int
684 _init(void)
685 {
686 	int rval = 0;
687 
688 #ifdef DEBUG
689 	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
690 #endif
691 	DBCALLED(NULL, 1);
692 
693 	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
694 	    sizeof (struct aac_softstate), 0)) != 0)
695 		goto error;
696 
697 	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
698 		ddi_soft_state_fini((void *)&aac_softstatep);
699 		goto error;
700 	}
701 
702 	if ((rval = mod_install(&aac_modlinkage)) != 0) {
703 		ddi_soft_state_fini((void *)&aac_softstatep);
704 		scsi_hba_fini(&aac_modlinkage);
705 		goto error;
706 	}
707 	return (rval);
708 
709 error:
710 	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
711 #ifdef DEBUG
712 	mutex_destroy(&aac_prt_mutex);
713 #endif
714 	return (rval);
715 }
716 
717 int
718 _info(struct modinfo *modinfop)
719 {
720 	DBCALLED(NULL, 1);
721 	return (mod_info(&aac_modlinkage, modinfop));
722 }
723 
724 /*
725  * An HBA driver cannot be unload unless you reboot,
726  * so this function will be of no use.
727  */
728 int
729 _fini(void)
730 {
731 	int rval;
732 
733 	DBCALLED(NULL, 1);
734 
735 	if ((rval = mod_remove(&aac_modlinkage)) != 0)
736 		goto error;
737 
738 	scsi_hba_fini(&aac_modlinkage);
739 	ddi_soft_state_fini((void *)&aac_softstatep);
740 #ifdef DEBUG
741 	mutex_destroy(&aac_prt_mutex);
742 #endif
743 	return (0);
744 
745 error:
746 	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
747 	return (rval);
748 }
749 
750 static int
751 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
752 {
753 	int instance, i;
754 	struct aac_softstate *softs = NULL;
755 	int attach_state = 0;
756 	char *data;
757 	int intr_types;
758 
759 	DBCALLED(NULL, 1);
760 
761 	switch (cmd) {
762 	case DDI_ATTACH:
763 		break;
764 	case DDI_RESUME:
765 		return (DDI_FAILURE);
766 	default:
767 		return (DDI_FAILURE);
768 	}
769 
770 	instance = ddi_get_instance(dip);
771 
772 	/* Get soft state */
773 	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
774 		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
775 		goto error;
776 	}
777 	softs = ddi_get_soft_state(aac_softstatep, instance);
778 	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
779 
780 	softs->instance = instance;
781 	softs->devinfo_p = dip;
782 	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
783 	softs->addr_dma_attr.dma_attr_granular = 1;
784 	softs->acc_attr = aac_acc_attr;
785 	softs->card = AAC_UNKNOWN_CARD;
786 #ifdef DEBUG
787 	softs->debug_flags = aac_debug_flags;
788 	softs->debug_fib_flags = aac_debug_fib_flags;
789 #endif
790 
791 	/* Initialize FMA */
792 	aac_fm_init(softs);
793 
794 	/* Check the card type */
795 	if (aac_check_card_type(softs) == AACERR) {
796 		AACDB_PRINT(softs, CE_WARN, "Card not supported");
797 		goto error;
798 	}
799 	/* We have found the right card and everything is OK */
800 	attach_state |= AAC_ATTACH_CARD_DETECTED;
801 
802 	/* Map PCI mem space */
803 	if (ddi_regs_map_setup(dip, 1,
804 	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
805 	    softs->map_size_min, &softs->acc_attr,
806 	    &softs->pci_mem_handle) != DDI_SUCCESS)
807 		goto error;
808 
809 	softs->map_size = softs->map_size_min;
810 	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
811 
812 	AAC_DISABLE_INTR(softs);
813 
814 	/* Get the type of device intrrupts */
815 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
816 		AACDB_PRINT(softs, CE_WARN,
817 		    "ddi_intr_get_supported_types() failed");
818 		goto error;
819 	}
820 	AACDB_PRINT(softs, CE_NOTE,
821 	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
822 
823 	/* Query interrupt, and alloc/init all needed struct */
824 	if (intr_types & DDI_INTR_TYPE_MSI) {
825 		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
826 		    != DDI_SUCCESS) {
827 			AACDB_PRINT(softs, CE_WARN,
828 			    "MSI interrupt query failed");
829 			goto error;
830 		}
831 		softs->intr_type = DDI_INTR_TYPE_MSI;
832 	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
833 		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
834 		    != DDI_SUCCESS) {
835 			AACDB_PRINT(softs, CE_WARN,
836 			    "FIXED interrupt query failed");
837 			goto error;
838 		}
839 		softs->intr_type = DDI_INTR_TYPE_FIXED;
840 	} else {
841 		AACDB_PRINT(softs, CE_WARN,
842 		    "Device cannot suppport both FIXED and MSI interrupts");
843 		goto error;
844 	}
845 
846 	/* Init mutexes */
847 	mutex_init(&softs->q_comp_mutex, NULL,
848 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
849 	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
850 	mutex_init(&softs->aifq_mutex, NULL,
851 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
852 	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
853 	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
854 	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
855 	    DDI_INTR_PRI(softs->intr_pri));
856 	attach_state |= AAC_ATTACH_KMUTEX_INITED;
857 
858 	/* Check for legacy device naming support */
859 	softs->legacy = 1; /* default to use legacy name */
860 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
861 	    "legacy-name-enable", &data) == DDI_SUCCESS)) {
862 		if (strcmp(data, "no") == 0) {
863 			AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
864 			softs->legacy = 0;
865 		}
866 		ddi_prop_free(data);
867 	}
868 
869 	/*
870 	 * Everything has been set up till now,
871 	 * we will do some common attach.
872 	 */
873 	if (aac_common_attach(softs) == AACERR)
874 		goto error;
875 	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
876 
877 	/* Check for buf breakup support */
878 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
879 	    "breakup-enable", &data) == DDI_SUCCESS)) {
880 		if (strcmp(data, "yes") == 0) {
881 			AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
882 			softs->flags |= AAC_FLAGS_BRKUP;
883 		}
884 		ddi_prop_free(data);
885 	}
886 	softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
887 	if (softs->flags & AAC_FLAGS_BRKUP) {
888 		softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
889 		    DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
890 	}
891 
892 	/* Init the cmd queues */
893 	for (i = 0; i < AAC_CMDQ_NUM; i++)
894 		aac_cmd_initq(&softs->q_wait[i]);
895 	aac_cmd_initq(&softs->q_busy);
896 	aac_cmd_initq(&softs->q_comp);
897 
898 	if (aac_hba_setup(softs) != AACOK)
899 		goto error;
900 	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
901 
902 	/* Connect interrupt handlers */
903 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
904 	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
905 		AACDB_PRINT(softs, CE_WARN,
906 		    "Can not setup soft interrupt handler!");
907 		goto error;
908 	}
909 	attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;
910 
911 	if (aac_add_intrs(softs) != DDI_SUCCESS) {
912 		AACDB_PRINT(softs, CE_WARN,
913 		    "Interrupt registration failed, intr type: %s",
914 		    softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
915 		goto error;
916 	}
917 	attach_state |= AAC_ATTACH_HARD_INTR_SETUP;
918 
919 	/* Create devctl/scsi nodes for cfgadm */
920 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
921 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
922 		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
923 		goto error;
924 	}
925 	attach_state |= AAC_ATTACH_CREATE_DEVCTL;
926 
927 	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
928 	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
929 		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
930 		goto error;
931 	}
932 	attach_state |= AAC_ATTACH_CREATE_SCSI;
933 
934 	/* Create aac node for app. to issue ioctls */
935 	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
936 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
937 		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
938 		goto error;
939 	}
940 
941 	/* Create a taskq for dealing with dr events */
942 	if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1,
943 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
944 		AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed");
945 		goto error;
946 	}
947 
948 	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
949 	softs->state = AAC_STATE_RUN;
950 
951 	/* Create a thread for command timeout */
952 	softs->timeout_id = timeout(aac_daemon, (void *)softs,
953 	    (60 * drv_usectohz(1000000)));
954 
955 	/* Common attach is OK, so we are attached! */
956 	AAC_ENABLE_INTR(softs);
957 	ddi_report_dev(dip);
958 	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
959 	return (DDI_SUCCESS);
960 
961 error:
962 	if (softs && softs->taskq)
963 		ddi_taskq_destroy(softs->taskq);
964 	if (attach_state & AAC_ATTACH_CREATE_SCSI)
965 		ddi_remove_minor_node(dip, "scsi");
966 	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
967 		ddi_remove_minor_node(dip, "devctl");
968 	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
969 		aac_common_detach(softs);
970 	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
971 		(void) scsi_hba_detach(dip);
972 		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
973 	}
974 	if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
975 		aac_remove_intrs(softs);
976 	if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
977 		ddi_remove_softintr(softs->softint_id);
978 	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
979 		mutex_destroy(&softs->q_comp_mutex);
980 		cv_destroy(&softs->event);
981 		mutex_destroy(&softs->aifq_mutex);
982 		cv_destroy(&softs->aifv);
983 		cv_destroy(&softs->drain_cv);
984 		mutex_destroy(&softs->io_lock);
985 	}
986 	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
987 		ddi_regs_map_free(&softs->pci_mem_handle);
988 	aac_fm_fini(softs);
989 	if (attach_state & AAC_ATTACH_CARD_DETECTED)
990 		softs->card = AACERR;
991 	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
992 		ddi_soft_state_free(aac_softstatep, instance);
993 	return (DDI_FAILURE);
994 }
995 
996 static int
997 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
998 {
999 	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
1000 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
1001 
1002 	DBCALLED(softs, 1);
1003 
1004 	switch (cmd) {
1005 	case DDI_DETACH:
1006 		break;
1007 	case DDI_SUSPEND:
1008 		return (DDI_FAILURE);
1009 	default:
1010 		return (DDI_FAILURE);
1011 	}
1012 
1013 	mutex_enter(&softs->io_lock);
1014 	AAC_DISABLE_INTR(softs);
1015 	softs->state = AAC_STATE_STOPPED;
1016 
1017 	mutex_exit(&softs->io_lock);
1018 	(void) untimeout(softs->timeout_id);
1019 	mutex_enter(&softs->io_lock);
1020 	softs->timeout_id = 0;
1021 
1022 	ddi_taskq_destroy(softs->taskq);
1023 
1024 	ddi_remove_minor_node(dip, "aac");
1025 	ddi_remove_minor_node(dip, "scsi");
1026 	ddi_remove_minor_node(dip, "devctl");
1027 
1028 	mutex_exit(&softs->io_lock);
1029 	aac_remove_intrs(softs);
1030 	ddi_remove_softintr(softs->softint_id);
1031 
1032 	aac_common_detach(softs);
1033 
1034 	(void) scsi_hba_detach(dip);
1035 	scsi_hba_tran_free(tran);
1036 
1037 	mutex_destroy(&softs->q_comp_mutex);
1038 	cv_destroy(&softs->event);
1039 	mutex_destroy(&softs->aifq_mutex);
1040 	cv_destroy(&softs->aifv);
1041 	cv_destroy(&softs->drain_cv);
1042 	mutex_destroy(&softs->io_lock);
1043 
1044 	ddi_regs_map_free(&softs->pci_mem_handle);
1045 	aac_fm_fini(softs);
1046 	softs->hwif = AAC_HWIF_UNKNOWN;
1047 	softs->card = AAC_UNKNOWN_CARD;
1048 	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1049 
1050 	return (DDI_SUCCESS);
1051 }
1052 
1053 /*ARGSUSED*/
1054 static int
1055 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1056 {
1057 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1058 
1059 	DBCALLED(softs, 1);
1060 
1061 	mutex_enter(&softs->io_lock);
1062 	(void) aac_shutdown(softs);
1063 	mutex_exit(&softs->io_lock);
1064 
1065 	return (DDI_SUCCESS);
1066 }
1067 
1068 /*
1069  * quiesce(9E) entry point.
1070  *
1071  * This function is called when the system is single-threaded at high
1072  * PIL with preemption disabled. Therefore, this function must not be
1073  * blocked.
1074  *
1075  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1076  * DDI_FAILURE indicates an error condition and should almost never happen.
1077  */
1078 static int
1079 aac_quiesce(dev_info_t *dip)
1080 {
1081 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1082 
1083 	if (softs == NULL)
1084 		return (DDI_FAILURE);
1085 
1086 	AAC_DISABLE_INTR(softs);
1087 
1088 	return (DDI_SUCCESS);
1089 }
1090 
1091 /*
1092  * Bring the controller down to a dormant state and detach all child devices.
1093  * This function is called before detach or system shutdown.
1094  * Note: we can assume that the q_wait on the controller is empty, as we
1095  * won't allow shutdown if any device is open.
1096  */
1097 static int
1098 aac_shutdown(struct aac_softstate *softs)
1099 {
1100 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
1101 	struct aac_close_command *cc = (struct aac_close_command *) \
1102 	    &softs->sync_slot.fibp->data[0];
1103 	int rval;
1104 
1105 	ddi_put32(acc, &cc->Command, VM_CloseAll);
1106 	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1107 
1108 	/* Flush all caches, set FW to write through mode */
1109 	rval = aac_sync_fib(softs, ContainerCommand,
1110 	    AAC_FIB_SIZEOF(struct aac_close_command));
1111 
1112 	AACDB_PRINT(softs, CE_NOTE,
1113 	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1114 	return (rval);
1115 }
1116 
1117 static uint_t
1118 aac_softintr(caddr_t arg)
1119 {
1120 	struct aac_softstate *softs = (void *)arg;
1121 
1122 	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1123 		aac_drain_comp_q(softs);
1124 		return (DDI_INTR_CLAIMED);
1125 	} else {
1126 		return (DDI_INTR_UNCLAIMED);
1127 	}
1128 }
1129 
1130 /*
1131  * Setup auto sense data for pkt
1132  */
1133 static void
1134 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1135     uchar_t add_code, uchar_t qual_code, uint64_t info)
1136 {
1137 	struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1138 
1139 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1140 	pkt->pkt_state |= STATE_ARQ_DONE;
1141 
1142 	*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1143 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
1144 	arqstat->sts_rqpkt_resid = 0;
1145 	arqstat->sts_rqpkt_state =
1146 	    STATE_GOT_BUS |
1147 	    STATE_GOT_TARGET |
1148 	    STATE_SENT_CMD |
1149 	    STATE_XFERRED_DATA;
1150 	arqstat->sts_rqpkt_statistics = 0;
1151 
1152 	if (info <= 0xfffffffful) {
1153 		arqstat->sts_sensedata.es_valid = 1;
1154 		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1155 		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1156 		arqstat->sts_sensedata.es_key = key;
1157 		arqstat->sts_sensedata.es_add_code = add_code;
1158 		arqstat->sts_sensedata.es_qual_code = qual_code;
1159 
1160 		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1161 		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1162 		arqstat->sts_sensedata.es_info_3 = (info >>  8) & 0xFF;
1163 		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1164 	} else { /* 64-bit LBA */
1165 		struct scsi_descr_sense_hdr *dsp;
1166 		struct scsi_information_sense_descr *isd;
1167 
1168 		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1169 		dsp->ds_class = CLASS_EXTENDED_SENSE;
1170 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1171 		dsp->ds_key = key;
1172 		dsp->ds_add_code = add_code;
1173 		dsp->ds_qual_code = qual_code;
1174 		dsp->ds_addl_sense_length =
1175 		    sizeof (struct scsi_information_sense_descr);
1176 
1177 		isd = (struct scsi_information_sense_descr *)(dsp+1);
1178 		isd->isd_descr_type = DESCR_INFORMATION;
1179 		isd->isd_valid = 1;
1180 		isd->isd_information[0] = (info >> 56) & 0xFF;
1181 		isd->isd_information[1] = (info >> 48) & 0xFF;
1182 		isd->isd_information[2] = (info >> 40) & 0xFF;
1183 		isd->isd_information[3] = (info >> 32) & 0xFF;
1184 		isd->isd_information[4] = (info >> 24) & 0xFF;
1185 		isd->isd_information[5] = (info >> 16) & 0xFF;
1186 		isd->isd_information[6] = (info >>  8) & 0xFF;
1187 		isd->isd_information[7] = (info) & 0xFF;
1188 	}
1189 }
1190 
1191 /*
1192  * Setup auto sense data for HARDWARE ERROR
1193  */
1194 static void
1195 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1196 {
1197 	union scsi_cdb *cdbp;
1198 	uint64_t err_blkno;
1199 
1200 	cdbp = (void *)acp->pkt->pkt_cdbp;
1201 	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1202 	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1203 }
1204 
1205 /*
1206  * Setup auto sense data for UNIT ATTENTION
1207  */
1208 /*ARGSUSED*/
1209 static void
1210 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
1211 {
1212 	struct aac_container *dvp = (struct aac_container *)acp->dvp;
1213 
1214 	ASSERT(dvp->dev.type == AAC_DEV_LD);
1215 
1216 	if (dvp->reset) {
1217 		dvp->reset = 0;
1218 		aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
1219 	}
1220 }
1221 
1222 /*
1223  * Send a command to the adapter in New Comm. interface
1224  */
1225 static int
1226 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1227 {
1228 	uint32_t index, device;
1229 
1230 	index = PCI_MEM_GET32(softs, AAC_IQUE);
1231 	if (index == 0xffffffffUL) {
1232 		index = PCI_MEM_GET32(softs, AAC_IQUE);
1233 		if (index == 0xffffffffUL)
1234 			return (AACERR);
1235 	}
1236 
1237 	device = index;
1238 	PCI_MEM_PUT32(softs, device,
1239 	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1240 	device += 4;
1241 	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1242 	device += 4;
1243 	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1244 	PCI_MEM_PUT32(softs, AAC_IQUE, index);
1245 	return (AACOK);
1246 }
1247 
1248 static void
1249 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1250 {
1251 	struct aac_device *dvp = acp->dvp;
1252 	int q = AAC_CMDQ(acp);
1253 
1254 	if (acp->slotp) { /* outstanding cmd */
1255 		aac_release_slot(softs, acp->slotp);
1256 		acp->slotp = NULL;
1257 		if (dvp) {
1258 			dvp->ncmds[q]--;
1259 			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1260 			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1261 				aac_set_throttle(softs, dvp, q,
1262 				    softs->total_slots);
1263 		}
1264 		softs->bus_ncmds[q]--;
1265 		(void) aac_cmd_delete(&softs->q_busy, acp);
1266 	} else { /* cmd in waiting queue */
1267 		aac_cmd_delete(&softs->q_wait[q], acp);
1268 	}
1269 
1270 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1271 		mutex_enter(&softs->q_comp_mutex);
1272 		aac_cmd_enqueue(&softs->q_comp, acp);
1273 		mutex_exit(&softs->q_comp_mutex);
1274 	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1275 		cv_broadcast(&softs->event);
1276 	}
1277 }
1278 
1279 static void
1280 aac_handle_io(struct aac_softstate *softs, int index)
1281 {
1282 	struct aac_slot *slotp;
1283 	struct aac_cmd *acp;
1284 	uint32_t fast;
1285 
1286 	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1287 	index >>= 2;
1288 
1289 	/* Make sure firmware reported index is valid */
1290 	ASSERT(index >= 0 && index < softs->total_slots);
1291 	slotp = &softs->io_slot[index];
1292 	ASSERT(slotp->index == index);
1293 	acp = slotp->acp;
1294 
1295 	if (acp == NULL || acp->slotp != slotp) {
1296 		cmn_err(CE_WARN,
1297 		    "Firmware error: invalid slot index received from FW");
1298 		return;
1299 	}
1300 
1301 	acp->flags |= AAC_CMD_CMPLT;
1302 	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1303 
1304 	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1305 		/*
1306 		 * For fast response IO, the firmware do not return any FIB
1307 		 * data, so we need to fill in the FIB status and state so that
1308 		 * FIB users can handle it correctly.
1309 		 */
1310 		if (fast) {
1311 			uint32_t state;
1312 
1313 			state = ddi_get32(slotp->fib_acc_handle,
1314 			    &slotp->fibp->Header.XferState);
1315 			/*
1316 			 * Update state for CPU not for device, no DMA sync
1317 			 * needed
1318 			 */
1319 			ddi_put32(slotp->fib_acc_handle,
1320 			    &slotp->fibp->Header.XferState,
1321 			    state | AAC_FIBSTATE_DONEADAP);
1322 			ddi_put32(slotp->fib_acc_handle,
1323 			    (void *)&slotp->fibp->data[0], ST_OK);
1324 		}
1325 
1326 		/* Handle completed ac */
1327 		acp->ac_comp(softs, acp);
1328 	} else {
1329 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1330 		acp->flags |= AAC_CMD_ERR;
1331 		if (acp->pkt) {
1332 			acp->pkt->pkt_reason = CMD_TRAN_ERR;
1333 			acp->pkt->pkt_statistics = 0;
1334 		}
1335 	}
1336 	aac_end_io(softs, acp);
1337 }
1338 
1339 /*
1340  * Interrupt handler for New Comm. interface
1341  * New Comm. interface use a different mechanism for interrupt. No explict
1342  * message queues, and driver need only accesses the mapped PCI mem space to
1343  * find the completed FIB or AIF.
1344  */
1345 static int
1346 aac_process_intr_new(struct aac_softstate *softs)
1347 {
1348 	uint32_t index;
1349 
1350 	index = AAC_OUTB_GET(softs);
1351 	if (index == 0xfffffffful)
1352 		index = AAC_OUTB_GET(softs);
1353 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1354 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1355 		return (0);
1356 	}
1357 	if (index != 0xfffffffful) {
1358 		do {
1359 			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1360 				aac_handle_io(softs, index);
1361 			} else if (index != 0xfffffffeul) {
1362 				struct aac_fib *fibp;	/* FIB in AIF queue */
1363 				uint16_t fib_size, fib_size0;
1364 
1365 				/*
1366 				 * 0xfffffffe means that the controller wants
1367 				 * more work, ignore it for now. Otherwise,
1368 				 * AIF received.
1369 				 */
1370 				index &= ~2;
1371 
1372 				mutex_enter(&softs->aifq_mutex);
1373 				/*
1374 				 * Copy AIF from adapter to the empty AIF slot
1375 				 */
1376 				fibp = &softs->aifq[softs->aifq_idx].d;
1377 				fib_size0 = PCI_MEM_GET16(softs, index + \
1378 				    offsetof(struct aac_fib, Header.Size));
1379 				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1380 				    AAC_FIB_SIZE : fib_size0;
1381 				PCI_MEM_REP_GET8(softs, index, fibp,
1382 				    fib_size);
1383 
1384 				if (aac_check_acc_handle(softs-> \
1385 				    pci_mem_handle) == DDI_SUCCESS)
1386 					(void) aac_handle_aif(softs, fibp);
1387 				else
1388 					ddi_fm_service_impact(softs->devinfo_p,
1389 					    DDI_SERVICE_UNAFFECTED);
1390 				mutex_exit(&softs->aifq_mutex);
1391 
1392 				/*
1393 				 * AIF memory is owned by the adapter, so let it
1394 				 * know that we are done with it.
1395 				 */
1396 				AAC_OUTB_SET(softs, index);
1397 				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1398 			}
1399 
1400 			index = AAC_OUTB_GET(softs);
1401 		} while (index != 0xfffffffful);
1402 
1403 		/*
1404 		 * Process waiting cmds before start new ones to
1405 		 * ensure first IOs are serviced first.
1406 		 */
1407 		aac_start_waiting_io(softs);
1408 		return (AAC_DB_COMMAND_READY);
1409 	} else {
1410 		return (0);
1411 	}
1412 }
1413 
1414 static uint_t
1415 aac_intr_new(caddr_t arg)
1416 {
1417 	struct aac_softstate *softs = (void *)arg;
1418 	uint_t rval;
1419 
1420 	mutex_enter(&softs->io_lock);
1421 	if (aac_process_intr_new(softs))
1422 		rval = DDI_INTR_CLAIMED;
1423 	else
1424 		rval = DDI_INTR_UNCLAIMED;
1425 	mutex_exit(&softs->io_lock);
1426 
1427 	aac_drain_comp_q(softs);
1428 	return (rval);
1429 }
1430 
1431 /*
1432  * Interrupt handler for old interface
1433  * Explicit message queues are used to send FIB to and get completed FIB from
1434  * the adapter. Driver and adapter maitain the queues in the producer/consumer
1435  * manner. The driver has to query the queues to find the completed FIB.
1436  */
1437 static int
1438 aac_process_intr_old(struct aac_softstate *softs)
1439 {
1440 	uint16_t status;
1441 
1442 	status = AAC_STATUS_GET(softs);
1443 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1444 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1445 		return (DDI_INTR_UNCLAIMED);
1446 	}
1447 	if (status & AAC_DB_RESPONSE_READY) {
1448 		int slot_idx;
1449 
1450 		/* ACK the intr */
1451 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1452 		(void) AAC_STATUS_GET(softs);
1453 		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1454 		    &slot_idx) == AACOK)
1455 			aac_handle_io(softs, slot_idx);
1456 
1457 		/*
1458 		 * Process waiting cmds before start new ones to
1459 		 * ensure first IOs are serviced first.
1460 		 */
1461 		aac_start_waiting_io(softs);
1462 		return (AAC_DB_RESPONSE_READY);
1463 	} else if (status & AAC_DB_COMMAND_READY) {
1464 		int aif_idx;
1465 
1466 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1467 		(void) AAC_STATUS_GET(softs);
1468 		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1469 		    AACOK) {
1470 			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1471 			struct aac_fib *fibp;	/* FIB in AIF queue */
1472 			struct aac_fib *fibp0;	/* FIB in communication space */
1473 			uint16_t fib_size, fib_size0;
1474 			uint32_t fib_xfer_state;
1475 			uint32_t addr, size;
1476 
1477 			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1478 
1479 #define	AAC_SYNC_AIF(softs, aif_idx, type) \
1480 	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1481 	    offsetof(struct aac_comm_space, \
1482 	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1483 	    (type)); }
1484 
1485 			mutex_enter(&softs->aifq_mutex);
1486 			/* Copy AIF from adapter to the empty AIF slot */
1487 			fibp = &softs->aifq[softs->aifq_idx].d;
1488 			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1489 			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1490 			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1491 			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1492 			    AAC_FIB_SIZE : fib_size0;
1493 			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
1494 			    fib_size, DDI_DEV_AUTOINCR);
1495 
1496 			(void) aac_handle_aif(softs, fibp);
1497 			mutex_exit(&softs->aifq_mutex);
1498 
1499 			/* Complete AIF back to adapter with good status */
1500 			fib_xfer_state = LE_32(fibp->Header.XferState);
1501 			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1502 				ddi_put32(acc, &fibp0->Header.XferState,
1503 				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1504 				ddi_put32(acc, (void *)&fibp0->data[0], ST_OK);
1505 				if (fib_size0 > AAC_FIB_SIZE)
1506 					ddi_put16(acc, &fibp0->Header.Size,
1507 					    AAC_FIB_SIZE);
1508 				AAC_SYNC_AIF(softs, aif_idx,
1509 				    DDI_DMA_SYNC_FORDEV);
1510 			}
1511 
1512 			/* Put the AIF response on the response queue */
1513 			addr = ddi_get32(acc,
1514 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1515 			    Header.SenderFibAddress);
1516 			size = (uint32_t)ddi_get16(acc,
1517 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1518 			    Header.Size);
1519 			ddi_put32(acc,
1520 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1521 			    Header.ReceiverFibAddress, addr);
1522 			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1523 			    addr, size) == AACERR)
1524 				cmn_err(CE_NOTE, "!AIF ack failed");
1525 		}
1526 		return (AAC_DB_COMMAND_READY);
1527 	} else if (status & AAC_DB_PRINTF_READY) {
1528 		/* ACK the intr */
1529 		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1530 		(void) AAC_STATUS_GET(softs);
1531 		(void) ddi_dma_sync(softs->comm_space_dma_handle,
1532 		    offsetof(struct aac_comm_space, adapter_print_buf),
1533 		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1534 		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1535 		    DDI_SUCCESS)
1536 			cmn_err(CE_NOTE, "MSG From Adapter: %s",
1537 			    softs->comm_space->adapter_print_buf);
1538 		else
1539 			ddi_fm_service_impact(softs->devinfo_p,
1540 			    DDI_SERVICE_UNAFFECTED);
1541 		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1542 		return (AAC_DB_PRINTF_READY);
1543 	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
1544 		/*
1545 		 * Without these two condition statements, the OS could hang
1546 		 * after a while, especially if there are a lot of AIF's to
1547 		 * handle, for instance if a drive is pulled from an array
1548 		 * under heavy load.
1549 		 */
1550 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1551 		return (AAC_DB_COMMAND_NOT_FULL);
1552 	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1553 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1554 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1555 		return (AAC_DB_RESPONSE_NOT_FULL);
1556 	} else {
1557 		return (0);
1558 	}
1559 }
1560 
1561 static uint_t
1562 aac_intr_old(caddr_t arg)
1563 {
1564 	struct aac_softstate *softs = (void *)arg;
1565 	int rval;
1566 
1567 	mutex_enter(&softs->io_lock);
1568 	if (aac_process_intr_old(softs))
1569 		rval = DDI_INTR_CLAIMED;
1570 	else
1571 		rval = DDI_INTR_UNCLAIMED;
1572 	mutex_exit(&softs->io_lock);
1573 
1574 	aac_drain_comp_q(softs);
1575 	return (rval);
1576 }
1577 
1578 /*
1579  * Query FIXED or MSI interrupts
1580  */
1581 static int
1582 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1583 {
1584 	dev_info_t *dip = softs->devinfo_p;
1585 	int avail, actual, intr_size, count;
1586 	int i, flag, ret;
1587 
1588 	AACDB_PRINT(softs, CE_NOTE,
1589 	    "aac_query_intrs:interrupt type 0x%x", intr_type);
1590 
1591 	/* Get number of interrupts */
1592 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1593 	if ((ret != DDI_SUCCESS) || (count == 0)) {
1594 		AACDB_PRINT(softs, CE_WARN,
1595 		    "ddi_intr_get_nintrs() failed, ret %d count %d",
1596 		    ret, count);
1597 		return (DDI_FAILURE);
1598 	}
1599 
1600 	/* Get number of available interrupts */
1601 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
1602 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
1603 		AACDB_PRINT(softs, CE_WARN,
1604 		    "ddi_intr_get_navail() failed, ret %d avail %d",
1605 		    ret, avail);
1606 		return (DDI_FAILURE);
1607 	}
1608 
1609 	AACDB_PRINT(softs, CE_NOTE,
1610 	    "ddi_intr_get_nvail returned %d, navail() returned %d",
1611 	    count, avail);
1612 
1613 	/* Allocate an array of interrupt handles */
1614 	intr_size = count * sizeof (ddi_intr_handle_t);
1615 	softs->htable = kmem_alloc(intr_size, KM_SLEEP);
1616 
1617 	if (intr_type == DDI_INTR_TYPE_MSI) {
1618 		count = 1; /* only one vector needed by now */
1619 		flag = DDI_INTR_ALLOC_STRICT;
1620 	} else { /* must be DDI_INTR_TYPE_FIXED */
1621 		flag = DDI_INTR_ALLOC_NORMAL;
1622 	}
1623 
1624 	/* Call ddi_intr_alloc() */
1625 	ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1626 	    count, &actual, flag);
1627 
1628 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
1629 		AACDB_PRINT(softs, CE_WARN,
1630 		    "ddi_intr_alloc() failed, ret = %d", ret);
1631 		actual = 0;
1632 		goto error;
1633 	}
1634 
1635 	if (actual < count) {
1636 		AACDB_PRINT(softs, CE_NOTE,
1637 		    "Requested: %d, Received: %d", count, actual);
1638 		goto error;
1639 	}
1640 
1641 	softs->intr_cnt = actual;
1642 
1643 	/* Get priority for first msi, assume remaining are all the same */
1644 	if ((ret = ddi_intr_get_pri(softs->htable[0],
1645 	    &softs->intr_pri)) != DDI_SUCCESS) {
1646 		AACDB_PRINT(softs, CE_WARN,
1647 		    "ddi_intr_get_pri() failed, ret = %d", ret);
1648 		goto error;
1649 	}
1650 
1651 	/* Test for high level mutex */
1652 	if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1653 		AACDB_PRINT(softs, CE_WARN,
1654 		    "aac_query_intrs: Hi level interrupt not supported");
1655 		goto error;
1656 	}
1657 
1658 	return (DDI_SUCCESS);
1659 
1660 error:
1661 	/* Free already allocated intr */
1662 	for (i = 0; i < actual; i++)
1663 		(void) ddi_intr_free(softs->htable[i]);
1664 
1665 	kmem_free(softs->htable, intr_size);
1666 	return (DDI_FAILURE);
1667 }
1668 
1669 /*
1670  * Register FIXED or MSI interrupts, and enable them
1671  */
1672 static int
1673 aac_add_intrs(struct aac_softstate *softs)
1674 {
1675 	int i, ret;
1676 	int intr_size, actual;
1677 	ddi_intr_handler_t *aac_intr;
1678 
1679 	actual = softs->intr_cnt;
1680 	intr_size = actual * sizeof (ddi_intr_handle_t);
1681 	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1682 	    aac_intr_new : aac_intr_old);
1683 
1684 	/* Call ddi_intr_add_handler() */
1685 	for (i = 0; i < actual; i++) {
1686 		if ((ret = ddi_intr_add_handler(softs->htable[i],
1687 		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1688 			cmn_err(CE_WARN,
1689 			    "ddi_intr_add_handler() failed ret = %d", ret);
1690 
1691 			/* Free already allocated intr */
1692 			for (i = 0; i < actual; i++)
1693 				(void) ddi_intr_free(softs->htable[i]);
1694 
1695 			kmem_free(softs->htable, intr_size);
1696 			return (DDI_FAILURE);
1697 		}
1698 	}
1699 
1700 	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1701 	    != DDI_SUCCESS) {
1702 		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1703 
1704 		/* Free already allocated intr */
1705 		for (i = 0; i < actual; i++)
1706 			(void) ddi_intr_free(softs->htable[i]);
1707 
1708 		kmem_free(softs->htable, intr_size);
1709 		return (DDI_FAILURE);
1710 	}
1711 
1712 	/* Enable interrupts */
1713 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1714 		/* for MSI block enable */
1715 		(void) ddi_intr_block_enable(softs->htable, softs->intr_cnt);
1716 	} else {
1717 		/* Call ddi_intr_enable() for legacy/MSI non block enable */
1718 		for (i = 0; i < softs->intr_cnt; i++)
1719 			(void) ddi_intr_enable(softs->htable[i]);
1720 	}
1721 
1722 	return (DDI_SUCCESS);
1723 }
1724 
1725 /*
1726  * Unregister FIXED or MSI interrupts
1727  */
1728 static void
1729 aac_remove_intrs(struct aac_softstate *softs)
1730 {
1731 	int i;
1732 
1733 	/* Disable all interrupts */
1734 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1735 		/* Call ddi_intr_block_disable() */
1736 		(void) ddi_intr_block_disable(softs->htable, softs->intr_cnt);
1737 	} else {
1738 		for (i = 0; i < softs->intr_cnt; i++)
1739 			(void) ddi_intr_disable(softs->htable[i]);
1740 	}
1741 
1742 	/* Call ddi_intr_remove_handler() */
1743 	for (i = 0; i < softs->intr_cnt; i++) {
1744 		(void) ddi_intr_remove_handler(softs->htable[i]);
1745 		(void) ddi_intr_free(softs->htable[i]);
1746 	}
1747 
1748 	kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t));
1749 }
1750 
1751 /*
1752  * Set pkt_reason and OR in pkt_statistics flag
1753  */
1754 static void
1755 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1756     uchar_t reason, uint_t stat)
1757 {
1758 #ifndef __lock_lint
1759 	_NOTE(ARGUNUSED(softs))
1760 #endif
1761 	if (acp->pkt->pkt_reason == CMD_CMPLT)
1762 		acp->pkt->pkt_reason = reason;
1763 	acp->pkt->pkt_statistics |= stat;
1764 }
1765 
1766 /*
1767  * Handle a finished pkt of soft SCMD
1768  */
1769 static void
1770 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1771 {
1772 	ASSERT(acp->pkt);
1773 
1774 	acp->flags |= AAC_CMD_CMPLT;
1775 
1776 	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1777 	    STATE_SENT_CMD | STATE_GOT_STATUS;
1778 	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1779 		acp->pkt->pkt_resid = 0;
1780 
1781 	/* AAC_CMD_NO_INTR means no complete callback */
1782 	if (!(acp->flags & AAC_CMD_NO_INTR)) {
1783 		mutex_enter(&softs->q_comp_mutex);
1784 		aac_cmd_enqueue(&softs->q_comp, acp);
1785 		mutex_exit(&softs->q_comp_mutex);
1786 		ddi_trigger_softintr(softs->softint_id);
1787 	}
1788 }
1789 
1790 /*
1791  * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1792  */
1793 
1794 /*
1795  * Handle completed logical device IO command
1796  */
1797 /*ARGSUSED*/
1798 static void
1799 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1800 {
1801 	struct aac_slot *slotp = acp->slotp;
1802 	struct aac_blockread_response *resp;
1803 	uint32_t status;
1804 
1805 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1806 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1807 
1808 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
1809 
1810 	/*
1811 	 * block_read/write has a similar response header, use blockread
1812 	 * response for both.
1813 	 */
1814 	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1815 	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1816 	if (status == ST_OK) {
1817 		acp->pkt->pkt_resid = 0;
1818 		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1819 	} else {
1820 		aac_set_arq_data_hwerr(acp);
1821 	}
1822 }
1823 
1824 /*
1825  * Handle completed phys. device IO command
1826  */
1827 static void
1828 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1829 {
1830 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1831 	struct aac_fib *fibp = acp->slotp->fibp;
1832 	struct scsi_pkt *pkt = acp->pkt;
1833 	struct aac_srb_reply *resp;
1834 	uint32_t resp_status;
1835 
1836 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1837 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1838 
1839 	resp = (struct aac_srb_reply *)&fibp->data[0];
1840 	resp_status = ddi_get32(acc, &resp->status);
1841 
1842 	/* First check FIB status */
1843 	if (resp_status == ST_OK) {
1844 		uint32_t scsi_status;
1845 		uint32_t srb_status;
1846 		uint32_t data_xfer_length;
1847 
1848 		scsi_status = ddi_get32(acc, &resp->scsi_status);
1849 		srb_status = ddi_get32(acc, &resp->srb_status);
1850 		data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1851 
1852 		*pkt->pkt_scbp = (uint8_t)scsi_status;
1853 		pkt->pkt_state |= STATE_GOT_STATUS;
1854 		if (scsi_status == STATUS_GOOD) {
1855 			uchar_t cmd = ((union scsi_cdb *)(void *)
1856 			    (pkt->pkt_cdbp))->scc_cmd;
1857 
1858 			/* Next check SRB status */
1859 			switch (srb_status & 0x3f) {
1860 			case SRB_STATUS_DATA_OVERRUN:
1861 				AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1862 				    "scmd=%d, xfer=%d, buflen=%d",
1863 				    (uint32_t)cmd, data_xfer_length,
1864 				    acp->bcount);
1865 
1866 				switch (cmd) {
1867 				case SCMD_READ:
1868 				case SCMD_WRITE:
1869 				case SCMD_READ_G1:
1870 				case SCMD_WRITE_G1:
1871 				case SCMD_READ_G4:
1872 				case SCMD_WRITE_G4:
1873 				case SCMD_READ_G5:
1874 				case SCMD_WRITE_G5:
1875 					aac_set_pkt_reason(softs, acp,
1876 					    CMD_DATA_OVR, 0);
1877 					break;
1878 				}
1879 				/*FALLTHRU*/
1880 			case SRB_STATUS_ERROR_RECOVERY:
1881 			case SRB_STATUS_PENDING:
1882 			case SRB_STATUS_SUCCESS:
1883 				/*
1884 				 * pkt_resid should only be calculated if the
1885 				 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1886 				 * OVERRUN/UNDERRUN
1887 				 */
1888 				if (data_xfer_length) {
1889 					pkt->pkt_state |= STATE_XFERRED_DATA;
1890 					pkt->pkt_resid = acp->bcount - \
1891 					    data_xfer_length;
1892 					ASSERT(pkt->pkt_resid >= 0);
1893 				}
1894 				break;
1895 			case SRB_STATUS_ABORTED:
1896 				AACDB_PRINT(softs, CE_NOTE,
1897 				    "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1898 				    data_xfer_length, pkt->pkt_resid);
1899 				aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1900 				    STAT_ABORTED);
1901 				break;
1902 			case SRB_STATUS_ABORT_FAILED:
1903 				AACDB_PRINT(softs, CE_NOTE,
1904 				    "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1905 				    "resid=%d", data_xfer_length,
1906 				    pkt->pkt_resid);
1907 				aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1908 				    0);
1909 				break;
1910 			case SRB_STATUS_PARITY_ERROR:
1911 				AACDB_PRINT(softs, CE_NOTE,
1912 				    "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1913 				    "resid=%d", data_xfer_length,
1914 				    pkt->pkt_resid);
1915 				aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1916 				break;
1917 			case SRB_STATUS_NO_DEVICE:
1918 			case SRB_STATUS_INVALID_PATH_ID:
1919 			case SRB_STATUS_INVALID_TARGET_ID:
1920 			case SRB_STATUS_INVALID_LUN:
1921 			case SRB_STATUS_SELECTION_TIMEOUT:
1922 #ifdef DEBUG
1923 				if (AAC_DEV_IS_VALID(acp->dvp)) {
1924 					AACDB_PRINT(softs, CE_NOTE,
1925 					    "SRB_STATUS_NO_DEVICE(%d), " \
1926 					    "xfer=%d, resid=%d ",
1927 					    srb_status & 0x3f,
1928 					    data_xfer_length, pkt->pkt_resid);
1929 				}
1930 #endif
1931 				aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1932 				break;
1933 			case SRB_STATUS_COMMAND_TIMEOUT:
1934 			case SRB_STATUS_TIMEOUT:
1935 				AACDB_PRINT(softs, CE_NOTE,
1936 				    "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1937 				    "resid=%d", data_xfer_length,
1938 				    pkt->pkt_resid);
1939 				aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1940 				    STAT_TIMEOUT);
1941 				break;
1942 			case SRB_STATUS_BUS_RESET:
1943 				AACDB_PRINT(softs, CE_NOTE,
1944 				    "SRB_STATUS_BUS_RESET, xfer=%d, " \
1945 				    "resid=%d", data_xfer_length,
1946 				    pkt->pkt_resid);
1947 				aac_set_pkt_reason(softs, acp, CMD_RESET,
1948 				    STAT_BUS_RESET);
1949 				break;
1950 			default:
1951 				AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1952 				    "xfer=%d, resid=%d", srb_status & 0x3f,
1953 				    data_xfer_length, pkt->pkt_resid);
1954 				aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1955 				break;
1956 			}
1957 		} else if (scsi_status == STATUS_CHECK) {
1958 			/* CHECK CONDITION */
1959 			struct scsi_arq_status *arqstat =
1960 			    (void *)(pkt->pkt_scbp);
1961 			uint32_t sense_data_size;
1962 
1963 			pkt->pkt_state |= STATE_ARQ_DONE;
1964 
1965 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1966 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
1967 			arqstat->sts_rqpkt_resid = 0;
1968 			arqstat->sts_rqpkt_state =
1969 			    STATE_GOT_BUS |
1970 			    STATE_GOT_TARGET |
1971 			    STATE_SENT_CMD |
1972 			    STATE_XFERRED_DATA;
1973 			arqstat->sts_rqpkt_statistics = 0;
1974 
1975 			sense_data_size = ddi_get32(acc,
1976 			    &resp->sense_data_size);
1977 			ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1978 			AACDB_PRINT(softs, CE_NOTE,
1979 			    "CHECK CONDITION: sense len=%d, xfer len=%d",
1980 			    sense_data_size, data_xfer_length);
1981 
1982 			if (sense_data_size > SENSE_LENGTH)
1983 				sense_data_size = SENSE_LENGTH;
1984 			ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1985 			    (uint8_t *)resp->sense_data, sense_data_size,
1986 			    DDI_DEV_AUTOINCR);
1987 		} else {
1988 			AACDB_PRINT(softs, CE_WARN, "invaild scsi status: " \
1989 			    "scsi_status=%d, srb_status=%d",
1990 			    scsi_status, srb_status);
1991 			aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1992 		}
1993 	} else {
1994 		AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
1995 		    resp_status);
1996 		aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1997 	}
1998 }
1999 
2000 /*
2001  * Handle completed IOCTL command
2002  */
2003 /*ARGSUSED*/
2004 void
2005 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2006 {
2007 	struct aac_slot *slotp = acp->slotp;
2008 
2009 	/*
2010 	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2011 	 * may wait on softs->event, so use cv_broadcast() instead
2012 	 * of cv_signal().
2013 	 */
2014 	ASSERT(acp->flags & AAC_CMD_SYNC);
2015 	ASSERT(acp->flags & AAC_CMD_NO_CB);
2016 
2017 	/* Get the size of the response FIB from its FIB.Header.Size field */
2018 	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2019 	    &slotp->fibp->Header.Size);
2020 
2021 	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2022 	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2023 	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2024 }
2025 
2026 /*
2027  * Handle completed Flush command
2028  */
2029 /*ARGSUSED*/
2030 static void
2031 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2032 {
2033 	struct aac_slot *slotp = acp->slotp;
2034 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
2035 	struct aac_synchronize_reply *resp;
2036 	uint32_t status;
2037 
2038 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
2039 
2040 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
2041 
2042 	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2043 	status = ddi_get32(acc, &resp->Status);
2044 	if (status != CT_OK)
2045 		aac_set_arq_data_hwerr(acp);
2046 }
2047 
2048 /*
2049  * Access PCI space to see if the driver can support the card
2050  */
2051 static int
2052 aac_check_card_type(struct aac_softstate *softs)
2053 {
2054 	ddi_acc_handle_t pci_config_handle;
2055 	int card_index;
2056 	uint32_t pci_cmd;
2057 
2058 	/* Map pci configuration space */
2059 	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2060 	    DDI_SUCCESS) {
2061 		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2062 		return (AACERR);
2063 	}
2064 
2065 	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2066 	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2067 	softs->subvendid = pci_config_get16(pci_config_handle,
2068 	    PCI_CONF_SUBVENID);
2069 	softs->subsysid = pci_config_get16(pci_config_handle,
2070 	    PCI_CONF_SUBSYSID);
2071 
2072 	card_index = 0;
2073 	while (!CARD_IS_UNKNOWN(card_index)) {
2074 		if ((aac_cards[card_index].vendor == softs->vendid) &&
2075 		    (aac_cards[card_index].device == softs->devid) &&
2076 		    (aac_cards[card_index].subvendor == softs->subvendid) &&
2077 		    (aac_cards[card_index].subsys == softs->subsysid)) {
2078 			break;
2079 		}
2080 		card_index++;
2081 	}
2082 
2083 	softs->card = card_index;
2084 	softs->hwif = aac_cards[card_index].hwif;
2085 
2086 	/*
2087 	 * Unknown aac card
2088 	 * do a generic match based on the VendorID and DeviceID to
2089 	 * support the new cards in the aac family
2090 	 */
2091 	if (CARD_IS_UNKNOWN(card_index)) {
2092 		if (softs->vendid != 0x9005) {
2093 			AACDB_PRINT(softs, CE_WARN,
2094 			    "Unknown vendor 0x%x", softs->vendid);
2095 			goto error;
2096 		}
2097 		switch (softs->devid) {
2098 		case 0x285:
2099 			softs->hwif = AAC_HWIF_I960RX;
2100 			break;
2101 		case 0x286:
2102 			softs->hwif = AAC_HWIF_RKT;
2103 			break;
2104 		default:
2105 			AACDB_PRINT(softs, CE_WARN,
2106 			    "Unknown device \"pci9005,%x\"", softs->devid);
2107 			goto error;
2108 		}
2109 	}
2110 
2111 	/* Set hardware dependent interface */
2112 	switch (softs->hwif) {
2113 	case AAC_HWIF_I960RX:
2114 		softs->aac_if = aac_rx_interface;
2115 		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2116 		break;
2117 	case AAC_HWIF_RKT:
2118 		softs->aac_if = aac_rkt_interface;
2119 		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2120 		break;
2121 	default:
2122 		AACDB_PRINT(softs, CE_WARN,
2123 		    "Unknown hardware interface %d", softs->hwif);
2124 		goto error;
2125 	}
2126 
2127 	/* Set card names */
2128 	(void *)strncpy(softs->vendor_name, aac_cards[card_index].vid,
2129 	    AAC_VENDOR_LEN);
2130 	(void *)strncpy(softs->product_name, aac_cards[card_index].desc,
2131 	    AAC_PRODUCT_LEN);
2132 
2133 	/* Set up quirks */
2134 	softs->flags = aac_cards[card_index].quirks;
2135 
2136 	/* Force the busmaster enable bit on */
2137 	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2138 	if ((pci_cmd & PCI_COMM_ME) == 0) {
2139 		pci_cmd |= PCI_COMM_ME;
2140 		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2141 		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2142 		if ((pci_cmd & PCI_COMM_ME) == 0) {
2143 			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2144 			goto error;
2145 		}
2146 	}
2147 
2148 	/* Set memory base to map */
2149 	softs->pci_mem_base_paddr = 0xfffffff0UL & \
2150 	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2151 
2152 	pci_config_teardown(&pci_config_handle);
2153 
2154 	return (AACOK); /* card type detected */
2155 error:
2156 	pci_config_teardown(&pci_config_handle);
2157 	return (AACERR); /* no matched card found */
2158 }
2159 
2160 /*
2161  * Check the firmware to determine the features to support and the FIB
2162  * parameters to use.
2163  */
2164 static int
2165 aac_check_firmware(struct aac_softstate *softs)
2166 {
2167 	uint32_t options;
2168 	uint32_t atu_size;
2169 	ddi_acc_handle_t pci_handle;
2170 	uint8_t *data;
2171 	uint32_t max_fibs;
2172 	uint32_t max_fib_size;
2173 	uint32_t sg_tablesize;
2174 	uint32_t max_sectors;
2175 	uint32_t status;
2176 
2177 	/* Get supported options */
2178 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2179 	    &status)) != AACOK) {
2180 		if (status != SRB_STATUS_INVALID_REQUEST) {
2181 			cmn_err(CE_CONT,
2182 			    "?Fatal error: request adapter info error");
2183 			return (AACERR);
2184 		}
2185 		options = 0;
2186 		atu_size = 0;
2187 	} else {
2188 		options = AAC_MAILBOX_GET(softs, 1);
2189 		atu_size = AAC_MAILBOX_GET(softs, 2);
2190 	}
2191 
2192 	if (softs->state & AAC_STATE_RESET) {
2193 		if ((softs->support_opt == options) &&
2194 		    (softs->atu_size == atu_size))
2195 			return (AACOK);
2196 
2197 		cmn_err(CE_WARN,
2198 		    "?Fatal error: firmware changed, system needs reboot");
2199 		return (AACERR);
2200 	}
2201 
2202 	/*
2203 	 * The following critical settings are initialized only once during
2204 	 * driver attachment.
2205 	 */
2206 	softs->support_opt = options;
2207 	softs->atu_size = atu_size;
2208 
2209 	/* Process supported options */
2210 	if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2211 	    (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2212 		AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2213 		softs->flags |= AAC_FLAGS_4GB_WINDOW;
2214 	} else {
2215 		/*
2216 		 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2217 		 * only. IO is handled by the DMA engine which does not suffer
2218 		 * from the ATU window programming workarounds necessary for
2219 		 * CPU copy operations.
2220 		 */
2221 		softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2222 		softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2223 	}
2224 
2225 	if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2226 		AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2227 		softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2228 		softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2229 		softs->flags |= AAC_FLAGS_SG_64BIT;
2230 	}
2231 
2232 	if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2233 		softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2234 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2235 	}
2236 
2237 	if (options & AAC_SUPPORTED_NONDASD) {
2238 		if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2239 		    "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2240 			if (strcmp((char *)data, "yes") == 0) {
2241 				AACDB_PRINT(softs, CE_NOTE,
2242 				    "!Enable Non-DASD access");
2243 				softs->flags |= AAC_FLAGS_NONDASD;
2244 			}
2245 			ddi_prop_free(data);
2246 		}
2247 	}
2248 
2249 	/* Read preferred settings */
2250 	max_fib_size = 0;
2251 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2252 	    0, 0, 0, 0, NULL)) == AACOK) {
2253 		options = AAC_MAILBOX_GET(softs, 1);
2254 		max_fib_size = (options & 0xffff);
2255 		max_sectors = (options >> 16) << 1;
2256 		options = AAC_MAILBOX_GET(softs, 2);
2257 		sg_tablesize = (options >> 16);
2258 		options = AAC_MAILBOX_GET(softs, 3);
2259 		max_fibs = (options & 0xffff);
2260 	}
2261 
2262 	/* Enable new comm. and rawio at the same time */
2263 	if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2264 	    (max_fib_size != 0)) {
2265 		/* read out and save PCI MBR */
2266 		if ((atu_size > softs->map_size) &&
2267 		    (ddi_regs_map_setup(softs->devinfo_p, 1,
2268 		    (caddr_t *)&data, 0, atu_size, &softs->acc_attr,
2269 		    &pci_handle) == DDI_SUCCESS)) {
2270 			ddi_regs_map_free(&softs->pci_mem_handle);
2271 			softs->pci_mem_handle = pci_handle;
2272 			softs->pci_mem_base_vaddr = data;
2273 			softs->map_size = atu_size;
2274 		}
2275 		if (atu_size == softs->map_size) {
2276 			softs->flags |= AAC_FLAGS_NEW_COMM;
2277 			AACDB_PRINT(softs, CE_NOTE,
2278 			    "!Enable New Comm. interface");
2279 		}
2280 	}
2281 
2282 	/* Set FIB parameters */
2283 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
2284 		softs->aac_max_fibs = max_fibs;
2285 		softs->aac_max_fib_size = max_fib_size;
2286 		softs->aac_max_sectors = max_sectors;
2287 		softs->aac_sg_tablesize = sg_tablesize;
2288 
2289 		softs->flags |= AAC_FLAGS_RAW_IO;
2290 		AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2291 	} else {
2292 		softs->aac_max_fibs =
2293 		    (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2294 		softs->aac_max_fib_size = AAC_FIB_SIZE;
2295 		softs->aac_max_sectors = 128;	/* 64K */
2296 		if (softs->flags & AAC_FLAGS_17SG)
2297 			softs->aac_sg_tablesize = 17;
2298 		else if (softs->flags & AAC_FLAGS_34SG)
2299 			softs->aac_sg_tablesize = 34;
2300 		else if (softs->flags & AAC_FLAGS_SG_64BIT)
2301 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2302 			    sizeof (struct aac_blockwrite64) +
2303 			    sizeof (struct aac_sg_entry64)) /
2304 			    sizeof (struct aac_sg_entry64);
2305 		else
2306 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2307 			    sizeof (struct aac_blockwrite) +
2308 			    sizeof (struct aac_sg_entry)) /
2309 			    sizeof (struct aac_sg_entry);
2310 	}
2311 
2312 	if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2313 	    (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2314 		softs->flags |= AAC_FLAGS_LBA_64BIT;
2315 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2316 	}
2317 	softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2318 	softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2319 	/*
2320 	 * 64K maximum segment size in scatter gather list is controlled by
2321 	 * the NEW_COMM bit in the adapter information. If not set, the card
2322 	 * can only accept a maximum of 64K. It is not recommended to permit
2323 	 * more than 128KB of total transfer size to the adapters because
2324 	 * performance is negatively impacted.
2325 	 *
2326 	 * For new comm, segment size equals max xfer size. For old comm,
2327 	 * we use 64K for both.
2328 	 */
2329 	softs->buf_dma_attr.dma_attr_count_max =
2330 	    softs->buf_dma_attr.dma_attr_maxxfer - 1;
2331 
2332 	/* Setup FIB operations */
2333 	if (softs->flags & AAC_FLAGS_RAW_IO)
2334 		softs->aac_cmd_fib = aac_cmd_fib_rawio;
2335 	else if (softs->flags & AAC_FLAGS_SG_64BIT)
2336 		softs->aac_cmd_fib = aac_cmd_fib_brw64;
2337 	else
2338 		softs->aac_cmd_fib = aac_cmd_fib_brw;
2339 	softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2340 	    aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2341 
2342 	/* 64-bit LBA needs descriptor format sense data */
2343 	softs->slen = sizeof (struct scsi_arq_status);
2344 	if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2345 	    softs->slen < AAC_ARQ64_LENGTH)
2346 		softs->slen = AAC_ARQ64_LENGTH;
2347 
2348 	AACDB_PRINT(softs, CE_NOTE,
2349 	    "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2350 	    softs->aac_max_fibs, softs->aac_max_fib_size,
2351 	    softs->aac_max_sectors, softs->aac_sg_tablesize);
2352 
2353 	return (AACOK);
2354 }
2355 
2356 static void
2357 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2358     struct FsaRev *fsarev1)
2359 {
2360 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2361 
2362 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2363 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2364 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2365 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2366 	AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2367 }
2368 
2369 /*
2370  * The following function comes from Adaptec:
2371  *
2372  * Query adapter information and supplement adapter information
2373  */
2374 static int
2375 aac_get_adapter_info(struct aac_softstate *softs,
2376     struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2377 {
2378 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2379 	struct aac_fib *fibp = softs->sync_slot.fibp;
2380 	struct aac_adapter_info *ainfp;
2381 	struct aac_supplement_adapter_info *sinfp;
2382 
2383 	ddi_put8(acc, &fibp->data[0], 0);
2384 	if (aac_sync_fib(softs, RequestAdapterInfo,
2385 	    sizeof (struct aac_fib_header)) != AACOK) {
2386 		AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2387 		return (AACERR);
2388 	}
2389 	ainfp = (struct aac_adapter_info *)fibp->data;
2390 	if (ainfr) {
2391 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2392 		AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2393 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2394 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2395 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2396 		AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2397 		AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2398 		AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2399 		aac_fsa_rev(softs, &ainfp->KernelRevision,
2400 		    &ainfr->KernelRevision);
2401 		aac_fsa_rev(softs, &ainfp->MonitorRevision,
2402 		    &ainfr->MonitorRevision);
2403 		aac_fsa_rev(softs, &ainfp->HardwareRevision,
2404 		    &ainfr->HardwareRevision);
2405 		aac_fsa_rev(softs, &ainfp->BIOSRevision,
2406 		    &ainfr->BIOSRevision);
2407 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2408 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2409 		AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2410 		AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2411 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2412 		AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2413 	}
2414 	if (sinfr) {
2415 		if (!(softs->support_opt &
2416 		    AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2417 			AACDB_PRINT(softs, CE_WARN,
2418 			    "SupplementAdapterInfo not supported");
2419 			return (AACERR);
2420 		}
2421 		ddi_put8(acc, &fibp->data[0], 0);
2422 		if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2423 		    sizeof (struct aac_fib_header)) != AACOK) {
2424 			AACDB_PRINT(softs, CE_WARN,
2425 			    "RequestSupplementAdapterInfo failed");
2426 			return (AACERR);
2427 		}
2428 		sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2429 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2430 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2431 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2432 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2433 		AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2434 		AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2435 		AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2436 		AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2437 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2438 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2439 		AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2440 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2441 		    sizeof (struct vpd_info));
2442 		aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2443 		    &sinfr->FlashFirmwareRevision);
2444 		AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2445 		aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2446 		    &sinfr->FlashFirmwareBootRevision);
2447 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2448 		    MFG_PCBA_SERIAL_NUMBER_WIDTH);
2449 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2450 		    MFG_WWN_WIDTH);
2451 		AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2);
2452 	}
2453 	return (AACOK);
2454 }
2455 
2456 static int
2457 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2458     uint32_t *tgt_max)
2459 {
2460 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2461 	struct aac_fib *fibp = softs->sync_slot.fibp;
2462 	struct aac_ctcfg *c_cmd;
2463 	struct aac_ctcfg_resp *c_resp;
2464 	uint32_t scsi_method_id;
2465 	struct aac_bus_info *cmd;
2466 	struct aac_bus_info_response *resp;
2467 	int rval;
2468 
2469 	/* Detect MethodId */
2470 	c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2471 	ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2472 	ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2473 	ddi_put32(acc, &c_cmd->param, 0);
2474 	rval = aac_sync_fib(softs, ContainerCommand,
2475 	    AAC_FIB_SIZEOF(struct aac_ctcfg));
2476 	c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2477 	if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2478 		AACDB_PRINT(softs, CE_WARN,
2479 		    "VM_ContainerConfig command fail");
2480 		return (AACERR);
2481 	}
2482 	scsi_method_id = ddi_get32(acc, &c_resp->param);
2483 
2484 	/* Detect phys. bus count and max. target id first */
2485 	cmd = (struct aac_bus_info *)&fibp->data[0];
2486 	ddi_put32(acc, &cmd->Command, VM_Ioctl);
2487 	ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2488 	ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2489 	ddi_put32(acc, &cmd->ObjectId, 0);
2490 	ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2491 	/*
2492 	 * For VM_Ioctl, the firmware uses the Header.Size filled from the
2493 	 * driver as the size to be returned. Therefore the driver has to use
2494 	 * sizeof (struct aac_bus_info_response) because it is greater than
2495 	 * sizeof (struct aac_bus_info).
2496 	 */
2497 	rval = aac_sync_fib(softs, ContainerCommand,
2498 	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
2499 	resp = (struct aac_bus_info_response *)cmd;
2500 
2501 	/* Scan all coordinates with INQUIRY */
2502 	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2503 		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2504 		return (AACERR);
2505 	}
2506 	*bus_max = ddi_get32(acc, &resp->BusCount);
2507 	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2508 	return (AACOK);
2509 }
2510 
2511 /*
2512  * The following function comes from Adaptec:
2513  *
2514  * Routine to be called during initialization of communications with
2515  * the adapter to handle possible adapter configuration issues. When
2516  * the adapter first boots up, it examines attached drives, etc, and
2517  * potentially comes up with a new or revised configuration (relative to
2518  * what's stored in it's NVRAM). Additionally it may discover problems
2519  * that make the current physical configuration unworkable (currently
2520  * applicable only to cluster configuration issues).
2521  *
2522  * If there are no configuration issues or the issues are considered
2523  * trival by the adapter, it will set it's configuration status to
2524  * "FSACT_CONTINUE" and execute the "commit confiuguration" action
2525  * automatically on it's own.
2526  *
2527  * However, if there are non-trivial issues, the adapter will set it's
2528  * internal configuration status to "FSACT_PAUSE" or "FASCT_ABORT"
2529  * and wait for some agent on the host to issue the "\ContainerCommand
2530  * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2531  * adapter to commit the new/updated configuration and enable
2532  * un-inhibited operation.  The host agent should first issue the
2533  * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2534  * command to obtain information about config issues detected by
2535  * the adapter.
2536  *
2537  * Normally the adapter's PC BIOS will execute on the host following
2538  * adapter poweron and reset and will be responsible for querring the
2539  * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2540  * command if appropriate.
2541  *
2542  * However, with the introduction of IOP reset support, the adapter may
2543  * boot up without the benefit of the adapter's PC BIOS host agent.
2544  * This routine is intended to take care of these issues in situations
2545  * where BIOS doesn't execute following adapter poweron or reset.  The
2546  * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2547  * there is no harm in doing this when it's already been done.
2548  */
2549 static int
2550 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2551 {
2552 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2553 	struct aac_fib *fibp = softs->sync_slot.fibp;
2554 	struct aac_Container *cmd;
2555 	struct aac_Container_resp *resp;
2556 	struct aac_cf_status_header *cfg_sts_hdr;
2557 	uint32_t resp_status;
2558 	uint32_t ct_status;
2559 	uint32_t cfg_stat_action;
2560 	int rval;
2561 
2562 	/* Get adapter config status */
2563 	cmd = (struct aac_Container *)&fibp->data[0];
2564 
2565 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2566 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2567 	ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2568 	ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2569 	    sizeof (struct aac_cf_status_header));
2570 	rval = aac_sync_fib(softs, ContainerCommand,
2571 	    AAC_FIB_SIZEOF(struct aac_Container));
2572 	resp = (struct aac_Container_resp *)cmd;
2573 	cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2574 
2575 	resp_status = ddi_get32(acc, &resp->Status);
2576 	ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2577 	if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2578 		cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2579 
2580 		/* Commit configuration if it's reasonable to do so. */
2581 		if (cfg_stat_action <= CFACT_PAUSE) {
2582 			bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2583 			ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2584 			ddi_put32(acc, &cmd->CTCommand.command,
2585 			    CT_COMMIT_CONFIG);
2586 			rval = aac_sync_fib(softs, ContainerCommand,
2587 			    AAC_FIB_SIZEOF(struct aac_Container));
2588 
2589 			resp_status = ddi_get32(acc, &resp->Status);
2590 			ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2591 			if ((rval == AACOK) && (resp_status == 0) &&
2592 			    (ct_status == CT_OK))
2593 				/* Successful completion */
2594 				rval = AACMPE_OK;
2595 			else
2596 				/* Auto-commit aborted due to error(s). */
2597 				rval = AACMPE_COMMIT_CONFIG;
2598 		} else {
2599 			/*
2600 			 * Auto-commit aborted due to adapter indicating
2601 			 * configuration issue(s) too dangerous to auto-commit.
2602 			 */
2603 			rval = AACMPE_CONFIG_STATUS;
2604 		}
2605 	} else {
2606 		cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2607 		rval = AACMPE_CONFIG_STATUS;
2608 	}
2609 	return (rval);
2610 }
2611 
2612 /*
2613  * Hardware initialization and resource allocation
2614  */
2615 static int
2616 aac_common_attach(struct aac_softstate *softs)
2617 {
2618 	uint32_t status;
2619 	int i;
2620 
2621 	DBCALLED(softs, 1);
2622 
2623 	/*
2624 	 * Do a little check here to make sure there aren't any outstanding
2625 	 * FIBs in the message queue. At this point there should not be and
2626 	 * if there are they are probably left over from another instance of
2627 	 * the driver like when the system crashes and the crash dump driver
2628 	 * gets loaded.
2629 	 */
2630 	while (AAC_OUTB_GET(softs) != 0xfffffffful)
2631 		;
2632 
2633 	/*
2634 	 * Wait the card to complete booting up before do anything that
2635 	 * attempts to communicate with it.
2636 	 */
2637 	status = AAC_FWSTATUS_GET(softs);
2638 	if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2639 		goto error;
2640 	i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2641 	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2642 	if (i == 0) {
2643 		cmn_err(CE_CONT, "?Fatal error: controller not ready");
2644 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2645 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2646 		goto error;
2647 	}
2648 
2649 	/* Read and set card supported options and settings */
2650 	if (aac_check_firmware(softs) == AACERR) {
2651 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2652 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2653 		goto error;
2654 	}
2655 
2656 	/* Clear out all interrupts */
2657 	AAC_STATUS_CLR(softs, ~0);
2658 
2659 	/* Setup communication space with the card */
2660 	if (softs->comm_space_dma_handle == NULL) {
2661 		if (aac_alloc_comm_space(softs) != AACOK)
2662 			goto error;
2663 	}
2664 	if (aac_setup_comm_space(softs) != AACOK) {
2665 		cmn_err(CE_CONT, "?Setup communication space failed");
2666 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2667 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2668 		goto error;
2669 	}
2670 
2671 #ifdef DEBUG
2672 	if (aac_get_fw_debug_buffer(softs) != AACOK)
2673 		cmn_err(CE_CONT, "?firmware UART trace not supported");
2674 #endif
2675 
2676 	/* Allocate slots */
2677 	if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2678 		cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2679 		goto error;
2680 	}
2681 	AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2682 
2683 	/* Allocate FIBs */
2684 	if (softs->total_fibs < softs->total_slots) {
2685 		aac_alloc_fibs(softs);
2686 		if (softs->total_fibs == 0)
2687 			goto error;
2688 		AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2689 		    softs->total_fibs);
2690 	}
2691 
2692 	/* Get adapter names */
2693 	if (CARD_IS_UNKNOWN(softs->card)) {
2694 		struct aac_supplement_adapter_info sinf;
2695 
2696 		if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) {
2697 			cmn_err(CE_CONT, "?Query adapter information failed");
2698 		} else {
2699 			char *p, *p0, *p1;
2700 
2701 			/*
2702 			 * Now find the controller name in supp_adapter_info->
2703 			 * AdapterTypeText. Use the first word as the vendor
2704 			 * and the other words as the product name.
2705 			 */
2706 			AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2707 			    "\"%s\"", sinf.AdapterTypeText);
2708 			p = sinf.AdapterTypeText;
2709 			p0 = p1 = NULL;
2710 			/* Skip heading spaces */
2711 			while (*p && (*p == ' ' || *p == '\t'))
2712 				p++;
2713 			p0 = p;
2714 			while (*p && (*p != ' ' && *p != '\t'))
2715 				p++;
2716 			/* Remove middle spaces */
2717 			while (*p && (*p == ' ' || *p == '\t'))
2718 				*p++ = 0;
2719 			p1 = p;
2720 			/* Remove trailing spaces */
2721 			p = p1 + strlen(p1) - 1;
2722 			while (p > p1 && (*p == ' ' || *p == '\t'))
2723 				*p-- = 0;
2724 			if (*p0 && *p1) {
2725 				(void *)strncpy(softs->vendor_name, p0,
2726 				    AAC_VENDOR_LEN);
2727 				(void *)strncpy(softs->product_name, p1,
2728 				    AAC_PRODUCT_LEN);
2729 			} else {
2730 				cmn_err(CE_WARN,
2731 				    "?adapter name mis-formatted\n");
2732 				if (*p0)
2733 					(void *)strncpy(softs->product_name,
2734 					    p0, AAC_PRODUCT_LEN);
2735 			}
2736 		}
2737 	}
2738 
2739 	cmn_err(CE_NOTE,
2740 	    "!aac driver %d.%02d.%02d-%d, found card: " \
2741 	    "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2742 	    AAC_DRIVER_MAJOR_VERSION,
2743 	    AAC_DRIVER_MINOR_VERSION,
2744 	    AAC_DRIVER_BUGFIX_LEVEL,
2745 	    AAC_DRIVER_BUILD,
2746 	    softs->vendor_name, softs->product_name,
2747 	    softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2748 	    softs->pci_mem_base_paddr);
2749 
2750 	/* Perform acceptance of adapter-detected config changes if possible */
2751 	if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2752 		cmn_err(CE_CONT, "?Handle adapter config issues failed");
2753 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2754 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2755 		goto error;
2756 	}
2757 
2758 	/* Setup containers (logical devices) */
2759 	if (aac_probe_containers(softs) != AACOK) {
2760 		cmn_err(CE_CONT, "?Fatal error: get container info error");
2761 		goto error;
2762 	}
2763 
2764 	/* Setup phys. devices */
2765 	if (softs->flags & AAC_FLAGS_NONDASD) {
2766 		uint32_t bus_max, tgt_max;
2767 		uint32_t bus, tgt;
2768 		int index;
2769 
2770 		if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2771 			cmn_err(CE_CONT, "?Fatal error: get bus info error");
2772 			goto error;
2773 		}
2774 		AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2775 		    bus_max, tgt_max);
2776 		if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2777 			if (softs->state & AAC_STATE_RESET) {
2778 				cmn_err(CE_WARN,
2779 				    "?Fatal error: bus map changed");
2780 				goto error;
2781 			}
2782 			softs->bus_max = bus_max;
2783 			softs->tgt_max = tgt_max;
2784 			if (softs->nondasds) {
2785 				kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2786 				    sizeof (struct aac_nondasd));
2787 			}
2788 			softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2789 			    sizeof (struct aac_nondasd), KM_SLEEP);
2790 
2791 			index = 0;
2792 			for (bus = 0; bus < softs->bus_max; bus++) {
2793 				for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2794 					struct aac_nondasd *dvp =
2795 					    &softs->nondasds[index++];
2796 					dvp->dev.type = AAC_DEV_PD;
2797 					dvp->bus = bus;
2798 					dvp->tid = tgt;
2799 				}
2800 			}
2801 		}
2802 	}
2803 
2804 	/* Check dma & acc handles allocated in attach */
2805 	if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2806 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2807 		goto error;
2808 	}
2809 
2810 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2811 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2812 		goto error;
2813 	}
2814 
2815 	for (i = 0; i < softs->total_slots; i++) {
2816 		if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2817 		    DDI_SUCCESS) {
2818 			ddi_fm_service_impact(softs->devinfo_p,
2819 			    DDI_SERVICE_LOST);
2820 			goto error;
2821 		}
2822 	}
2823 
2824 	return (AACOK);
2825 error:
2826 	if (softs->state & AAC_STATE_RESET)
2827 		return (AACERR);
2828 	if (softs->nondasds) {
2829 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2830 		    sizeof (struct aac_nondasd));
2831 		softs->nondasds = NULL;
2832 	}
2833 	if (softs->total_fibs > 0)
2834 		aac_destroy_fibs(softs);
2835 	if (softs->total_slots > 0)
2836 		aac_destroy_slots(softs);
2837 	if (softs->comm_space_dma_handle)
2838 		aac_free_comm_space(softs);
2839 	return (AACERR);
2840 }
2841 
2842 /*
2843  * Hardware shutdown and resource release
2844  */
2845 static void
2846 aac_common_detach(struct aac_softstate *softs)
2847 {
2848 	DBCALLED(softs, 1);
2849 
2850 	(void) aac_shutdown(softs);
2851 
2852 	if (softs->nondasds) {
2853 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2854 		    sizeof (struct aac_nondasd));
2855 		softs->nondasds = NULL;
2856 	}
2857 	aac_destroy_fibs(softs);
2858 	aac_destroy_slots(softs);
2859 	aac_free_comm_space(softs);
2860 }
2861 
2862 /*
2863  * Send a synchronous command to the controller and wait for a result.
2864  * Indicate if the controller completed the command with an error status.
2865  */
2866 int
2867 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
2868     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
2869     uint32_t *statusp)
2870 {
2871 	int timeout;
2872 	uint32_t status;
2873 
2874 	if (statusp != NULL)
2875 		*statusp = SRB_STATUS_SUCCESS;
2876 
2877 	/* Fill in mailbox */
2878 	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
2879 
2880 	/* Ensure the sync command doorbell flag is cleared */
2881 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2882 
2883 	/* Then set it to signal the adapter */
2884 	AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
2885 
2886 	/* Spin waiting for the command to complete */
2887 	timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
2888 	AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
2889 	if (!timeout) {
2890 		AACDB_PRINT(softs, CE_WARN,
2891 		    "Sync command timed out after %d seconds (0x%x)!",
2892 		    AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
2893 		return (AACERR);
2894 	}
2895 
2896 	/* Clear the completion flag */
2897 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2898 
2899 	/* Get the command status */
2900 	status = AAC_MAILBOX_GET(softs, 0);
2901 	if (statusp != NULL)
2902 		*statusp = status;
2903 	if (status != SRB_STATUS_SUCCESS) {
2904 		AACDB_PRINT(softs, CE_WARN,
2905 		    "Sync command fail: status = 0x%x", status);
2906 		return (AACERR);
2907 	}
2908 
2909 	return (AACOK);
2910 }
2911 
2912 /*
2913  * Send a synchronous FIB to the adapter and wait for its completion
2914  */
2915 static int
2916 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
2917 {
2918 	struct aac_slot *slotp = &softs->sync_slot;
2919 	ddi_dma_handle_t dma = slotp->fib_dma_handle;
2920 	uint32_t status;
2921 	int rval;
2922 
2923 	/* Sync fib only supports 512 bytes */
2924 	if (fibsize > AAC_FIB_SIZE)
2925 		return (AACERR);
2926 
2927 	/*
2928 	 * Setup sync fib
2929 	 * Need not reinitialize FIB header if it's already been filled
2930 	 * by others like aac_cmd_fib_scsi as aac_cmd.
2931 	 */
2932 	if (slotp->acp == NULL)
2933 		aac_cmd_fib_header(softs, slotp, cmd, fibsize);
2934 
2935 	AACDB_PRINT_FIB(softs, &softs->sync_slot);
2936 
2937 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2938 	    fibsize, DDI_DMA_SYNC_FORDEV);
2939 
2940 	/* Give the FIB to the controller, wait for a response. */
2941 	rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB,
2942 	    slotp->fib_phyaddr, 0, 0, 0, &status);
2943 	if (rval == AACERR) {
2944 		AACDB_PRINT(softs, CE_WARN,
2945 		    "Send sync fib to controller failed");
2946 		return (AACERR);
2947 	}
2948 
2949 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2950 	    AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU);
2951 
2952 	if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) ||
2953 	    (aac_check_dma_handle(dma) != DDI_SUCCESS)) {
2954 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2955 		return (AACERR);
2956 	}
2957 
2958 	return (AACOK);
2959 }
2960 
2961 static void
2962 aac_cmd_initq(struct aac_cmd_queue *q)
2963 {
2964 	q->q_head = NULL;
2965 	q->q_tail = (struct aac_cmd *)&q->q_head;
2966 }
2967 
2968 /*
2969  * Remove a cmd from the head of q
2970  */
2971 static struct aac_cmd *
2972 aac_cmd_dequeue(struct aac_cmd_queue *q)
2973 {
2974 	struct aac_cmd *acp;
2975 
2976 	_NOTE(ASSUMING_PROTECTED(*q))
2977 
2978 	if ((acp = q->q_head) != NULL) {
2979 		if ((q->q_head = acp->next) != NULL)
2980 			acp->next = NULL;
2981 		else
2982 			q->q_tail = (struct aac_cmd *)&q->q_head;
2983 		acp->prev = NULL;
2984 	}
2985 	return (acp);
2986 }
2987 
2988 /*
2989  * Add a cmd to the tail of q
2990  */
2991 static void
2992 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
2993 {
2994 	ASSERT(acp->next == NULL);
2995 	acp->prev = q->q_tail;
2996 	q->q_tail->next = acp;
2997 	q->q_tail = acp;
2998 }
2999 
3000 /*
3001  * Remove the cmd ac from q
3002  */
3003 static void
3004 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3005 {
3006 	if (acp->prev) {
3007 		if ((acp->prev->next = acp->next) != NULL) {
3008 			acp->next->prev = acp->prev;
3009 			acp->next = NULL;
3010 		} else {
3011 			q->q_tail = acp->prev;
3012 		}
3013 		acp->prev = NULL;
3014 	}
3015 	/* ac is not in the queue */
3016 }
3017 
3018 /*
3019  * Atomically insert an entry into the nominated queue, returns 0 on success or
3020  * AACERR if the queue is full.
3021  *
3022  * Note: it would be more efficient to defer notifying the controller in
3023  *	 the case where we may be inserting several entries in rapid succession,
3024  *	 but implementing this usefully may be difficult (it would involve a
3025  *	 separate queue/notify interface).
3026  */
3027 static int
3028 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3029     uint32_t fib_size)
3030 {
3031 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3032 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3033 	uint32_t pi, ci;
3034 
3035 	DBCALLED(softs, 2);
3036 
3037 	ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3038 
3039 	/* Get the producer/consumer indices */
3040 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3041 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3042 	    DDI_DMA_SYNC_FORCPU);
3043 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3044 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3045 		return (AACERR);
3046 	}
3047 
3048 	pi = ddi_get32(acc,
3049 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3050 	ci = ddi_get32(acc,
3051 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3052 
3053 	/*
3054 	 * Wrap the queue first before we check the queue to see
3055 	 * if it is full
3056 	 */
3057 	if (pi >= aac_qinfo[queue].size)
3058 		pi = 0;
3059 
3060 	/* XXX queue full */
3061 	if ((pi + 1) == ci)
3062 		return (AACERR);
3063 
3064 	/* Fill in queue entry */
3065 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3066 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3067 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3068 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3069 	    DDI_DMA_SYNC_FORDEV);
3070 
3071 	/* Update producer index */
3072 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3073 	    pi + 1);
3074 	(void) ddi_dma_sync(dma,
3075 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3076 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3077 	    DDI_DMA_SYNC_FORDEV);
3078 
3079 	if (aac_qinfo[queue].notify != 0)
3080 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3081 	return (AACOK);
3082 }
3083 
3084 /*
3085  * Atomically remove one entry from the nominated queue, returns 0 on
3086  * success or AACERR if the queue is empty.
3087  */
3088 static int
3089 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3090 {
3091 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3092 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3093 	uint32_t pi, ci;
3094 	int unfull = 0;
3095 
3096 	DBCALLED(softs, 2);
3097 
3098 	ASSERT(idxp);
3099 
3100 	/* Get the producer/consumer indices */
3101 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3102 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3103 	    DDI_DMA_SYNC_FORCPU);
3104 	pi = ddi_get32(acc,
3105 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3106 	ci = ddi_get32(acc,
3107 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3108 
3109 	/* Check for queue empty */
3110 	if (ci == pi)
3111 		return (AACERR);
3112 
3113 	if (pi >= aac_qinfo[queue].size)
3114 		pi = 0;
3115 
3116 	/* Check for queue full */
3117 	if (ci == pi + 1)
3118 		unfull = 1;
3119 
3120 	/*
3121 	 * The controller does not wrap the queue,
3122 	 * so we have to do it by ourselves
3123 	 */
3124 	if (ci >= aac_qinfo[queue].size)
3125 		ci = 0;
3126 
3127 	/* Fetch the entry */
3128 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3129 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3130 	    DDI_DMA_SYNC_FORCPU);
3131 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3132 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3133 		return (AACERR);
3134 	}
3135 
3136 	switch (queue) {
3137 	case AAC_HOST_NORM_RESP_Q:
3138 	case AAC_HOST_HIGH_RESP_Q:
3139 		*idxp = ddi_get32(acc,
3140 		    &(softs->qentries[queue] + ci)->aq_fib_addr);
3141 		break;
3142 
3143 	case AAC_HOST_NORM_CMD_Q:
3144 	case AAC_HOST_HIGH_CMD_Q:
3145 		*idxp = ddi_get32(acc,
3146 		    &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3147 		break;
3148 
3149 	default:
3150 		cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3151 		return (AACERR);
3152 	}
3153 
3154 	/* Update consumer index */
3155 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3156 	    ci + 1);
3157 	(void) ddi_dma_sync(dma,
3158 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3159 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3160 	    DDI_DMA_SYNC_FORDEV);
3161 
3162 	if (unfull && aac_qinfo[queue].notify != 0)
3163 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3164 	return (AACOK);
3165 }
3166 
3167 /*
3168  * Request information of the container cid
3169  */
3170 static struct aac_mntinforesp *
3171 aac_get_container_info(struct aac_softstate *softs, int cid)
3172 {
3173 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3174 	struct aac_fib *fibp = softs->sync_slot.fibp;
3175 	struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3176 	struct aac_mntinforesp *mir;
3177 
3178 	ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3179 	    (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3180 	    VM_NameServe64 : VM_NameServe);
3181 	ddi_put32(acc, &mi->MntType, FT_FILESYS);
3182 	ddi_put32(acc, &mi->MntCount, cid);
3183 
3184 	if (aac_sync_fib(softs, ContainerCommand,
3185 	    AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3186 		AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid);
3187 		return (NULL);
3188 	}
3189 
3190 	mir = (struct aac_mntinforesp *)&fibp->data[0];
3191 	if (ddi_get32(acc, &mir->Status) == ST_OK)
3192 		return (mir);
3193 	return (NULL);
3194 }
3195 
3196 static int
3197 aac_get_container_count(struct aac_softstate *softs, int *count)
3198 {
3199 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3200 	struct aac_mntinforesp *mir;
3201 
3202 	if ((mir = aac_get_container_info(softs, 0)) == NULL)
3203 		return (AACERR);
3204 	*count = ddi_get32(acc, &mir->MntRespCount);
3205 	if (*count > AAC_MAX_LD) {
3206 		AACDB_PRINT(softs, CE_CONT,
3207 		    "container count(%d) > AAC_MAX_LD", *count);
3208 		return (AACERR);
3209 	}
3210 	return (AACOK);
3211 }
3212 
3213 static int
3214 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3215 {
3216 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3217 	struct aac_Container *ct = (struct aac_Container *) \
3218 	    &softs->sync_slot.fibp->data[0];
3219 
3220 	bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3221 	ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3222 	ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3223 	ddi_put32(acc, &ct->CTCommand.param[0], cid);
3224 
3225 	if (aac_sync_fib(softs, ContainerCommand,
3226 	    AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3227 		return (AACERR);
3228 	if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3229 		return (AACERR);
3230 
3231 	*uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3232 	return (AACOK);
3233 }
3234 
3235 static int
3236 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3237 {
3238 	struct aac_container *dvp = &softs->containers[cid];
3239 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3240 	struct aac_mntinforesp *mir;
3241 	uint64_t size;
3242 	uint32_t uid;
3243 
3244 	/* Get container basic info */
3245 	if ((mir = aac_get_container_info(softs, cid)) == NULL)
3246 		return (AACERR);
3247 
3248 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3249 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3250 			AACDB_PRINT(softs, CE_NOTE,
3251 			    ">>> Container %d deleted", cid);
3252 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3253 			(void) aac_dr_event(softs, dvp->cid, -1,
3254 			    AAC_EVT_OFFLINE);
3255 		}
3256 	} else {
3257 		size = AAC_MIR_SIZE(softs, acc, mir);
3258 
3259 		/* Get container UID */
3260 		if (aac_get_container_uid(softs, cid, &uid) == AACERR) {
3261 			AACDB_PRINT(softs, CE_CONT,
3262 			    "query container %d uid failed", cid);
3263 			return (AACERR);
3264 		}
3265 		AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid);
3266 
3267 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3268 			if (dvp->uid != uid) {
3269 				AACDB_PRINT(softs, CE_WARN,
3270 				    ">>> Container %u uid changed to %d",
3271 				    cid, uid);
3272 				dvp->uid = uid;
3273 			}
3274 			if (dvp->size != size) {
3275 				AACDB_PRINT(softs, CE_NOTE,
3276 				    ">>> Container %u size changed to %"PRIu64,
3277 				    cid, size);
3278 				dvp->size = size;
3279 			}
3280 		} else { /* Init new container */
3281 			AACDB_PRINT(softs, CE_NOTE,
3282 			    ">>> Container %d added: " \
3283 			    "size=0x%x.%08x, type=%d, name=%s",
3284 			    cid,
3285 			    ddi_get32(acc, &mir->MntObj.CapacityHigh),
3286 			    ddi_get32(acc, &mir->MntObj.Capacity),
3287 			    ddi_get32(acc, &mir->MntObj.VolType),
3288 			    mir->MntObj.FileSystemName);
3289 			dvp->dev.flags |= AAC_DFLAG_VALID;
3290 			dvp->dev.type = AAC_DEV_LD;
3291 
3292 			dvp->cid = cid;
3293 			dvp->uid = uid;
3294 			dvp->size = size;
3295 			dvp->locked = 0;
3296 			dvp->deleted = 0;
3297 			(void) aac_dr_event(softs, dvp->cid, -1,
3298 			    AAC_EVT_ONLINE);
3299 		}
3300 	}
3301 	return (AACOK);
3302 }
3303 
3304 /*
3305  * Do a rescan of all the possible containers and update the container list
3306  * with newly online/offline containers, and prepare for autoconfiguration.
3307  */
3308 static int
3309 aac_probe_containers(struct aac_softstate *softs)
3310 {
3311 	int i, count, total;
3312 
3313 	/* Loop over possible containers */
3314 	count = softs->container_count;
3315 	if (aac_get_container_count(softs, &count) == AACERR)
3316 		return (AACERR);
3317 	for (i = total = 0; i < count; i++) {
3318 		if (aac_probe_container(softs, i) == AACOK)
3319 			total++;
3320 	}
3321 	if (count < softs->container_count) {
3322 		struct aac_container *dvp;
3323 
3324 		for (dvp = &softs->containers[count];
3325 		    dvp < &softs->containers[softs->container_count]; dvp++) {
3326 			if (!AAC_DEV_IS_VALID(&dvp->dev))
3327 				continue;
3328 			AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3329 			    dvp->cid);
3330 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3331 			(void) aac_dr_event(softs, dvp->cid, -1,
3332 			    AAC_EVT_OFFLINE);
3333 		}
3334 	}
3335 	softs->container_count = count;
3336 	AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3337 	return (AACOK);
3338 }
3339 
3340 static int
3341 aac_alloc_comm_space(struct aac_softstate *softs)
3342 {
3343 	size_t rlen;
3344 	ddi_dma_cookie_t cookie;
3345 	uint_t cookien;
3346 
3347 	/* Allocate DMA for comm. space */
3348 	if (ddi_dma_alloc_handle(
3349 	    softs->devinfo_p,
3350 	    &softs->addr_dma_attr,
3351 	    DDI_DMA_SLEEP,
3352 	    NULL,
3353 	    &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3354 		AACDB_PRINT(softs, CE_WARN,
3355 		    "Cannot alloc dma handle for communication area");
3356 		goto error;
3357 	}
3358 	if (ddi_dma_mem_alloc(
3359 	    softs->comm_space_dma_handle,
3360 	    sizeof (struct aac_comm_space),
3361 	    &softs->acc_attr,
3362 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3363 	    DDI_DMA_SLEEP,
3364 	    NULL,
3365 	    (caddr_t *)&softs->comm_space,
3366 	    &rlen,
3367 	    &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3368 		AACDB_PRINT(softs, CE_WARN,
3369 		    "Cannot alloc mem for communication area");
3370 		goto error;
3371 	}
3372 	if (ddi_dma_addr_bind_handle(
3373 	    softs->comm_space_dma_handle,
3374 	    NULL,
3375 	    (caddr_t)softs->comm_space,
3376 	    sizeof (struct aac_comm_space),
3377 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3378 	    DDI_DMA_SLEEP,
3379 	    NULL,
3380 	    &cookie,
3381 	    &cookien) != DDI_DMA_MAPPED) {
3382 		AACDB_PRINT(softs, CE_WARN,
3383 		    "DMA bind failed for communication area");
3384 		goto error;
3385 	}
3386 	softs->comm_space_phyaddr = cookie.dmac_address;
3387 
3388 	/* Setup sync FIB space */
3389 	softs->sync_slot.fibp = &softs->comm_space->sync_fib;
3390 	softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \
3391 	    offsetof(struct aac_comm_space, sync_fib);
3392 	softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle;
3393 	softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle;
3394 
3395 	return (AACOK);
3396 error:
3397 	if (softs->comm_space_acc_handle) {
3398 		ddi_dma_mem_free(&softs->comm_space_acc_handle);
3399 		softs->comm_space_acc_handle = NULL;
3400 	}
3401 	if (softs->comm_space_dma_handle) {
3402 		ddi_dma_free_handle(&softs->comm_space_dma_handle);
3403 		softs->comm_space_dma_handle = NULL;
3404 	}
3405 	return (AACERR);
3406 }
3407 
3408 static void
3409 aac_free_comm_space(struct aac_softstate *softs)
3410 {
3411 	softs->sync_slot.fibp = NULL;
3412 	softs->sync_slot.fib_phyaddr = NULL;
3413 	softs->sync_slot.fib_acc_handle = NULL;
3414 	softs->sync_slot.fib_dma_handle = NULL;
3415 
3416 	(void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3417 	ddi_dma_mem_free(&softs->comm_space_acc_handle);
3418 	softs->comm_space_acc_handle = NULL;
3419 	ddi_dma_free_handle(&softs->comm_space_dma_handle);
3420 	softs->comm_space_dma_handle = NULL;
3421 	softs->comm_space_phyaddr = NULL;
3422 }
3423 
3424 /*
3425  * Initialize the data structures that are required for the communication
3426  * interface to operate
3427  */
3428 static int
3429 aac_setup_comm_space(struct aac_softstate *softs)
3430 {
3431 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3432 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3433 	uint32_t comm_space_phyaddr;
3434 	struct aac_adapter_init *initp;
3435 	int qoffset;
3436 
3437 	comm_space_phyaddr = softs->comm_space_phyaddr;
3438 
3439 	/* Setup adapter init struct */
3440 	initp = &softs->comm_space->init_data;
3441 	bzero(initp, sizeof (struct aac_adapter_init));
3442 
3443 	ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3444 	ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3445 
3446 	/* Setup new/old comm. specific data */
3447 	if (softs->flags & AAC_FLAGS_RAW_IO) {
3448 		ddi_put32(acc, &initp->InitStructRevision,
3449 		    AAC_INIT_STRUCT_REVISION_4);
3450 		ddi_put32(acc, &initp->InitFlags,
3451 		    (softs->flags & AAC_FLAGS_NEW_COMM) ?
3452 		    AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0);
3453 		/* Setup the preferred settings */
3454 		ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3455 		ddi_put32(acc, &initp->MaxIoSize,
3456 		    (softs->aac_max_sectors << 9));
3457 		ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3458 	} else {
3459 		/*
3460 		 * Tells the adapter about the physical location of various
3461 		 * important shared data structures
3462 		 */
3463 		ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3464 		    comm_space_phyaddr + \
3465 		    offsetof(struct aac_comm_space, adapter_fibs));
3466 		ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3467 		ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3468 		ddi_put32(acc, &initp->AdapterFibsSize,
3469 		    AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3470 		ddi_put32(acc, &initp->PrintfBufferAddress,
3471 		    comm_space_phyaddr + \
3472 		    offsetof(struct aac_comm_space, adapter_print_buf));
3473 		ddi_put32(acc, &initp->PrintfBufferSize,
3474 		    AAC_ADAPTER_PRINT_BUFSIZE);
3475 		ddi_put32(acc, &initp->MiniPortRevision,
3476 		    AAC_INIT_STRUCT_MINIPORT_REVISION);
3477 		ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3478 
3479 		qoffset = (comm_space_phyaddr + \
3480 		    offsetof(struct aac_comm_space, qtable)) % \
3481 		    AAC_QUEUE_ALIGN;
3482 		if (qoffset)
3483 			qoffset = AAC_QUEUE_ALIGN - qoffset;
3484 		softs->qtablep = (struct aac_queue_table *) \
3485 		    ((char *)&softs->comm_space->qtable + qoffset);
3486 		ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3487 		    offsetof(struct aac_comm_space, qtable) + qoffset);
3488 
3489 		/* Init queue table */
3490 		ddi_put32(acc, &softs->qtablep-> \
3491 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3492 		    AAC_HOST_NORM_CMD_ENTRIES);
3493 		ddi_put32(acc, &softs->qtablep-> \
3494 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3495 		    AAC_HOST_NORM_CMD_ENTRIES);
3496 		ddi_put32(acc, &softs->qtablep-> \
3497 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3498 		    AAC_HOST_HIGH_CMD_ENTRIES);
3499 		ddi_put32(acc, &softs->qtablep-> \
3500 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3501 		    AAC_HOST_HIGH_CMD_ENTRIES);
3502 		ddi_put32(acc, &softs->qtablep-> \
3503 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3504 		    AAC_ADAP_NORM_CMD_ENTRIES);
3505 		ddi_put32(acc, &softs->qtablep-> \
3506 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3507 		    AAC_ADAP_NORM_CMD_ENTRIES);
3508 		ddi_put32(acc, &softs->qtablep-> \
3509 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3510 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3511 		ddi_put32(acc, &softs->qtablep-> \
3512 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3513 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3514 		ddi_put32(acc, &softs->qtablep-> \
3515 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3516 		    AAC_HOST_NORM_RESP_ENTRIES);
3517 		ddi_put32(acc, &softs->qtablep-> \
3518 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3519 		    AAC_HOST_NORM_RESP_ENTRIES);
3520 		ddi_put32(acc, &softs->qtablep-> \
3521 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3522 		    AAC_HOST_HIGH_RESP_ENTRIES);
3523 		ddi_put32(acc, &softs->qtablep-> \
3524 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3525 		    AAC_HOST_HIGH_RESP_ENTRIES);
3526 		ddi_put32(acc, &softs->qtablep-> \
3527 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3528 		    AAC_ADAP_NORM_RESP_ENTRIES);
3529 		ddi_put32(acc, &softs->qtablep-> \
3530 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3531 		    AAC_ADAP_NORM_RESP_ENTRIES);
3532 		ddi_put32(acc, &softs->qtablep-> \
3533 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3534 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3535 		ddi_put32(acc, &softs->qtablep-> \
3536 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3537 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3538 
3539 		/* Init queue entries */
3540 		softs->qentries[AAC_HOST_NORM_CMD_Q] =
3541 		    &softs->qtablep->qt_HostNormCmdQueue[0];
3542 		softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3543 		    &softs->qtablep->qt_HostHighCmdQueue[0];
3544 		softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3545 		    &softs->qtablep->qt_AdapNormCmdQueue[0];
3546 		softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3547 		    &softs->qtablep->qt_AdapHighCmdQueue[0];
3548 		softs->qentries[AAC_HOST_NORM_RESP_Q] =
3549 		    &softs->qtablep->qt_HostNormRespQueue[0];
3550 		softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3551 		    &softs->qtablep->qt_HostHighRespQueue[0];
3552 		softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3553 		    &softs->qtablep->qt_AdapNormRespQueue[0];
3554 		softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3555 		    &softs->qtablep->qt_AdapHighRespQueue[0];
3556 	}
3557 	(void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3558 
3559 	/* Send init structure to the card */
3560 	if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3561 	    comm_space_phyaddr + \
3562 	    offsetof(struct aac_comm_space, init_data),
3563 	    0, 0, 0, NULL) == AACERR) {
3564 		AACDB_PRINT(softs, CE_WARN,
3565 		    "Cannot send init structure to adapter");
3566 		return (AACERR);
3567 	}
3568 
3569 	return (AACOK);
3570 }
3571 
3572 static uchar_t *
3573 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3574 {
3575 	(void) memset(buf, ' ', AAC_VENDOR_LEN);
3576 	bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3577 	return (buf + AAC_VENDOR_LEN);
3578 }
3579 
3580 static uchar_t *
3581 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3582 {
3583 	(void) memset(buf, ' ', AAC_PRODUCT_LEN);
3584 	bcopy(softs->product_name, buf, strlen(softs->product_name));
3585 	return (buf + AAC_PRODUCT_LEN);
3586 }
3587 
3588 /*
3589  * Construct unit serial number from container uid
3590  */
3591 static uchar_t *
3592 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3593 {
3594 	int i, d;
3595 	uint32_t uid;
3596 
3597 	ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3598 
3599 	uid = softs->containers[tgt].uid;
3600 	for (i = 7; i >= 0; i--) {
3601 		d = uid & 0xf;
3602 		buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3603 		uid >>= 4;
3604 	}
3605 	return (buf + 8);
3606 }
3607 
3608 /*
3609  * SPC-3 7.5 INQUIRY command implementation
3610  */
3611 static void
3612 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3613     union scsi_cdb *cdbp, struct buf *bp)
3614 {
3615 	int tgt = pkt->pkt_address.a_target;
3616 	char *b_addr = NULL;
3617 	uchar_t page = cdbp->cdb_opaque[2];
3618 
3619 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3620 		/* Command Support Data is not supported */
3621 		aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3622 		return;
3623 	}
3624 
3625 	if (bp && bp->b_un.b_addr && bp->b_bcount) {
3626 		if (bp->b_flags & (B_PHYS | B_PAGEIO))
3627 			bp_mapin(bp);
3628 		b_addr = bp->b_un.b_addr;
3629 	}
3630 
3631 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3632 		uchar_t *vpdp = (uchar_t *)b_addr;
3633 		uchar_t *idp, *sp;
3634 
3635 		/* SPC-3 8.4 Vital product data parameters */
3636 		switch (page) {
3637 		case 0x00:
3638 			/* Supported VPD pages */
3639 			if (vpdp == NULL ||
3640 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3641 				return;
3642 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3643 			vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3644 			vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3645 
3646 			vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3647 			vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3648 			vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3649 
3650 			pkt->pkt_state |= STATE_XFERRED_DATA;
3651 			break;
3652 
3653 		case 0x80:
3654 			/* Unit serial number page */
3655 			if (vpdp == NULL ||
3656 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3657 				return;
3658 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3659 			vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3660 			vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3661 
3662 			sp = &vpdp[AAC_VPD_PAGE_DATA];
3663 			(void) aac_lun_serialno(softs, tgt, sp);
3664 
3665 			pkt->pkt_state |= STATE_XFERRED_DATA;
3666 			break;
3667 
3668 		case 0x83:
3669 			/* Device identification page */
3670 			if (vpdp == NULL ||
3671 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3672 				return;
3673 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3674 			vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3675 
3676 			idp = &vpdp[AAC_VPD_PAGE_DATA];
3677 			bzero(idp, AAC_VPD_ID_LENGTH);
3678 			idp[AAC_VPD_ID_CODESET] = 0x02;
3679 			idp[AAC_VPD_ID_TYPE] = 0x01;
3680 
3681 			/*
3682 			 * SPC-3 Table 111 - Identifier type
3683 			 * One recommanded method of constructing the remainder
3684 			 * of identifier field is to concatenate the product
3685 			 * identification field from the standard INQUIRY data
3686 			 * field and the product serial number field from the
3687 			 * unit serial number page.
3688 			 */
3689 			sp = &idp[AAC_VPD_ID_DATA];
3690 			sp = aac_vendor_id(softs, sp);
3691 			sp = aac_product_id(softs, sp);
3692 			sp = aac_lun_serialno(softs, tgt, sp);
3693 			idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3694 			    (uintptr_t)&idp[AAC_VPD_ID_DATA];
3695 
3696 			vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3697 			    (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3698 			pkt->pkt_state |= STATE_XFERRED_DATA;
3699 			break;
3700 
3701 		default:
3702 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3703 			    0x24, 0x00, 0);
3704 			break;
3705 		}
3706 	} else {
3707 		struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3708 		size_t len = sizeof (struct scsi_inquiry);
3709 
3710 		if (page != 0) {
3711 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3712 			    0x24, 0x00, 0);
3713 			return;
3714 		}
3715 		if (inqp == NULL || bp->b_bcount < len)
3716 			return;
3717 
3718 		bzero(inqp, len);
3719 		inqp->inq_len = AAC_ADDITIONAL_LEN;
3720 		inqp->inq_ansi = AAC_ANSI_VER;
3721 		inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3722 		(void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3723 		(void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3724 		bcopy("V1.0", inqp->inq_revision, 4);
3725 		inqp->inq_cmdque = 1; /* enable tagged-queuing */
3726 		/*
3727 		 * For "sd-max-xfer-size" property which may impact performance
3728 		 * when IO threads increase.
3729 		 */
3730 		inqp->inq_wbus32 = 1;
3731 
3732 		pkt->pkt_state |= STATE_XFERRED_DATA;
3733 	}
3734 }
3735 
3736 /*
3737  * SPC-3 7.10 MODE SENSE command implementation
3738  */
3739 static void
3740 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3741     union scsi_cdb *cdbp, struct buf *bp, int capacity)
3742 {
3743 	uchar_t pagecode;
3744 	struct mode_header *headerp;
3745 	struct mode_header_g1 *g1_headerp;
3746 	unsigned int ncyl;
3747 	caddr_t sense_data;
3748 	caddr_t next_page;
3749 	size_t sdata_size;
3750 	size_t pages_size;
3751 	int unsupport_page = 0;
3752 
3753 	ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3754 	    cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3755 
3756 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3757 		return;
3758 
3759 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
3760 		bp_mapin(bp);
3761 	pkt->pkt_state |= STATE_XFERRED_DATA;
3762 	pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3763 
3764 	/* calculate the size of needed buffer */
3765 	if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3766 		sdata_size = MODE_HEADER_LENGTH;
3767 	else /* must be SCMD_MODE_SENSE_G1 */
3768 		sdata_size = MODE_HEADER_LENGTH_G1;
3769 
3770 	pages_size = 0;
3771 	switch (pagecode) {
3772 	case SD_MODE_SENSE_PAGE3_CODE:
3773 		pages_size += sizeof (struct mode_format);
3774 		break;
3775 
3776 	case SD_MODE_SENSE_PAGE4_CODE:
3777 		pages_size += sizeof (struct mode_geometry);
3778 		break;
3779 
3780 	case MODEPAGE_CTRL_MODE:
3781 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3782 			pages_size += sizeof (struct mode_control_scsi3);
3783 		} else {
3784 			unsupport_page = 1;
3785 		}
3786 		break;
3787 
3788 	case MODEPAGE_ALLPAGES:
3789 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3790 			pages_size += sizeof (struct mode_format) +
3791 			    sizeof (struct mode_geometry) +
3792 			    sizeof (struct mode_control_scsi3);
3793 		} else {
3794 			pages_size += sizeof (struct mode_format) +
3795 			    sizeof (struct mode_geometry);
3796 		}
3797 		break;
3798 
3799 	default:
3800 		/* unsupported pages */
3801 		unsupport_page = 1;
3802 	}
3803 
3804 	/* allocate buffer to fill the send data */
3805 	sdata_size += pages_size;
3806 	sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
3807 
3808 	if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
3809 		headerp = (struct mode_header *)sense_data;
3810 		headerp->length = MODE_HEADER_LENGTH + pages_size -
3811 		    sizeof (headerp->length);
3812 		headerp->bdesc_length = 0;
3813 		next_page = sense_data + sizeof (struct mode_header);
3814 	} else {
3815 		g1_headerp = (void *)sense_data;
3816 		g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
3817 		    sizeof (g1_headerp->length));
3818 		g1_headerp->bdesc_length = 0;
3819 		next_page = sense_data + sizeof (struct mode_header_g1);
3820 	}
3821 
3822 	if (unsupport_page)
3823 		goto finish;
3824 
3825 	if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
3826 	    pagecode == MODEPAGE_ALLPAGES) {
3827 		/* SBC-3 7.1.3.3 Format device page */
3828 		struct mode_format *page3p;
3829 
3830 		page3p = (void *)next_page;
3831 		page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
3832 		page3p->mode_page.length = sizeof (struct mode_format);
3833 		page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
3834 		page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
3835 
3836 		next_page += sizeof (struct mode_format);
3837 	}
3838 
3839 	if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
3840 	    pagecode == MODEPAGE_ALLPAGES) {
3841 		/* SBC-3 7.1.3.8 Rigid disk device geometry page */
3842 		struct mode_geometry *page4p;
3843 
3844 		page4p = (void *)next_page;
3845 		page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
3846 		page4p->mode_page.length = sizeof (struct mode_geometry);
3847 		page4p->heads = AAC_NUMBER_OF_HEADS;
3848 		page4p->rpm = BE_16(AAC_ROTATION_SPEED);
3849 		ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
3850 		page4p->cyl_lb = ncyl & 0xff;
3851 		page4p->cyl_mb = (ncyl >> 8) & 0xff;
3852 		page4p->cyl_ub = (ncyl >> 16) & 0xff;
3853 
3854 		next_page += sizeof (struct mode_geometry);
3855 	}
3856 
3857 	if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
3858 	    softs->flags & AAC_FLAGS_LBA_64BIT) {
3859 		/* 64-bit LBA need large sense data */
3860 		struct mode_control_scsi3 *mctl;
3861 
3862 		mctl = (void *)next_page;
3863 		mctl->mode_page.code = MODEPAGE_CTRL_MODE;
3864 		mctl->mode_page.length =
3865 		    sizeof (struct mode_control_scsi3) -
3866 		    sizeof (struct mode_page);
3867 		mctl->d_sense = 1;
3868 	}
3869 
3870 finish:
3871 	/* copyout the valid data. */
3872 	bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
3873 	kmem_free(sense_data, sdata_size);
3874 }
3875 
3876 static int
3877 aac_name_node(dev_info_t *dip, char *name, int len)
3878 {
3879 	int tgt, lun;
3880 
3881 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3882 	    DDI_PROP_DONTPASS, "target", -1);
3883 	if (tgt == -1)
3884 		return (DDI_FAILURE);
3885 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3886 	    DDI_PROP_DONTPASS, "lun", -1);
3887 	if (lun == -1)
3888 		return (DDI_FAILURE);
3889 
3890 	(void) snprintf(name, len, "%x,%x", tgt, lun);
3891 	return (DDI_SUCCESS);
3892 }
3893 
3894 /*ARGSUSED*/
3895 static int
3896 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3897     scsi_hba_tran_t *tran, struct scsi_device *sd)
3898 {
3899 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
3900 #if defined(DEBUG) || defined(__lock_lint)
3901 	int ctl = ddi_get_instance(softs->devinfo_p);
3902 #endif
3903 	uint16_t tgt = sd->sd_address.a_target;
3904 	uint8_t lun = sd->sd_address.a_lun;
3905 	struct aac_device *dvp;
3906 
3907 	DBCALLED(softs, 2);
3908 
3909 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3910 		/*
3911 		 * If no persistent node exist, we don't allow .conf node
3912 		 * to be created.
3913 		 */
3914 		if (aac_find_child(softs, tgt, lun) != NULL) {
3915 			if (ndi_merge_node(tgt_dip, aac_name_node) !=
3916 			    DDI_SUCCESS)
3917 				/* Create this .conf node */
3918 				return (DDI_SUCCESS);
3919 		}
3920 		return (DDI_FAILURE);
3921 	}
3922 
3923 	/*
3924 	 * Only support container/phys. device that has been
3925 	 * detected and valid
3926 	 */
3927 	mutex_enter(&softs->io_lock);
3928 	if (tgt >= AAC_MAX_DEV(softs)) {
3929 		AACDB_PRINT_TRAN(softs,
3930 		    "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
3931 		mutex_exit(&softs->io_lock);
3932 		return (DDI_FAILURE);
3933 	}
3934 
3935 	if (tgt < AAC_MAX_LD) {
3936 		dvp = (struct aac_device *)&softs->containers[tgt];
3937 		if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
3938 			AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
3939 			    ctl, tgt, lun);
3940 			mutex_exit(&softs->io_lock);
3941 			return (DDI_FAILURE);
3942 		}
3943 		/*
3944 		 * Save the tgt_dip for the given target if one doesn't exist
3945 		 * already. Dip's for non-existance tgt's will be cleared in
3946 		 * tgt_free.
3947 		 */
3948 		if (softs->containers[tgt].dev.dip == NULL &&
3949 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
3950 			softs->containers[tgt].dev.dip = tgt_dip;
3951 	} else {
3952 		dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
3953 	}
3954 
3955 	if (softs->flags & AAC_FLAGS_BRKUP) {
3956 		if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
3957 		    "buf_break", 1) != DDI_PROP_SUCCESS) {
3958 			cmn_err(CE_CONT, "unable to create "
3959 			    "property for t%dL%d (buf_break)", tgt, lun);
3960 		}
3961 	}
3962 
3963 	AACDB_PRINT(softs, CE_NOTE,
3964 	    "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
3965 	    (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
3966 	mutex_exit(&softs->io_lock);
3967 	return (DDI_SUCCESS);
3968 }
3969 
3970 static void
3971 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3972     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3973 {
3974 #ifndef __lock_lint
3975 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
3976 #endif
3977 
3978 	struct aac_softstate *softs = SD2AAC(sd);
3979 	int tgt = sd->sd_address.a_target;
3980 
3981 	mutex_enter(&softs->io_lock);
3982 	if (tgt < AAC_MAX_LD) {
3983 		if (softs->containers[tgt].dev.dip == tgt_dip)
3984 			softs->containers[tgt].dev.dip = NULL;
3985 	} else {
3986 		softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
3987 	}
3988 	mutex_exit(&softs->io_lock);
3989 }
3990 
3991 /*
3992  * Check if the firmware is Up And Running. If it is in the Kernel Panic
3993  * state, (BlinkLED code + 1) is returned.
3994  *    0 -- firmware up and running
3995  *   -1 -- firmware dead
3996  *   >0 -- firmware kernel panic
3997  */
3998 static int
3999 aac_check_adapter_health(struct aac_softstate *softs)
4000 {
4001 	int rval;
4002 
4003 	rval = PCI_MEM_GET32(softs, AAC_OMR0);
4004 
4005 	if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4006 		rval = 0;
4007 	} else if (rval & AAC_KERNEL_PANIC) {
4008 		cmn_err(CE_WARN, "firmware panic");
4009 		rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4010 	} else {
4011 		cmn_err(CE_WARN, "firmware dead");
4012 		rval = -1;
4013 	}
4014 	return (rval);
4015 }
4016 
4017 static void
4018 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4019     uchar_t reason)
4020 {
4021 	acp->flags |= AAC_CMD_ABORT;
4022 
4023 	if (acp->pkt) {
4024 		/*
4025 		 * Each lun should generate a unit attention
4026 		 * condition when reset.
4027 		 * Phys. drives are treated as logical ones
4028 		 * during error recovery.
4029 		 */
4030 		if (acp->slotp) { /* outstanding cmd */
4031 			acp->pkt->pkt_state |= STATE_GOT_STATUS;
4032 			aac_set_arq_data_reset(softs, acp);
4033 		}
4034 
4035 		switch (reason) {
4036 		case CMD_TIMEOUT:
4037 			AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4038 			    acp);
4039 			aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4040 			    STAT_TIMEOUT | STAT_BUS_RESET);
4041 			break;
4042 		case CMD_RESET:
4043 			/* aac support only RESET_ALL */
4044 			AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4045 			aac_set_pkt_reason(softs, acp, CMD_RESET,
4046 			    STAT_BUS_RESET);
4047 			break;
4048 		case CMD_ABORTED:
4049 			AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4050 			    acp);
4051 			aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4052 			    STAT_ABORTED);
4053 			break;
4054 		}
4055 	}
4056 	aac_end_io(softs, acp);
4057 }
4058 
4059 /*
4060  * Abort all the pending commands of type iocmd or just the command pkt
4061  * corresponding to pkt
4062  */
4063 static void
4064 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4065     int reason)
4066 {
4067 	struct aac_cmd *ac_arg, *acp;
4068 	int i;
4069 
4070 	if (pkt == NULL) {
4071 		ac_arg = NULL;
4072 	} else {
4073 		ac_arg = PKT2AC(pkt);
4074 		iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4075 		    AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4076 	}
4077 
4078 	/*
4079 	 * a) outstanding commands on the controller
4080 	 * Note: should abort outstanding commands only after one
4081 	 * IOP reset has been done.
4082 	 */
4083 	if (iocmd & AAC_IOCMD_OUTSTANDING) {
4084 		struct aac_cmd *acp;
4085 
4086 		for (i = 0; i < AAC_MAX_LD; i++) {
4087 			if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4088 				softs->containers[i].reset = 1;
4089 		}
4090 		while ((acp = softs->q_busy.q_head) != NULL)
4091 			aac_abort_iocmd(softs, acp, reason);
4092 	}
4093 
4094 	/* b) commands in the waiting queues */
4095 	for (i = 0; i < AAC_CMDQ_NUM; i++) {
4096 		if (iocmd & (1 << i)) {
4097 			if (ac_arg) {
4098 				aac_abort_iocmd(softs, ac_arg, reason);
4099 			} else {
4100 				while ((acp = softs->q_wait[i].q_head) != NULL)
4101 					aac_abort_iocmd(softs, acp, reason);
4102 			}
4103 		}
4104 	}
4105 }
4106 
4107 /*
4108  * The draining thread is shared among quiesce threads. It terminates
4109  * when the adapter is quiesced or stopped by aac_stop_drain().
4110  */
4111 static void
4112 aac_check_drain(void *arg)
4113 {
4114 	struct aac_softstate *softs = arg;
4115 
4116 	mutex_enter(&softs->io_lock);
4117 	if (softs->ndrains) {
4118 		softs->drain_timeid = 0;
4119 		/*
4120 		 * If both ASYNC and SYNC bus throttle are held,
4121 		 * wake up threads only when both are drained out.
4122 		 */
4123 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4124 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4125 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4126 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4127 			cv_broadcast(&softs->drain_cv);
4128 		else
4129 			softs->drain_timeid = timeout(aac_check_drain, softs,
4130 			    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4131 	}
4132 	mutex_exit(&softs->io_lock);
4133 }
4134 
4135 /*
4136  * If not draining the outstanding cmds, drain them. Otherwise,
4137  * only update ndrains.
4138  */
4139 static void
4140 aac_start_drain(struct aac_softstate *softs)
4141 {
4142 	if (softs->ndrains == 0) {
4143 		ASSERT(softs->drain_timeid == 0);
4144 		softs->drain_timeid = timeout(aac_check_drain, softs,
4145 		    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4146 	}
4147 	softs->ndrains++;
4148 }
4149 
4150 /*
4151  * Stop the draining thread when no other threads use it any longer.
4152  * Side effect: io_lock may be released in the middle.
4153  */
4154 static void
4155 aac_stop_drain(struct aac_softstate *softs)
4156 {
4157 	softs->ndrains--;
4158 	if (softs->ndrains == 0) {
4159 		if (softs->drain_timeid != 0) {
4160 			timeout_id_t tid = softs->drain_timeid;
4161 
4162 			softs->drain_timeid = 0;
4163 			mutex_exit(&softs->io_lock);
4164 			(void) untimeout(tid);
4165 			mutex_enter(&softs->io_lock);
4166 		}
4167 	}
4168 }
4169 
4170 /*
4171  * The following function comes from Adaptec:
4172  *
4173  * Once do an IOP reset, basically the driver have to re-initialize the card
4174  * as if up from a cold boot, and the driver is responsible for any IO that
4175  * is outstanding to the adapter at the time of the IOP RESET. And prepare
4176  * for IOP RESET by making the init code modular with the ability to call it
4177  * from multiple places.
4178  */
4179 static int
4180 aac_reset_adapter(struct aac_softstate *softs)
4181 {
4182 	int health;
4183 	uint32_t status;
4184 	int rval = AAC_IOP_RESET_FAILED;
4185 
4186 	DBCALLED(softs, 1);
4187 
4188 	ASSERT(softs->state & AAC_STATE_RESET);
4189 
4190 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4191 	/* Disable interrupt */
4192 	AAC_DISABLE_INTR(softs);
4193 
4194 	health = aac_check_adapter_health(softs);
4195 	if (health == -1) {
4196 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4197 		goto finish;
4198 	}
4199 	if (health == 0) /* flush drives if possible */
4200 		(void) aac_shutdown(softs);
4201 
4202 	/* Execute IOP reset */
4203 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4204 	    &status)) != AACOK) {
4205 		ddi_acc_handle_t acc = softs->comm_space_acc_handle;
4206 		struct aac_fib *fibp;
4207 		struct aac_pause_command *pc;
4208 
4209 		if ((status & 0xf) == 0xf) {
4210 			uint32_t wait_count;
4211 
4212 			/*
4213 			 * Sunrise Lake has dual cores and we must drag the
4214 			 * other core with us to reset simultaneously. There
4215 			 * are 2 bits in the Inbound Reset Control and Status
4216 			 * Register (offset 0x38) of the Sunrise Lake to reset
4217 			 * the chip without clearing out the PCI configuration
4218 			 * info (COMMAND & BARS).
4219 			 */
4220 			PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4221 
4222 			/*
4223 			 * We need to wait for 5 seconds before accessing the MU
4224 			 * again 10000 * 100us = 1000,000us = 1000ms = 1s
4225 			 */
4226 			wait_count = 5 * 10000;
4227 			while (wait_count) {
4228 				drv_usecwait(100); /* delay 100 microseconds */
4229 				wait_count--;
4230 			}
4231 		} else {
4232 			if (status == SRB_STATUS_INVALID_REQUEST)
4233 				cmn_err(CE_WARN, "!IOP_RESET not supported");
4234 			else /* probably timeout */
4235 				cmn_err(CE_WARN, "!IOP_RESET failed");
4236 
4237 			/* Unwind aac_shutdown() */
4238 			fibp = softs->sync_slot.fibp;
4239 			pc = (struct aac_pause_command *)&fibp->data[0];
4240 
4241 			bzero(pc, sizeof (*pc));
4242 			ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4243 			ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4244 			ddi_put32(acc, &pc->Timeout, 1);
4245 			ddi_put32(acc, &pc->Min, 1);
4246 			ddi_put32(acc, &pc->NoRescan, 1);
4247 
4248 			(void) aac_sync_fib(softs, ContainerCommand,
4249 			    AAC_FIB_SIZEOF(struct aac_pause_command));
4250 
4251 			if (aac_check_adapter_health(softs) != 0)
4252 				ddi_fm_service_impact(softs->devinfo_p,
4253 				    DDI_SERVICE_LOST);
4254 			else
4255 				/*
4256 				 * IOP reset not supported or IOP not reseted
4257 				 */
4258 				rval = AAC_IOP_RESET_ABNORMAL;
4259 			goto finish;
4260 		}
4261 	}
4262 
4263 	/*
4264 	 * Re-read and renegotiate the FIB parameters, as one of the actions
4265 	 * that can result from an IOP reset is the running of a new firmware
4266 	 * image.
4267 	 */
4268 	if (aac_common_attach(softs) != AACOK)
4269 		goto finish;
4270 
4271 	rval = AAC_IOP_RESET_SUCCEED;
4272 
4273 finish:
4274 	AAC_ENABLE_INTR(softs);
4275 	return (rval);
4276 }
4277 
4278 static void
4279 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4280     int throttle)
4281 {
4282 	/*
4283 	 * If the bus is draining/quiesced, no changes to the throttles
4284 	 * are allowed. All throttles should have been set to 0.
4285 	 */
4286 	if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4287 		return;
4288 	dvp->throttle[q] = throttle;
4289 }
4290 
4291 static void
4292 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4293 {
4294 	int i, q;
4295 
4296 	/* Hold bus by holding every device on the bus */
4297 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4298 		if (iocmds & (1 << q)) {
4299 			softs->bus_throttle[q] = 0;
4300 			for (i = 0; i < AAC_MAX_LD; i++)
4301 				aac_set_throttle(softs,
4302 				    &softs->containers[i].dev, q, 0);
4303 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4304 				aac_set_throttle(softs,
4305 				    &softs->nondasds[i].dev, q, 0);
4306 		}
4307 	}
4308 }
4309 
4310 static void
4311 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4312 {
4313 	int i, q;
4314 
4315 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4316 		if (iocmds & (1 << q)) {
4317 			/*
4318 			 * Should not unhold AAC_IOCMD_ASYNC bus, if it has been
4319 			 * quiesced or being drained by possibly some quiesce
4320 			 * threads.
4321 			 */
4322 			if (q == AAC_CMDQ_ASYNC && ((softs->state &
4323 			    AAC_STATE_QUIESCED) || softs->ndrains))
4324 				continue;
4325 			softs->bus_throttle[q] = softs->total_slots;
4326 			for (i = 0; i < AAC_MAX_LD; i++)
4327 				aac_set_throttle(softs,
4328 				    &softs->containers[i].dev,
4329 				    q, softs->total_slots);
4330 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4331 				aac_set_throttle(softs, &softs->nondasds[i].dev,
4332 				    q, softs->total_slots);
4333 		}
4334 	}
4335 }
4336 
4337 static int
4338 aac_do_reset(struct aac_softstate *softs)
4339 {
4340 	int health;
4341 	int rval;
4342 
4343 	softs->state |= AAC_STATE_RESET;
4344 	health = aac_check_adapter_health(softs);
4345 
4346 	/*
4347 	 * Hold off new io commands and wait all outstanding io
4348 	 * commands to complete.
4349 	 */
4350 	if (health == 0) {
4351 		int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4352 		int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4353 
4354 		if (sync_cmds == 0 && async_cmds == 0) {
4355 			rval = AAC_IOP_RESET_SUCCEED;
4356 			goto finish;
4357 		}
4358 		/*
4359 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4360 		 * to complete the outstanding io commands
4361 		 */
4362 		int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4363 		int (*intr_handler)(struct aac_softstate *);
4364 
4365 		aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4366 		/*
4367 		 * Poll the adapter by ourselves in case interrupt is disabled
4368 		 * and to avoid releasing the io_lock.
4369 		 */
4370 		intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4371 		    aac_process_intr_new : aac_process_intr_old;
4372 		while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4373 		    softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4374 			drv_usecwait(100);
4375 			(void) intr_handler(softs);
4376 			timeout--;
4377 		}
4378 		aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4379 
4380 		if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4381 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4382 			/* Cmds drained out */
4383 			rval = AAC_IOP_RESET_SUCCEED;
4384 			goto finish;
4385 		} else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4386 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4387 			/* Cmds not drained out, adapter overloaded */
4388 			rval = AAC_IOP_RESET_ABNORMAL;
4389 			goto finish;
4390 		}
4391 	}
4392 
4393 	/*
4394 	 * If a longer waiting time still can't drain any outstanding io
4395 	 * commands, do IOP reset.
4396 	 */
4397 	if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4398 		softs->state |= AAC_STATE_DEAD;
4399 
4400 finish:
4401 	softs->state &= ~AAC_STATE_RESET;
4402 	return (rval);
4403 }
4404 
4405 static int
4406 aac_tran_reset(struct scsi_address *ap, int level)
4407 {
4408 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4409 	int rval;
4410 
4411 	DBCALLED(softs, 1);
4412 
4413 	if (level != RESET_ALL) {
4414 		cmn_err(CE_NOTE, "!reset target/lun not supported");
4415 		return (0);
4416 	}
4417 
4418 	mutex_enter(&softs->io_lock);
4419 	switch (rval = aac_do_reset(softs)) {
4420 	case AAC_IOP_RESET_SUCCEED:
4421 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4422 		    NULL, CMD_RESET);
4423 		aac_start_waiting_io(softs);
4424 		break;
4425 	case AAC_IOP_RESET_FAILED:
4426 		/* Abort IOCTL cmds when adapter is dead */
4427 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4428 		break;
4429 	case AAC_IOP_RESET_ABNORMAL:
4430 		aac_start_waiting_io(softs);
4431 	}
4432 	mutex_exit(&softs->io_lock);
4433 
4434 	aac_drain_comp_q(softs);
4435 	return (rval == 0);
4436 }
4437 
4438 static int
4439 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4440 {
4441 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4442 
4443 	DBCALLED(softs, 1);
4444 
4445 	mutex_enter(&softs->io_lock);
4446 	aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4447 	mutex_exit(&softs->io_lock);
4448 
4449 	aac_drain_comp_q(softs);
4450 	return (1);
4451 }
4452 
4453 void
4454 aac_free_dmamap(struct aac_cmd *acp)
4455 {
4456 	/* Free dma mapping */
4457 	if (acp->flags & AAC_CMD_DMA_VALID) {
4458 		ASSERT(acp->buf_dma_handle);
4459 		(void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4460 		acp->flags &= ~AAC_CMD_DMA_VALID;
4461 	}
4462 
4463 	if (acp->abp != NULL) { /* free non-aligned buf DMA */
4464 		ASSERT(acp->buf_dma_handle);
4465 		if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4466 			ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4467 			    (uint8_t *)acp->abp, acp->bp->b_bcount,
4468 			    DDI_DEV_AUTOINCR);
4469 		ddi_dma_mem_free(&acp->abh);
4470 		acp->abp = NULL;
4471 	}
4472 
4473 	if (acp->buf_dma_handle) {
4474 		ddi_dma_free_handle(&acp->buf_dma_handle);
4475 		acp->buf_dma_handle = NULL;
4476 	}
4477 }
4478 
4479 static void
4480 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4481 {
4482 	AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4483 	    ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4484 	aac_free_dmamap(acp);
4485 	aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4486 	aac_soft_callback(softs, acp);
4487 }
4488 
4489 /*
4490  * Handle command to logical device
4491  */
4492 static int
4493 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4494 {
4495 	struct aac_container *dvp;
4496 	struct scsi_pkt *pkt;
4497 	union scsi_cdb *cdbp;
4498 	struct buf *bp;
4499 	int rval;
4500 
4501 	dvp = (struct aac_container *)acp->dvp;
4502 	pkt = acp->pkt;
4503 	cdbp = (void *)pkt->pkt_cdbp;
4504 	bp = acp->bp;
4505 
4506 	switch (cdbp->scc_cmd) {
4507 	case SCMD_INQUIRY: /* inquiry */
4508 		aac_free_dmamap(acp);
4509 		aac_inquiry(softs, pkt, cdbp, bp);
4510 		aac_soft_callback(softs, acp);
4511 		rval = TRAN_ACCEPT;
4512 		break;
4513 
4514 	case SCMD_READ_CAPACITY: /* read capacity */
4515 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4516 			struct scsi_capacity cap;
4517 			uint64_t last_lba;
4518 
4519 			/* check 64-bit LBA */
4520 			last_lba = dvp->size - 1;
4521 			if (last_lba > 0xffffffffull) {
4522 				cap.capacity = 0xfffffffful;
4523 			} else {
4524 				cap.capacity = BE_32(last_lba);
4525 			}
4526 			cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4527 
4528 			aac_free_dmamap(acp);
4529 			if (bp->b_flags & (B_PHYS|B_PAGEIO))
4530 				bp_mapin(bp);
4531 			bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4532 			pkt->pkt_state |= STATE_XFERRED_DATA;
4533 		}
4534 		aac_soft_callback(softs, acp);
4535 		rval = TRAN_ACCEPT;
4536 		break;
4537 
4538 	case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4539 		/* Check if containers need 64-bit LBA support */
4540 		if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4541 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
4542 				struct scsi_capacity_16 cap16;
4543 				int cap_len = sizeof (struct scsi_capacity_16);
4544 
4545 				bzero(&cap16, cap_len);
4546 				cap16.sc_capacity = BE_64(dvp->size - 1);
4547 				cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4548 
4549 				aac_free_dmamap(acp);
4550 				if (bp->b_flags & (B_PHYS | B_PAGEIO))
4551 					bp_mapin(bp);
4552 				bcopy(&cap16, bp->b_un.b_addr,
4553 				    min(bp->b_bcount, cap_len));
4554 				pkt->pkt_state |= STATE_XFERRED_DATA;
4555 			}
4556 			aac_soft_callback(softs, acp);
4557 		} else {
4558 			aac_unknown_scmd(softs, acp);
4559 		}
4560 		rval = TRAN_ACCEPT;
4561 		break;
4562 
4563 	case SCMD_READ_G4: /* read_16 */
4564 	case SCMD_WRITE_G4: /* write_16 */
4565 		if (softs->flags & AAC_FLAGS_RAW_IO) {
4566 			/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4567 			acp->blkno = ((uint64_t) \
4568 			    GETG4ADDR(cdbp) << 32) | \
4569 			    (uint32_t)GETG4ADDRTL(cdbp);
4570 			goto do_io;
4571 		}
4572 		AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4573 		aac_unknown_scmd(softs, acp);
4574 		rval = TRAN_ACCEPT;
4575 		break;
4576 
4577 	case SCMD_READ: /* read_6 */
4578 	case SCMD_WRITE: /* write_6 */
4579 		acp->blkno = GETG0ADDR(cdbp);
4580 		goto do_io;
4581 
4582 	case SCMD_READ_G5: /* read_12 */
4583 	case SCMD_WRITE_G5: /* write_12 */
4584 		acp->blkno = GETG5ADDR(cdbp);
4585 		goto do_io;
4586 
4587 	case SCMD_READ_G1: /* read_10 */
4588 	case SCMD_WRITE_G1: /* write_10 */
4589 		acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4590 do_io:
4591 		if (acp->flags & AAC_CMD_DMA_VALID) {
4592 			uint64_t cnt_size = dvp->size;
4593 
4594 			/*
4595 			 * If LBA > array size AND rawio, the
4596 			 * adapter may hang. So check it before
4597 			 * sending.
4598 			 * NOTE: (blkno + blkcnt) may overflow
4599 			 */
4600 			if ((acp->blkno < cnt_size) &&
4601 			    ((acp->blkno + acp->bcount /
4602 			    AAC_BLK_SIZE) <= cnt_size)) {
4603 				rval = aac_do_io(softs, acp);
4604 			} else {
4605 			/*
4606 			 * Request exceeds the capacity of disk,
4607 			 * set error block number to last LBA
4608 			 * + 1.
4609 			 */
4610 				aac_set_arq_data(pkt,
4611 				    KEY_ILLEGAL_REQUEST, 0x21,
4612 				    0x00, cnt_size);
4613 				aac_soft_callback(softs, acp);
4614 				rval = TRAN_ACCEPT;
4615 			}
4616 		} else if (acp->bcount == 0) {
4617 			/* For 0 length IO, just return ok */
4618 			aac_soft_callback(softs, acp);
4619 			rval = TRAN_ACCEPT;
4620 		} else {
4621 			rval = TRAN_BADPKT;
4622 		}
4623 		break;
4624 
4625 	case SCMD_MODE_SENSE: /* mode_sense_6 */
4626 	case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4627 		int capacity;
4628 
4629 		aac_free_dmamap(acp);
4630 		if (dvp->size > 0xffffffffull)
4631 			capacity = 0xfffffffful; /* 64-bit LBA */
4632 		else
4633 			capacity = dvp->size;
4634 		aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4635 		aac_soft_callback(softs, acp);
4636 		rval = TRAN_ACCEPT;
4637 		break;
4638 	}
4639 
4640 	case SCMD_TEST_UNIT_READY:
4641 	case SCMD_REQUEST_SENSE:
4642 	case SCMD_FORMAT:
4643 	case SCMD_START_STOP:
4644 		aac_free_dmamap(acp);
4645 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4646 			if (acp->flags & AAC_CMD_BUF_READ) {
4647 				if (bp->b_flags & (B_PHYS|B_PAGEIO))
4648 					bp_mapin(bp);
4649 				bzero(bp->b_un.b_addr, bp->b_bcount);
4650 			}
4651 			pkt->pkt_state |= STATE_XFERRED_DATA;
4652 		}
4653 		aac_soft_callback(softs, acp);
4654 		rval = TRAN_ACCEPT;
4655 		break;
4656 
4657 	case SCMD_SYNCHRONIZE_CACHE:
4658 		acp->flags |= AAC_CMD_NTAG;
4659 		acp->aac_cmd_fib = aac_cmd_fib_sync;
4660 		acp->ac_comp = aac_synccache_complete;
4661 		rval = aac_do_io(softs, acp);
4662 		break;
4663 
4664 	case SCMD_DOORLOCK:
4665 		aac_free_dmamap(acp);
4666 		dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4667 		aac_soft_callback(softs, acp);
4668 		rval = TRAN_ACCEPT;
4669 		break;
4670 
4671 	default: /* unknown command */
4672 		aac_unknown_scmd(softs, acp);
4673 		rval = TRAN_ACCEPT;
4674 		break;
4675 	}
4676 
4677 	return (rval);
4678 }
4679 
4680 static int
4681 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4682 {
4683 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4684 	struct aac_cmd *acp = PKT2AC(pkt);
4685 	struct aac_device *dvp = acp->dvp;
4686 	int rval;
4687 
4688 	DBCALLED(softs, 2);
4689 
4690 	/*
4691 	 * Reinitialize some fields of ac and pkt; the packet may
4692 	 * have been resubmitted
4693 	 */
4694 	acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4695 	    AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4696 	acp->timeout = acp->pkt->pkt_time;
4697 	if (pkt->pkt_flags & FLAG_NOINTR)
4698 		acp->flags |= AAC_CMD_NO_INTR;
4699 #ifdef DEBUG
4700 	acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4701 #endif
4702 	pkt->pkt_reason = CMD_CMPLT;
4703 	pkt->pkt_state = 0;
4704 	pkt->pkt_statistics = 0;
4705 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4706 
4707 	if (acp->flags & AAC_CMD_DMA_VALID) {
4708 		pkt->pkt_resid = acp->bcount;
4709 		/* Consistent packets need to be sync'ed first */
4710 		if ((acp->flags & AAC_CMD_CONSISTENT) &&
4711 		    (acp->flags & AAC_CMD_BUF_WRITE))
4712 			if (aac_dma_sync_ac(acp) != AACOK) {
4713 				ddi_fm_service_impact(softs->devinfo_p,
4714 				    DDI_SERVICE_UNAFFECTED);
4715 				return (TRAN_BADPKT);
4716 			}
4717 	} else {
4718 		pkt->pkt_resid = 0;
4719 	}
4720 
4721 	mutex_enter(&softs->io_lock);
4722 	AACDB_PRINT_SCMD(softs, acp);
4723 	if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4724 	    !(softs->state & AAC_STATE_DEAD)) {
4725 		if (dvp->type == AAC_DEV_LD) {
4726 			if (ap->a_lun == 0)
4727 				rval = aac_tran_start_ld(softs, acp);
4728 			else
4729 				goto error;
4730 		} else {
4731 			rval = aac_do_io(softs, acp);
4732 		}
4733 	} else {
4734 error:
4735 #ifdef DEBUG
4736 		if (!(softs->state & AAC_STATE_DEAD)) {
4737 			AACDB_PRINT_TRAN(softs,
4738 			    "Cannot send cmd to target t%dL%d: %s",
4739 			    ap->a_target, ap->a_lun,
4740 			    "target invalid");
4741 		} else {
4742 			AACDB_PRINT(softs, CE_WARN,
4743 			    "Cannot send cmd to target t%dL%d: %s",
4744 			    ap->a_target, ap->a_lun,
4745 			    "adapter dead");
4746 		}
4747 #endif
4748 		rval = TRAN_FATAL_ERROR;
4749 	}
4750 	mutex_exit(&softs->io_lock);
4751 	return (rval);
4752 }
4753 
4754 static int
4755 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4756 {
4757 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4758 	struct aac_device *dvp;
4759 	int rval;
4760 
4761 	DBCALLED(softs, 2);
4762 
4763 	/* We don't allow inquiring about capabilities for other targets */
4764 	if (cap == NULL || whom == 0) {
4765 		AACDB_PRINT(softs, CE_WARN,
4766 		    "GetCap> %s not supported: whom=%d", cap, whom);
4767 		return (-1);
4768 	}
4769 
4770 	mutex_enter(&softs->io_lock);
4771 	dvp = AAC_DEV(softs, ap->a_target);
4772 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4773 		mutex_exit(&softs->io_lock);
4774 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
4775 		    ap->a_target, ap->a_lun);
4776 		return (-1);
4777 	}
4778 
4779 	switch (scsi_hba_lookup_capstr(cap)) {
4780 	case SCSI_CAP_ARQ: /* auto request sense */
4781 		rval = 1;
4782 		break;
4783 	case SCSI_CAP_UNTAGGED_QING:
4784 	case SCSI_CAP_TAGGED_QING:
4785 		rval = 1;
4786 		break;
4787 	case SCSI_CAP_DMA_MAX:
4788 		rval = softs->dma_max;
4789 		break;
4790 	default:
4791 		rval = -1;
4792 		break;
4793 	}
4794 	mutex_exit(&softs->io_lock);
4795 
4796 	AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
4797 	    cap, ap->a_target, ap->a_lun, rval);
4798 	return (rval);
4799 }
4800 
4801 /*ARGSUSED*/
4802 static int
4803 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4804 {
4805 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4806 	struct aac_device *dvp;
4807 	int rval;
4808 
4809 	DBCALLED(softs, 2);
4810 
4811 	/* We don't allow inquiring about capabilities for other targets */
4812 	if (cap == NULL || whom == 0) {
4813 		AACDB_PRINT(softs, CE_WARN,
4814 		    "SetCap> %s not supported: whom=%d", cap, whom);
4815 		return (-1);
4816 	}
4817 
4818 	mutex_enter(&softs->io_lock);
4819 	dvp = AAC_DEV(softs, ap->a_target);
4820 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4821 		mutex_exit(&softs->io_lock);
4822 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
4823 		    ap->a_target, ap->a_lun);
4824 		return (-1);
4825 	}
4826 
4827 	switch (scsi_hba_lookup_capstr(cap)) {
4828 	case SCSI_CAP_ARQ:
4829 		/* Force auto request sense */
4830 		rval = (value == 1) ? 1 : 0;
4831 		break;
4832 	case SCSI_CAP_UNTAGGED_QING:
4833 	case SCSI_CAP_TAGGED_QING:
4834 		rval = (value == 1) ? 1 : 0;
4835 		break;
4836 	default:
4837 		rval = -1;
4838 		break;
4839 	}
4840 	mutex_exit(&softs->io_lock);
4841 
4842 	AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
4843 	    cap, ap->a_target, ap->a_lun, value, rval);
4844 	return (rval);
4845 }
4846 
4847 static void
4848 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4849 {
4850 	struct aac_cmd *acp = PKT2AC(pkt);
4851 
4852 	DBCALLED(NULL, 2);
4853 
4854 	if (acp->sgt) {
4855 		kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4856 		    acp->left_cookien);
4857 	}
4858 	aac_free_dmamap(acp);
4859 	ASSERT(acp->slotp == NULL);
4860 	scsi_hba_pkt_free(ap, pkt);
4861 }
4862 
4863 int
4864 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
4865     struct buf *bp, int flags, int (*cb)(), caddr_t arg)
4866 {
4867 	int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
4868 	uint_t oldcookiec;
4869 	int bioerr;
4870 	int rval;
4871 
4872 	oldcookiec = acp->left_cookien;
4873 
4874 	/* Move window to build s/g map */
4875 	if (acp->total_nwin > 0) {
4876 		if (++acp->cur_win < acp->total_nwin) {
4877 			off_t off;
4878 			size_t len;
4879 
4880 			rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
4881 			    &off, &len, &acp->cookie, &acp->left_cookien);
4882 			if (rval == DDI_SUCCESS)
4883 				goto get_dma_cookies;
4884 			AACDB_PRINT(softs, CE_WARN,
4885 			    "ddi_dma_getwin() fail %d", rval);
4886 			return (AACERR);
4887 		}
4888 		AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
4889 		return (AACERR);
4890 	}
4891 
4892 	/* We need to transfer data, so we alloc DMA resources for this pkt */
4893 	if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
4894 		uint_t dma_flags = 0;
4895 		struct aac_sge *sge;
4896 
4897 		/*
4898 		 * We will still use this point to fake some
4899 		 * infomation in tran_start
4900 		 */
4901 		acp->bp = bp;
4902 
4903 		/* Set dma flags */
4904 		if (BUF_IS_READ(bp)) {
4905 			dma_flags |= DDI_DMA_READ;
4906 			acp->flags |= AAC_CMD_BUF_READ;
4907 		} else {
4908 			dma_flags |= DDI_DMA_WRITE;
4909 			acp->flags |= AAC_CMD_BUF_WRITE;
4910 		}
4911 		if (flags & PKT_CONSISTENT)
4912 			dma_flags |= DDI_DMA_CONSISTENT;
4913 		if (flags & PKT_DMA_PARTIAL)
4914 			dma_flags |= DDI_DMA_PARTIAL;
4915 
4916 		/* Alloc buf dma handle */
4917 		if (!acp->buf_dma_handle) {
4918 			rval = ddi_dma_alloc_handle(softs->devinfo_p,
4919 			    &softs->buf_dma_attr, cb, arg,
4920 			    &acp->buf_dma_handle);
4921 			if (rval != DDI_SUCCESS) {
4922 				AACDB_PRINT(softs, CE_WARN,
4923 				    "Can't allocate DMA handle, errno=%d",
4924 				    rval);
4925 				goto error_out;
4926 			}
4927 		}
4928 
4929 		/* Bind buf */
4930 		if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
4931 			rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
4932 			    bp, dma_flags, cb, arg, &acp->cookie,
4933 			    &acp->left_cookien);
4934 		} else {
4935 			size_t bufsz;
4936 
4937 			AACDB_PRINT_TRAN(softs,
4938 			    "non-aligned buffer: addr=0x%p, cnt=%lu",
4939 			    (void *)bp->b_un.b_addr, bp->b_bcount);
4940 			if (bp->b_flags & (B_PAGEIO|B_PHYS))
4941 				bp_mapin(bp);
4942 
4943 			rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
4944 			    AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
4945 			    &softs->acc_attr, DDI_DMA_STREAMING,
4946 			    cb, arg, &acp->abp, &bufsz, &acp->abh);
4947 
4948 			if (rval != DDI_SUCCESS) {
4949 				AACDB_PRINT(softs, CE_NOTE,
4950 				    "Cannot alloc DMA to non-aligned buf");
4951 				bioerr = 0;
4952 				goto error_out;
4953 			}
4954 
4955 			if (acp->flags & AAC_CMD_BUF_WRITE)
4956 				ddi_rep_put8(acp->abh,
4957 				    (uint8_t *)bp->b_un.b_addr,
4958 				    (uint8_t *)acp->abp, bp->b_bcount,
4959 				    DDI_DEV_AUTOINCR);
4960 
4961 			rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
4962 			    NULL, acp->abp, bufsz, dma_flags, cb, arg,
4963 			    &acp->cookie, &acp->left_cookien);
4964 		}
4965 
4966 		switch (rval) {
4967 		case DDI_DMA_PARTIAL_MAP:
4968 			if (ddi_dma_numwin(acp->buf_dma_handle,
4969 			    &acp->total_nwin) == DDI_FAILURE) {
4970 				AACDB_PRINT(softs, CE_WARN,
4971 				    "Cannot get number of DMA windows");
4972 				bioerr = 0;
4973 				goto error_out;
4974 			}
4975 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4976 			    acp->left_cookien);
4977 			acp->cur_win = 0;
4978 			break;
4979 
4980 		case DDI_DMA_MAPPED:
4981 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4982 			    acp->left_cookien);
4983 			acp->cur_win = 0;
4984 			acp->total_nwin = 1;
4985 			break;
4986 
4987 		case DDI_DMA_NORESOURCES:
4988 			bioerr = 0;
4989 			AACDB_PRINT(softs, CE_WARN,
4990 			    "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
4991 			goto error_out;
4992 		case DDI_DMA_BADATTR:
4993 		case DDI_DMA_NOMAPPING:
4994 			bioerr = EFAULT;
4995 			AACDB_PRINT(softs, CE_WARN,
4996 			    "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
4997 			goto error_out;
4998 		case DDI_DMA_TOOBIG:
4999 			bioerr = EINVAL;
5000 			AACDB_PRINT(softs, CE_WARN,
5001 			    "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5002 			    bp->b_bcount);
5003 			goto error_out;
5004 		default:
5005 			bioerr = EINVAL;
5006 			AACDB_PRINT(softs, CE_WARN,
5007 			    "Cannot bind buf for DMA: %d", rval);
5008 			goto error_out;
5009 		}
5010 		acp->flags |= AAC_CMD_DMA_VALID;
5011 
5012 get_dma_cookies:
5013 		ASSERT(acp->left_cookien > 0);
5014 		if (acp->left_cookien > softs->aac_sg_tablesize) {
5015 			AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5016 			    acp->left_cookien);
5017 			bioerr = EINVAL;
5018 			goto error_out;
5019 		}
5020 		if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5021 			kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5022 			    oldcookiec);
5023 			acp->sgt = NULL;
5024 		}
5025 		if (acp->sgt == NULL) {
5026 			acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5027 			    acp->left_cookien, kf);
5028 			if (acp->sgt == NULL) {
5029 				AACDB_PRINT(softs, CE_WARN,
5030 				    "sgt kmem_alloc fail");
5031 				bioerr = ENOMEM;
5032 				goto error_out;
5033 			}
5034 		}
5035 
5036 		sge = &acp->sgt[0];
5037 		sge->bcount = acp->cookie.dmac_size;
5038 		sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5039 		sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5040 		acp->bcount = acp->cookie.dmac_size;
5041 		for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5042 			ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5043 			sge->bcount = acp->cookie.dmac_size;
5044 			sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5045 			sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5046 			acp->bcount += acp->cookie.dmac_size;
5047 		}
5048 
5049 		/*
5050 		 * Note: The old DMA engine do not correctly handle
5051 		 * dma_attr_maxxfer attribute. So we have to ensure
5052 		 * it by ourself.
5053 		 */
5054 		if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5055 			AACDB_PRINT(softs, CE_NOTE,
5056 			    "large xfer size received %d\n", acp->bcount);
5057 			bioerr = EINVAL;
5058 			goto error_out;
5059 		}
5060 
5061 		acp->total_xfer += acp->bcount;
5062 
5063 		if (acp->pkt) {
5064 			/* Return remaining byte count */
5065 			if (acp->total_xfer <= bp->b_bcount) {
5066 				acp->pkt->pkt_resid = bp->b_bcount - \
5067 				    acp->total_xfer;
5068 			} else {
5069 				/*
5070 				 * Allocated DMA size is greater than the buf
5071 				 * size of bp. This is caused by devices like
5072 				 * tape. we have extra bytes allocated, but
5073 				 * the packet residual has to stay correct.
5074 				 */
5075 				acp->pkt->pkt_resid = 0;
5076 			}
5077 			AACDB_PRINT_TRAN(softs,
5078 			    "bp=0x%p, xfered=%d/%d, resid=%d",
5079 			    (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5080 			    (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5081 		}
5082 	}
5083 	return (AACOK);
5084 
5085 error_out:
5086 	bioerror(bp, bioerr);
5087 	return (AACERR);
5088 }
5089 
5090 static struct scsi_pkt *
5091 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5092     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5093     int (*callback)(), caddr_t arg)
5094 {
5095 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5096 	struct aac_cmd *acp, *new_acp;
5097 
5098 	DBCALLED(softs, 2);
5099 
5100 	/* Allocate pkt */
5101 	if (pkt == NULL) {
5102 		int slen;
5103 
5104 		/* Force auto request sense */
5105 		slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5106 		pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5107 		    slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5108 		if (pkt == NULL) {
5109 			AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5110 			return (NULL);
5111 		}
5112 		acp = new_acp = PKT2AC(pkt);
5113 		acp->pkt = pkt;
5114 		acp->cmdlen = cmdlen;
5115 
5116 		if (ap->a_target < AAC_MAX_LD) {
5117 			acp->dvp = &softs->containers[ap->a_target].dev;
5118 			acp->aac_cmd_fib = softs->aac_cmd_fib;
5119 			acp->ac_comp = aac_ld_complete;
5120 		} else {
5121 			_NOTE(ASSUMING_PROTECTED(softs->nondasds))
5122 
5123 			acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5124 			acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5125 			acp->ac_comp = aac_pd_complete;
5126 		}
5127 	} else {
5128 		acp = PKT2AC(pkt);
5129 		new_acp = NULL;
5130 	}
5131 
5132 	if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5133 		return (pkt);
5134 
5135 	if (new_acp)
5136 		aac_tran_destroy_pkt(ap, pkt);
5137 	return (NULL);
5138 }
5139 
5140 /*
5141  * tran_sync_pkt(9E) - explicit DMA synchronization
5142  */
5143 /*ARGSUSED*/
5144 static void
5145 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5146 {
5147 	struct aac_cmd *acp = PKT2AC(pkt);
5148 
5149 	DBCALLED(NULL, 2);
5150 
5151 	if (aac_dma_sync_ac(acp) != AACOK)
5152 		ddi_fm_service_impact(
5153 		    (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5154 		    DDI_SERVICE_UNAFFECTED);
5155 }
5156 
5157 /*
5158  * tran_dmafree(9E) - deallocate DMA resources allocated for command
5159  */
5160 /*ARGSUSED*/
5161 static void
5162 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5163 {
5164 	struct aac_cmd *acp = PKT2AC(pkt);
5165 
5166 	DBCALLED(NULL, 2);
5167 
5168 	aac_free_dmamap(acp);
5169 }
5170 
5171 static int
5172 aac_do_quiesce(struct aac_softstate *softs)
5173 {
5174 	aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5175 	if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5176 		aac_start_drain(softs);
5177 		do {
5178 			if (cv_wait_sig(&softs->drain_cv,
5179 			    &softs->io_lock) == 0) {
5180 				/* Quiesce has been interrupted */
5181 				aac_stop_drain(softs);
5182 				aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5183 				aac_start_waiting_io(softs);
5184 				return (AACERR);
5185 			}
5186 		} while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5187 		aac_stop_drain(softs);
5188 	}
5189 
5190 	softs->state |= AAC_STATE_QUIESCED;
5191 	return (AACOK);
5192 }
5193 
5194 static int
5195 aac_tran_quiesce(dev_info_t *dip)
5196 {
5197 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5198 	int rval;
5199 
5200 	DBCALLED(softs, 1);
5201 
5202 	mutex_enter(&softs->io_lock);
5203 	if (aac_do_quiesce(softs) == AACOK)
5204 		rval = 0;
5205 	else
5206 		rval = 1;
5207 	mutex_exit(&softs->io_lock);
5208 	return (rval);
5209 }
5210 
5211 static int
5212 aac_do_unquiesce(struct aac_softstate *softs)
5213 {
5214 	softs->state &= ~AAC_STATE_QUIESCED;
5215 	aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5216 
5217 	aac_start_waiting_io(softs);
5218 	return (AACOK);
5219 }
5220 
5221 static int
5222 aac_tran_unquiesce(dev_info_t *dip)
5223 {
5224 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5225 	int rval;
5226 
5227 	DBCALLED(softs, 1);
5228 
5229 	mutex_enter(&softs->io_lock);
5230 	if (aac_do_unquiesce(softs) == AACOK)
5231 		rval = 0;
5232 	else
5233 		rval = 1;
5234 	mutex_exit(&softs->io_lock);
5235 	return (rval);
5236 }
5237 
5238 static int
5239 aac_hba_setup(struct aac_softstate *softs)
5240 {
5241 	scsi_hba_tran_t *hba_tran;
5242 	int rval;
5243 
5244 	hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5245 	if (hba_tran == NULL)
5246 		return (AACERR);
5247 	hba_tran->tran_hba_private = softs;
5248 	hba_tran->tran_tgt_init = aac_tran_tgt_init;
5249 	hba_tran->tran_tgt_free = aac_tran_tgt_free;
5250 	hba_tran->tran_tgt_probe = scsi_hba_probe;
5251 	hba_tran->tran_start = aac_tran_start;
5252 	hba_tran->tran_getcap = aac_tran_getcap;
5253 	hba_tran->tran_setcap = aac_tran_setcap;
5254 	hba_tran->tran_init_pkt = aac_tran_init_pkt;
5255 	hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5256 	hba_tran->tran_reset = aac_tran_reset;
5257 	hba_tran->tran_abort = aac_tran_abort;
5258 	hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5259 	hba_tran->tran_dmafree = aac_tran_dmafree;
5260 	hba_tran->tran_quiesce = aac_tran_quiesce;
5261 	hba_tran->tran_unquiesce = aac_tran_unquiesce;
5262 	hba_tran->tran_bus_config = aac_tran_bus_config;
5263 	rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5264 	    hba_tran, 0);
5265 	if (rval != DDI_SUCCESS) {
5266 		scsi_hba_tran_free(hba_tran);
5267 		AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5268 		return (AACERR);
5269 	}
5270 
5271 	softs->hba_tran = hba_tran;
5272 	return (AACOK);
5273 }
5274 
5275 /*
5276  * FIB setup operations
5277  */
5278 
5279 /*
5280  * Init FIB header
5281  */
5282 static void
5283 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp,
5284     uint16_t cmd, uint16_t fib_size)
5285 {
5286 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5287 	struct aac_fib *fibp = slotp->fibp;
5288 	uint32_t xfer_state;
5289 
5290 	xfer_state =
5291 	    AAC_FIBSTATE_HOSTOWNED |
5292 	    AAC_FIBSTATE_INITIALISED |
5293 	    AAC_FIBSTATE_EMPTY |
5294 	    AAC_FIBSTATE_FROMHOST |
5295 	    AAC_FIBSTATE_REXPECTED |
5296 	    AAC_FIBSTATE_NORM;
5297 	if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) {
5298 		xfer_state |=
5299 		    AAC_FIBSTATE_ASYNC |
5300 		    AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */;
5301 		ddi_put16(acc, &fibp->Header.SenderSize,
5302 		    softs->aac_max_fib_size);
5303 	} else {
5304 		ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE);
5305 	}
5306 
5307 	ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5308 	ddi_put16(acc, &fibp->Header.Command, cmd);
5309 	ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5310 	ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5311 	ddi_put16(acc, &fibp->Header.Size, fib_size);
5312 	ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5313 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5314 	ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5315 }
5316 
5317 /*
5318  * Init FIB for raw IO command
5319  */
5320 static void
5321 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5322 {
5323 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5324 	struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5325 	struct aac_sg_entryraw *sgp;
5326 	struct aac_sge *sge;
5327 
5328 	/* Calculate FIB size */
5329 	acp->fib_size = sizeof (struct aac_fib_header) + \
5330 	    sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5331 	    sizeof (struct aac_sg_entryraw);
5332 
5333 	aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size);
5334 
5335 	ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5336 	ddi_put16(acc, &io->BpTotal, 0);
5337 	ddi_put16(acc, &io->BpComplete, 0);
5338 
5339 	ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5340 	ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5341 	ddi_put16(acc, &io->ContainerId,
5342 	    ((struct aac_container *)acp->dvp)->cid);
5343 
5344 	/* Fill SG table */
5345 	ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5346 	ddi_put32(acc, &io->ByteCount, acp->bcount);
5347 
5348 	for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5349 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5350 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5351 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5352 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5353 		sgp->Next = 0;
5354 		sgp->Prev = 0;
5355 		sgp->Flags = 0;
5356 	}
5357 }
5358 
5359 /* Init FIB for 64-bit block IO command */
5360 static void
5361 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5362 {
5363 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5364 	struct aac_blockread64 *br = (struct aac_blockread64 *) \
5365 	    &acp->slotp->fibp->data[0];
5366 	struct aac_sg_entry64 *sgp;
5367 	struct aac_sge *sge;
5368 
5369 	acp->fib_size = sizeof (struct aac_fib_header) + \
5370 	    sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5371 	    sizeof (struct aac_sg_entry64);
5372 
5373 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64,
5374 	    acp->fib_size);
5375 
5376 	/*
5377 	 * The definitions for aac_blockread64 and aac_blockwrite64
5378 	 * are the same.
5379 	 */
5380 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5381 	ddi_put16(acc, &br->ContainerId,
5382 	    ((struct aac_container *)acp->dvp)->cid);
5383 	ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5384 	    VM_CtHostRead64 : VM_CtHostWrite64);
5385 	ddi_put16(acc, &br->Pad, 0);
5386 	ddi_put16(acc, &br->Flags, 0);
5387 
5388 	/* Fill SG table */
5389 	ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5390 	ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5391 
5392 	for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5393 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5394 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5395 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5396 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5397 	}
5398 }
5399 
5400 /* Init FIB for block IO command */
5401 static void
5402 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5403 {
5404 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5405 	struct aac_blockread *br = (struct aac_blockread *) \
5406 	    &acp->slotp->fibp->data[0];
5407 	struct aac_sg_entry *sgp;
5408 	struct aac_sge *sge = &acp->sgt[0];
5409 
5410 	if (acp->flags & AAC_CMD_BUF_READ) {
5411 		acp->fib_size = sizeof (struct aac_fib_header) + \
5412 		    sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5413 		    sizeof (struct aac_sg_entry);
5414 
5415 		ddi_put32(acc, &br->Command, VM_CtBlockRead);
5416 		ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5417 		sgp = &br->SgMap.SgEntry[0];
5418 	} else {
5419 		struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5420 
5421 		acp->fib_size = sizeof (struct aac_fib_header) + \
5422 		    sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5423 		    sizeof (struct aac_sg_entry);
5424 
5425 		ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5426 		ddi_put32(acc, &bw->Stable, CUNSTABLE);
5427 		ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5428 		sgp = &bw->SgMap.SgEntry[0];
5429 	}
5430 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size);
5431 
5432 	/*
5433 	 * aac_blockread and aac_blockwrite have the similar
5434 	 * structure head, so use br for bw here
5435 	 */
5436 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5437 	ddi_put32(acc, &br->ContainerId,
5438 	    ((struct aac_container *)acp->dvp)->cid);
5439 	ddi_put32(acc, &br->ByteCount, acp->bcount);
5440 
5441 	/* Fill SG table */
5442 	for (sge = &acp->sgt[0];
5443 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5444 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5445 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5446 	}
5447 }
5448 
5449 /*ARGSUSED*/
5450 void
5451 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5452 {
5453 	struct aac_slot *slotp = acp->slotp;
5454 	struct aac_fib *fibp = slotp->fibp;
5455 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5456 
5457 	ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5458 	    acp->fib_size,   /* only copy data of needed length */
5459 	    DDI_DEV_AUTOINCR);
5460 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5461 	ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5462 }
5463 
5464 static void
5465 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5466 {
5467 	struct aac_slot *slotp = acp->slotp;
5468 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5469 	struct aac_synchronize_command *sync =
5470 	    (struct aac_synchronize_command *)&slotp->fibp->data[0];
5471 
5472 	acp->fib_size = sizeof (struct aac_fib_header) + \
5473 	    sizeof (struct aac_synchronize_command);
5474 
5475 	aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size);
5476 	ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5477 	ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5478 	ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5479 	ddi_put32(acc, &sync->Count,
5480 	    sizeof (((struct aac_synchronize_reply *)0)->Data));
5481 }
5482 
5483 /*
5484  * Init FIB for pass-through SCMD
5485  */
5486 static void
5487 aac_cmd_fib_srb(struct aac_cmd *acp)
5488 {
5489 	struct aac_slot *slotp = acp->slotp;
5490 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5491 	struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0];
5492 	uint8_t *cdb;
5493 
5494 	ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5495 	ddi_put32(acc, &srb->retry_limit, 0);
5496 	ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5497 	ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5498 	if (acp->fibp == NULL) {
5499 		if (acp->flags & AAC_CMD_BUF_READ)
5500 			ddi_put32(acc, &srb->flags, SRB_DataIn);
5501 		else if (acp->flags & AAC_CMD_BUF_WRITE)
5502 			ddi_put32(acc, &srb->flags, SRB_DataOut);
5503 		ddi_put32(acc, &srb->channel,
5504 		    ((struct aac_nondasd *)acp->dvp)->bus);
5505 		ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5506 		ddi_put32(acc, &srb->lun, 0);
5507 		cdb = acp->pkt->pkt_cdbp;
5508 	} else {
5509 		struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5510 
5511 		ddi_put32(acc, &srb->flags, srb0->flags);
5512 		ddi_put32(acc, &srb->channel, srb0->channel);
5513 		ddi_put32(acc, &srb->id, srb0->id);
5514 		ddi_put32(acc, &srb->lun, srb0->lun);
5515 		cdb = srb0->cdb;
5516 	}
5517 	ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5518 }
5519 
5520 static void
5521 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5522 {
5523 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5524 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5525 	struct aac_sg_entry *sgp;
5526 	struct aac_sge *sge;
5527 
5528 	acp->fib_size = sizeof (struct aac_fib_header) + \
5529 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5530 	    acp->left_cookien * sizeof (struct aac_sg_entry);
5531 
5532 	/* Fill FIB and SRB headers, and copy cdb */
5533 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size);
5534 	aac_cmd_fib_srb(acp);
5535 
5536 	/* Fill SG table */
5537 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5538 	ddi_put32(acc, &srb->count, acp->bcount);
5539 
5540 	for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5541 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5542 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5543 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5544 	}
5545 }
5546 
5547 static void
5548 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5549 {
5550 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5551 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5552 	struct aac_sg_entry64 *sgp;
5553 	struct aac_sge *sge;
5554 
5555 	acp->fib_size = sizeof (struct aac_fib_header) + \
5556 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5557 	    acp->left_cookien * sizeof (struct aac_sg_entry64);
5558 
5559 	/* Fill FIB and SRB headers, and copy cdb */
5560 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64,
5561 	    acp->fib_size);
5562 	aac_cmd_fib_srb(acp);
5563 
5564 	/* Fill SG table */
5565 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5566 	ddi_put32(acc, &srb->count, acp->bcount);
5567 
5568 	for (sge = &acp->sgt[0],
5569 	    sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5570 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5571 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5572 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5573 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5574 	}
5575 }
5576 
5577 static int
5578 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5579 {
5580 	struct aac_slot *slotp;
5581 
5582 	if (slotp = aac_get_slot(softs)) {
5583 		acp->slotp = slotp;
5584 		slotp->acp = acp;
5585 		acp->aac_cmd_fib(softs, acp);
5586 		(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5587 		    DDI_DMA_SYNC_FORDEV);
5588 		return (AACOK);
5589 	}
5590 	return (AACERR);
5591 }
5592 
5593 static int
5594 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5595 {
5596 	struct aac_device *dvp = acp->dvp;
5597 	int q = AAC_CMDQ(acp);
5598 
5599 	if (dvp) {
5600 		if (dvp->ncmds[q] < dvp->throttle[q]) {
5601 			if (!(acp->flags & AAC_CMD_NTAG) ||
5602 			    dvp->ncmds[q] == 0) {
5603 do_bind:
5604 				return (aac_cmd_slot_bind(softs, acp));
5605 			}
5606 			ASSERT(q == AAC_CMDQ_ASYNC);
5607 			aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5608 			    AAC_THROTTLE_DRAIN);
5609 		}
5610 	} else {
5611 		if (softs->bus_ncmds[q] < softs->bus_throttle[q])
5612 			goto do_bind;
5613 	}
5614 	return (AACERR);
5615 }
5616 
5617 static void
5618 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5619 {
5620 	struct aac_slot *slotp = acp->slotp;
5621 	int q = AAC_CMDQ(acp);
5622 	int rval;
5623 
5624 	/* Set ac and pkt */
5625 	if (acp->pkt) { /* ac from ioctl has no pkt */
5626 		acp->pkt->pkt_state |=
5627 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5628 	}
5629 	if (acp->timeout) /* 0 indicates no timeout */
5630 		acp->timeout += aac_timebase + aac_tick;
5631 
5632 	if (acp->dvp)
5633 		acp->dvp->ncmds[q]++;
5634 	softs->bus_ncmds[q]++;
5635 	aac_cmd_enqueue(&softs->q_busy, acp);
5636 
5637 	AACDB_PRINT_FIB(softs, slotp);
5638 
5639 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
5640 		rval = aac_send_command(softs, slotp);
5641 	} else {
5642 		/*
5643 		 * If fib can not be enqueued, the adapter is in an abnormal
5644 		 * state, there will be no interrupt to us.
5645 		 */
5646 		rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5647 		    slotp->fib_phyaddr, acp->fib_size);
5648 	}
5649 
5650 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5651 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5652 
5653 	/*
5654 	 * NOTE: We send command only when slots availabe, so should never
5655 	 * reach here.
5656 	 */
5657 	if (rval != AACOK) {
5658 		AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5659 		if (acp->pkt) {
5660 			acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5661 			aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5662 		}
5663 		aac_end_io(softs, acp);
5664 		if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5665 			ddi_trigger_softintr(softs->softint_id);
5666 	}
5667 }
5668 
5669 static void
5670 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5671 {
5672 	struct aac_cmd *acp, *next_acp;
5673 
5674 	/* Serve as many waiting io's as possible */
5675 	for (acp = q->q_head; acp; acp = next_acp) {
5676 		next_acp = acp->next;
5677 		if (aac_bind_io(softs, acp) == AACOK) {
5678 			aac_cmd_delete(q, acp);
5679 			aac_start_io(softs, acp);
5680 		}
5681 		if (softs->free_io_slot_head == NULL)
5682 			break;
5683 	}
5684 }
5685 
5686 static void
5687 aac_start_waiting_io(struct aac_softstate *softs)
5688 {
5689 	/*
5690 	 * Sync FIB io is served before async FIB io so that io requests
5691 	 * sent by interactive userland commands get responded asap.
5692 	 */
5693 	if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5694 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5695 	if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5696 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
5697 }
5698 
5699 static void
5700 aac_drain_comp_q(struct aac_softstate *softs)
5701 {
5702 	struct aac_cmd *acp;
5703 	struct scsi_pkt *pkt;
5704 
5705 	/*CONSTCOND*/
5706 	while (1) {
5707 		mutex_enter(&softs->q_comp_mutex);
5708 		acp = aac_cmd_dequeue(&softs->q_comp);
5709 		mutex_exit(&softs->q_comp_mutex);
5710 		if (acp != NULL) {
5711 			ASSERT(acp->pkt != NULL);
5712 			pkt = acp->pkt;
5713 
5714 			if (pkt->pkt_reason == CMD_CMPLT) {
5715 				/*
5716 				 * Consistent packets need to be sync'ed first
5717 				 */
5718 				if ((acp->flags & AAC_CMD_CONSISTENT) &&
5719 				    (acp->flags & AAC_CMD_BUF_READ)) {
5720 					if (aac_dma_sync_ac(acp) != AACOK) {
5721 						ddi_fm_service_impact(
5722 						    softs->devinfo_p,
5723 						    DDI_SERVICE_UNAFFECTED);
5724 						pkt->pkt_reason = CMD_TRAN_ERR;
5725 						pkt->pkt_statistics = 0;
5726 					}
5727 				}
5728 				if ((aac_check_acc_handle(softs-> \
5729 				    comm_space_acc_handle) != DDI_SUCCESS) ||
5730 				    (aac_check_acc_handle(softs-> \
5731 				    pci_mem_handle) != DDI_SUCCESS)) {
5732 					ddi_fm_service_impact(softs->devinfo_p,
5733 					    DDI_SERVICE_UNAFFECTED);
5734 					ddi_fm_acc_err_clear(softs-> \
5735 					    pci_mem_handle, DDI_FME_VER0);
5736 					pkt->pkt_reason = CMD_TRAN_ERR;
5737 					pkt->pkt_statistics = 0;
5738 				}
5739 				if (aac_check_dma_handle(softs-> \
5740 				    comm_space_dma_handle) != DDI_SUCCESS) {
5741 					ddi_fm_service_impact(softs->devinfo_p,
5742 					    DDI_SERVICE_UNAFFECTED);
5743 					pkt->pkt_reason = CMD_TRAN_ERR;
5744 					pkt->pkt_statistics = 0;
5745 				}
5746 			}
5747 			scsi_hba_pkt_comp(pkt);
5748 		} else {
5749 			break;
5750 		}
5751 	}
5752 }
5753 
5754 static int
5755 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
5756 {
5757 	size_t rlen;
5758 	ddi_dma_cookie_t cookie;
5759 	uint_t cookien;
5760 
5761 	/* Allocate FIB dma resource */
5762 	if (ddi_dma_alloc_handle(
5763 	    softs->devinfo_p,
5764 	    &softs->addr_dma_attr,
5765 	    DDI_DMA_SLEEP,
5766 	    NULL,
5767 	    &slotp->fib_dma_handle) != DDI_SUCCESS) {
5768 		AACDB_PRINT(softs, CE_WARN,
5769 		    "Cannot alloc dma handle for slot fib area");
5770 		goto error;
5771 	}
5772 	if (ddi_dma_mem_alloc(
5773 	    slotp->fib_dma_handle,
5774 	    softs->aac_max_fib_size,
5775 	    &softs->acc_attr,
5776 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5777 	    DDI_DMA_SLEEP,
5778 	    NULL,
5779 	    (caddr_t *)&slotp->fibp,
5780 	    &rlen,
5781 	    &slotp->fib_acc_handle) != DDI_SUCCESS) {
5782 		AACDB_PRINT(softs, CE_WARN,
5783 		    "Cannot alloc mem for slot fib area");
5784 		goto error;
5785 	}
5786 	if (ddi_dma_addr_bind_handle(
5787 	    slotp->fib_dma_handle,
5788 	    NULL,
5789 	    (caddr_t)slotp->fibp,
5790 	    softs->aac_max_fib_size,
5791 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5792 	    DDI_DMA_SLEEP,
5793 	    NULL,
5794 	    &cookie,
5795 	    &cookien) != DDI_DMA_MAPPED) {
5796 		AACDB_PRINT(softs, CE_WARN,
5797 		    "dma bind failed for slot fib area");
5798 		goto error;
5799 	}
5800 
5801 	/* Check dma handles allocated in fib attach */
5802 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
5803 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5804 		goto error;
5805 	}
5806 
5807 	/* Check acc handles allocated in fib attach */
5808 	if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
5809 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5810 		goto error;
5811 	}
5812 
5813 	slotp->fib_phyaddr = cookie.dmac_laddress;
5814 	return (AACOK);
5815 
5816 error:
5817 	if (slotp->fib_acc_handle) {
5818 		ddi_dma_mem_free(&slotp->fib_acc_handle);
5819 		slotp->fib_acc_handle = NULL;
5820 	}
5821 	if (slotp->fib_dma_handle) {
5822 		ddi_dma_free_handle(&slotp->fib_dma_handle);
5823 		slotp->fib_dma_handle = NULL;
5824 	}
5825 	return (AACERR);
5826 }
5827 
5828 static void
5829 aac_free_fib(struct aac_slot *slotp)
5830 {
5831 	(void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
5832 	ddi_dma_mem_free(&slotp->fib_acc_handle);
5833 	slotp->fib_acc_handle = NULL;
5834 	ddi_dma_free_handle(&slotp->fib_dma_handle);
5835 	slotp->fib_dma_handle = NULL;
5836 	slotp->fib_phyaddr = 0;
5837 }
5838 
5839 static void
5840 aac_alloc_fibs(struct aac_softstate *softs)
5841 {
5842 	int i;
5843 	struct aac_slot *slotp;
5844 
5845 	for (i = 0; i < softs->total_slots &&
5846 	    softs->total_fibs < softs->total_slots; i++) {
5847 		slotp = &(softs->io_slot[i]);
5848 		if (slotp->fib_phyaddr)
5849 			continue;
5850 		if (aac_alloc_fib(softs, slotp) != AACOK)
5851 			break;
5852 
5853 		/* Insert the slot to the free slot list */
5854 		aac_release_slot(softs, slotp);
5855 		softs->total_fibs++;
5856 	}
5857 }
5858 
5859 static void
5860 aac_destroy_fibs(struct aac_softstate *softs)
5861 {
5862 	struct aac_slot *slotp;
5863 
5864 	while ((slotp = softs->free_io_slot_head) != NULL) {
5865 		ASSERT(slotp->fib_phyaddr);
5866 		softs->free_io_slot_head = slotp->next;
5867 		aac_free_fib(slotp);
5868 		ASSERT(slotp->index == (slotp - softs->io_slot));
5869 		softs->total_fibs--;
5870 	}
5871 	ASSERT(softs->total_fibs == 0);
5872 }
5873 
5874 static int
5875 aac_create_slots(struct aac_softstate *softs)
5876 {
5877 	int i;
5878 
5879 	softs->total_slots = softs->aac_max_fibs;
5880 	softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
5881 	    softs->total_slots, KM_SLEEP);
5882 	if (softs->io_slot == NULL) {
5883 		AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
5884 		return (AACERR);
5885 	}
5886 	for (i = 0; i < softs->total_slots; i++)
5887 		softs->io_slot[i].index = i;
5888 	softs->free_io_slot_head = NULL;
5889 	softs->total_fibs = 0;
5890 	return (AACOK);
5891 }
5892 
5893 static void
5894 aac_destroy_slots(struct aac_softstate *softs)
5895 {
5896 	ASSERT(softs->free_io_slot_head == NULL);
5897 
5898 	kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
5899 	    softs->total_slots);
5900 	softs->io_slot = NULL;
5901 	softs->total_slots = 0;
5902 }
5903 
5904 struct aac_slot *
5905 aac_get_slot(struct aac_softstate *softs)
5906 {
5907 	struct aac_slot *slotp;
5908 
5909 	if ((slotp = softs->free_io_slot_head) != NULL) {
5910 		softs->free_io_slot_head = slotp->next;
5911 		slotp->next = NULL;
5912 	}
5913 	return (slotp);
5914 }
5915 
5916 static void
5917 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
5918 {
5919 	ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
5920 	ASSERT(slotp == &softs->io_slot[slotp->index]);
5921 
5922 	slotp->acp = NULL;
5923 	slotp->next = softs->free_io_slot_head;
5924 	softs->free_io_slot_head = slotp;
5925 }
5926 
5927 int
5928 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
5929 {
5930 	if (aac_bind_io(softs, acp) == AACOK)
5931 		aac_start_io(softs, acp);
5932 	else
5933 		aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
5934 
5935 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
5936 		return (TRAN_ACCEPT);
5937 	/*
5938 	 * Because sync FIB is always 512 bytes and used for critical
5939 	 * functions, async FIB is used for poll IO.
5940 	 */
5941 	if (acp->flags & AAC_CMD_NO_INTR) {
5942 		if (aac_do_poll_io(softs, acp) == AACOK)
5943 			return (TRAN_ACCEPT);
5944 	} else {
5945 		if (aac_do_sync_io(softs, acp) == AACOK)
5946 			return (TRAN_ACCEPT);
5947 	}
5948 	return (TRAN_BADPKT);
5949 }
5950 
5951 static int
5952 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
5953 {
5954 	int (*intr_handler)(struct aac_softstate *);
5955 
5956 	/*
5957 	 * Interrupt is disabled, we have to poll the adapter by ourselves.
5958 	 */
5959 	intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
5960 	    aac_process_intr_new : aac_process_intr_old;
5961 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
5962 		int i = AAC_POLL_TIME * 1000;
5963 
5964 		AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
5965 		if (i == 0)
5966 			aac_cmd_timeout(softs, acp);
5967 	}
5968 
5969 	ddi_trigger_softintr(softs->softint_id);
5970 
5971 	if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
5972 		return (AACOK);
5973 	return (AACERR);
5974 }
5975 
5976 static int
5977 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
5978 {
5979 	ASSERT(softs && acp);
5980 
5981 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
5982 		cv_wait(&softs->event, &softs->io_lock);
5983 
5984 	if (acp->flags & AAC_CMD_CMPLT)
5985 		return (AACOK);
5986 	return (AACERR);
5987 }
5988 
5989 static int
5990 aac_dma_sync_ac(struct aac_cmd *acp)
5991 {
5992 	if (acp->buf_dma_handle) {
5993 		if (acp->flags & AAC_CMD_BUF_WRITE) {
5994 			if (acp->abp != NULL)
5995 				ddi_rep_put8(acp->abh,
5996 				    (uint8_t *)acp->bp->b_un.b_addr,
5997 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
5998 				    DDI_DEV_AUTOINCR);
5999 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6000 			    DDI_DMA_SYNC_FORDEV);
6001 		} else {
6002 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6003 			    DDI_DMA_SYNC_FORCPU);
6004 			if (aac_check_dma_handle(acp->buf_dma_handle) !=
6005 			    DDI_SUCCESS)
6006 				return (AACERR);
6007 			if (acp->abp != NULL)
6008 				ddi_rep_get8(acp->abh,
6009 				    (uint8_t *)acp->bp->b_un.b_addr,
6010 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
6011 				    DDI_DEV_AUTOINCR);
6012 		}
6013 	}
6014 	return (AACOK);
6015 }
6016 
6017 /*
6018  * The following function comes from Adaptec:
6019  *
6020  * When driver sees a particular event that means containers are changed, it
6021  * will rescan containers. However a change may not be complete until some
6022  * other event is received. For example, creating or deleting an array will
6023  * incur as many as six AifEnConfigChange events which would generate six
6024  * container rescans. To diminish rescans, driver set a flag to wait for
6025  * another particular event. When sees that events come in, it will do rescan.
6026  */
6027 static int
6028 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp)
6029 {
6030 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6031 	uint16_t fib_command;
6032 	struct aac_aif_command *aif;
6033 	int en_type;
6034 	int devcfg_needed;
6035 	int current, next;
6036 
6037 	fib_command = LE_16(fibp->Header.Command);
6038 	if (fib_command != AifRequest) {
6039 		cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x",
6040 		    fib_command);
6041 		return (AACERR);
6042 	}
6043 
6044 	/* Update internal container state */
6045 	aif = (struct aac_aif_command *)&fibp->data[0];
6046 
6047 	AACDB_PRINT_AIF(softs, aif);
6048 	devcfg_needed = 0;
6049 	en_type = LE_32((uint32_t)aif->data.EN.type);
6050 
6051 	switch (LE_32((uint32_t)aif->command)) {
6052 	case AifCmdDriverNotify: {
6053 		int cid = LE_32(aif->data.EN.data.ECC.container[0]);
6054 
6055 		switch (en_type) {
6056 		case AifDenMorphComplete:
6057 		case AifDenVolumeExtendComplete:
6058 			if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6059 				softs->devcfg_wait_on = AifEnConfigChange;
6060 			break;
6061 		}
6062 		if (softs->devcfg_wait_on == en_type)
6063 			devcfg_needed = 1;
6064 		break;
6065 	}
6066 
6067 	case AifCmdEventNotify:
6068 		switch (en_type) {
6069 		case AifEnAddContainer:
6070 		case AifEnDeleteContainer:
6071 			softs->devcfg_wait_on = AifEnConfigChange;
6072 			break;
6073 		case AifEnContainerChange:
6074 			if (!softs->devcfg_wait_on)
6075 				softs->devcfg_wait_on = AifEnConfigChange;
6076 			break;
6077 		case AifEnContainerEvent:
6078 			if (ddi_get32(acc, &aif-> \
6079 			    data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6080 				devcfg_needed = 1;
6081 			break;
6082 		}
6083 		if (softs->devcfg_wait_on == en_type)
6084 			devcfg_needed = 1;
6085 		break;
6086 
6087 	case AifCmdJobProgress:
6088 		if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6089 			int pr_status;
6090 			uint32_t pr_ftick, pr_ctick;
6091 
6092 			pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6093 			pr_ctick = LE_32(aif->data.PR[0].currentTick);
6094 			pr_ftick = LE_32(aif->data.PR[0].finalTick);
6095 
6096 			if ((pr_ctick == pr_ftick) ||
6097 			    (pr_status == AifJobStsSuccess))
6098 				softs->devcfg_wait_on = AifEnContainerChange;
6099 			else if ((pr_ctick == 0) &&
6100 			    (pr_status == AifJobStsRunning))
6101 				softs->devcfg_wait_on = AifEnContainerChange;
6102 		}
6103 		break;
6104 	}
6105 
6106 	if (devcfg_needed) {
6107 		softs->devcfg_wait_on = 0;
6108 		(void) aac_probe_containers(softs);
6109 	}
6110 
6111 	/* Modify AIF contexts */
6112 	current = softs->aifq_idx;
6113 	next = (current + 1) % AAC_AIFQ_LENGTH;
6114 	if (next == 0) {
6115 		struct aac_fib_context *ctx;
6116 
6117 		softs->aifq_wrap = 1;
6118 		for (ctx = softs->fibctx; ctx; ctx = ctx->next) {
6119 			if (next == ctx->ctx_idx) {
6120 				ctx->ctx_filled = 1;
6121 			} else if (current == ctx->ctx_idx && ctx->ctx_filled) {
6122 				ctx->ctx_idx = next;
6123 				AACDB_PRINT(softs, CE_NOTE,
6124 				    "-- AIF queue(%x) overrun", ctx->unique);
6125 			}
6126 		}
6127 	}
6128 	softs->aifq_idx = next;
6129 
6130 	/* Wakeup applications */
6131 	cv_broadcast(&softs->aifv);
6132 	return (AACOK);
6133 }
6134 
6135 /*
6136  * Timeout recovery
6137  */
6138 /*ARGSUSED*/
6139 static void
6140 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6141 {
6142 #ifdef DEBUG
6143 	acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6144 	AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6145 	AACDB_PRINT_FIB(softs, acp->slotp);
6146 #endif
6147 
6148 	/*
6149 	 * Besides the firmware in unhealthy state, an overloaded
6150 	 * adapter may also incur pkt timeout.
6151 	 * There is a chance for an adapter with a slower IOP to take
6152 	 * longer than 60 seconds to process the commands, such as when
6153 	 * to perform IOs. So the adapter is doing a build on a RAID-5
6154 	 * while being required longer completion times should be
6155 	 * tolerated.
6156 	 */
6157 	switch (aac_do_reset(softs)) {
6158 	case AAC_IOP_RESET_SUCCEED:
6159 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6160 		aac_start_waiting_io(softs);
6161 		break;
6162 	case AAC_IOP_RESET_FAILED:
6163 		/* Abort all waiting cmds when adapter is dead */
6164 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6165 		break;
6166 	case AAC_IOP_RESET_ABNORMAL:
6167 		aac_start_waiting_io(softs);
6168 	}
6169 }
6170 
6171 /*
6172  * The following function comes from Adaptec:
6173  *
6174  * Time sync. command added to synchronize time with firmware every 30
6175  * minutes (required for correct AIF timestamps etc.)
6176  */
6177 static int
6178 aac_sync_tick(struct aac_softstate *softs)
6179 {
6180 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
6181 	struct aac_fib *fibp = softs->sync_slot.fibp;
6182 
6183 	ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time());
6184 	return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)));
6185 }
6186 
6187 static void
6188 aac_daemon(void *arg)
6189 {
6190 	struct aac_softstate *softs = (struct aac_softstate *)arg;
6191 	struct aac_cmd *acp;
6192 
6193 	DBCALLED(softs, 2);
6194 
6195 	mutex_enter(&softs->io_lock);
6196 	/* Check slot for timeout pkts */
6197 	aac_timebase += aac_tick;
6198 	for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6199 		if (acp->timeout) {
6200 			if (acp->timeout <= aac_timebase) {
6201 				aac_cmd_timeout(softs, acp);
6202 				ddi_trigger_softintr(softs->softint_id);
6203 			}
6204 			break;
6205 		}
6206 	}
6207 
6208 	/* Time sync. with firmware every AAC_SYNC_TICK */
6209 	if (aac_sync_time <= aac_timebase) {
6210 		aac_sync_time = aac_timebase;
6211 		if (aac_sync_tick(softs) != AACOK)
6212 			aac_sync_time += aac_tick << 1; /* retry shortly */
6213 		else
6214 			aac_sync_time += AAC_SYNC_TICK;
6215 	}
6216 
6217 	if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0))
6218 		softs->timeout_id = timeout(aac_daemon, (void *)softs,
6219 		    (aac_tick * drv_usectohz(1000000)));
6220 	mutex_exit(&softs->io_lock);
6221 }
6222 
6223 /*
6224  * Architecture dependent functions
6225  */
6226 static int
6227 aac_rx_get_fwstatus(struct aac_softstate *softs)
6228 {
6229 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6230 }
6231 
6232 static int
6233 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6234 {
6235 	return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6236 }
6237 
6238 static void
6239 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6240     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6241 {
6242 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6243 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6244 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6245 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6246 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6247 }
6248 
6249 static int
6250 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6251 {
6252 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6253 }
6254 
6255 static int
6256 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6257 {
6258 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4));
6259 }
6260 
6261 static void
6262 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6263     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6264 {
6265 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6266 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6267 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6268 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6269 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6270 }
6271 
6272 /*
6273  * cb_ops functions
6274  */
6275 static int
6276 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6277 {
6278 	struct aac_softstate *softs;
6279 	int minor0, minor;
6280 	int instance;
6281 
6282 	DBCALLED(NULL, 2);
6283 
6284 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6285 		return (EINVAL);
6286 
6287 	minor0 = getminor(*devp);
6288 	minor = AAC_SCSA_MINOR(minor0);
6289 
6290 	if (AAC_IS_SCSA_NODE(minor))
6291 		return (scsi_hba_open(devp, flag, otyp, cred));
6292 
6293 	instance = MINOR2INST(minor0);
6294 	if (instance >= AAC_MAX_ADAPTERS)
6295 		return (ENXIO);
6296 
6297 	softs = ddi_get_soft_state(aac_softstatep, instance);
6298 	if (softs == NULL)
6299 		return (ENXIO);
6300 
6301 	return (0);
6302 }
6303 
6304 /*ARGSUSED*/
6305 static int
6306 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6307 {
6308 	int minor0, minor;
6309 	int instance;
6310 
6311 	DBCALLED(NULL, 2);
6312 
6313 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6314 		return (EINVAL);
6315 
6316 	minor0 = getminor(dev);
6317 	minor = AAC_SCSA_MINOR(minor0);
6318 
6319 	if (AAC_IS_SCSA_NODE(minor))
6320 		return (scsi_hba_close(dev, flag, otyp, cred));
6321 
6322 	instance = MINOR2INST(minor0);
6323 	if (instance >= AAC_MAX_ADAPTERS)
6324 		return (ENXIO);
6325 
6326 	return (0);
6327 }
6328 
6329 static int
6330 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6331     int *rval_p)
6332 {
6333 	struct aac_softstate *softs;
6334 	int minor0, minor;
6335 	int instance;
6336 
6337 	DBCALLED(NULL, 2);
6338 
6339 	if (drv_priv(cred_p) != 0)
6340 		return (EPERM);
6341 
6342 	minor0 = getminor(dev);
6343 	minor = AAC_SCSA_MINOR(minor0);
6344 
6345 	if (AAC_IS_SCSA_NODE(minor))
6346 		return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6347 
6348 	instance = MINOR2INST(minor0);
6349 	if (instance < AAC_MAX_ADAPTERS) {
6350 		softs = ddi_get_soft_state(aac_softstatep, instance);
6351 		return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6352 	}
6353 	return (ENXIO);
6354 }
6355 
6356 /*
6357  * The IO fault service error handling callback function
6358  */
6359 /*ARGSUSED*/
6360 static int
6361 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6362 {
6363 	/*
6364 	 * as the driver can always deal with an error in any dma or
6365 	 * access handle, we can just return the fme_status value.
6366 	 */
6367 	pci_ereport_post(dip, err, NULL);
6368 	return (err->fme_status);
6369 }
6370 
6371 /*
6372  * aac_fm_init - initialize fma capabilities and register with IO
6373  *               fault services.
6374  */
6375 static void
6376 aac_fm_init(struct aac_softstate *softs)
6377 {
6378 	/*
6379 	 * Need to change iblock to priority for new MSI intr
6380 	 */
6381 	ddi_iblock_cookie_t fm_ibc;
6382 
6383 	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6384 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6385 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6386 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6387 
6388 	/* Only register with IO Fault Services if we have some capability */
6389 	if (softs->fm_capabilities) {
6390 		/* Adjust access and dma attributes for FMA */
6391 		softs->acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
6392 		softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6393 		softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6394 
6395 		/*
6396 		 * Register capabilities with IO Fault Services.
6397 		 * fm_capabilities will be updated to indicate
6398 		 * capabilities actually supported (not requested.)
6399 		 */
6400 		ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6401 
6402 		/*
6403 		 * Initialize pci ereport capabilities if ereport
6404 		 * capable (should always be.)
6405 		 */
6406 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6407 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6408 			pci_ereport_setup(softs->devinfo_p);
6409 		}
6410 
6411 		/*
6412 		 * Register error callback if error callback capable.
6413 		 */
6414 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6415 			ddi_fm_handler_register(softs->devinfo_p,
6416 			    aac_fm_error_cb, (void *) softs);
6417 		}
6418 	}
6419 }
6420 
6421 /*
6422  * aac_fm_fini - Releases fma capabilities and un-registers with IO
6423  *               fault services.
6424  */
6425 static void
6426 aac_fm_fini(struct aac_softstate *softs)
6427 {
6428 	/* Only unregister FMA capabilities if registered */
6429 	if (softs->fm_capabilities) {
6430 		/*
6431 		 * Un-register error callback if error callback capable.
6432 		 */
6433 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6434 			ddi_fm_handler_unregister(softs->devinfo_p);
6435 		}
6436 
6437 		/*
6438 		 * Release any resources allocated by pci_ereport_setup()
6439 		 */
6440 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6441 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6442 			pci_ereport_teardown(softs->devinfo_p);
6443 		}
6444 
6445 		/* Unregister from IO Fault Services */
6446 		ddi_fm_fini(softs->devinfo_p);
6447 
6448 		/* Adjust access and dma attributes for FMA */
6449 		softs->acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
6450 		softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6451 		softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6452 	}
6453 }
6454 
6455 int
6456 aac_check_acc_handle(ddi_acc_handle_t handle)
6457 {
6458 	ddi_fm_error_t de;
6459 
6460 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6461 	return (de.fme_status);
6462 }
6463 
6464 int
6465 aac_check_dma_handle(ddi_dma_handle_t handle)
6466 {
6467 	ddi_fm_error_t de;
6468 
6469 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6470 	return (de.fme_status);
6471 }
6472 
6473 void
6474 aac_fm_ereport(struct aac_softstate *softs, char *detail)
6475 {
6476 	uint64_t ena;
6477 	char buf[FM_MAX_CLASS];
6478 
6479 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6480 	ena = fm_ena_generate(0, FM_ENA_FMT1);
6481 	if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
6482 		ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
6483 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
6484 	}
6485 }
6486 
6487 /*
6488  * Autoconfiguration support
6489  */
6490 static int
6491 aac_parse_devname(char *devnm, int *tgt, int *lun)
6492 {
6493 	char devbuf[SCSI_MAXNAMELEN];
6494 	char *addr;
6495 	char *p,  *tp, *lp;
6496 	long num;
6497 
6498 	/* Parse dev name and address */
6499 	(void) strcpy(devbuf, devnm);
6500 	addr = "";
6501 	for (p = devbuf; *p != '\0'; p++) {
6502 		if (*p == '@') {
6503 			addr = p + 1;
6504 			*p = '\0';
6505 		} else if (*p == ':') {
6506 			*p = '\0';
6507 			break;
6508 		}
6509 	}
6510 
6511 	/* Parse taget and lun */
6512 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
6513 		if (*p == ',') {
6514 			lp = p + 1;
6515 			*p = '\0';
6516 			break;
6517 		}
6518 	}
6519 	if (tgt && tp) {
6520 		if (ddi_strtol(tp, NULL, 0x10, &num))
6521 			return (AACERR);
6522 		*tgt = (int)num;
6523 	}
6524 	if (lun && lp) {
6525 		if (ddi_strtol(lp, NULL, 0x10, &num))
6526 			return (AACERR);
6527 		*lun = (int)num;
6528 	}
6529 	return (AACOK);
6530 }
6531 
6532 static dev_info_t *
6533 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
6534 {
6535 	dev_info_t *child = NULL;
6536 	char addr[SCSI_MAXNAMELEN];
6537 	char tmp[MAXNAMELEN];
6538 
6539 	if (tgt < AAC_MAX_LD) {
6540 		if (lun == 0) {
6541 			struct aac_device *dvp = &softs->containers[tgt].dev;
6542 
6543 			child = dvp->dip;
6544 		}
6545 	} else {
6546 		(void) sprintf(addr, "%x,%x", tgt, lun);
6547 		for (child = ddi_get_child(softs->devinfo_p);
6548 		    child; child = ddi_get_next_sibling(child)) {
6549 			/* We don't care about non-persistent node */
6550 			if (ndi_dev_is_persistent_node(child) == 0)
6551 				continue;
6552 
6553 			if (aac_name_node(child, tmp, MAXNAMELEN) !=
6554 			    DDI_SUCCESS)
6555 				continue;
6556 			if (strcmp(addr, tmp) == 0)
6557 				break;
6558 		}
6559 	}
6560 	return (child);
6561 }
6562 
6563 static int
6564 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
6565     dev_info_t **dipp)
6566 {
6567 	char *nodename = NULL;
6568 	char **compatible = NULL;
6569 	int ncompatible = 0;
6570 	char *childname;
6571 	dev_info_t *ldip = NULL;
6572 	int tgt = sd->sd_address.a_target;
6573 	int lun = sd->sd_address.a_lun;
6574 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6575 	int rval;
6576 
6577 	DBCALLED(softs, 2);
6578 
6579 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
6580 	    NULL, &nodename, &compatible, &ncompatible);
6581 	if (nodename == NULL) {
6582 		AACDB_PRINT(softs, CE_WARN,
6583 		    "found no comptible driver for t%dL%d", tgt, lun);
6584 		rval = NDI_FAILURE;
6585 		goto finish;
6586 	}
6587 	childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
6588 
6589 	/* Create dev node */
6590 	rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
6591 	    &ldip);
6592 	if (rval == NDI_SUCCESS) {
6593 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
6594 		    != DDI_PROP_SUCCESS) {
6595 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6596 			    "property for t%dL%d (target)", tgt, lun);
6597 			rval = NDI_FAILURE;
6598 			goto finish;
6599 		}
6600 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
6601 		    != DDI_PROP_SUCCESS) {
6602 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6603 			    "property for t%dL%d (lun)", tgt, lun);
6604 			rval = NDI_FAILURE;
6605 			goto finish;
6606 		}
6607 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
6608 		    "compatible", compatible, ncompatible)
6609 		    != DDI_PROP_SUCCESS) {
6610 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6611 			    "property for t%dL%d (compatible)", tgt, lun);
6612 			rval = NDI_FAILURE;
6613 			goto finish;
6614 		}
6615 
6616 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
6617 		if (rval != NDI_SUCCESS) {
6618 			AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
6619 			    tgt, lun);
6620 			ndi_prop_remove_all(ldip);
6621 			(void) ndi_devi_free(ldip);
6622 		}
6623 	}
6624 finish:
6625 	if (dipp)
6626 		*dipp = ldip;
6627 
6628 	scsi_hba_nodename_compatible_free(nodename, compatible);
6629 	return (rval);
6630 }
6631 
6632 /*ARGSUSED*/
6633 static int
6634 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
6635 {
6636 	int tgt = sd->sd_address.a_target;
6637 	int lun = sd->sd_address.a_lun;
6638 
6639 	DBCALLED(softs, 2);
6640 
6641 	if (tgt < AAC_MAX_LD) {
6642 		int rval;
6643 
6644 		if (lun == 0) {
6645 			mutex_enter(&softs->io_lock);
6646 			rval = aac_probe_container(softs, tgt);
6647 			mutex_exit(&softs->io_lock);
6648 			if (rval == AACOK) {
6649 				if (scsi_hba_probe(sd, NULL) ==
6650 				    SCSIPROBE_EXISTS)
6651 					return (NDI_SUCCESS);
6652 			}
6653 		}
6654 		return (NDI_FAILURE);
6655 	} else {
6656 		int dtype;
6657 
6658 		if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
6659 			return (NDI_FAILURE);
6660 
6661 		dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6662 
6663 		AACDB_PRINT(softs, CE_NOTE,
6664 		    "Phys. device found: tgt %d dtype %d: %s",
6665 		    tgt, dtype, sd->sd_inq->inq_vid);
6666 
6667 		/* Only non-DASD exposed */
6668 		if (dtype != DTYPE_RODIRECT /* CDROM */ &&
6669 		    dtype != DTYPE_SEQUENTIAL /* TAPE */ &&
6670 		    dtype != DTYPE_ESI /* SES */)
6671 			return (NDI_FAILURE);
6672 
6673 		AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
6674 		mutex_enter(&softs->io_lock);
6675 		softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
6676 		mutex_exit(&softs->io_lock);
6677 		return (NDI_SUCCESS);
6678 	}
6679 }
6680 
6681 static int
6682 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
6683     dev_info_t **ldip)
6684 {
6685 	struct scsi_device sd;
6686 	dev_info_t *child;
6687 	int rval;
6688 
6689 	DBCALLED(softs, 2);
6690 
6691 	if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
6692 		if (ldip)
6693 			*ldip = child;
6694 		return (NDI_SUCCESS);
6695 	}
6696 
6697 	bzero(&sd, sizeof (struct scsi_device));
6698 	sd.sd_address.a_hba_tran = softs->hba_tran;
6699 	sd.sd_address.a_target = (uint16_t)tgt;
6700 	sd.sd_address.a_lun = (uint8_t)lun;
6701 	if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
6702 		rval = aac_config_child(softs, &sd, ldip);
6703 	scsi_unprobe(&sd);
6704 	return (rval);
6705 }
6706 
6707 static int
6708 aac_config_tgt(struct aac_softstate *softs, int tgt)
6709 {
6710 	struct scsi_address ap;
6711 	struct buf *bp = NULL;
6712 	int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
6713 	int list_len = 0;
6714 	int lun_total = 0;
6715 	dev_info_t *ldip;
6716 	int i;
6717 
6718 	ap.a_hba_tran = softs->hba_tran;
6719 	ap.a_target = (uint16_t)tgt;
6720 	ap.a_lun = 0;
6721 
6722 	for (i = 0; i < 2; i++) {
6723 		struct scsi_pkt *pkt;
6724 		uchar_t *cdb;
6725 		uchar_t *p;
6726 		uint32_t data;
6727 
6728 		if (bp == NULL) {
6729 			if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
6730 			    buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
6731 			return (AACERR);
6732 		}
6733 		if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
6734 		    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
6735 		    NULL, NULL)) == NULL) {
6736 			scsi_free_consistent_buf(bp);
6737 			return (AACERR);
6738 		}
6739 		cdb = pkt->pkt_cdbp;
6740 		bzero(cdb, CDB_GROUP5);
6741 		cdb[0] = SCMD_REPORT_LUNS;
6742 
6743 		/* Convert buffer len from local to LE_32 */
6744 		data = buf_len;
6745 		for (p = &cdb[9]; p > &cdb[5]; p--) {
6746 			*p = data & 0xff;
6747 			data >>= 8;
6748 		}
6749 
6750 		if (scsi_poll(pkt) < 0 ||
6751 		    ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
6752 			scsi_destroy_pkt(pkt);
6753 			break;
6754 		}
6755 
6756 		/* Convert list_len from LE_32 to local */
6757 		for (p = (uchar_t *)bp->b_un.b_addr;
6758 		    p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
6759 			data <<= 8;
6760 			data |= *p;
6761 		}
6762 		list_len = data;
6763 		if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
6764 			scsi_free_consistent_buf(bp);
6765 			bp = NULL;
6766 			buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
6767 		}
6768 		scsi_destroy_pkt(pkt);
6769 	}
6770 	if (i >= 2) {
6771 		uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
6772 		    AAC_SCSI_RPTLUNS_HEAD_SIZE);
6773 
6774 		for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
6775 			uint16_t lun;
6776 
6777 			/* Determine report luns addressing type */
6778 			switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
6779 			/*
6780 			 * Vendors in the field have been found to be
6781 			 * concatenating bus/target/lun to equal the
6782 			 * complete lun value instead of switching to
6783 			 * flat space addressing
6784 			 */
6785 			case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
6786 			case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
6787 			case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
6788 				lun = ((buf[0] & 0x3f) << 8) | buf[1];
6789 				if (lun > UINT8_MAX) {
6790 					AACDB_PRINT(softs, CE_WARN,
6791 					    "abnormal lun number: %d", lun);
6792 					break;
6793 				}
6794 				if (aac_config_lun(softs, tgt, lun, &ldip) ==
6795 				    NDI_SUCCESS)
6796 					lun_total++;
6797 				break;
6798 			}
6799 
6800 			buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
6801 		}
6802 	} else {
6803 		/* The target may do not support SCMD_REPORT_LUNS. */
6804 		if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
6805 			lun_total++;
6806 	}
6807 	scsi_free_consistent_buf(bp);
6808 	return (lun_total);
6809 }
6810 
6811 static void
6812 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
6813 {
6814 	struct aac_device *dvp;
6815 
6816 	mutex_enter(&softs->io_lock);
6817 	dvp = AAC_DEV(softs, tgt);
6818 	if (en)
6819 		dvp->flags |= AAC_DFLAG_CONFIGURING;
6820 	else
6821 		dvp->flags &= ~AAC_DFLAG_CONFIGURING;
6822 	mutex_exit(&softs->io_lock);
6823 }
6824 
6825 static int
6826 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
6827     void *arg, dev_info_t **childp)
6828 {
6829 	struct aac_softstate *softs;
6830 	int circ = 0;
6831 	int rval;
6832 
6833 	if ((softs = ddi_get_soft_state(aac_softstatep,
6834 	    ddi_get_instance(parent))) == NULL)
6835 		return (NDI_FAILURE);
6836 
6837 	/* Commands for bus config should be blocked as the bus is quiesced */
6838 	mutex_enter(&softs->io_lock);
6839 	if (softs->state & AAC_STATE_QUIESCED) {
6840 		AACDB_PRINT(softs, CE_NOTE,
6841 		    "bus_config abroted because bus is quiesced");
6842 		mutex_exit(&softs->io_lock);
6843 		return (NDI_FAILURE);
6844 	}
6845 	mutex_exit(&softs->io_lock);
6846 
6847 	DBCALLED(softs, 1);
6848 
6849 	/* Hold the nexus across the bus_config */
6850 	ndi_devi_enter(parent, &circ);
6851 	switch (op) {
6852 	case BUS_CONFIG_ONE: {
6853 		int tgt, lun;
6854 
6855 		if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
6856 			rval = NDI_FAILURE;
6857 			break;
6858 		}
6859 
6860 		AAC_DEVCFG_BEGIN(softs, tgt);
6861 		rval = aac_config_lun(softs, tgt, lun, childp);
6862 		AAC_DEVCFG_END(softs, tgt);
6863 		break;
6864 	}
6865 
6866 	case BUS_CONFIG_DRIVER:
6867 	case BUS_CONFIG_ALL: {
6868 		uint32_t bus, tgt;
6869 		int index, total;
6870 
6871 		for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
6872 			AAC_DEVCFG_BEGIN(softs, tgt);
6873 			(void) aac_config_lun(softs, tgt, 0, NULL);
6874 			AAC_DEVCFG_END(softs, tgt);
6875 		}
6876 
6877 		/* Config the non-DASD devices connected to the card */
6878 		total = 0;
6879 		index = AAC_MAX_LD;
6880 		for (bus = 0; bus < softs->bus_max; bus++) {
6881 			AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
6882 			for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
6883 				AAC_DEVCFG_BEGIN(softs, index);
6884 				if (aac_config_tgt(softs, index))
6885 					total++;
6886 				AAC_DEVCFG_END(softs, index);
6887 			}
6888 		}
6889 		AACDB_PRINT(softs, CE_CONT,
6890 		    "?Total %d phys. device(s) found", total);
6891 		rval = NDI_SUCCESS;
6892 		break;
6893 	}
6894 	}
6895 
6896 	if (rval == NDI_SUCCESS)
6897 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
6898 	ndi_devi_exit(parent, circ);
6899 	return (rval);
6900 }
6901 
6902 static void
6903 aac_handle_dr(struct aac_drinfo *drp)
6904 {
6905 	struct aac_softstate *softs = drp->softs;
6906 	struct aac_device *dvp;
6907 	dev_info_t *dip;
6908 	int valid;
6909 	int circ1 = 0;
6910 
6911 	DBCALLED(softs, 1);
6912 
6913 	/* Hold the nexus across the bus_config */
6914 	mutex_enter(&softs->io_lock);
6915 	dvp = AAC_DEV(softs, drp->tgt);
6916 	valid = AAC_DEV_IS_VALID(dvp);
6917 	dip = dvp->dip;
6918 	mutex_exit(&softs->io_lock);
6919 
6920 	switch (drp->event) {
6921 	case AAC_EVT_ONLINE:
6922 	case AAC_EVT_OFFLINE:
6923 		/* Device onlined */
6924 		if (dip == NULL && valid) {
6925 			ndi_devi_enter(softs->devinfo_p, &circ1);
6926 			(void) aac_config_lun(softs, drp->tgt, 0, NULL);
6927 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
6928 			    softs->instance, drp->tgt, drp->lun);
6929 			ndi_devi_exit(softs->devinfo_p, circ1);
6930 		}
6931 		/* Device offlined */
6932 		if (dip && !valid) {
6933 			mutex_enter(&softs->io_lock);
6934 			(void) aac_do_reset(softs);
6935 			mutex_exit(&softs->io_lock);
6936 
6937 			(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
6938 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
6939 			    softs->instance, drp->tgt, drp->lun);
6940 		}
6941 		break;
6942 	}
6943 	kmem_free(drp, sizeof (struct aac_drinfo));
6944 }
6945 
6946 static int
6947 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event)
6948 {
6949 	struct aac_drinfo *drp;
6950 
6951 	DBCALLED(softs, 1);
6952 
6953 	if (softs->taskq == NULL ||
6954 	    (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL)
6955 		return (AACERR);
6956 
6957 	drp->softs = softs;
6958 	drp->tgt = tgt;
6959 	drp->lun = lun;
6960 	drp->event = event;
6961 	if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr,
6962 	    drp, DDI_NOSLEEP)) != DDI_SUCCESS) {
6963 		AACDB_PRINT(softs, CE_WARN, "DR task start failed");
6964 		kmem_free(drp, sizeof (struct aac_drinfo));
6965 		return (AACERR);
6966 	}
6967 	return (AACOK);
6968 }
6969 
6970 #ifdef DEBUG
6971 
6972 /* -------------------------debug aid functions-------------------------- */
6973 
6974 #define	AAC_FIB_CMD_KEY_STRINGS \
6975 	TestCommandResponse, "TestCommandResponse", \
6976 	TestAdapterCommand, "TestAdapterCommand", \
6977 	LastTestCommand, "LastTestCommand", \
6978 	ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
6979 	ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
6980 	ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
6981 	ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
6982 	ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
6983 	ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
6984 	ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
6985 	ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
6986 	InterfaceShutdown, "InterfaceShutdown", \
6987 	DmaCommandFib, "DmaCommandFib", \
6988 	StartProfile, "StartProfile", \
6989 	TermProfile, "TermProfile", \
6990 	SpeedTest, "SpeedTest", \
6991 	TakeABreakPt, "TakeABreakPt", \
6992 	RequestPerfData, "RequestPerfData", \
6993 	SetInterruptDefTimer, "SetInterruptDefTimer", \
6994 	SetInterruptDefCount, "SetInterruptDefCount", \
6995 	GetInterruptDefStatus, "GetInterruptDefStatus", \
6996 	LastCommCommand, "LastCommCommand", \
6997 	NuFileSystem, "NuFileSystem", \
6998 	UFS, "UFS", \
6999 	HostFileSystem, "HostFileSystem", \
7000 	LastFileSystemCommand, "LastFileSystemCommand", \
7001 	ContainerCommand, "ContainerCommand", \
7002 	ContainerCommand64, "ContainerCommand64", \
7003 	ClusterCommand, "ClusterCommand", \
7004 	ScsiPortCommand, "ScsiPortCommand", \
7005 	ScsiPortCommandU64, "ScsiPortCommandU64", \
7006 	AifRequest, "AifRequest", \
7007 	CheckRevision, "CheckRevision", \
7008 	FsaHostShutdown, "FsaHostShutdown", \
7009 	RequestAdapterInfo, "RequestAdapterInfo", \
7010 	IsAdapterPaused, "IsAdapterPaused", \
7011 	SendHostTime, "SendHostTime", \
7012 	LastMiscCommand, "LastMiscCommand"
7013 
7014 #define	AAC_CTVM_SUBCMD_KEY_STRINGS \
7015 	VM_Null, "VM_Null", \
7016 	VM_NameServe, "VM_NameServe", \
7017 	VM_ContainerConfig, "VM_ContainerConfig", \
7018 	VM_Ioctl, "VM_Ioctl", \
7019 	VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7020 	VM_CloseAll, "VM_CloseAll", \
7021 	VM_CtBlockRead, "VM_CtBlockRead", \
7022 	VM_CtBlockWrite, "VM_CtBlockWrite", \
7023 	VM_SliceBlockRead, "VM_SliceBlockRead", \
7024 	VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7025 	VM_DriveBlockRead, "VM_DriveBlockRead", \
7026 	VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7027 	VM_EnclosureMgt, "VM_EnclosureMgt", \
7028 	VM_Unused, "VM_Unused", \
7029 	VM_CtBlockVerify, "VM_CtBlockVerify", \
7030 	VM_CtPerf, "VM_CtPerf", \
7031 	VM_CtBlockRead64, "VM_CtBlockRead64", \
7032 	VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7033 	VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7034 	VM_CtHostRead64, "VM_CtHostRead64", \
7035 	VM_CtHostWrite64, "VM_CtHostWrite64", \
7036 	VM_NameServe64, "VM_NameServe64"
7037 
7038 #define	AAC_CT_SUBCMD_KEY_STRINGS \
7039 	CT_Null, "CT_Null", \
7040 	CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7041 	CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7042 	CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7043 	CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7044 	CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7045 	CT_WRITE_MBR, "CT_WRITE_MBR", \
7046 	CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7047 	CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7048 	CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7049 	CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7050 	CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7051 	CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7052 	CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7053 	CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7054 	CT_READ_MBR, "CT_READ_MBR", \
7055 	CT_READ_PARTITION, "CT_READ_PARTITION", \
7056 	CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7057 	CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7058 	CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7059 	CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7060 	CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7061 	CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7062 	CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7063 	CT_UNMIRROR, "CT_UNMIRROR", \
7064 	CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7065 	CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7066 	CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7067 	CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7068 	CT_MOVE2, "CT_MOVE2", \
7069 	CT_SPLIT, "CT_SPLIT", \
7070 	CT_SPLIT2, "CT_SPLIT2", \
7071 	CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7072 	CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7073 	CT_RECONFIG, "CT_RECONFIG", \
7074 	CT_BREAK2, "CT_BREAK2", \
7075 	CT_BREAK, "CT_BREAK", \
7076 	CT_MERGE2, "CT_MERGE2", \
7077 	CT_MERGE, "CT_MERGE", \
7078 	CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7079 	CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7080 	CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7081 	CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7082 	CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7083 	CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7084 	CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7085 	CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7086 	CT_COPY_STATUS, "CT_COPY_STATUS", \
7087 	CT_COPY, "CT_COPY", \
7088 	CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7089 	CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7090 	CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7091 	CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7092 	CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7093 	CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7094 	CT_SET, "CT_SET", \
7095 	CT_GET, "CT_GET", \
7096 	CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7097 	CT_GET_DELAY, "CT_GET_DELAY", \
7098 	CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7099 	CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7100 	CT_SCRUB, "CT_SCRUB", \
7101 	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7102 	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7103 	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7104 	CT_PAUSE_IO, "CT_PAUSE_IO", \
7105 	CT_RELEASE_IO, "CT_RELEASE_IO", \
7106 	CT_SCRUB2, "CT_SCRUB2", \
7107 	CT_MCHECK, "CT_MCHECK", \
7108 	CT_CORRUPT, "CT_CORRUPT", \
7109 	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7110 	CT_PROMOTE, "CT_PROMOTE", \
7111 	CT_SET_DEAD, "CT_SET_DEAD", \
7112 	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7113 	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7114 	CT_GET_PARAM, "CT_GET_PARAM", \
7115 	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7116 	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7117 	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7118 	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7119 	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7120 	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7121 	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7122 	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7123 	CT_STOP_DATA, "CT_STOP_DATA", \
7124 	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7125 	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7126 	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7127 	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7128 	CT_GET_TIME, "CT_GET_TIME", \
7129 	CT_READ_DATA, "CT_READ_DATA", \
7130 	CT_CTR, "CT_CTR", \
7131 	CT_CTL, "CT_CTL", \
7132 	CT_DRAINIO, "CT_DRAINIO", \
7133 	CT_RELEASEIO, "CT_RELEASEIO", \
7134 	CT_GET_NVRAM, "CT_GET_NVRAM", \
7135 	CT_GET_MEMORY, "CT_GET_MEMORY", \
7136 	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7137 	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7138 	CT_NV_ZERO, "CT_NV_ZERO", \
7139 	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7140 	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7141 	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7142 	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7143 	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7144 	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7145 	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7146 	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7147 	CT_MONITOR, "CT_MONITOR", \
7148 	CT_GEN_MORPH, "CT_GEN_MORPH", \
7149 	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7150 	CT_CACHE_SET, "CT_CACHE_SET", \
7151 	CT_CACHE_STAT, "CT_CACHE_STAT", \
7152 	CT_TRACE_START, "CT_TRACE_START", \
7153 	CT_TRACE_STOP, "CT_TRACE_STOP", \
7154 	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7155 	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7156 	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7157 	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7158 	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7159 	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7160 	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7161 	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7162 	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7163 	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7164 	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7165 	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7166 	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7167 	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7168 	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7169 	CT_READ_NAME, "CT_READ_NAME", \
7170 	CT_WRITE_NAME, "CT_WRITE_NAME", \
7171 	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7172 	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7173 	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7174 	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7175 	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7176 	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7177 	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7178 	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7179 	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7180 	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7181 	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7182 	CT_FLUSH, "CT_FLUSH", \
7183 	CT_REBUILD, "CT_REBUILD", \
7184 	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7185 	CT_RESTART, "CT_RESTART", \
7186 	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7187 	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7188 	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7189 	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7190 	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7191 	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7192 	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7193 	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7194 	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7195 	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7196 	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7197 	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7198 	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7199 	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7200 	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7201 	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7202 	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7203 	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7204 	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7205 	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7206 	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7207 	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7208 	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7209 	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7210 	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7211 	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7212 	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7213 	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7214 	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7215 	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7216 	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7217 	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7218 	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7219 	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7220 	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7221 	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7222 	CT_IS_CONTAINER_MEATADATA_STANDARD, \
7223 	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7224 	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7225 	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7226 	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7227 	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7228 	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7229 	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7230 	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7231 	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7232 	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7233 	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7234 	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7235 	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7236 	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7237 	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7238 	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7239 	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7240 	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7241 	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7242 	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7243 
7244 #define	AAC_CL_SUBCMD_KEY_STRINGS \
7245 	CL_NULL, "CL_NULL", \
7246 	DS_INIT, "DS_INIT", \
7247 	DS_RESCAN, "DS_RESCAN", \
7248 	DS_CREATE, "DS_CREATE", \
7249 	DS_DELETE, "DS_DELETE", \
7250 	DS_ADD_DISK, "DS_ADD_DISK", \
7251 	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7252 	DS_MOVE_DISK, "DS_MOVE_DISK", \
7253 	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7254 	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7255 	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7256 	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7257 	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7258 	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7259 	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7260 	DS_GET_DRIVES, "DS_GET_DRIVES", \
7261 	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7262 	DS_ONLINE, "DS_ONLINE", \
7263 	DS_OFFLINE, "DS_OFFLINE", \
7264 	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7265 	DS_FSAPRINT, "DS_FSAPRINT", \
7266 	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7267 	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7268 	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7269 	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7270 	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7271 	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7272 	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7273 	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7274 	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7275 	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7276 	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7277 	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7278 	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7279 	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7280 	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7281 	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7282 	CQ_QUORUM_OP, "CQ_QUORUM_OP"
7283 
7284 #define	AAC_AIF_SUBCMD_KEY_STRINGS \
7285 	AifCmdEventNotify, "AifCmdEventNotify", \
7286 	AifCmdJobProgress, "AifCmdJobProgress", \
7287 	AifCmdAPIReport, "AifCmdAPIReport", \
7288 	AifCmdDriverNotify, "AifCmdDriverNotify", \
7289 	AifReqJobList, "AifReqJobList", \
7290 	AifReqJobsForCtr, "AifReqJobsForCtr", \
7291 	AifReqJobsForScsi, "AifReqJobsForScsi", \
7292 	AifReqJobReport, "AifReqJobReport", \
7293 	AifReqTerminateJob, "AifReqTerminateJob", \
7294 	AifReqSuspendJob, "AifReqSuspendJob", \
7295 	AifReqResumeJob, "AifReqResumeJob", \
7296 	AifReqSendAPIReport, "AifReqSendAPIReport", \
7297 	AifReqAPIJobStart, "AifReqAPIJobStart", \
7298 	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7299 	AifReqAPIJobFinish, "AifReqAPIJobFinish"
7300 
7301 #define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
7302 	Reserved_IOCTL, "Reserved_IOCTL", \
7303 	GetDeviceHandle, "GetDeviceHandle", \
7304 	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7305 	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7306 	RescanBus, "RescanBus", \
7307 	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7308 	GetDeviceCapacity, "GetDeviceCapacity", \
7309 	GetContainerProbeInfo, "GetContainerProbeInfo", \
7310 	GetRequestedMemorySize, "GetRequestedMemorySize", \
7311 	GetBusInfo, "GetBusInfo", \
7312 	GetVendorSpecific, "GetVendorSpecific", \
7313 	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7314 	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7315 	SetupExtendedCounters, "SetupExtendedCounters", \
7316 	GetPerformanceCounters, "GetPerformanceCounters", \
7317 	ResetPerformanceCounters, "ResetPerformanceCounters", \
7318 	ReadModePage, "ReadModePage", \
7319 	WriteModePage, "WriteModePage", \
7320 	ReadDriveParameter, "ReadDriveParameter", \
7321 	WriteDriveParameter, "WriteDriveParameter", \
7322 	ResetAdapter, "ResetAdapter", \
7323 	ResetBus, "ResetBus", \
7324 	ResetBusDevice, "ResetBusDevice", \
7325 	ExecuteSrb, "ExecuteSrb", \
7326 	Create_IO_Task, "Create_IO_Task", \
7327 	Delete_IO_Task, "Delete_IO_Task", \
7328 	Get_IO_Task_Info, "Get_IO_Task_Info", \
7329 	Check_Task_Progress, "Check_Task_Progress", \
7330 	InjectError, "InjectError", \
7331 	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7332 	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7333 	GetDeviceStatus, "GetDeviceStatus", \
7334 	ClearDeviceStatus, "ClearDeviceStatus", \
7335 	DiskSpinControl, "DiskSpinControl", \
7336 	DiskSmartControl, "DiskSmartControl", \
7337 	WriteSame, "WriteSame", \
7338 	ReadWriteLong, "ReadWriteLong", \
7339 	FormatUnit, "FormatUnit", \
7340 	TargetDeviceControl, "TargetDeviceControl", \
7341 	TargetChannelControl, "TargetChannelControl", \
7342 	FlashNewCode, "FlashNewCode", \
7343 	DiskCheck, "DiskCheck", \
7344 	RequestSense, "RequestSense", \
7345 	DiskPERControl, "DiskPERControl", \
7346 	Read10, "Read10", \
7347 	Write10, "Write10"
7348 
7349 #define	AAC_AIFEN_KEY_STRINGS \
7350 	AifEnGeneric, "Generic", \
7351 	AifEnTaskComplete, "TaskComplete", \
7352 	AifEnConfigChange, "Config change", \
7353 	AifEnContainerChange, "Container change", \
7354 	AifEnDeviceFailure, "device failed", \
7355 	AifEnMirrorFailover, "Mirror failover", \
7356 	AifEnContainerEvent, "container event", \
7357 	AifEnFileSystemChange, "File system changed", \
7358 	AifEnConfigPause, "Container pause event", \
7359 	AifEnConfigResume, "Container resume event", \
7360 	AifEnFailoverChange, "Failover space assignment changed", \
7361 	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7362 	AifEnEnclosureManagement, "Enclosure management event", \
7363 	AifEnBatteryEvent, "battery event", \
7364 	AifEnAddContainer, "Add container", \
7365 	AifEnDeleteContainer, "Delete container", \
7366 	AifEnSMARTEvent, "SMART Event", \
7367 	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7368 	AifEnClusterEvent, "cluster event", \
7369 	AifEnDiskSetEvent, "disk set event occured", \
7370 	AifDenMorphComplete, "morph operation completed", \
7371 	AifDenVolumeExtendComplete, "VolumeExtendComplete"
7372 
7373 struct aac_key_strings {
7374 	int key;
7375 	char *message;
7376 };
7377 
7378 extern struct scsi_key_strings scsi_cmds[];
7379 
7380 static struct aac_key_strings aac_fib_cmds[] = {
7381 	AAC_FIB_CMD_KEY_STRINGS,
7382 	-1,			NULL
7383 };
7384 
7385 static struct aac_key_strings aac_ctvm_subcmds[] = {
7386 	AAC_CTVM_SUBCMD_KEY_STRINGS,
7387 	-1,			NULL
7388 };
7389 
7390 static struct aac_key_strings aac_ct_subcmds[] = {
7391 	AAC_CT_SUBCMD_KEY_STRINGS,
7392 	-1,			NULL
7393 };
7394 
7395 static struct aac_key_strings aac_cl_subcmds[] = {
7396 	AAC_CL_SUBCMD_KEY_STRINGS,
7397 	-1,			NULL
7398 };
7399 
7400 static struct aac_key_strings aac_aif_subcmds[] = {
7401 	AAC_AIF_SUBCMD_KEY_STRINGS,
7402 	-1,			NULL
7403 };
7404 
7405 static struct aac_key_strings aac_ioctl_subcmds[] = {
7406 	AAC_IOCTL_SUBCMD_KEY_STRINGS,
7407 	-1,			NULL
7408 };
7409 
7410 static struct aac_key_strings aac_aifens[] = {
7411 	AAC_AIFEN_KEY_STRINGS,
7412 	-1,			NULL
7413 };
7414 
7415 /*
7416  * The following function comes from Adaptec:
7417  *
7418  * Get the firmware print buffer parameters from the firmware,
7419  * if the command was successful map in the address.
7420  */
7421 static int
7422 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7423 {
7424 	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7425 	    0, 0, 0, 0, NULL) == AACOK) {
7426 		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7427 		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7428 		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7429 		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7430 
7431 		if (mondrv_buf_size) {
7432 			uint32_t offset = mondrv_buf_paddrl - \
7433 			    softs->pci_mem_base_paddr;
7434 
7435 			/*
7436 			 * See if the address is already mapped in, and
7437 			 * if so set it up from the base address
7438 			 */
7439 			if ((mondrv_buf_paddrh == 0) &&
7440 			    (offset + mondrv_buf_size < softs->map_size)) {
7441 				mutex_enter(&aac_prt_mutex);
7442 				softs->debug_buf_offset = offset;
7443 				softs->debug_header_size = mondrv_hdr_size;
7444 				softs->debug_buf_size = mondrv_buf_size;
7445 				softs->debug_fw_flags = 0;
7446 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7447 				mutex_exit(&aac_prt_mutex);
7448 
7449 				return (AACOK);
7450 			}
7451 		}
7452 	}
7453 	return (AACERR);
7454 }
7455 
7456 int
7457 aac_dbflag_on(struct aac_softstate *softs, int flag)
7458 {
7459 	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
7460 
7461 	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
7462 	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
7463 }
7464 
7465 static void
7466 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
7467 {
7468 	if (noheader) {
7469 		if (sl) {
7470 			aac_fmt[0] = sl;
7471 			cmn_err(lev, aac_fmt, aac_prt_buf);
7472 		} else {
7473 			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
7474 		}
7475 	} else {
7476 		if (sl) {
7477 			aac_fmt_header[0] = sl;
7478 			cmn_err(lev, aac_fmt_header,
7479 			    softs->vendor_name, softs->instance,
7480 			    aac_prt_buf);
7481 		} else {
7482 			cmn_err(lev, &aac_fmt_header[1],
7483 			    softs->vendor_name, softs->instance,
7484 			    aac_prt_buf);
7485 		}
7486 	}
7487 }
7488 
7489 /*
7490  * The following function comes from Adaptec:
7491  *
7492  * Format and print out the data passed in to UART or console
7493  * as specified by debug flags.
7494  */
7495 void
7496 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
7497 {
7498 	va_list args;
7499 	char sl; /* system log character */
7500 
7501 	mutex_enter(&aac_prt_mutex);
7502 	/* Set up parameters and call sprintf function to format the data */
7503 	if (strchr("^!?", fmt[0]) == NULL) {
7504 		sl = 0;
7505 	} else {
7506 		sl = fmt[0];
7507 		fmt++;
7508 	}
7509 	va_start(args, fmt);
7510 	(void) vsprintf(aac_prt_buf, fmt, args);
7511 	va_end(args);
7512 
7513 	/* Make sure the softs structure has been passed in for this section */
7514 	if (softs) {
7515 		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
7516 		    /* If we are set up for a Firmware print */
7517 		    (softs->debug_buf_size)) {
7518 			uint32_t count, i;
7519 
7520 			/* Make sure the string size is within boundaries */
7521 			count = strlen(aac_prt_buf);
7522 			if (count > softs->debug_buf_size)
7523 				count = (uint16_t)softs->debug_buf_size;
7524 
7525 			/*
7526 			 * Wait for no more than AAC_PRINT_TIMEOUT for the
7527 			 * previous message length to clear (the handshake).
7528 			 */
7529 			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
7530 				if (!PCI_MEM_GET32(softs,
7531 				    softs->debug_buf_offset + \
7532 				    AAC_FW_DBG_STRLEN_OFFSET))
7533 					break;
7534 
7535 				drv_usecwait(1000);
7536 			}
7537 
7538 			/*
7539 			 * If the length is clear, copy over the message, the
7540 			 * flags, and the length. Make sure the length is the
7541 			 * last because that is the signal for the Firmware to
7542 			 * pick it up.
7543 			 */
7544 			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
7545 			    AAC_FW_DBG_STRLEN_OFFSET)) {
7546 				PCI_MEM_REP_PUT8(softs,
7547 				    softs->debug_buf_offset + \
7548 				    softs->debug_header_size,
7549 				    aac_prt_buf, count);
7550 				PCI_MEM_PUT32(softs,
7551 				    softs->debug_buf_offset + \
7552 				    AAC_FW_DBG_FLAGS_OFFSET,
7553 				    softs->debug_fw_flags);
7554 				PCI_MEM_PUT32(softs,
7555 				    softs->debug_buf_offset + \
7556 				    AAC_FW_DBG_STRLEN_OFFSET, count);
7557 			} else {
7558 				cmn_err(CE_WARN, "UART output fail");
7559 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7560 			}
7561 		}
7562 
7563 		/*
7564 		 * If the Kernel Debug Print flag is set, send it off
7565 		 * to the Kernel Debugger
7566 		 */
7567 		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7568 			aac_cmn_err(softs, lev, sl,
7569 			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
7570 	} else {
7571 		/* Driver not initialized yet, no firmware or header output */
7572 		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7573 			aac_cmn_err(softs, lev, sl, 1);
7574 	}
7575 	mutex_exit(&aac_prt_mutex);
7576 }
7577 
7578 /*
7579  * Translate command number to description string
7580  */
7581 static char *
7582 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
7583 {
7584 	int i;
7585 
7586 	for (i = 0; cmdlist[i].key != -1; i++) {
7587 		if (cmd == cmdlist[i].key)
7588 			return (cmdlist[i].message);
7589 	}
7590 	return (NULL);
7591 }
7592 
7593 static void
7594 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
7595 {
7596 	struct scsi_pkt *pkt = acp->pkt;
7597 	struct scsi_address *ap = &pkt->pkt_address;
7598 	int is_pd = 0;
7599 	int ctl = ddi_get_instance(softs->devinfo_p);
7600 	int tgt = ap->a_target;
7601 	int lun = ap->a_lun;
7602 	union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
7603 	uchar_t cmd = cdbp->scc_cmd;
7604 	char *desc;
7605 
7606 	if (tgt >= AAC_MAX_LD) {
7607 		is_pd = 1;
7608 		ctl = ((struct aac_nondasd *)acp->dvp)->bus;
7609 		tgt = ((struct aac_nondasd *)acp->dvp)->tid;
7610 		lun = 0;
7611 	}
7612 
7613 	if ((desc = aac_cmd_name(cmd,
7614 	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
7615 		aac_printf(softs, CE_NOTE,
7616 		    "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
7617 		    cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
7618 		return;
7619 	}
7620 
7621 	switch (cmd) {
7622 	case SCMD_READ:
7623 	case SCMD_WRITE:
7624 		aac_printf(softs, CE_NOTE,
7625 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7626 		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
7627 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7628 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7629 		break;
7630 	case SCMD_READ_G1:
7631 	case SCMD_WRITE_G1:
7632 		aac_printf(softs, CE_NOTE,
7633 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7634 		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
7635 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7636 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7637 		break;
7638 	case SCMD_READ_G4:
7639 	case SCMD_WRITE_G4:
7640 		aac_printf(softs, CE_NOTE,
7641 		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
7642 		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
7643 		    GETG4COUNT(cdbp),
7644 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7645 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7646 		break;
7647 	case SCMD_READ_G5:
7648 	case SCMD_WRITE_G5:
7649 		aac_printf(softs, CE_NOTE,
7650 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7651 		    desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
7652 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7653 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7654 		break;
7655 	default:
7656 		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
7657 		    desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
7658 	}
7659 }
7660 
7661 void
7662 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
7663 {
7664 	struct aac_cmd *acp = slotp->acp;
7665 	struct aac_fib *fibp = slotp->fibp;
7666 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
7667 	uint16_t fib_size;
7668 	uint32_t fib_cmd, sub_cmd;
7669 	char *cmdstr, *subcmdstr;
7670 	char *caller;
7671 	int i;
7672 
7673 	if (acp) {
7674 		if (!(softs->debug_fib_flags & acp->fib_flags))
7675 			return;
7676 		if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
7677 			caller = "SCMD";
7678 		else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
7679 			caller = "IOCTL";
7680 		else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
7681 			caller = "SRB";
7682 		else
7683 			return;
7684 	} else {
7685 		if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
7686 			return;
7687 		caller = "SYNC";
7688 	}
7689 
7690 	fib_cmd = ddi_get16(acc, &fibp->Header.Command);
7691 	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
7692 	sub_cmd = (uint32_t)-1;
7693 	subcmdstr = NULL;
7694 
7695 	/* Print FIB header */
7696 	if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
7697 		aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
7698 		aac_printf(softs, CE_NOTE, "     XferState  %d",
7699 		    ddi_get32(acc, &fibp->Header.XferState));
7700 		aac_printf(softs, CE_NOTE, "     Command    %d",
7701 		    ddi_get16(acc, &fibp->Header.Command));
7702 		aac_printf(softs, CE_NOTE, "     StructType %d",
7703 		    ddi_get8(acc, &fibp->Header.StructType));
7704 		aac_printf(softs, CE_NOTE, "     Flags      0x%x",
7705 		    ddi_get8(acc, &fibp->Header.Flags));
7706 		aac_printf(softs, CE_NOTE, "     Size       %d",
7707 		    ddi_get16(acc, &fibp->Header.Size));
7708 		aac_printf(softs, CE_NOTE, "     SenderSize %d",
7709 		    ddi_get16(acc, &fibp->Header.SenderSize));
7710 		aac_printf(softs, CE_NOTE, "     SenderAddr 0x%x",
7711 		    ddi_get32(acc, &fibp->Header.SenderFibAddress));
7712 		aac_printf(softs, CE_NOTE, "     RcvrAddr   0x%x",
7713 		    ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
7714 		aac_printf(softs, CE_NOTE, "     SenderData 0x%x",
7715 		    ddi_get32(acc, &fibp->Header.SenderData));
7716 	}
7717 
7718 	/* Print FIB data */
7719 	switch (fib_cmd) {
7720 	case ContainerCommand:
7721 		sub_cmd = ddi_get32(acc,
7722 		    (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
7723 		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
7724 		if (subcmdstr == NULL)
7725 			break;
7726 
7727 		switch (sub_cmd) {
7728 		case VM_ContainerConfig: {
7729 			struct aac_Container *pContainer =
7730 			    (struct aac_Container *)fibp->data;
7731 
7732 			fib_cmd = sub_cmd;
7733 			cmdstr = subcmdstr;
7734 			sub_cmd = (uint32_t)-1;
7735 			subcmdstr = NULL;
7736 
7737 			sub_cmd = ddi_get32(acc,
7738 			    &pContainer->CTCommand.command);
7739 			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
7740 			if (subcmdstr == NULL)
7741 				break;
7742 			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
7743 			    subcmdstr,
7744 			    ddi_get32(acc, &pContainer->CTCommand.param[0]),
7745 			    ddi_get32(acc, &pContainer->CTCommand.param[1]),
7746 			    ddi_get32(acc, &pContainer->CTCommand.param[2]));
7747 			return;
7748 		}
7749 
7750 		case VM_Ioctl:
7751 			fib_cmd = sub_cmd;
7752 			cmdstr = subcmdstr;
7753 			sub_cmd = (uint32_t)-1;
7754 			subcmdstr = NULL;
7755 
7756 			sub_cmd = ddi_get32(acc,
7757 			    (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
7758 			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
7759 			break;
7760 
7761 		case VM_CtBlockRead:
7762 		case VM_CtBlockWrite: {
7763 			struct aac_blockread *br =
7764 			    (struct aac_blockread *)fibp->data;
7765 			struct aac_sg_table *sg = &br->SgMap;
7766 			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7767 
7768 			aac_printf(softs, CE_NOTE,
7769 			    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7770 			    ddi_get32(acc, &br->ContainerId),
7771 			    ddi_get32(acc, &br->BlockNumber),
7772 			    ddi_get32(acc, &br->ByteCount));
7773 			for (i = 0; i < sgcount; i++)
7774 				aac_printf(softs, CE_NOTE,
7775 				    "     %d: 0x%08x/%d", i,
7776 				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
7777 				    ddi_get32(acc, &sg->SgEntry[i]. \
7778 				    SgByteCount));
7779 			return;
7780 		}
7781 		}
7782 		break;
7783 
7784 	case ContainerCommand64: {
7785 		struct aac_blockread64 *br =
7786 		    (struct aac_blockread64 *)fibp->data;
7787 		struct aac_sg_table64 *sg = &br->SgMap64;
7788 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7789 		uint64_t sgaddr;
7790 
7791 		sub_cmd = br->Command;
7792 		subcmdstr = NULL;
7793 		if (sub_cmd == VM_CtHostRead64)
7794 			subcmdstr = "VM_CtHostRead64";
7795 		else if (sub_cmd == VM_CtHostWrite64)
7796 			subcmdstr = "VM_CtHostWrite64";
7797 		else
7798 			break;
7799 
7800 		aac_printf(softs, CE_NOTE,
7801 		    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7802 		    ddi_get16(acc, &br->ContainerId),
7803 		    ddi_get32(acc, &br->BlockNumber),
7804 		    ddi_get16(acc, &br->SectorCount));
7805 		for (i = 0; i < sgcount; i++) {
7806 			sgaddr = ddi_get64(acc,
7807 			    &sg->SgEntry64[i].SgAddress);
7808 			aac_printf(softs, CE_NOTE,
7809 			    "     %d: 0x%08x.%08x/%d", i,
7810 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7811 			    ddi_get32(acc, &sg->SgEntry64[i]. \
7812 			    SgByteCount));
7813 		}
7814 		return;
7815 	}
7816 
7817 	case RawIo: {
7818 		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
7819 		struct aac_sg_tableraw *sg = &io->SgMapRaw;
7820 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7821 		uint64_t sgaddr;
7822 
7823 		aac_printf(softs, CE_NOTE,
7824 		    "FIB> RawIo Container %d  0x%llx/%d 0x%x",
7825 		    ddi_get16(acc, &io->ContainerId),
7826 		    ddi_get64(acc, &io->BlockNumber),
7827 		    ddi_get32(acc, &io->ByteCount),
7828 		    ddi_get16(acc, &io->Flags));
7829 		for (i = 0; i < sgcount; i++) {
7830 			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
7831 			aac_printf(softs, CE_NOTE, "     %d: 0x%08x.%08x/%d", i,
7832 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7833 			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
7834 		}
7835 		return;
7836 	}
7837 
7838 	case ClusterCommand:
7839 		sub_cmd = ddi_get32(acc,
7840 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7841 		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
7842 		break;
7843 
7844 	case AifRequest:
7845 		sub_cmd = ddi_get32(acc,
7846 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7847 		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
7848 		break;
7849 
7850 	default:
7851 		break;
7852 	}
7853 
7854 	fib_size = ddi_get16(acc, &(fibp->Header.Size));
7855 	if (subcmdstr)
7856 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7857 		    subcmdstr, fib_size);
7858 	else if (cmdstr && sub_cmd == (uint32_t)-1)
7859 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7860 		    cmdstr, fib_size);
7861 	else if (cmdstr)
7862 		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
7863 		    cmdstr, sub_cmd, fib_size);
7864 	else
7865 		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
7866 		    fib_cmd, fib_size);
7867 }
7868 
7869 static void
7870 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
7871 {
7872 	int aif_command;
7873 	uint32_t aif_seqnumber;
7874 	int aif_en_type;
7875 	char *str;
7876 
7877 	aif_command = LE_32(aif->command);
7878 	aif_seqnumber = LE_32(aif->seqNumber);
7879 	aif_en_type = LE_32(aif->data.EN.type);
7880 
7881 	switch (aif_command) {
7882 	case AifCmdEventNotify:
7883 		str = aac_cmd_name(aif_en_type, aac_aifens);
7884 		if (str)
7885 			aac_printf(softs, CE_NOTE, "AIF! %s", str);
7886 		else
7887 			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
7888 			    aif_en_type);
7889 		break;
7890 
7891 	case AifCmdJobProgress:
7892 		switch (LE_32(aif->data.PR[0].status)) {
7893 		case AifJobStsSuccess:
7894 			str = "success"; break;
7895 		case AifJobStsFinished:
7896 			str = "finished"; break;
7897 		case AifJobStsAborted:
7898 			str = "aborted"; break;
7899 		case AifJobStsFailed:
7900 			str = "failed"; break;
7901 		case AifJobStsSuspended:
7902 			str = "suspended"; break;
7903 		case AifJobStsRunning:
7904 			str = "running"; break;
7905 		default:
7906 			str = "unknown"; break;
7907 		}
7908 		aac_printf(softs, CE_NOTE,
7909 		    "AIF! JobProgress (%d) - %s (%d, %d)",
7910 		    aif_seqnumber, str,
7911 		    LE_32(aif->data.PR[0].currentTick),
7912 		    LE_32(aif->data.PR[0].finalTick));
7913 		break;
7914 
7915 	case AifCmdAPIReport:
7916 		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
7917 		    aif_seqnumber);
7918 		break;
7919 
7920 	case AifCmdDriverNotify:
7921 		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
7922 		    aif_seqnumber);
7923 		break;
7924 
7925 	default:
7926 		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
7927 		    aif_command, aif_seqnumber);
7928 		break;
7929 	}
7930 }
7931 
7932 #endif /* DEBUG */
7933