xref: /titanic_50/usr/src/uts/common/io/aac/aac.c (revision 24fe0b3bf671e123467ce1df0b67cadd3614c8e4)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright 2005-08 Adaptec, Inc.
8  * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
9  * Copyright (c) 2000 Michael Smith
10  * Copyright (c) 2001 Scott Long
11  * Copyright (c) 2000 BSDi
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 #include <sys/modctl.h>
36 #include <sys/conf.h>
37 #include <sys/cmn_err.h>
38 #include <sys/ddi.h>
39 #include <sys/devops.h>
40 #include <sys/pci.h>
41 #include <sys/types.h>
42 #include <sys/ddidmareq.h>
43 #include <sys/scsi/scsi.h>
44 #include <sys/ksynch.h>
45 #include <sys/sunddi.h>
46 #include <sys/byteorder.h>
47 #include "aac_regs.h"
48 #include "aac.h"
49 
50 /*
51  * FMA header files
52  */
53 #include <sys/ddifm.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/util.h>
56 #include <sys/fm/io/ddi.h>
57 
58 /*
59  * For minor nodes created by the SCSA framework, minor numbers are
60  * formed by left-shifting the instance number by INST_MINOR_SHIFT and
61  * ORing in a number less than 64.
62  *
63  * To support cfgadm, we need to conform to the SCSA framework by
64  * creating devctl/scsi and driver-specific minor nodes in the SCSA
65  * format, and calling the scsi_hba_xxx() functions accordingly.
66  */
67 
68 #define	AAC_MINOR		32
69 #define	INST2AAC(x)		(((x) << INST_MINOR_SHIFT) | AAC_MINOR)
70 #define	AAC_SCSA_MINOR(x)	((x) & TRAN_MINOR_MASK)
71 #define	AAC_IS_SCSA_NODE(x)	((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
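/*
 * Illustrative sketch (not part of the build): given the SCSA convention
 * that minor numbers below 64 encode the node type, the ioctl node for
 * instance i gets minor INST2AAC(i), i.e. (i << INST_MINOR_SHIFT) with
 * AAC_MINOR (32) ORed in, while AAC_SCSA_MINOR() recovers the low
 * node-type bits and AAC_IS_SCSA_NODE() tests whether they name the
 * devctl or scsi node.
 */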
72 
73 #define	SD2TRAN(sd)		((sd)->sd_address.a_hba_tran)
74 #define	AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
75 #define	AAC_DIP2TRAN(dip)	((scsi_hba_tran_t *)ddi_get_driver_private(dip))
76 #define	AAC_DIP2SOFTS(dip)	(AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
77 #define	SD2AAC(sd)		(AAC_TRAN2SOFTS(SD2TRAN(sd)))
78 #define	AAC_PD(t)		((t) - AAC_MAX_LD)
79 #define	AAC_DEV(softs, t)	(((t) < AAC_MAX_LD) ? \
80 				&(softs)->containers[(t)].dev : \
81 				((t) < AAC_MAX_DEV(softs)) ? \
82 				&(softs)->nondasds[AAC_PD(t)].dev : NULL)
83 #define	AAC_DEVCFG_BEGIN(softs, tgt) \
84 				aac_devcfg((softs), (tgt), 1)
85 #define	AAC_DEVCFG_END(softs, tgt) \
86 				aac_devcfg((softs), (tgt), 0)
87 #define	PKT2AC(pkt)		((struct aac_cmd *)(pkt)->pkt_ha_private)
88 #define	AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
89 		if (!(cond)) { \
90 			int count = (timeout) * 10; \
91 			while (count) { \
92 				drv_usecwait(100); \
93 				if (cond) \
94 					break; \
95 				count--; \
96 			} \
97 			(timeout) = (count + 9) / 10; \
98 		} \
99 	}
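/*
 * Usage sketch (fw_ready() is a hypothetical condition, not an API of
 * this driver): poll for up to 100 milliseconds.  On return `timeout'
 * holds the remaining milliseconds, so a value of zero means the
 * condition never became true:
 *
 *	int timeout = 100;
 *	AAC_BUSYWAIT(fw_ready(softs), timeout);
 *	if (timeout == 0)
 *		return (AACERR);
 */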
100 
101 #define	AAC_SENSE_DATA_DESCR_LEN \
102 	(sizeof (struct scsi_descr_sense_hdr) + \
103 	sizeof (struct scsi_information_sense_descr))
104 #define	AAC_ARQ64_LENGTH \
105 	(sizeof (struct scsi_arq_status) + \
106 	AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
107 
108 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
109 #define	AAC_GETGXADDR(cmdlen, cdbp) \
110 	((cmdlen == 6) ? GETG0ADDR(cdbp) : \
111 	(cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
112 	((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
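/*
 * Note: AAC_GETGXADDR decodes the LBA carried in a CDB by its length --
 * 6-byte (group 0) CDBs carry a 21-bit LBA, 10-byte (group 1) CDBs a
 * 32-bit LBA, and 16-byte CDBs a 64-bit LBA whose high and low halves
 * come from GETG4ADDR and GETG4ADDRTL; the (uint32_t) cast keeps the
 * int32_t tail from being sign-extended into the upper half.
 */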
113 
114 #define	AAC_CDB_INQUIRY_CMDDT	0x02
115 #define	AAC_CDB_INQUIRY_EVPD	0x01
116 #define	AAC_VPD_PAGE_CODE	1
117 #define	AAC_VPD_PAGE_LENGTH	3
118 #define	AAC_VPD_PAGE_DATA	4
119 #define	AAC_VPD_ID_CODESET	0
120 #define	AAC_VPD_ID_TYPE		1
121 #define	AAC_VPD_ID_LENGTH	3
122 #define	AAC_VPD_ID_DATA		4
123 
124 #define	AAC_SCSI_RPTLUNS_HEAD_SIZE			0x08
125 #define	AAC_SCSI_RPTLUNS_ADDR_SIZE			0x08
126 #define	AAC_SCSI_RPTLUNS_ADDR_MASK			0xC0
127 /* 00b - peripheral device addressing method */
128 #define	AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL		0x00
129 /* 01b - flat space addressing method */
130 #define	AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE		0x40
131 /* 10b - logical unit addressing method */
132 #define	AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT		0x80
133 
134 /* Return the size of a FIB whose data part is of type data_type */
135 #define	AAC_FIB_SIZEOF(data_type) \
136 	(sizeof (struct aac_fib_header) + sizeof (data_type))
137 /* Return the container size defined in mir */
138 #define	AAC_MIR_SIZE(softs, acc, mir) \
139 	(((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
140 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
141 	((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
142 	(uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
143 
144 /* The last entry of aac_cards[] is for unknown cards */
145 #define	AAC_UNKNOWN_CARD \
146 	(sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
147 #define	CARD_IS_UNKNOWN(i)	(i == AAC_UNKNOWN_CARD)
148 #define	BUF_IS_READ(bp)		((bp)->b_flags & B_READ)
149 #define	AAC_IS_Q_EMPTY(q)	((q)->q_head == NULL)
150 #define	AAC_CMDQ(acp)		(!((acp)->flags & AAC_CMD_SYNC))
151 
152 #define	PCI_MEM_GET32(softs, off) \
153 	ddi_get32((softs)->pci_mem_handle, \
154 	    (void *)((softs)->pci_mem_base_vaddr + (off)))
155 #define	PCI_MEM_PUT32(softs, off, val) \
156 	ddi_put32((softs)->pci_mem_handle, \
157 	    (void *)((softs)->pci_mem_base_vaddr + (off)), \
158 	    (uint32_t)(val))
159 #define	PCI_MEM_GET16(softs, off) \
160 	ddi_get16((softs)->pci_mem_handle, \
161 	(void *)((softs)->pci_mem_base_vaddr + (off)))
162 #define	PCI_MEM_PUT16(softs, off, val) \
163 	ddi_put16((softs)->pci_mem_handle, \
164 	(void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
165 /* Copy count bytes of host data at valp to device memory at offset off */
166 #define	PCI_MEM_REP_PUT8(softs, off, valp, count) \
167 	ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
168 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
169 	    count, DDI_DEV_AUTOINCR)
170 /* Copy count bytes of device memory at offset off to host address valp */
171 #define	PCI_MEM_REP_GET8(softs, off, valp, count) \
172 	ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
173 	    (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
174 	    count, DDI_DEV_AUTOINCR)
175 #define	AAC_GET_FIELD8(acc, d, s, field) \
176 	(d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
177 #define	AAC_GET_FIELD32(acc, d, s, field) \
178 	(d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
179 #define	AAC_GET_FIELD64(acc, d, s, field) \
180 	(d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
181 #define	AAC_REP_GET_FIELD8(acc, d, s, field, r) \
182 	ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
183 	    (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
184 #define	AAC_REP_GET_FIELD32(acc, d, s, field, r) \
185 	ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
186 	    (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
187 
188 #define	AAC_ENABLE_INTR(softs) { \
189 		if (softs->flags & AAC_FLAGS_NEW_COMM) \
190 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
191 		else \
192 			PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
193 	}
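/*
 * Note: enabling writes the complement of the doorbell interrupt bits
 * for the communication interface in use to AAC_OIMR, leaving only
 * those interrupts unmasked; AAC_DISABLE_INTR below writes ~0 to mask
 * them all.
 */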
194 
195 #define	AAC_DISABLE_INTR(softs)		PCI_MEM_PUT32(softs, AAC_OIMR, ~0)
196 #define	AAC_STATUS_CLR(softs, mask)	PCI_MEM_PUT32(softs, AAC_ODBR, mask)
197 #define	AAC_STATUS_GET(softs)		PCI_MEM_GET32(softs, AAC_ODBR)
198 #define	AAC_NOTIFY(softs, val)		PCI_MEM_PUT32(softs, AAC_IDBR, val)
199 #define	AAC_OUTB_GET(softs)		PCI_MEM_GET32(softs, AAC_OQUE)
200 #define	AAC_OUTB_SET(softs, val)	PCI_MEM_PUT32(softs, AAC_OQUE, val)
201 #define	AAC_FWSTATUS_GET(softs)	\
202 	((softs)->aac_if.aif_get_fwstatus(softs))
203 #define	AAC_MAILBOX_GET(softs, mb) \
204 	((softs)->aac_if.aif_get_mailbox((softs), (mb)))
205 #define	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
206 	((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
207 	    (arg0), (arg1), (arg2), (arg3)))
208 
209 #define	AAC_THROTTLE_DRAIN	-1
210 
211 #define	AAC_QUIESCE_TICK	1	/* 1 second */
212 #define	AAC_QUIESCE_TIMEOUT	180	/* 180 seconds */
213 #define	AAC_DEFAULT_TICK	10	/* 10 seconds */
214 #define	AAC_SYNC_TICK		(30*60)	/* 30 minutes */
215 
216 /* Poll time for aac_do_poll_io() */
217 #define	AAC_POLL_TIME		60	/* 60 seconds */
218 
219 /* IOP reset */
220 #define	AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
221 #define	AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
222 #define	AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */
223 
224 /*
225  * Hardware access functions
226  */
227 static int aac_rx_get_fwstatus(struct aac_softstate *);
228 static int aac_rx_get_mailbox(struct aac_softstate *, int);
229 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
230     uint32_t, uint32_t, uint32_t);
231 static int aac_rkt_get_fwstatus(struct aac_softstate *);
232 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
233 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234     uint32_t, uint32_t, uint32_t);
235 
236 /*
237  * SCSA function prototypes
238  */
239 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
240 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
241 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
242 static int aac_quiesce(dev_info_t *);
243 
244 /*
245  * Interrupt handler functions
246  */
247 static int aac_query_intrs(struct aac_softstate *, int);
248 static int aac_add_intrs(struct aac_softstate *);
249 static void aac_remove_intrs(struct aac_softstate *);
250 static uint_t aac_intr_old(caddr_t);
251 static uint_t aac_intr_new(caddr_t);
252 static uint_t aac_softintr(caddr_t);
253 
254 /*
255  * Internal functions in attach
256  */
257 static int aac_check_card_type(struct aac_softstate *);
258 static int aac_check_firmware(struct aac_softstate *);
259 static int aac_common_attach(struct aac_softstate *);
260 static void aac_common_detach(struct aac_softstate *);
261 static int aac_probe_containers(struct aac_softstate *);
262 static int aac_alloc_comm_space(struct aac_softstate *);
263 static int aac_setup_comm_space(struct aac_softstate *);
264 static void aac_free_comm_space(struct aac_softstate *);
265 static int aac_hba_setup(struct aac_softstate *);
266 
267 /*
268  * Sync FIB operation functions
269  */
270 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
271     uint32_t, uint32_t, uint32_t, uint32_t *);
272 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
273 
274 /*
275  * Command queue operation functions
276  */
277 static void aac_cmd_initq(struct aac_cmd_queue *);
278 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
279 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
280 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
281 
282 /*
283  * FIB queue operation functions
284  */
285 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
286 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
287 
288 /*
289  * Slot operation functions
290  */
291 static int aac_create_slots(struct aac_softstate *);
292 static void aac_destroy_slots(struct aac_softstate *);
293 static void aac_alloc_fibs(struct aac_softstate *);
294 static void aac_destroy_fibs(struct aac_softstate *);
295 static struct aac_slot *aac_get_slot(struct aac_softstate *);
296 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
297 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
298 static void aac_free_fib(struct aac_slot *);
299 
300 /*
301  * Internal functions
302  */
303 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_slot *,
304     uint16_t, uint16_t);
305 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
306 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
307 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
308 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
309 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
310 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
311 static void aac_start_waiting_io(struct aac_softstate *);
312 static void aac_drain_comp_q(struct aac_softstate *);
313 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
314 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
315 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
316 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
317 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
318 static int aac_dma_sync_ac(struct aac_cmd *);
319 static int aac_shutdown(struct aac_softstate *);
320 static int aac_reset_adapter(struct aac_softstate *);
321 static int aac_do_quiesce(struct aac_softstate *softs);
322 static int aac_do_unquiesce(struct aac_softstate *softs);
323 static void aac_unhold_bus(struct aac_softstate *, int);
324 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
325     int, int);
326 
327 /*
328  * Adapter Initiated FIB handling function
329  */
330 static int aac_handle_aif(struct aac_softstate *, struct aac_fib *);
331 
332 /*
333  * Timeout handling thread function
334  */
335 static void aac_daemon(void *);
336 
337 /*
338  * IOCTL interface related functions
339  */
340 static int aac_open(dev_t *, int, int, cred_t *);
341 static int aac_close(dev_t, int, int, cred_t *);
342 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
343 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
344 
345 /*
346  * FMA Prototypes
347  */
348 static void aac_fm_init(struct aac_softstate *);
349 static void aac_fm_fini(struct aac_softstate *);
350 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
351 int aac_check_acc_handle(ddi_acc_handle_t);
352 int aac_check_dma_handle(ddi_dma_handle_t);
353 void aac_fm_ereport(struct aac_softstate *, char *);
354 
355 /*
356  * Auto enumeration functions
357  */
358 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
359 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
360     void *, dev_info_t **);
361 static int aac_dr_event(struct aac_softstate *, int, int, int);
362 
363 #ifdef DEBUG
364 /*
365  * UART	debug output support
366  */
367 
368 #define	AAC_PRINT_BUFFER_SIZE		512
369 #define	AAC_PRINT_TIMEOUT		250	/* 1/4 sec. = 250 msec. */
370 
371 #define	AAC_FW_DBG_STRLEN_OFFSET	0x00
372 #define	AAC_FW_DBG_FLAGS_OFFSET		0x04
373 #define	AAC_FW_DBG_BLED_OFFSET		0x08
374 
375 static int aac_get_fw_debug_buffer(struct aac_softstate *);
376 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
377 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
378 
379 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
380 static char aac_fmt[] = " %s";
381 static char aac_fmt_header[] = " %s.%d: %s";
382 static kmutex_t aac_prt_mutex;
383 
384 /*
385  * Debug flags to be put into the softstate flags field
386  * when initialized
387  */
388 uint32_t aac_debug_flags =
389 /*    AACDB_FLAGS_KERNEL_PRINT | */
390 /*    AACDB_FLAGS_FW_PRINT |	*/
391 /*    AACDB_FLAGS_MISC |	*/
392 /*    AACDB_FLAGS_FUNC1 |	*/
393 /*    AACDB_FLAGS_FUNC2 |	*/
394 /*    AACDB_FLAGS_SCMD |	*/
395 /*    AACDB_FLAGS_AIF |		*/
396 /*    AACDB_FLAGS_FIB |		*/
397 /*    AACDB_FLAGS_IOCTL |	*/
398 0;
399 uint32_t aac_debug_fib_flags =
400 /*    AACDB_FLAGS_FIB_RW |	*/
401 /*    AACDB_FLAGS_FIB_IOCTL |	*/
402 /*    AACDB_FLAGS_FIB_SRB |	*/
403 /*    AACDB_FLAGS_FIB_SYNC |	*/
404 /*    AACDB_FLAGS_FIB_HEADER |	*/
405 /*    AACDB_FLAGS_FIB_TIMEOUT |	*/
406 0;
407 
408 #endif /* DEBUG */
409 
410 static struct cb_ops aac_cb_ops = {
411 	aac_open,	/* open */
412 	aac_close,	/* close */
413 	nodev,		/* strategy */
414 	nodev,		/* print */
415 	nodev,		/* dump */
416 	nodev,		/* read */
417 	nodev,		/* write */
418 	aac_ioctl,	/* ioctl */
419 	nodev,		/* devmap */
420 	nodev,		/* mmap */
421 	nodev,		/* segmap */
422 	nochpoll,	/* poll */
423 	ddi_prop_op,	/* cb_prop_op */
424 	NULL,		/* streamtab */
425 	D_64BIT | D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
426 	CB_REV,		/* cb_rev */
427 	nodev,		/* async I/O read entry point */
428 	nodev		/* async I/O write entry point */
429 };
430 
431 static struct dev_ops aac_dev_ops = {
432 	DEVO_REV,
433 	0,
434 	nodev,
435 	nulldev,
436 	nulldev,
437 	aac_attach,
438 	aac_detach,
439 	aac_reset,
440 	&aac_cb_ops,
441 	NULL,
442 	NULL,
443 	aac_quiesce,
444 };
445 
446 static struct modldrv aac_modldrv = {
447 	&mod_driverops,
448 	"AAC Driver " AAC_DRIVER_VERSION,
449 	&aac_dev_ops,
450 };
451 
452 static struct modlinkage aac_modlinkage = {
453 	MODREV_1,
454 	&aac_modldrv,
455 	NULL
456 };
457 
458 static struct aac_softstate  *aac_softstatep;
459 
460 /*
461  * Supported card list
462  * ordered by vendor id, subvendor id, subdevice id, and device id
463  */
464 static struct aac_card_type aac_cards[] = {
465 	{0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
466 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
467 	    "Dell", "PERC 3/Di"},
468 	{0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
469 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
470 	    "Dell", "PERC 3/Di"},
471 	{0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
472 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
473 	    "Dell", "PERC 3/Si"},
474 	{0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
475 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
476 	    "Dell", "PERC 3/Di"},
477 	{0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
478 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
479 	    "Dell", "PERC 3/Si"},
480 	{0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
481 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
482 	    "Dell", "PERC 3/Di"},
483 	{0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
484 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
485 	    "Dell", "PERC 3/Di"},
486 	{0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
487 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
488 	    "Dell", "PERC 3/Di"},
489 	{0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
490 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
491 	    "Dell", "PERC 3/Di"},
492 	{0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
493 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
494 	    "Dell", "PERC 3/Di"},
495 	{0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
496 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
497 	    "Dell", "PERC 320/DC"},
498 	{0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
499 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
500 
501 	{0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
502 	    0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
503 	{0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
504 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
505 	{0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
506 	    0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
507 
508 	{0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
509 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
510 	{0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
511 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
512 
513 	{0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
514 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
515 	    "Adaptec", "2200S"},
516 	{0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
517 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
518 	    "Adaptec", "2120S"},
519 	{0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
520 	    AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
521 	    "Adaptec", "2200S"},
522 	{0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
523 	    0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
524 	{0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
525 	    0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
526 	{0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
527 	    0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
528 	{0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
529 	    0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
530 	{0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
531 	    0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
532 	{0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
533 	    0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
534 	{0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
535 	    0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
536 	{0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
537 	    0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
538 	{0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
539 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
540 	{0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
541 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
542 	{0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
543 	    AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
544 	{0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
545 	    0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
546 	{0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
547 	    0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
548 	{0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
549 	    0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
550 	{0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
551 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
552 	{0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
553 	    0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
554 	{0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
555 	    0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
556 	{0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
557 	    0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
558 	{0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
559 	    0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
560 	{0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
561 	    0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
562 	{0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
563 	    0, AAC_TYPE_SATA, "ICP", "9024RO"},
564 	{0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
565 	    0, AAC_TYPE_SATA, "ICP", "9014RO"},
566 	{0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
567 	    0, AAC_TYPE_SATA, "ICP", "9047MA"},
568 	{0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
569 	    0, AAC_TYPE_SATA, "ICP", "9087MA"},
570 	{0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
571 	    0, AAC_TYPE_SAS, "ICP", "9085LI"},
572 	{0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
573 	    0, AAC_TYPE_SAS, "ICP", "5085BR"},
574 	{0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
575 	    0, AAC_TYPE_SATA, "ICP", "9067MA"},
576 	{0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
577 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
578 	{0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
579 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
580 	{0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
581 	    0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
582 	{0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
583 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
584 	{0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
585 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
586 	{0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
587 	    0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
588 
589 	{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
590 	    0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
591 };
592 
593 /*
594  * Hardware access functions for i960 based cards
595  */
596 static struct aac_interface aac_rx_interface = {
597 	aac_rx_get_fwstatus,
598 	aac_rx_get_mailbox,
599 	aac_rx_set_mailbox
600 };
601 
602 /*
603  * Hardware access functions for Rocket based cards
604  */
605 static struct aac_interface aac_rkt_interface = {
606 	aac_rkt_get_fwstatus,
607 	aac_rkt_get_mailbox,
608 	aac_rkt_set_mailbox
609 };
610 
611 ddi_device_acc_attr_t aac_acc_attr = {
612 	DDI_DEVICE_ATTR_V0,
613 	DDI_STRUCTURE_LE_ACC,
614 	DDI_STRICTORDER_ACC
615 };
616 
617 static struct {
618 	int	size;
619 	int	notify;
620 } aac_qinfo[] = {
621 	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
622 	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
623 	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
624 	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
625 	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
626 	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
627 	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
628 	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
629 };
630 
631 /*
632  * Default aac dma attributes
633  */
634 static ddi_dma_attr_t aac_dma_attr = {
635 	DMA_ATTR_V0,
636 	0,		/* lowest usable address */
637 	0xffffffffull,	/* high DMA address range */
638 	0xffffffffull,	/* DMA counter register */
639 	AAC_DMA_ALIGN,	/* DMA address alignment */
640 	1,		/* DMA burstsizes */
641 	1,		/* min effective DMA size */
642 	0xffffffffull,	/* max DMA xfer size */
643 	0xffffffffull,	/* segment boundary */
644 	1,		/* s/g list length */
645 	AAC_BLK_SIZE,	/* granularity of device */
646 	0		/* DMA transfer flags */
647 };
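/*
 * Note: aac_attach() copies this template into both buf_dma_attr and
 * addr_dma_attr, then relaxes addr_dma_attr's granularity to one byte.
 */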
648 
649 struct aac_drinfo {
650 	struct aac_softstate *softs;
651 	int tgt;
652 	int lun;
653 	int event;
654 };
655 
656 static int aac_tick = AAC_DEFAULT_TICK;	/* tick for the internal timer */
657 static uint32_t aac_timebase = 0;	/* internal timer in seconds */
658 static uint32_t aac_sync_time = 0;	/* next time to sync. with firmware */
659 
660 /*
661  * Warlock directives
662  *
663  * Different variables with the same types have to be protected by the
664  * same mutex; otherwise, warlock will complain with "variables don't
665  * seem to be protected consistently". For example,
666  * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected
667  * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
668  * declare them as protected explicitly at aac_cmd_dequeue().
669  */
670 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
671     scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
672     mode_format mode_geometry mode_header aac_cmd))
673 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
674     aac_sge))
675 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
676     aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
677     aac_sg_table aac_srb))
678 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
679 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
680 _NOTE(SCHEME_PROTECTS_DATA("unique to dr event", aac_drinfo))
681 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
682 
683 int
684 _init(void)
685 {
686 	int rval = 0;
687 
688 #ifdef DEBUG
689 	mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
690 #endif
691 	DBCALLED(NULL, 1);
692 
693 	if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
694 	    sizeof (struct aac_softstate), 0)) != 0)
695 		goto error;
696 
697 	if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
698 		ddi_soft_state_fini((void *)&aac_softstatep);
699 		goto error;
700 	}
701 
702 	if ((rval = mod_install(&aac_modlinkage)) != 0) {
703 		ddi_soft_state_fini((void *)&aac_softstatep);
704 		scsi_hba_fini(&aac_modlinkage);
705 		goto error;
706 	}
707 	return (rval);
708 
709 error:
710 	AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
711 #ifdef DEBUG
712 	mutex_destroy(&aac_prt_mutex);
713 #endif
714 	return (rval);
715 }
716 
717 int
718 _info(struct modinfo *modinfop)
719 {
720 	DBCALLED(NULL, 1);
721 	return (mod_info(&aac_modlinkage, modinfop));
722 }
723 
724 /*
725  * An HBA driver cannot be unloaded without a reboot,
726  * so this function is of little practical use.
727  */
728 int
729 _fini(void)
730 {
731 	int rval;
732 
733 	DBCALLED(NULL, 1);
734 
735 	if ((rval = mod_remove(&aac_modlinkage)) != 0)
736 		goto error;
737 
738 	scsi_hba_fini(&aac_modlinkage);
739 	ddi_soft_state_fini((void *)&aac_softstatep);
740 #ifdef DEBUG
741 	mutex_destroy(&aac_prt_mutex);
742 #endif
743 	return (0);
744 
745 error:
746 	AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
747 	return (rval);
748 }
749 
750 int aac_use_msi = 1;
751 
752 static int
753 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
754 {
755 	int instance, i;
756 	struct aac_softstate *softs = NULL;
757 	int attach_state = 0;
758 	char *data;
759 	int intr_types;
760 
761 	DBCALLED(NULL, 1);
762 
763 	switch (cmd) {
764 	case DDI_ATTACH:
765 		break;
766 	case DDI_RESUME:
767 		return (DDI_FAILURE);
768 	default:
769 		return (DDI_FAILURE);
770 	}
771 
772 	instance = ddi_get_instance(dip);
773 
774 	/* Get soft state */
775 	if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
776 		AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
777 		goto error;
778 	}
779 	softs = ddi_get_soft_state(aac_softstatep, instance);
780 	attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
781 
782 	softs->instance = instance;
783 	softs->devinfo_p = dip;
784 	softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
785 	softs->addr_dma_attr.dma_attr_granular = 1;
786 	softs->acc_attr = aac_acc_attr;
787 	softs->card = AAC_UNKNOWN_CARD;
788 #ifdef DEBUG
789 	softs->debug_flags = aac_debug_flags;
790 	softs->debug_fib_flags = aac_debug_fib_flags;
791 #endif
792 
793 	/* Initialize FMA */
794 	aac_fm_init(softs);
795 
796 	/* Check the card type */
797 	if (aac_check_card_type(softs) == AACERR) {
798 		AACDB_PRINT(softs, CE_WARN, "Card not supported");
799 		goto error;
800 	}
801 	/* We have found the right card and everything is OK */
802 	attach_state |= AAC_ATTACH_CARD_DETECTED;
803 
804 	/* Map PCI mem space */
805 	if (ddi_regs_map_setup(dip, 1,
806 	    (caddr_t *)&softs->pci_mem_base_vaddr, 0,
807 	    softs->map_size_min, &softs->acc_attr,
808 	    &softs->pci_mem_handle) != DDI_SUCCESS)
809 		goto error;
810 
811 	softs->map_size = softs->map_size_min;
812 	attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
813 
814 	AAC_DISABLE_INTR(softs);
815 
816 	/* Get the supported device interrupt types */
817 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
818 		AACDB_PRINT(softs, CE_WARN,
819 		    "ddi_intr_get_supported_types() failed");
820 		goto error;
821 	}
822 	AACDB_PRINT(softs, CE_NOTE,
823 	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
824 
825 	/* Query interrupts, and allocate/initialize all needed structures */
826 	if ((intr_types & DDI_INTR_TYPE_MSI) && aac_use_msi) {
827 		if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
828 		    != DDI_SUCCESS) {
829 			AACDB_PRINT(softs, CE_WARN,
830 			    "MSI interrupt query failed");
831 			goto error;
832 		}
833 		softs->intr_type = DDI_INTR_TYPE_MSI;
834 	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
835 		if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
836 		    != DDI_SUCCESS) {
837 			AACDB_PRINT(softs, CE_WARN,
838 			    "FIXED interrupt query failed");
839 			goto error;
840 		}
841 		softs->intr_type = DDI_INTR_TYPE_FIXED;
842 	} else {
843 		AACDB_PRINT(softs, CE_WARN,
844 		    "Neither MSI nor FIXED interrupts are available");
845 		goto error;
846 	}
847 
848 	/* Init mutexes */
849 	mutex_init(&softs->q_comp_mutex, NULL,
850 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
851 	cv_init(&softs->event, NULL, CV_DRIVER, NULL);
852 	mutex_init(&softs->aifq_mutex, NULL,
853 	    MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
854 	cv_init(&softs->aifv, NULL, CV_DRIVER, NULL);
855 	cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
856 	mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
857 	    DDI_INTR_PRI(softs->intr_pri));
858 	attach_state |= AAC_ATTACH_KMUTEX_INITED;
859 
860 	/* Check for legacy device naming support */
861 	softs->legacy = 1; /* default to use legacy name */
862 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
863 	    "legacy-name-enable", &data) == DDI_SUCCESS)) {
864 		if (strcmp(data, "no") == 0) {
865 			AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
866 			softs->legacy = 0;
867 		}
868 		ddi_prop_free(data);
869 	}
870 
871 	/*
872 	 * Everything needed so far has been set up;
873 	 * now do the common attach work.
874 	 */
875 	if (aac_common_attach(softs) == AACERR)
876 		goto error;
877 	attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
878 
879 	/* Check for buf breakup support */
880 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
881 	    "breakup-enable", &data) == DDI_SUCCESS)) {
882 		if (strcmp(data, "yes") == 0) {
883 			AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
884 			softs->flags |= AAC_FLAGS_BRKUP;
885 		}
886 		ddi_prop_free(data);
887 	}
888 	softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
889 	if (softs->flags & AAC_FLAGS_BRKUP) {
890 		softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
891 		    DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
892 	}
893 
894 	/* Init the cmd queues */
895 	for (i = 0; i < AAC_CMDQ_NUM; i++)
896 		aac_cmd_initq(&softs->q_wait[i]);
897 	aac_cmd_initq(&softs->q_busy);
898 	aac_cmd_initq(&softs->q_comp);
899 
900 	if (aac_hba_setup(softs) != AACOK)
901 		goto error;
902 	attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
903 
904 	/* Connect interrupt handlers */
905 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
906 	    NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
907 		AACDB_PRINT(softs, CE_WARN,
908 		    "Cannot set up the soft interrupt handler!");
909 		goto error;
910 	}
911 	attach_state |= AAC_ATTACH_SOFT_INTR_SETUP;
912 
913 	if (aac_add_intrs(softs) != DDI_SUCCESS) {
914 		AACDB_PRINT(softs, CE_WARN,
915 		    "Interrupt registration failed, intr type: %s",
916 		    softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
917 		goto error;
918 	}
919 	attach_state |= AAC_ATTACH_HARD_INTR_SETUP;
920 
921 	/* Create devctl/scsi nodes for cfgadm */
922 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
923 	    INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
924 		AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
925 		goto error;
926 	}
927 	attach_state |= AAC_ATTACH_CREATE_DEVCTL;
928 
929 	if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
930 	    DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
931 		AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
932 		goto error;
933 	}
934 	attach_state |= AAC_ATTACH_CREATE_SCSI;
935 
936 	/* Create the aac node for applications to issue ioctls */
937 	if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
938 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
939 		AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
940 		goto error;
941 	}
942 
943 	/* Create a taskq for dealing with dr events */
944 	if ((softs->taskq = ddi_taskq_create(dip, "aac_dr_taskq", 1,
945 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
946 		AACDB_PRINT(softs, CE_WARN, "ddi_taskq_create failed");
947 		goto error;
948 	}
949 
950 	aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
951 	softs->state = AAC_STATE_RUN;
952 
953 	/* Schedule the timeout handler for command timeout processing */
954 	softs->timeout_id = timeout(aac_daemon, (void *)softs,
955 	    (60 * drv_usectohz(1000000)));
956 
957 	/* Common attach is OK, so we are attached! */
958 	AAC_ENABLE_INTR(softs);
959 	ddi_report_dev(dip);
960 	AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
961 	return (DDI_SUCCESS);
962 
963 error:
964 	if (softs && softs->taskq)
965 		ddi_taskq_destroy(softs->taskq);
966 	if (attach_state & AAC_ATTACH_CREATE_SCSI)
967 		ddi_remove_minor_node(dip, "scsi");
968 	if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
969 		ddi_remove_minor_node(dip, "devctl");
970 	if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
971 		aac_common_detach(softs);
972 	if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
973 		(void) scsi_hba_detach(dip);
974 		scsi_hba_tran_free(AAC_DIP2TRAN(dip));
975 	}
976 	if (attach_state & AAC_ATTACH_HARD_INTR_SETUP)
977 		aac_remove_intrs(softs);
978 	if (attach_state & AAC_ATTACH_SOFT_INTR_SETUP)
979 		ddi_remove_softintr(softs->softint_id);
980 	if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
981 		mutex_destroy(&softs->q_comp_mutex);
982 		cv_destroy(&softs->event);
983 		mutex_destroy(&softs->aifq_mutex);
984 		cv_destroy(&softs->aifv);
985 		cv_destroy(&softs->drain_cv);
986 		mutex_destroy(&softs->io_lock);
987 	}
988 	if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
989 		ddi_regs_map_free(&softs->pci_mem_handle);
990 	aac_fm_fini(softs);
991 	if (attach_state & AAC_ATTACH_CARD_DETECTED)
992 		softs->card = AACERR;
993 	if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
994 		ddi_soft_state_free(aac_softstatep, instance);
995 	return (DDI_FAILURE);
996 }
997 
998 static int
999 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1000 {
1001 	scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
1002 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
1003 
1004 	DBCALLED(softs, 1);
1005 
1006 	switch (cmd) {
1007 	case DDI_DETACH:
1008 		break;
1009 	case DDI_SUSPEND:
1010 		return (DDI_FAILURE);
1011 	default:
1012 		return (DDI_FAILURE);
1013 	}
1014 
1015 	mutex_enter(&softs->io_lock);
1016 	AAC_DISABLE_INTR(softs);
1017 	softs->state = AAC_STATE_STOPPED;
1018 
1019 	mutex_exit(&softs->io_lock);
1020 	(void) untimeout(softs->timeout_id);
1021 	mutex_enter(&softs->io_lock);
1022 	softs->timeout_id = 0;
1023 
1024 	ddi_taskq_destroy(softs->taskq);
1025 
1026 	ddi_remove_minor_node(dip, "aac");
1027 	ddi_remove_minor_node(dip, "scsi");
1028 	ddi_remove_minor_node(dip, "devctl");
1029 
1030 	mutex_exit(&softs->io_lock);
1031 	aac_remove_intrs(softs);
1032 	ddi_remove_softintr(softs->softint_id);
1033 
1034 	aac_common_detach(softs);
1035 
1036 	(void) scsi_hba_detach(dip);
1037 	scsi_hba_tran_free(tran);
1038 
1039 	mutex_destroy(&softs->q_comp_mutex);
1040 	cv_destroy(&softs->event);
1041 	mutex_destroy(&softs->aifq_mutex);
1042 	cv_destroy(&softs->aifv);
1043 	cv_destroy(&softs->drain_cv);
1044 	mutex_destroy(&softs->io_lock);
1045 
1046 	ddi_regs_map_free(&softs->pci_mem_handle);
1047 	aac_fm_fini(softs);
1048 	softs->hwif = AAC_HWIF_UNKNOWN;
1049 	softs->card = AAC_UNKNOWN_CARD;
1050 	ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1051 
1052 	return (DDI_SUCCESS);
1053 }
1054 
1055 /*ARGSUSED*/
1056 static int
1057 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1058 {
1059 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1060 
1061 	DBCALLED(softs, 1);
1062 
1063 	mutex_enter(&softs->io_lock);
1064 	(void) aac_shutdown(softs);
1065 	mutex_exit(&softs->io_lock);
1066 
1067 	return (DDI_SUCCESS);
1068 }
1069 
1070 /*
1071  * quiesce(9E) entry point.
1072  *
1073  * This function is called when the system is single-threaded at high
1074  * PIL with preemption disabled. Therefore, this function must not
1075  * block.
1076  *
1077  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1078  * DDI_FAILURE indicates an error condition and should almost never happen.
1079  */
1080 static int
1081 aac_quiesce(dev_info_t *dip)
1082 {
1083 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1084 
1085 	if (softs == NULL)
1086 		return (DDI_FAILURE);
1087 
1088 	AAC_DISABLE_INTR(softs);
1089 
1090 	return (DDI_SUCCESS);
1091 }
1092 
1093 /*
1094  * Bring the controller down to a dormant state and detach all child devices.
1095  * This function is called before detach or system shutdown.
1096  * Note: we can assume that the q_wait on the controller is empty, as we
1097  * won't allow shutdown if any device is open.
1098  */
1099 static int
1100 aac_shutdown(struct aac_softstate *softs)
1101 {
1102 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
1103 	struct aac_close_command *cc = (struct aac_close_command *) \
1104 	    &softs->sync_slot.fibp->data[0];
1105 	int rval;
1106 
1107 	ddi_put32(acc, &cc->Command, VM_CloseAll);
1108 	ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1109 
1110 	/* Flush all caches and set the FW to write-through mode */
1111 	rval = aac_sync_fib(softs, ContainerCommand,
1112 	    AAC_FIB_SIZEOF(struct aac_close_command));
1113 
1114 	AACDB_PRINT(softs, CE_NOTE,
1115 	    "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1116 	return (rval);
1117 }
1118 
1119 static uint_t
1120 aac_softintr(caddr_t arg)
1121 {
1122 	struct aac_softstate *softs = (void *)arg;
1123 
1124 	if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1125 		aac_drain_comp_q(softs);
1126 		return (DDI_INTR_CLAIMED);
1127 	} else {
1128 		return (DDI_INTR_UNCLAIMED);
1129 	}
1130 }
1131 
1132 /*
1133  * Setup auto sense data for pkt
1134  */
1135 static void
1136 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1137     uchar_t add_code, uchar_t qual_code, uint64_t info)
1138 {
1139 	struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1140 
1141 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1142 	pkt->pkt_state |= STATE_ARQ_DONE;
1143 
1144 	*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1145 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
1146 	arqstat->sts_rqpkt_resid = 0;
1147 	arqstat->sts_rqpkt_state =
1148 	    STATE_GOT_BUS |
1149 	    STATE_GOT_TARGET |
1150 	    STATE_SENT_CMD |
1151 	    STATE_XFERRED_DATA;
1152 	arqstat->sts_rqpkt_statistics = 0;
1153 
1154 	if (info <= 0xfffffffful) {
1155 		arqstat->sts_sensedata.es_valid = 1;
1156 		arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1157 		arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1158 		arqstat->sts_sensedata.es_key = key;
1159 		arqstat->sts_sensedata.es_add_code = add_code;
1160 		arqstat->sts_sensedata.es_qual_code = qual_code;
1161 
1162 		arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1163 		arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1164 		arqstat->sts_sensedata.es_info_3 = (info >>  8) & 0xFF;
1165 		arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1166 	} else { /* 64-bit LBA */
1167 		struct scsi_descr_sense_hdr *dsp;
1168 		struct scsi_information_sense_descr *isd;
1169 
1170 		dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1171 		dsp->ds_class = CLASS_EXTENDED_SENSE;
1172 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1173 		dsp->ds_key = key;
1174 		dsp->ds_add_code = add_code;
1175 		dsp->ds_qual_code = qual_code;
1176 		dsp->ds_addl_sense_length =
1177 		    sizeof (struct scsi_information_sense_descr);
1178 
1179 		isd = (struct scsi_information_sense_descr *)(dsp+1);
1180 		isd->isd_descr_type = DESCR_INFORMATION;
1181 		isd->isd_valid = 1;
1182 		isd->isd_information[0] = (info >> 56) & 0xFF;
1183 		isd->isd_information[1] = (info >> 48) & 0xFF;
1184 		isd->isd_information[2] = (info >> 40) & 0xFF;
1185 		isd->isd_information[3] = (info >> 32) & 0xFF;
1186 		isd->isd_information[4] = (info >> 24) & 0xFF;
1187 		isd->isd_information[5] = (info >> 16) & 0xFF;
1188 		isd->isd_information[6] = (info >>  8) & 0xFF;
1189 		isd->isd_information[7] = (info) & 0xFF;
1190 	}
1191 }
1192 
1193 /*
1194  * Setup auto sense data for HARDWARE ERROR
1195  */
1196 static void
1197 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1198 {
1199 	union scsi_cdb *cdbp;
1200 	uint64_t err_blkno;
1201 
1202 	cdbp = (void *)acp->pkt->pkt_cdbp;
1203 	err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1204 	aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1205 }
1206 
1207 /*
1208  * Setup auto sense data for UNIT ATTENTION
1209  */
1210 /*ARGSUSED*/
1211 static void
1212 aac_set_arq_data_reset(struct aac_softstate *softs, struct aac_cmd *acp)
1213 {
1214 	struct aac_container *dvp = (struct aac_container *)acp->dvp;
1215 
1216 	ASSERT(dvp->dev.type == AAC_DEV_LD);
1217 
1218 	if (dvp->reset) {
1219 		dvp->reset = 0;
1220 		aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION, 0x29, 0x02, 0);
1221 	}
1222 }
1223 
1224 /*
1225  * Send a command to the adapter via the New Comm. interface
1226  */
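/*
 * Submission sketch, as implemented below: read AAC_IQUE to obtain a
 * free inbound-queue slot offset (0xffffffff means none is available,
 * in which case we retry once), write the FIB's 64-bit physical
 * address (low word first) and its size at that offset, then write
 * the offset back to AAC_IQUE to hand the FIB to the firmware.
 */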
1227 static int
1228 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1229 {
1230 	uint32_t index, device;
1231 
1232 	index = PCI_MEM_GET32(softs, AAC_IQUE);
1233 	if (index == 0xffffffffUL) {
1234 		index = PCI_MEM_GET32(softs, AAC_IQUE);
1235 		if (index == 0xffffffffUL)
1236 			return (AACERR);
1237 	}
1238 
1239 	device = index;
1240 	PCI_MEM_PUT32(softs, device,
1241 	    (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1242 	device += 4;
1243 	PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1244 	device += 4;
1245 	PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1246 	PCI_MEM_PUT32(softs, AAC_IQUE, index);
1247 	return (AACOK);
1248 }
1249 
1250 static void
1251 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1252 {
1253 	struct aac_device *dvp = acp->dvp;
1254 	int q = AAC_CMDQ(acp);
1255 
1256 	if (acp->slotp) { /* outstanding cmd */
1257 		aac_release_slot(softs, acp->slotp);
1258 		acp->slotp = NULL;
1259 		if (dvp) {
1260 			dvp->ncmds[q]--;
1261 			if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1262 			    dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1263 				aac_set_throttle(softs, dvp, q,
1264 				    softs->total_slots);
1265 		}
1266 		softs->bus_ncmds[q]--;
1267 		(void) aac_cmd_delete(&softs->q_busy, acp);
1268 	} else { /* cmd in waiting queue */
1269 		aac_cmd_delete(&softs->q_wait[q], acp);
1270 	}
1271 
1272 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1273 		mutex_enter(&softs->q_comp_mutex);
1274 		aac_cmd_enqueue(&softs->q_comp, acp);
1275 		mutex_exit(&softs->q_comp_mutex);
1276 	} else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1277 		cv_broadcast(&softs->event);
1278 	}
1279 }
1280 
1281 static void
1282 aac_handle_io(struct aac_softstate *softs, int index)
1283 {
1284 	struct aac_slot *slotp;
1285 	struct aac_cmd *acp;
1286 	uint32_t fast;
1287 
1288 	fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1289 	index >>= 2;
1290 
1291 	/* Make sure firmware reported index is valid */
1292 	ASSERT(index >= 0 && index < softs->total_slots);
1293 	slotp = &softs->io_slot[index];
1294 	ASSERT(slotp->index == index);
1295 	acp = slotp->acp;
1296 
1297 	if (acp == NULL || acp->slotp != slotp) {
1298 		cmn_err(CE_WARN,
1299 		    "Firmware error: invalid slot index received from FW");
1300 		return;
1301 	}
1302 
1303 	acp->flags |= AAC_CMD_CMPLT;
1304 	(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1305 
1306 	if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1307 		/*
1308 		 * For fast response IO, the firmware does not return any FIB
1309 		 * data, so we need to fill in the FIB status and state so that
1310 		 * FIB users can handle it correctly.
1311 		 */
1312 		if (fast) {
1313 			uint32_t state;
1314 
1315 			state = ddi_get32(slotp->fib_acc_handle,
1316 			    &slotp->fibp->Header.XferState);
1317 			/*
1318 			 * Update the state for the CPU, not for the device;
1319 			 * no DMA sync is needed
1320 			 */
1321 			ddi_put32(slotp->fib_acc_handle,
1322 			    &slotp->fibp->Header.XferState,
1323 			    state | AAC_FIBSTATE_DONEADAP);
1324 			ddi_put32(slotp->fib_acc_handle,
1325 			    (void *)&slotp->fibp->data[0], ST_OK);
1326 		}
1327 
1328 		/* Handle completed ac */
1329 		acp->ac_comp(softs, acp);
1330 	} else {
1331 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1332 		acp->flags |= AAC_CMD_ERR;
1333 		if (acp->pkt) {
1334 			acp->pkt->pkt_reason = CMD_TRAN_ERR;
1335 			acp->pkt->pkt_statistics = 0;
1336 		}
1337 	}
1338 	aac_end_io(softs, acp);
1339 }
1340 
1341 /*
1342  * Interrupt handler for the New Comm. interface
1343  * The New Comm. interface uses a different interrupt mechanism: there are
1344  * no explicit message queues, and the driver need only access the mapped
1345  * PCI memory space to find completed FIBs or AIFs.
1346  */
1347 static int
1348 aac_process_intr_new(struct aac_softstate *softs)
1349 {
1350 	uint32_t index;
1351 
1352 	index = AAC_OUTB_GET(softs);
1353 	if (index == 0xfffffffful)
1354 		index = AAC_OUTB_GET(softs);
1355 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1356 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1357 		return (0);
1358 	}
1359 	if (index != 0xfffffffful) {
1360 		do {
1361 			if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1362 				aac_handle_io(softs, index);
1363 			} else if (index != 0xfffffffeul) {
1364 				struct aac_fib *fibp;	/* FIB in AIF queue */
1365 				uint16_t fib_size, fib_size0;
1366 
1367 				/*
1368 				 * An index of 0xfffffffe means the controller
1369 				 * wants more work; ignore it for now.
1370 				 * Otherwise an AIF has been received.
1371 				 */
1372 				index &= ~2;
1373 
1374 				mutex_enter(&softs->aifq_mutex);
1375 				/*
1376 				 * Copy AIF from adapter to the empty AIF slot
1377 				 */
1378 				fibp = &softs->aifq[softs->aifq_idx].d;
1379 				fib_size0 = PCI_MEM_GET16(softs, index + \
1380 				    offsetof(struct aac_fib, Header.Size));
1381 				fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1382 				    AAC_FIB_SIZE : fib_size0;
1383 				PCI_MEM_REP_GET8(softs, index, fibp,
1384 				    fib_size);
1385 
1386 				if (aac_check_acc_handle(softs-> \
1387 				    pci_mem_handle) == DDI_SUCCESS)
1388 					(void) aac_handle_aif(softs, fibp);
1389 				else
1390 					ddi_fm_service_impact(softs->devinfo_p,
1391 					    DDI_SERVICE_UNAFFECTED);
1392 				mutex_exit(&softs->aifq_mutex);
1393 
1394 				/*
1395 				 * AIF memory is owned by the adapter, so let it
1396 				 * know that we are done with it.
1397 				 */
1398 				AAC_OUTB_SET(softs, index);
1399 				AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1400 			}
1401 
1402 			index = AAC_OUTB_GET(softs);
1403 		} while (index != 0xfffffffful);
1404 
1405 		/*
1406 		 * Process waiting cmds before starting new ones to
1407 		 * ensure that earlier IOs are serviced first.
1408 		 */
1409 		aac_start_waiting_io(softs);
1410 		return (AAC_DB_COMMAND_READY);
1411 	} else {
1412 		return (0);
1413 	}
1414 }
1415 
1416 static uint_t
1417 aac_intr_new(caddr_t arg)
1418 {
1419 	struct aac_softstate *softs = (void *)arg;
1420 	uint_t rval;
1421 
1422 	mutex_enter(&softs->io_lock);
1423 	if (aac_process_intr_new(softs))
1424 		rval = DDI_INTR_CLAIMED;
1425 	else
1426 		rval = DDI_INTR_UNCLAIMED;
1427 	mutex_exit(&softs->io_lock);
1428 
1429 	aac_drain_comp_q(softs);
1430 	return (rval);
1431 }
1432 
1433 /*
1434  * Interrupt handler for the old interface
1435  * Explicit message queues are used to send FIBs to and get completed FIBs
1436  * from the adapter. The driver and the adapter maintain the queues in a
1437  * producer/consumer manner; the driver has to query them for completed FIBs.
1438  */
1439 static int
1440 aac_process_intr_old(struct aac_softstate *softs)
1441 {
1442 	uint16_t status;
1443 
1444 	status = AAC_STATUS_GET(softs);
1445 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1446 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1447 		return (DDI_INTR_UNCLAIMED);
1448 	}
1449 	if (status & AAC_DB_RESPONSE_READY) {
1450 		int slot_idx;
1451 
1452 		/* ACK the intr */
1453 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1454 		(void) AAC_STATUS_GET(softs);
1455 		while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1456 		    &slot_idx) == AACOK)
1457 			aac_handle_io(softs, slot_idx);
1458 
1459 		/*
1460 		 * Process waiting cmds before starting new ones to
1461 		 * ensure that earlier IOs are serviced first.
1462 		 */
1463 		aac_start_waiting_io(softs);
1464 		return (AAC_DB_RESPONSE_READY);
1465 	} else if (status & AAC_DB_COMMAND_READY) {
1466 		int aif_idx;
1467 
1468 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1469 		(void) AAC_STATUS_GET(softs);
1470 		if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1471 		    AACOK) {
1472 			ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1473 			struct aac_fib *fibp;	/* FIB in AIF queue */
1474 			struct aac_fib *fibp0;	/* FIB in communication space */
1475 			uint16_t fib_size, fib_size0;
1476 			uint32_t fib_xfer_state;
1477 			uint32_t addr, size;
1478 
1479 			ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1480 
1481 #define	AAC_SYNC_AIF(softs, aif_idx, type) \
1482 	{ (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1483 	    offsetof(struct aac_comm_space, \
1484 	    adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1485 	    (type)); }
1486 
1487 			mutex_enter(&softs->aifq_mutex);
1488 			/* Copy AIF from adapter to the empty AIF slot */
1489 			fibp = &softs->aifq[softs->aifq_idx].d;
1490 			AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1491 			fibp0 = &softs->comm_space->adapter_fibs[aif_idx];
1492 			fib_size0 = ddi_get16(acc, &fibp0->Header.Size);
1493 			fib_size = (fib_size0 > AAC_FIB_SIZE) ?
1494 			    AAC_FIB_SIZE : fib_size0;
1495 			ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0,
1496 			    fib_size, DDI_DEV_AUTOINCR);
1497 
1498 			(void) aac_handle_aif(softs, fibp);
1499 			mutex_exit(&softs->aifq_mutex);
1500 
1501 			/* Complete AIF back to adapter with good status */
1502 			fib_xfer_state = LE_32(fibp->Header.XferState);
1503 			if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1504 				ddi_put32(acc, &fibp0->Header.XferState,
1505 				    fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1506 				ddi_put32(acc, (void *)&fibp0->data[0], ST_OK);
1507 				if (fib_size0 > AAC_FIB_SIZE)
1508 					ddi_put16(acc, &fibp0->Header.Size,
1509 					    AAC_FIB_SIZE);
1510 				AAC_SYNC_AIF(softs, aif_idx,
1511 				    DDI_DMA_SYNC_FORDEV);
1512 			}
1513 
1514 			/* Put the AIF response on the response queue */
1515 			addr = ddi_get32(acc,
1516 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1517 			    Header.SenderFibAddress);
1518 			size = (uint32_t)ddi_get16(acc,
1519 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1520 			    Header.Size);
1521 			ddi_put32(acc,
1522 			    &softs->comm_space->adapter_fibs[aif_idx]. \
1523 			    Header.ReceiverFibAddress, addr);
1524 			if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1525 			    addr, size) == AACERR)
1526 				cmn_err(CE_NOTE, "!AIF ack failed");
1527 		}
1528 		return (AAC_DB_COMMAND_READY);
1529 	} else if (status & AAC_DB_PRINTF_READY) {
1530 		/* ACK the intr */
1531 		AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1532 		(void) AAC_STATUS_GET(softs);
1533 		(void) ddi_dma_sync(softs->comm_space_dma_handle,
1534 		    offsetof(struct aac_comm_space, adapter_print_buf),
1535 		    AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1536 		if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1537 		    DDI_SUCCESS)
1538 			cmn_err(CE_NOTE, "MSG From Adapter: %s",
1539 			    softs->comm_space->adapter_print_buf);
1540 		else
1541 			ddi_fm_service_impact(softs->devinfo_p,
1542 			    DDI_SERVICE_UNAFFECTED);
1543 		AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1544 		return (AAC_DB_PRINTF_READY);
1545 	} else if (status & AAC_DB_COMMAND_NOT_FULL) {
1546 		/*
1547 		 * Without these two condition statements, the OS could hang
1548 		 * after a while, especially if there are a lot of AIFs to
1549 		 * handle, for instance if a drive is pulled from an array
1550 		 * under heavy load.
1551 		 */
1552 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1553 		return (AAC_DB_COMMAND_NOT_FULL);
1554 	} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1555 		AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1556 		AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1557 		return (AAC_DB_RESPONSE_NOT_FULL);
1558 	} else {
1559 		return (0);
1560 	}
1561 }
1562 
1563 static uint_t
1564 aac_intr_old(caddr_t arg)
1565 {
1566 	struct aac_softstate *softs = (void *)arg;
1567 	int rval;
1568 
1569 	mutex_enter(&softs->io_lock);
1570 	if (aac_process_intr_old(softs))
1571 		rval = DDI_INTR_CLAIMED;
1572 	else
1573 		rval = DDI_INTR_UNCLAIMED;
1574 	mutex_exit(&softs->io_lock);
1575 
1576 	aac_drain_comp_q(softs);
1577 	return (rval);
1578 }
1579 
1580 /*
1581  * Query FIXED or MSI interrupts
1582  */
1583 static int
1584 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1585 {
1586 	dev_info_t *dip = softs->devinfo_p;
1587 	int avail, actual, intr_size, count;
1588 	int i, flag, ret;
1589 
1590 	AACDB_PRINT(softs, CE_NOTE,
1591 	    "aac_query_intrs: interrupt type 0x%x", intr_type);
1592 
1593 	/* Get number of interrupts */
1594 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1595 	if ((ret != DDI_SUCCESS) || (count == 0)) {
1596 		AACDB_PRINT(softs, CE_WARN,
1597 		    "ddi_intr_get_nintrs() failed, ret %d count %d",
1598 		    ret, count);
1599 		return (DDI_FAILURE);
1600 	}
1601 
1602 	/* Get number of available interrupts */
1603 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
1604 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
1605 		AACDB_PRINT(softs, CE_WARN,
1606 		    "ddi_intr_get_navail() failed, ret %d avail %d",
1607 		    ret, avail);
1608 		return (DDI_FAILURE);
1609 	}
1610 
1611 	AACDB_PRINT(softs, CE_NOTE,
1612 	    "ddi_intr_get_nintrs() returned %d, navail() returned %d",
1613 	    count, avail);
1614 
1615 	/* Allocate an array of interrupt handles */
1616 	intr_size = count * sizeof (ddi_intr_handle_t);
1617 	softs->htable = kmem_alloc(intr_size, KM_SLEEP);
1618 
1619 	if (intr_type == DDI_INTR_TYPE_MSI) {
1620 		count = 1; /* only one vector needed by now */
1621 		flag = DDI_INTR_ALLOC_STRICT;
1622 	} else { /* must be DDI_INTR_TYPE_FIXED */
1623 		flag = DDI_INTR_ALLOC_NORMAL;
1624 	}
1625 
1626 	/* Call ddi_intr_alloc() */
1627 	ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1628 	    count, &actual, flag);
1629 
1630 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
1631 		AACDB_PRINT(softs, CE_WARN,
1632 		    "ddi_intr_alloc() failed, ret = %d", ret);
1633 		actual = 0;
1634 		goto error;
1635 	}
1636 
1637 	if (actual < count) {
1638 		AACDB_PRINT(softs, CE_NOTE,
1639 		    "Requested: %d, Received: %d", count, actual);
1640 		goto error;
1641 	}
1642 
1643 	softs->intr_cnt = actual;
1644 
1645 	/* Get priority for first MSI, assume remaining are all the same */
1646 	if ((ret = ddi_intr_get_pri(softs->htable[0],
1647 	    &softs->intr_pri)) != DDI_SUCCESS) {
1648 		AACDB_PRINT(softs, CE_WARN,
1649 		    "ddi_intr_get_pri() failed, ret = %d", ret);
1650 		goto error;
1651 	}
1652 
1653 	/* Test for high level mutex */
1654 	if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1655 		AACDB_PRINT(softs, CE_WARN,
1656 		    "aac_query_intrs: Hi level interrupt not supported");
1657 		goto error;
1658 	}
1659 
1660 	return (DDI_SUCCESS);
1661 
1662 error:
1663 	/* Free already allocated intr */
1664 	for (i = 0; i < actual; i++)
1665 		(void) ddi_intr_free(softs->htable[i]);
1666 
1667 	kmem_free(softs->htable, intr_size);
1668 	return (DDI_FAILURE);
1669 }
1670 
1671 
1672 /*
1673  * Register FIXED or MSI interrupts, and enable them
1674  */
1675 static int
1676 aac_add_intrs(struct aac_softstate *softs)
1677 {
1678 	int i, ret;
1679 	int intr_size, actual;
1680 	ddi_intr_handler_t *aac_intr;
1681 
1682 	actual = softs->intr_cnt;
1683 	intr_size = actual * sizeof (ddi_intr_handle_t);
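	/* Pick the interrupt handler that matches the communication interface */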
1684 	aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1685 	    aac_intr_new : aac_intr_old);
1686 
1687 	/* Call ddi_intr_add_handler() */
1688 	for (i = 0; i < actual; i++) {
1689 		if ((ret = ddi_intr_add_handler(softs->htable[i],
1690 		    aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1691 			cmn_err(CE_WARN,
1692 			    "ddi_intr_add_handler() failed ret = %d", ret);
1693 
1694 			/* Free already allocated intr */
1695 			for (i = 0; i < actual; i++)
1696 				(void) ddi_intr_free(softs->htable[i]);
1697 
1698 			kmem_free(softs->htable, intr_size);
1699 			return (DDI_FAILURE);
1700 		}
1701 	}
1702 
1703 	if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1704 	    != DDI_SUCCESS) {
1705 		cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1706 
1707 		/* Free already allocated intr */
1708 		for (i = 0; i < actual; i++)
1709 			(void) ddi_intr_free(softs->htable[i]);
1710 
1711 		kmem_free(softs->htable, intr_size);
1712 		return (DDI_FAILURE);
1713 	}
1714 
1715 	/* Enable interrupts */
1716 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1717 		/* for MSI block enable */
1718 		(void) ddi_intr_block_enable(softs->htable, softs->intr_cnt);
1719 	} else {
1720 		/* Call ddi_intr_enable() for legacy/MSI non block enable */
1721 		for (i = 0; i < softs->intr_cnt; i++)
1722 			(void) ddi_intr_enable(softs->htable[i]);
1723 	}
1724 
1725 	return (DDI_SUCCESS);
1726 }
1727 
1728 /*
1729  * Unregister FIXED or MSI interrupts
1730  */
1731 static void
1732 aac_remove_intrs(struct aac_softstate *softs)
1733 {
1734 	int i;
1735 
1736 	/* Disable all interrupts */
1737 	if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1738 		/* Call ddi_intr_block_disable() */
1739 		(void) ddi_intr_block_disable(softs->htable, softs->intr_cnt);
1740 	} else {
1741 		for (i = 0; i < softs->intr_cnt; i++)
1742 			(void) ddi_intr_disable(softs->htable[i]);
1743 	}
1744 
1745 	/* Call ddi_intr_remove_handler() */
1746 	for (i = 0; i < softs->intr_cnt; i++) {
1747 		(void) ddi_intr_remove_handler(softs->htable[i]);
1748 		(void) ddi_intr_free(softs->htable[i]);
1749 	}
1750 
1751 	kmem_free(softs->htable, softs->intr_cnt * sizeof (ddi_intr_handle_t));
1752 }
1753 
1754 /*
1755  * Set pkt_reason and OR in pkt_statistics flag
1756  */
1757 static void
1758 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1759     uchar_t reason, uint_t stat)
1760 {
1761 #ifndef __lock_lint
1762 	_NOTE(ARGUNUSED(softs))
1763 #endif
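	/* Record only the first failure reason; do not overwrite an earlier one */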
1764 	if (acp->pkt->pkt_reason == CMD_CMPLT)
1765 		acp->pkt->pkt_reason = reason;
1766 	acp->pkt->pkt_statistics |= stat;
1767 }
1768 
1769 /*
1770  * Handle a finished pkt of soft SCMD
1771  */
1772 static void
1773 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1774 {
1775 	ASSERT(acp->pkt);
1776 
1777 	acp->flags |= AAC_CMD_CMPLT;
1778 
1779 	acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1780 	    STATE_SENT_CMD | STATE_GOT_STATUS;
1781 	if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1782 		acp->pkt->pkt_resid = 0;
1783 
1784 	/* AAC_CMD_NO_INTR means no completion callback */
1785 	if (!(acp->flags & AAC_CMD_NO_INTR)) {
1786 		mutex_enter(&softs->q_comp_mutex);
1787 		aac_cmd_enqueue(&softs->q_comp, acp);
1788 		mutex_exit(&softs->q_comp_mutex);
1789 		ddi_trigger_softintr(softs->softint_id);
1790 	}
1791 }
1792 
1793 /*
1794  * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1795  */
1796 
1797 /*
1798  * Handle completed logical device IO command
1799  */
1800 /*ARGSUSED*/
1801 static void
1802 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1803 {
1804 	struct aac_slot *slotp = acp->slotp;
1805 	struct aac_blockread_response *resp;
1806 	uint32_t status;
1807 
1808 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1809 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1810 
1811 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
1812 
1813 	/*
1814 	 * block_read and block_write have similar response headers, so use
1815 	 * the blockread response for both.
1816 	 */
1817 	resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1818 	status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1819 	if (status == ST_OK) {
1820 		acp->pkt->pkt_resid = 0;
1821 		acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1822 	} else {
1823 		aac_set_arq_data_hwerr(acp);
1824 	}
1825 }
1826 
1827 /*
1828  * Handle completed phys. device IO command
1829  */
1830 static void
1831 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1832 {
1833 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1834 	struct aac_fib *fibp = acp->slotp->fibp;
1835 	struct scsi_pkt *pkt = acp->pkt;
1836 	struct aac_srb_reply *resp;
1837 	uint32_t resp_status;
1838 
1839 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
1840 	ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1841 
1842 	resp = (struct aac_srb_reply *)&fibp->data[0];
1843 	resp_status = ddi_get32(acc, &resp->status);
1844 
1845 	/* First check FIB status */
1846 	if (resp_status == ST_OK) {
1847 		uint32_t scsi_status;
1848 		uint32_t srb_status;
1849 		uint32_t data_xfer_length;
1850 
1851 		scsi_status = ddi_get32(acc, &resp->scsi_status);
1852 		srb_status = ddi_get32(acc, &resp->srb_status);
1853 		data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1854 
1855 		*pkt->pkt_scbp = (uint8_t)scsi_status;
1856 		pkt->pkt_state |= STATE_GOT_STATUS;
1857 		if (scsi_status == STATUS_GOOD) {
1858 			uchar_t cmd = ((union scsi_cdb *)(void *)
1859 			    (pkt->pkt_cdbp))->scc_cmd;
1860 
1861 			/* Next check SRB status */
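			/* Only the low 6 bits of srb_status carry the status code */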
1862 			switch (srb_status & 0x3f) {
1863 			case SRB_STATUS_DATA_OVERRUN:
1864 				AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1865 				    "scmd=%d, xfer=%d, buflen=%d",
1866 				    (uint32_t)cmd, data_xfer_length,
1867 				    acp->bcount);
1868 
1869 				switch (cmd) {
1870 				case SCMD_READ:
1871 				case SCMD_WRITE:
1872 				case SCMD_READ_G1:
1873 				case SCMD_WRITE_G1:
1874 				case SCMD_READ_G4:
1875 				case SCMD_WRITE_G4:
1876 				case SCMD_READ_G5:
1877 				case SCMD_WRITE_G5:
1878 					aac_set_pkt_reason(softs, acp,
1879 					    CMD_DATA_OVR, 0);
1880 					break;
1881 				}
1882 				/*FALLTHRU*/
1883 			case SRB_STATUS_ERROR_RECOVERY:
1884 			case SRB_STATUS_PENDING:
1885 			case SRB_STATUS_SUCCESS:
1886 				/*
1887 				 * pkt_resid should only be calculated if the
1888 				 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1889 				 * OVERRUN/UNDERRUN
1890 				 */
1891 				if (data_xfer_length) {
1892 					pkt->pkt_state |= STATE_XFERRED_DATA;
1893 					pkt->pkt_resid = acp->bcount - \
1894 					    data_xfer_length;
1895 					ASSERT(pkt->pkt_resid >= 0);
1896 				}
1897 				break;
1898 			case SRB_STATUS_ABORTED:
1899 				AACDB_PRINT(softs, CE_NOTE,
1900 				    "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1901 				    data_xfer_length, pkt->pkt_resid);
1902 				aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1903 				    STAT_ABORTED);
1904 				break;
1905 			case SRB_STATUS_ABORT_FAILED:
1906 				AACDB_PRINT(softs, CE_NOTE,
1907 				    "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1908 				    "resid=%d", data_xfer_length,
1909 				    pkt->pkt_resid);
1910 				aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1911 				    0);
1912 				break;
1913 			case SRB_STATUS_PARITY_ERROR:
1914 				AACDB_PRINT(softs, CE_NOTE,
1915 				    "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1916 				    "resid=%d", data_xfer_length,
1917 				    pkt->pkt_resid);
1918 				aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1919 				break;
1920 			case SRB_STATUS_NO_DEVICE:
1921 			case SRB_STATUS_INVALID_PATH_ID:
1922 			case SRB_STATUS_INVALID_TARGET_ID:
1923 			case SRB_STATUS_INVALID_LUN:
1924 			case SRB_STATUS_SELECTION_TIMEOUT:
1925 #ifdef DEBUG
1926 				if (AAC_DEV_IS_VALID(acp->dvp)) {
1927 					AACDB_PRINT(softs, CE_NOTE,
1928 					    "SRB_STATUS_NO_DEVICE(%d), " \
1929 					    "xfer=%d, resid=%d ",
1930 					    srb_status & 0x3f,
1931 					    data_xfer_length, pkt->pkt_resid);
1932 				}
1933 #endif
1934 				aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1935 				break;
1936 			case SRB_STATUS_COMMAND_TIMEOUT:
1937 			case SRB_STATUS_TIMEOUT:
1938 				AACDB_PRINT(softs, CE_NOTE,
1939 				    "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1940 				    "resid=%d", data_xfer_length,
1941 				    pkt->pkt_resid);
1942 				aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1943 				    STAT_TIMEOUT);
1944 				break;
1945 			case SRB_STATUS_BUS_RESET:
1946 				AACDB_PRINT(softs, CE_NOTE,
1947 				    "SRB_STATUS_BUS_RESET, xfer=%d, " \
1948 				    "resid=%d", data_xfer_length,
1949 				    pkt->pkt_resid);
1950 				aac_set_pkt_reason(softs, acp, CMD_RESET,
1951 				    STAT_BUS_RESET);
1952 				break;
1953 			default:
1954 				AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1955 				    "xfer=%d, resid=%d", srb_status & 0x3f,
1956 				    data_xfer_length, pkt->pkt_resid);
1957 				aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1958 				break;
1959 			}
1960 		} else if (scsi_status == STATUS_CHECK) {
1961 			/* CHECK CONDITION */
1962 			struct scsi_arq_status *arqstat =
1963 			    (void *)(pkt->pkt_scbp);
1964 			uint32_t sense_data_size;
1965 
1966 			pkt->pkt_state |= STATE_ARQ_DONE;
1967 
1968 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1969 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
1970 			arqstat->sts_rqpkt_resid = 0;
1971 			arqstat->sts_rqpkt_state =
1972 			    STATE_GOT_BUS |
1973 			    STATE_GOT_TARGET |
1974 			    STATE_SENT_CMD |
1975 			    STATE_XFERRED_DATA;
1976 			arqstat->sts_rqpkt_statistics = 0;
1977 
1978 			sense_data_size = ddi_get32(acc,
1979 			    &resp->sense_data_size);
1980 			ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1981 			AACDB_PRINT(softs, CE_NOTE,
1982 			    "CHECK CONDITION: sense len=%d, xfer len=%d",
1983 			    sense_data_size, data_xfer_length);
1984 
1985 			if (sense_data_size > SENSE_LENGTH)
1986 				sense_data_size = SENSE_LENGTH;
1987 			ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1988 			    (uint8_t *)resp->sense_data, sense_data_size,
1989 			    DDI_DEV_AUTOINCR);
1990 		} else {
1991 			AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
1992 			    "scsi_status=%d, srb_status=%d",
1993 			    scsi_status, srb_status);
1994 			aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1995 		}
1996 	} else {
1997 		AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
1998 		    resp_status);
1999 		aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2000 	}
2001 }
2002 
2003 /*
2004  * Handle completed IOCTL command
2005  */
2006 /*ARGSUSED*/
2007 void
2008 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2009 {
2010 	struct aac_slot *slotp = acp->slotp;
2011 
2012 	/*
2013 	 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2014 	 * may wait on softs->event, so use cv_broadcast() instead
2015 	 * of cv_signal().
2016 	 */
2017 	ASSERT(acp->flags & AAC_CMD_SYNC);
2018 	ASSERT(acp->flags & AAC_CMD_NO_CB);
2019 
2020 	/* Get the size of the response FIB from its FIB.Header.Size field */
2021 	acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2022 	    &slotp->fibp->Header.Size);
2023 
2024 	ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2025 	ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2026 	    (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2027 }
2028 
2029 /*
2030  * Handle completed Flush command
2031  */
2032 /*ARGSUSED*/
2033 static void
2034 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2035 {
2036 	struct aac_slot *slotp = acp->slotp;
2037 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
2038 	struct aac_synchronize_reply *resp;
2039 	uint32_t status;
2040 
2041 	ASSERT(!(acp->flags & AAC_CMD_SYNC));
2042 
2043 	acp->pkt->pkt_state |= STATE_GOT_STATUS;
2044 
2045 	resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2046 	status = ddi_get32(acc, &resp->Status);
2047 	if (status != CT_OK)
2048 		aac_set_arq_data_hwerr(acp);
2049 }
2050 
2051 /*
2052  * Access PCI space to see if the driver can support the card
2053  */
2054 static int
2055 aac_check_card_type(struct aac_softstate *softs)
2056 {
2057 	ddi_acc_handle_t pci_config_handle;
2058 	int card_index;
2059 	uint32_t pci_cmd;
2060 
2061 	/* Map pci configuration space */
2062 	if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2063 	    DDI_SUCCESS) {
2064 		AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2065 		return (AACERR);
2066 	}
2067 
2068 	softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2069 	softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2070 	softs->subvendid = pci_config_get16(pci_config_handle,
2071 	    PCI_CONF_SUBVENID);
2072 	softs->subsysid = pci_config_get16(pci_config_handle,
2073 	    PCI_CONF_SUBSYSID);
2074 
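	/* Look the card up in the known-cards table by its PCI ids */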
2075 	card_index = 0;
2076 	while (!CARD_IS_UNKNOWN(card_index)) {
2077 		if ((aac_cards[card_index].vendor == softs->vendid) &&
2078 		    (aac_cards[card_index].device == softs->devid) &&
2079 		    (aac_cards[card_index].subvendor == softs->subvendid) &&
2080 		    (aac_cards[card_index].subsys == softs->subsysid)) {
2081 			break;
2082 		}
2083 		card_index++;
2084 	}
2085 
2086 	softs->card = card_index;
2087 	softs->hwif = aac_cards[card_index].hwif;
2088 
2089 	/*
2090 	 * Unknown aac card:
2091 	 * do a generic match based on the VendorID and DeviceID to
2092 	 * support the new cards in the aac family.
2093 	 */
2094 	if (CARD_IS_UNKNOWN(card_index)) {
2095 		if (softs->vendid != 0x9005) {
2096 			AACDB_PRINT(softs, CE_WARN,
2097 			    "Unknown vendor 0x%x", softs->vendid);
2098 			goto error;
2099 		}
2100 		switch (softs->devid) {
2101 		case 0x285:
2102 			softs->hwif = AAC_HWIF_I960RX;
2103 			break;
2104 		case 0x286:
2105 			softs->hwif = AAC_HWIF_RKT;
2106 			break;
2107 		default:
2108 			AACDB_PRINT(softs, CE_WARN,
2109 			    "Unknown device \"pci9005,%x\"", softs->devid);
2110 			goto error;
2111 		}
2112 	}
2113 
2114 	/* Set hardware dependent interface */
2115 	switch (softs->hwif) {
2116 	case AAC_HWIF_I960RX:
2117 		softs->aac_if = aac_rx_interface;
2118 		softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2119 		break;
2120 	case AAC_HWIF_RKT:
2121 		softs->aac_if = aac_rkt_interface;
2122 		softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2123 		break;
2124 	default:
2125 		AACDB_PRINT(softs, CE_WARN,
2126 		    "Unknown hardware interface %d", softs->hwif);
2127 		goto error;
2128 	}
2129 
2130 	/* Set card names */
2131 	(void *)strncpy(softs->vendor_name, aac_cards[card_index].vid,
2132 	    AAC_VENDOR_LEN);
2133 	(void *)strncpy(softs->product_name, aac_cards[card_index].desc,
2134 	    AAC_PRODUCT_LEN);
2135 
2136 	/* Set up quirks */
2137 	softs->flags = aac_cards[card_index].quirks;
2138 
2139 	/* Force the busmaster enable bit on */
2140 	pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2141 	if ((pci_cmd & PCI_COMM_ME) == 0) {
2142 		pci_cmd |= PCI_COMM_ME;
2143 		pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2144 		pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2145 		if ((pci_cmd & PCI_COMM_ME) == 0) {
2146 			cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2147 			goto error;
2148 		}
2149 	}
2150 
2151 	/* Set memory base to map (mask off the low-order BAR flag bits) */
2152 	softs->pci_mem_base_paddr = 0xfffffff0UL & \
2153 	    pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2154 
2155 	pci_config_teardown(&pci_config_handle);
2156 
2157 	return (AACOK); /* card type detected */
2158 error:
2159 	pci_config_teardown(&pci_config_handle);
2160 	return (AACERR); /* no matched card found */
2161 }
2162 
2163 /*
2164  * Check the firmware to determine the features to support and the FIB
2165  * parameters to use.
2166  */
2167 static int
2168 aac_check_firmware(struct aac_softstate *softs)
2169 {
2170 	uint32_t options;
2171 	uint32_t atu_size;
2172 	ddi_acc_handle_t pci_handle;
2173 	uint8_t *data;
2174 	uint32_t max_fibs;
2175 	uint32_t max_fib_size;
2176 	uint32_t sg_tablesize;
2177 	uint32_t max_sectors;
2178 	uint32_t status;
2179 
2180 	/* Get supported options */
2181 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2182 	    &status)) != AACOK) {
2183 		if (status != SRB_STATUS_INVALID_REQUEST) {
2184 			cmn_err(CE_CONT,
2185 			    "?Fatal error: request adapter info error");
2186 			return (AACERR);
2187 		}
2188 		options = 0;
2189 		atu_size = 0;
2190 	} else {
2191 		options = AAC_MAILBOX_GET(softs, 1);
2192 		atu_size = AAC_MAILBOX_GET(softs, 2);
2193 	}
2194 
2195 	if (softs->state & AAC_STATE_RESET) {
2196 		if ((softs->support_opt == options) &&
2197 		    (softs->atu_size == atu_size))
2198 			return (AACOK);
2199 
2200 		cmn_err(CE_WARN,
2201 		    "?Fatal error: firmware changed, system needs reboot");
2202 		return (AACERR);
2203 	}
2204 
2205 	/*
2206 	 * The following critical settings are initialized only once during
2207 	 * driver attachment.
2208 	 */
2209 	softs->support_opt = options;
2210 	softs->atu_size = atu_size;
2211 
2212 	/* Process supported options */
2213 	if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2214 	    (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2215 		AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2216 		softs->flags |= AAC_FLAGS_4GB_WINDOW;
2217 	} else {
2218 		/*
2219 		 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2220 		 * only. IO is handled by the DMA engine which does not suffer
2221 		 * from the ATU window programming workarounds necessary for
2222 		 * CPU copy operations.
2223 		 */
2224 		softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2225 		softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2226 	}
2227 
2228 	if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2229 		AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2230 		softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2231 		softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2232 		softs->flags |= AAC_FLAGS_SG_64BIT;
2233 	}
2234 
2235 	if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2236 		softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2237 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2238 	}
2239 
2240 	if (options & AAC_SUPPORTED_NONDASD) {
2241 		if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2242 		    "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2243 			if (strcmp((char *)data, "yes") == 0) {
2244 				AACDB_PRINT(softs, CE_NOTE,
2245 				    "!Enable Non-DASD access");
2246 				softs->flags |= AAC_FLAGS_NONDASD;
2247 			}
2248 			ddi_prop_free(data);
2249 		}
2250 	}
2251 
2252 	/* Read preferred settings */
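	/*
	 * AAC_MONKER_GETCOMMPREF packs its results into the mailbox
	 * registers: FIB size and max sectors in mailbox 1, SG table
	 * size in mailbox 2 and max FIB count in mailbox 3.
	 */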
2253 	max_fib_size = 0;
2254 	if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2255 	    0, 0, 0, 0, NULL)) == AACOK) {
2256 		options = AAC_MAILBOX_GET(softs, 1);
2257 		max_fib_size = (options & 0xffff);
2258 		max_sectors = (options >> 16) << 1;
2259 		options = AAC_MAILBOX_GET(softs, 2);
2260 		sg_tablesize = (options >> 16);
2261 		options = AAC_MAILBOX_GET(softs, 3);
2262 		max_fibs = (options & 0xffff);
2263 	}
2264 
2265 	/* Enable new comm. and rawio at the same time */
2266 	if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2267 	    (max_fib_size != 0)) {
2268 		/* read out and save PCI MBR */
2269 		if ((atu_size > softs->map_size) &&
2270 		    (ddi_regs_map_setup(softs->devinfo_p, 1,
2271 		    (caddr_t *)&data, 0, atu_size, &softs->acc_attr,
2272 		    &pci_handle) == DDI_SUCCESS)) {
2273 			ddi_regs_map_free(&softs->pci_mem_handle);
2274 			softs->pci_mem_handle = pci_handle;
2275 			softs->pci_mem_base_vaddr = data;
2276 			softs->map_size = atu_size;
2277 		}
2278 		if (atu_size == softs->map_size) {
2279 			softs->flags |= AAC_FLAGS_NEW_COMM;
2280 			AACDB_PRINT(softs, CE_NOTE,
2281 			    "!Enable New Comm. interface");
2282 		}
2283 	}
2284 
2285 	/* Set FIB parameters */
2286 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
2287 		softs->aac_max_fibs = max_fibs;
2288 		softs->aac_max_fib_size = max_fib_size;
2289 		softs->aac_max_sectors = max_sectors;
2290 		softs->aac_sg_tablesize = sg_tablesize;
2291 
2292 		softs->flags |= AAC_FLAGS_RAW_IO;
2293 		AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2294 	} else {
2295 		softs->aac_max_fibs =
2296 		    (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2297 		softs->aac_max_fib_size = AAC_FIB_SIZE;
2298 		softs->aac_max_sectors = 128;	/* 64K */
2299 		if (softs->flags & AAC_FLAGS_17SG)
2300 			softs->aac_sg_tablesize = 17;
2301 		else if (softs->flags & AAC_FLAGS_34SG)
2302 			softs->aac_sg_tablesize = 34;
2303 		else if (softs->flags & AAC_FLAGS_SG_64BIT)
2304 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2305 			    sizeof (struct aac_blockwrite64) +
2306 			    sizeof (struct aac_sg_entry64)) /
2307 			    sizeof (struct aac_sg_entry64);
2308 		else
2309 			softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2310 			    sizeof (struct aac_blockwrite) +
2311 			    sizeof (struct aac_sg_entry)) /
2312 			    sizeof (struct aac_sg_entry);
2313 	}
2314 
2315 	if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2316 	    (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2317 		softs->flags |= AAC_FLAGS_LBA_64BIT;
2318 		AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2319 	}
2320 	softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2321 	softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2322 	/*
2323 	 * 64K maximum segment size in scatter gather list is controlled by
2324 	 * the NEW_COMM bit in the adapter information. If not set, the card
2325 	 * can only accept a maximum of 64K. It is not recommended to permit
2326 	 * more than 128KB of total transfer size to the adapters because
2327 	 * performance is negatively impacted.
2328 	 *
2329 	 * For new comm, segment size equals max xfer size. For old comm,
2330 	 * we use 64K for both.
2331 	 */
2332 	softs->buf_dma_attr.dma_attr_count_max =
2333 	    softs->buf_dma_attr.dma_attr_maxxfer - 1;
2334 
2335 	/* Setup FIB operations */
2336 	if (softs->flags & AAC_FLAGS_RAW_IO)
2337 		softs->aac_cmd_fib = aac_cmd_fib_rawio;
2338 	else if (softs->flags & AAC_FLAGS_SG_64BIT)
2339 		softs->aac_cmd_fib = aac_cmd_fib_brw64;
2340 	else
2341 		softs->aac_cmd_fib = aac_cmd_fib_brw;
2342 	softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2343 	    aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2344 
2345 	/* 64-bit LBA needs descriptor format sense data */
2346 	softs->slen = sizeof (struct scsi_arq_status);
2347 	if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2348 	    softs->slen < AAC_ARQ64_LENGTH)
2349 		softs->slen = AAC_ARQ64_LENGTH;
2350 
2351 	AACDB_PRINT(softs, CE_NOTE,
2352 	    "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2353 	    softs->aac_max_fibs, softs->aac_max_fib_size,
2354 	    softs->aac_max_sectors, softs->aac_sg_tablesize);
2355 
2356 	return (AACOK);
2357 }
2358 
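/*
 * Copy a FsaRev structure out of adapter-accessible memory field by field
 */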
2359 static void
2360 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2361     struct FsaRev *fsarev1)
2362 {
2363 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
2364 
2365 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2366 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2367 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2368 	AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2369 	AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2370 }
2371 
2372 /*
2373  * The following function comes from Adaptec:
2374  *
2375  * Query adapter information and supplement adapter information
2376  */
2377 static int
2378 aac_get_adapter_info(struct aac_softstate *softs,
2379     struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2380 {
2381 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2382 	struct aac_fib *fibp = softs->sync_slot.fibp;
2383 	struct aac_adapter_info *ainfp;
2384 	struct aac_supplement_adapter_info *sinfp;
2385 
2386 	ddi_put8(acc, &fibp->data[0], 0);
2387 	if (aac_sync_fib(softs, RequestAdapterInfo,
2388 	    sizeof (struct aac_fib_header)) != AACOK) {
2389 		AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2390 		return (AACERR);
2391 	}
2392 	ainfp = (struct aac_adapter_info *)fibp->data;
2393 	if (ainfr) {
2394 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2395 		AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2396 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2397 		AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2398 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2399 		AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2400 		AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2401 		AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2402 		aac_fsa_rev(softs, &ainfp->KernelRevision,
2403 		    &ainfr->KernelRevision);
2404 		aac_fsa_rev(softs, &ainfp->MonitorRevision,
2405 		    &ainfr->MonitorRevision);
2406 		aac_fsa_rev(softs, &ainfp->HardwareRevision,
2407 		    &ainfr->HardwareRevision);
2408 		aac_fsa_rev(softs, &ainfp->BIOSRevision,
2409 		    &ainfr->BIOSRevision);
2410 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2411 		AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2412 		AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2413 		AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2414 		AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2415 		AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2416 	}
2417 	if (sinfr) {
2418 		if (!(softs->support_opt &
2419 		    AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2420 			AACDB_PRINT(softs, CE_WARN,
2421 			    "SupplementAdapterInfo not supported");
2422 			return (AACERR);
2423 		}
2424 		ddi_put8(acc, &fibp->data[0], 0);
2425 		if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2426 		    sizeof (struct aac_fib_header)) != AACOK) {
2427 			AACDB_PRINT(softs, CE_WARN,
2428 			    "RequestSupplementAdapterInfo failed");
2429 			return (AACERR);
2430 		}
2431 		sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2432 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2433 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2434 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2435 		AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2436 		AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2437 		AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2438 		AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2439 		AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2440 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2441 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2442 		AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2443 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2444 		    sizeof (struct vpd_info));
2445 		aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2446 		    &sinfr->FlashFirmwareRevision);
2447 		AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2448 		aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2449 		    &sinfr->FlashFirmwareBootRevision);
2450 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2451 		    MFG_PCBA_SERIAL_NUMBER_WIDTH);
2452 		AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2453 		    MFG_WWN_WIDTH);
2454 		AAC_REP_GET_FIELD32(acc, sinfr, sinfp, ReservedGrowth[0], 2);
2455 	}
2456 	return (AACOK);
2457 }
2458 
2459 static int
2460 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2461     uint32_t *tgt_max)
2462 {
2463 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2464 	struct aac_fib *fibp = softs->sync_slot.fibp;
2465 	struct aac_ctcfg *c_cmd;
2466 	struct aac_ctcfg_resp *c_resp;
2467 	uint32_t scsi_method_id;
2468 	struct aac_bus_info *cmd;
2469 	struct aac_bus_info_response *resp;
2470 	int rval;
2471 
2472 	/* Detect MethodId */
2473 	c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2474 	ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2475 	ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2476 	ddi_put32(acc, &c_cmd->param, 0);
2477 	rval = aac_sync_fib(softs, ContainerCommand,
2478 	    AAC_FIB_SIZEOF(struct aac_ctcfg));
2479 	c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2480 	if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2481 		AACDB_PRINT(softs, CE_WARN,
2482 		    "VM_ContainerConfig command fail");
2483 		return (AACERR);
2484 	}
2485 	scsi_method_id = ddi_get32(acc, &c_resp->param);
2486 
2487 	/* Detect phys. bus count and max. target id first */
2488 	cmd = (struct aac_bus_info *)&fibp->data[0];
2489 	ddi_put32(acc, &cmd->Command, VM_Ioctl);
2490 	ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2491 	ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2492 	ddi_put32(acc, &cmd->ObjectId, 0);
2493 	ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2494 	/*
2495 	 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2496 	 * driver as the size to be returned. Therefore the driver has to use
2497 	 * sizeof (struct aac_bus_info_response) because it is greater than
2498 	 * sizeof (struct aac_bus_info).
2499 	 */
2500 	rval = aac_sync_fib(softs, ContainerCommand,
2501 	    AAC_FIB_SIZEOF(struct aac_bus_info_response));
2502 	resp = (struct aac_bus_info_response *)cmd;
2503 
2504 	/* Scan all coordinates with INQUIRY */
2505 	if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2506 		AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2507 		return (AACERR);
2508 	}
2509 	*bus_max = ddi_get32(acc, &resp->BusCount);
2510 	*tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2511 	return (AACOK);
2512 }
2513 
2514 /*
2515  * The following function comes from Adaptec:
2516  *
2517  * Routine to be called during initialization of communications with
2518  * the adapter to handle possible adapter configuration issues. When
2519  * the adapter first boots up, it examines attached drives, etc, and
2520  * potentially comes up with a new or revised configuration (relative to
2521  * what's stored in its NVRAM). Additionally it may discover problems
2522  * that make the current physical configuration unworkable (currently
2523  * applicable only to cluster configuration issues).
2524  *
2525  * If there are no configuration issues or the issues are considered
2526  * trivial by the adapter, it will set its configuration status to
2527  * "FSACT_CONTINUE" and execute the "commit configuration" action
2528  * automatically on its own.
2529  *
2530  * However, if there are non-trivial issues, the adapter will set its
2531  * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2532  * and wait for some agent on the host to issue the "\ContainerCommand
2533  * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2534  * adapter to commit the new/updated configuration and enable
2535  * un-inhibited operation.  The host agent should first issue the
2536  * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2537  * command to obtain information about config issues detected by
2538  * the adapter.
2539  *
2540  * Normally the adapter's PC BIOS will execute on the host following
2541  * adapter poweron and reset and will be responsible for querying the
2542  * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2543  * command if appropriate.
2544  *
2545  * However, with the introduction of IOP reset support, the adapter may
2546  * boot up without the benefit of the adapter's PC BIOS host agent.
2547  * This routine is intended to take care of these issues in situations
2548  * where BIOS doesn't execute following adapter poweron or reset.  The
2549  * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2550  * there is no harm in doing this when it's already been done.
2551  */
2552 static int
2553 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2554 {
2555 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
2556 	struct aac_fib *fibp = softs->sync_slot.fibp;
2557 	struct aac_Container *cmd;
2558 	struct aac_Container_resp *resp;
2559 	struct aac_cf_status_header *cfg_sts_hdr;
2560 	uint32_t resp_status;
2561 	uint32_t ct_status;
2562 	uint32_t cfg_stat_action;
2563 	int rval;
2564 
2565 	/* Get adapter config status */
2566 	cmd = (struct aac_Container *)&fibp->data[0];
2567 
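	/* Zero the command, excluding the trailing CT_PACKET_SIZE bytes of data */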
2568 	bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2569 	ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2570 	ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2571 	ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2572 	    sizeof (struct aac_cf_status_header));
2573 	rval = aac_sync_fib(softs, ContainerCommand,
2574 	    AAC_FIB_SIZEOF(struct aac_Container));
2575 	resp = (struct aac_Container_resp *)cmd;
2576 	cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2577 
2578 	resp_status = ddi_get32(acc, &resp->Status);
2579 	ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2580 	if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2581 		cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2582 
2583 		/* Commit configuration if it's reasonable to do so. */
2584 		if (cfg_stat_action <= CFACT_PAUSE) {
2585 			bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2586 			ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2587 			ddi_put32(acc, &cmd->CTCommand.command,
2588 			    CT_COMMIT_CONFIG);
2589 			rval = aac_sync_fib(softs, ContainerCommand,
2590 			    AAC_FIB_SIZEOF(struct aac_Container));
2591 
2592 			resp_status = ddi_get32(acc, &resp->Status);
2593 			ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2594 			if ((rval == AACOK) && (resp_status == 0) &&
2595 			    (ct_status == CT_OK))
2596 				/* Successful completion */
2597 				rval = AACMPE_OK;
2598 			else
2599 				/* Auto-commit aborted due to error(s). */
2600 				rval = AACMPE_COMMIT_CONFIG;
2601 		} else {
2602 			/*
2603 			 * Auto-commit aborted due to adapter indicating
2604 			 * configuration issue(s) too dangerous to auto-commit.
2605 			 */
2606 			rval = AACMPE_CONFIG_STATUS;
2607 		}
2608 	} else {
2609 		cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2610 		rval = AACMPE_CONFIG_STATUS;
2611 	}
2612 	return (rval);
2613 }
2614 
2615 /*
2616  * Hardware initialization and resource allocation
2617  */
2618 static int
2619 aac_common_attach(struct aac_softstate *softs)
2620 {
2621 	uint32_t status;
2622 	int i;
2623 
2624 	DBCALLED(softs, 1);
2625 
2626 	/*
2627 	 * Do a little check here to make sure there aren't any outstanding
2628 	 * FIBs in the message queue. At this point there should not be, and
2629 	 * if there are, they are probably left over from another instance of
2630 	 * the driver, such as when the system crashes and the crash dump
2631 	 * driver gets loaded.
2632 	 */
2633 	while (AAC_OUTB_GET(softs) != 0xfffffffful)
2634 		;
2635 
2636 	/*
2637 	 * Wait for the card to complete booting up before doing anything that
2638 	 * attempts to communicate with it.
2639 	 */
2640 	status = AAC_FWSTATUS_GET(softs);
2641 	if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2642 		goto error;
2643 	i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2644 	AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2645 	if (i == 0) {
2646 		cmn_err(CE_CONT, "?Fatal error: controller not ready");
2647 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2648 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2649 		goto error;
2650 	}
2651 
2652 	/* Read and set card supported options and settings */
2653 	if (aac_check_firmware(softs) == AACERR) {
2654 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2655 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2656 		goto error;
2657 	}
2658 
2659 	/* Clear out all interrupts */
2660 	AAC_STATUS_CLR(softs, ~0);
2661 
2662 	/* Setup communication space with the card */
2663 	if (softs->comm_space_dma_handle == NULL) {
2664 		if (aac_alloc_comm_space(softs) != AACOK)
2665 			goto error;
2666 	}
2667 	if (aac_setup_comm_space(softs) != AACOK) {
2668 		cmn_err(CE_CONT, "?Setup communication space failed");
2669 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2670 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2671 		goto error;
2672 	}
2673 
2674 #ifdef DEBUG
2675 	if (aac_get_fw_debug_buffer(softs) != AACOK)
2676 		cmn_err(CE_CONT, "?firmware UART trace not supported");
2677 #endif
2678 
2679 	/* Allocate slots */
2680 	if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2681 		cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2682 		goto error;
2683 	}
2684 	AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2685 
2686 	/* Allocate FIBs */
2687 	if (softs->total_fibs < softs->total_slots) {
2688 		aac_alloc_fibs(softs);
2689 		if (softs->total_fibs == 0)
2690 			goto error;
2691 		AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2692 		    softs->total_fibs);
2693 	}
2694 
2695 	/* Get adapter names */
2696 	if (CARD_IS_UNKNOWN(softs->card)) {
2697 		struct aac_supplement_adapter_info sinf;
2698 
2699 		if (aac_get_adapter_info(softs, NULL, &sinf) != AACOK) {
2700 			cmn_err(CE_CONT, "?Query adapter information failed");
2701 		} else {
2702 			char *p, *p0, *p1;
2703 
2704 			/*
2705 			 * Now find the controller name in supp_adapter_info->
2706 			 * AdapterTypeText. Use the first word as the vendor
2707 			 * and the other words as the product name.
2708 			 */
2709 			AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2710 			    "\"%s\"", sinf.AdapterTypeText);
2711 			p = sinf.AdapterTypeText;
2712 			p0 = p1 = NULL;
2713 			/* Skip leading spaces */
2714 			while (*p && (*p == ' ' || *p == '\t'))
2715 				p++;
2716 			p0 = p;
2717 			while (*p && (*p != ' ' && *p != '\t'))
2718 				p++;
2719 			/* Remove middle spaces */
2720 			while (*p && (*p == ' ' || *p == '\t'))
2721 				*p++ = 0;
2722 			p1 = p;
2723 			/* Remove trailing spaces */
2724 			p = p1 + strlen(p1) - 1;
2725 			while (p > p1 && (*p == ' ' || *p == '\t'))
2726 				*p-- = 0;
2727 			if (*p0 && *p1) {
2728 				(void *)strncpy(softs->vendor_name, p0,
2729 				    AAC_VENDOR_LEN);
2730 				(void *)strncpy(softs->product_name, p1,
2731 				    AAC_PRODUCT_LEN);
2732 			} else {
2733 				cmn_err(CE_WARN,
2734 				    "?adapter name mis-formatted\n");
2735 				if (*p0)
2736 					(void *)strncpy(softs->product_name,
2737 					    p0, AAC_PRODUCT_LEN);
2738 			}
2739 		}
2740 	}
2741 
2742 	cmn_err(CE_NOTE,
2743 	    "!aac driver %d.%02d.%02d-%d, found card: " \
2744 	    "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2745 	    AAC_DRIVER_MAJOR_VERSION,
2746 	    AAC_DRIVER_MINOR_VERSION,
2747 	    AAC_DRIVER_BUGFIX_LEVEL,
2748 	    AAC_DRIVER_BUILD,
2749 	    softs->vendor_name, softs->product_name,
2750 	    softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2751 	    softs->pci_mem_base_paddr);
2752 
2753 	/* Perform acceptance of adapter-detected config changes if possible */
2754 	if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2755 		cmn_err(CE_CONT, "?Handle adapter config issues failed");
2756 		aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2757 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2758 		goto error;
2759 	}
2760 
2761 	/* Setup containers (logical devices) */
2762 	if (aac_probe_containers(softs) != AACOK) {
2763 		cmn_err(CE_CONT, "?Fatal error: get container info error");
2764 		goto error;
2765 	}
2766 
2767 	/* Setup phys. devices */
2768 	if (softs->flags & AAC_FLAGS_NONDASD) {
2769 		uint32_t bus_max, tgt_max;
2770 		uint32_t bus, tgt;
2771 		int index;
2772 
2773 		if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2774 			cmn_err(CE_CONT, "?Fatal error: get bus info error");
2775 			goto error;
2776 		}
2777 		AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2778 		    bus_max, tgt_max);
2779 		if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2780 			if (softs->state & AAC_STATE_RESET) {
2781 				cmn_err(CE_WARN,
2782 				    "?Fatal error: bus map changed");
2783 				goto error;
2784 			}
2785 			softs->bus_max = bus_max;
2786 			softs->tgt_max = tgt_max;
2787 			if (softs->nondasds) {
2788 				kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2789 				    sizeof (struct aac_nondasd));
2790 			}
2791 			softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2792 			    sizeof (struct aac_nondasd), KM_SLEEP);
2793 
2794 			index = 0;
2795 			for (bus = 0; bus < softs->bus_max; bus++) {
2796 				for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2797 					struct aac_nondasd *dvp =
2798 					    &softs->nondasds[index++];
2799 					dvp->dev.type = AAC_DEV_PD;
2800 					dvp->bus = bus;
2801 					dvp->tid = tgt;
2802 				}
2803 			}
2804 		}
2805 	}
2806 
2807 	/* Check dma & acc handles allocated in attach */
2808 	if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2809 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2810 		goto error;
2811 	}
2812 
2813 	if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2814 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2815 		goto error;
2816 	}
2817 
2818 	for (i = 0; i < softs->total_slots; i++) {
2819 		if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2820 		    DDI_SUCCESS) {
2821 			ddi_fm_service_impact(softs->devinfo_p,
2822 			    DDI_SERVICE_LOST);
2823 			goto error;
2824 		}
2825 	}
2826 
2827 	return (AACOK);
2828 error:
2829 	if (softs->state & AAC_STATE_RESET)
2830 		return (AACERR);
2831 	if (softs->nondasds) {
2832 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2833 		    sizeof (struct aac_nondasd));
2834 		softs->nondasds = NULL;
2835 	}
2836 	if (softs->total_fibs > 0)
2837 		aac_destroy_fibs(softs);
2838 	if (softs->total_slots > 0)
2839 		aac_destroy_slots(softs);
2840 	if (softs->comm_space_dma_handle)
2841 		aac_free_comm_space(softs);
2842 	return (AACERR);
2843 }
2844 
2845 /*
2846  * Hardware shutdown and resource release
2847  */
2848 static void
2849 aac_common_detach(struct aac_softstate *softs)
2850 {
2851 	DBCALLED(softs, 1);
2852 
2853 	(void) aac_shutdown(softs);
2854 
2855 	if (softs->nondasds) {
2856 		kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2857 		    sizeof (struct aac_nondasd));
2858 		softs->nondasds = NULL;
2859 	}
2860 	aac_destroy_fibs(softs);
2861 	aac_destroy_slots(softs);
2862 	aac_free_comm_space(softs);
2863 }
2864 
2865 /*
2866  * Send a synchronous command to the controller and wait for a result.
2867  * Indicate if the controller completed the command with an error status.
2868  */
2869 int
2870 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
2871     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
2872     uint32_t *statusp)
2873 {
2874 	int timeout;
2875 	uint32_t status;
2876 
2877 	if (statusp != NULL)
2878 		*statusp = SRB_STATUS_SUCCESS;
2879 
2880 	/* Fill in mailbox */
2881 	AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
2882 
2883 	/* Ensure the sync command doorbell flag is cleared */
2884 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2885 
2886 	/* Then set it to signal the adapter */
2887 	AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
2888 
2889 	/* Spin waiting for the command to complete */
2890 	timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
2891 	AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
2892 	if (!timeout) {
2893 		AACDB_PRINT(softs, CE_WARN,
2894 		    "Sync command timed out after %d seconds (0x%x)!",
2895 		    AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
2896 		return (AACERR);
2897 	}
2898 
2899 	/* Clear the completion flag */
2900 	AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
2901 
2902 	/* Get the command status */
2903 	status = AAC_MAILBOX_GET(softs, 0);
2904 	if (statusp != NULL)
2905 		*statusp = status;
2906 	if (status != SRB_STATUS_SUCCESS) {
2907 		AACDB_PRINT(softs, CE_WARN,
2908 		    "Sync command fail: status = 0x%x", status);
2909 		return (AACERR);
2910 	}
2911 
2912 	return (AACOK);
2913 }
2914 
2915 /*
2916  * Send a synchronous FIB to the adapter and wait for its completion
2917  */
2918 static int
2919 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
2920 {
2921 	struct aac_slot *slotp = &softs->sync_slot;
2922 	ddi_dma_handle_t dma = slotp->fib_dma_handle;
2923 	uint32_t status;
2924 	int rval;
2925 
2926 	/* Sync fib only supports 512 bytes */
2927 	if (fibsize > AAC_FIB_SIZE)
2928 		return (AACERR);
2929 
2930 	/*
2931 	 * Set up the sync FIB.
2932 	 * There is no need to reinitialize the FIB header if it has already
2933 	 * been filled in by others, such as aac_cmd_fib_scsi via an aac_cmd.
2934 	 */
2935 	if (slotp->acp == NULL)
2936 		aac_cmd_fib_header(softs, slotp, cmd, fibsize);
2937 
2938 	AACDB_PRINT_FIB(softs, &softs->sync_slot);
2939 
2940 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2941 	    fibsize, DDI_DMA_SYNC_FORDEV);
2942 
2943 	/* Give the FIB to the controller, wait for a response. */
2944 	rval = aac_sync_mbcommand(softs, AAC_MONKER_SYNCFIB,
2945 	    slotp->fib_phyaddr, 0, 0, 0, &status);
2946 	if (rval == AACERR) {
2947 		AACDB_PRINT(softs, CE_WARN,
2948 		    "Send sync fib to controller failed");
2949 		return (AACERR);
2950 	}
2951 
2952 	(void) ddi_dma_sync(dma, offsetof(struct aac_comm_space, sync_fib),
2953 	    AAC_FIB_SIZE, DDI_DMA_SYNC_FORCPU);
2954 
2955 	if ((aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) ||
2956 	    (aac_check_dma_handle(dma) != DDI_SUCCESS)) {
2957 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
2958 		return (AACERR);
2959 	}
2960 
2961 	return (AACOK);
2962 }
2963 
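/*
 * Command queues are linked through acp->next/acp->prev with a tail pointer.
 * q_tail initially points at q_head (cast to struct aac_cmd *), so that
 * aac_cmd_enqueue() can always append through q_tail->next, even when the
 * queue is empty.
 */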
2964 static void
2965 aac_cmd_initq(struct aac_cmd_queue *q)
2966 {
2967 	q->q_head = NULL;
2968 	q->q_tail = (struct aac_cmd *)&q->q_head;
2969 }
2970 
2971 /*
2972  * Remove a cmd from the head of q
2973  */
2974 static struct aac_cmd *
2975 aac_cmd_dequeue(struct aac_cmd_queue *q)
2976 {
2977 	struct aac_cmd *acp;
2978 
2979 	_NOTE(ASSUMING_PROTECTED(*q))
2980 
2981 	if ((acp = q->q_head) != NULL) {
2982 		if ((q->q_head = acp->next) != NULL)
2983 			acp->next = NULL;
2984 		else
2985 			q->q_tail = (struct aac_cmd *)&q->q_head;
2986 		acp->prev = NULL;
2987 	}
2988 	return (acp);
2989 }
2990 
2991 /*
2992  * Add a cmd to the tail of q
2993  */
2994 static void
2995 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
2996 {
2997 	ASSERT(acp->next == NULL);
2998 	acp->prev = q->q_tail;
2999 	q->q_tail->next = acp;
3000 	q->q_tail = acp;
3001 }
3002 
3003 /*
3004  * Remove the cmd ac from q
3005  * Remove the cmd acp from q
3006 static void
3007 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3008 {
3009 	if (acp->prev) {
3010 		if ((acp->prev->next = acp->next) != NULL) {
3011 			acp->next->prev = acp->prev;
3012 			acp->next = NULL;
3013 		} else {
3014 			q->q_tail = acp->prev;
3015 		}
3016 		acp->prev = NULL;
3017 	}
3018 	/* otherwise acp is not in the queue and there is nothing to do */
3019 }
3020 
3021 /*
3022  * Atomically insert an entry into the nominated queue, returns 0 on success or
3023  * AACERR if the queue is full.
3024  *
3025  * Note: it would be more efficient to defer notifying the controller in
3026  *	 the case where we may be inserting several entries in rapid succession,
3027  *	 but implementing this usefully may be difficult (it would involve a
3028  *	 separate queue/notify interface).
3029  */
3030 static int
3031 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3032     uint32_t fib_size)
3033 {
3034 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3035 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3036 	uint32_t pi, ci;
3037 
3038 	DBCALLED(softs, 2);
3039 
3040 	ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3041 
3042 	/* Get the producer/consumer indices */
3043 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3044 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3045 	    DDI_DMA_SYNC_FORCPU);
3046 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3047 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3048 		return (AACERR);
3049 	}
3050 
3051 	pi = ddi_get32(acc,
3052 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3053 	ci = ddi_get32(acc,
3054 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3055 
3056 	/*
3057 	 * Wrap the producer index first, before we check to see
3058 	 * if the queue is full
3059 	 */
3060 	if (pi >= aac_qinfo[queue].size)
3061 		pi = 0;
3062 
3063 	/* XXX queue full */
3064 	if ((pi + 1) == ci)
3065 		return (AACERR);
3066 
3067 	/* Fill in queue entry */
3068 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3069 	ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3070 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3071 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3072 	    DDI_DMA_SYNC_FORDEV);
3073 
3074 	/* Update producer index */
3075 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3076 	    pi + 1);
3077 	(void) ddi_dma_sync(dma,
3078 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3079 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3080 	    DDI_DMA_SYNC_FORDEV);
3081 
3082 	if (aac_qinfo[queue].notify != 0)
3083 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3084 	return (AACOK);
3085 }
3086 
3087 /*
3088  * Atomically remove one entry from the nominated queue, returns 0 on
3089  * success or AACERR if the queue is empty.
3090  */
3091 static int
3092 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3093 {
3094 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3095 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3096 	uint32_t pi, ci;
3097 	int unfull = 0;
3098 
3099 	DBCALLED(softs, 2);
3100 
3101 	ASSERT(idxp);
3102 
3103 	/* Get the producer/consumer indices */
3104 	(void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3105 	    (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3106 	    DDI_DMA_SYNC_FORCPU);
3107 	pi = ddi_get32(acc,
3108 	    &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3109 	ci = ddi_get32(acc,
3110 	    &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3111 
3112 	/* Check for queue empty */
3113 	if (ci == pi)
3114 		return (AACERR);
3115 
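	/* Wrap pi, as aac_fib_enqueue() does, before the full check */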
3116 	if (pi >= aac_qinfo[queue].size)
3117 		pi = 0;
3118 
3119 	/* Check for queue full */
3120 	if (ci == pi + 1)
3121 		unfull = 1;
3122 
3123 	/*
3124 	 * The controller does not wrap the queue,
3125 	 * so we have to do it ourselves
3126 	 */
3127 	if (ci >= aac_qinfo[queue].size)
3128 		ci = 0;
3129 
3130 	/* Fetch the entry */
3131 	(void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3132 	    (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3133 	    DDI_DMA_SYNC_FORCPU);
3134 	if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3135 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3136 		return (AACERR);
3137 	}
3138 
3139 	switch (queue) {
3140 	case AAC_HOST_NORM_RESP_Q:
3141 	case AAC_HOST_HIGH_RESP_Q:
3142 		*idxp = ddi_get32(acc,
3143 		    &(softs->qentries[queue] + ci)->aq_fib_addr);
3144 		break;
3145 
3146 	case AAC_HOST_NORM_CMD_Q:
3147 	case AAC_HOST_HIGH_CMD_Q:
3148 		*idxp = ddi_get32(acc,
3149 		    &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3150 		break;
3151 
3152 	default:
3153 		cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3154 		return (AACERR);
3155 	}
3156 
3157 	/* Update consumer index */
3158 	ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3159 	    ci + 1);
3160 	(void) ddi_dma_sync(dma,
3161 	    (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3162 	    (uintptr_t)softs->comm_space, sizeof (uint32_t),
3163 	    DDI_DMA_SYNC_FORDEV);
3164 
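	/* If the queue had been full, tell the adapter an entry was freed */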
3165 	if (unfull && aac_qinfo[queue].notify != 0)
3166 		AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3167 	return (AACOK);
3168 }
3169 
3170 /*
3171  * Request information of the container cid
3172  */
3173 static struct aac_mntinforesp *
3174 aac_get_container_info(struct aac_softstate *softs, int cid)
3175 {
3176 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3177 	struct aac_fib *fibp = softs->sync_slot.fibp;
3178 	struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3179 	struct aac_mntinforesp *mir;
3180 
3181 	ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3182 	    (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3183 	    VM_NameServe64 : VM_NameServe);
3184 	ddi_put32(acc, &mi->MntType, FT_FILESYS);
3185 	ddi_put32(acc, &mi->MntCount, cid);
3186 
3187 	if (aac_sync_fib(softs, ContainerCommand,
3188 	    AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3189 		AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid);
3190 		return (NULL);
3191 	}
3192 
3193 	mir = (struct aac_mntinforesp *)&fibp->data[0];
3194 	if (ddi_get32(acc, &mir->Status) == ST_OK)
3195 		return (mir);
3196 	return (NULL);
3197 }
3198 
3199 static int
3200 aac_get_container_count(struct aac_softstate *softs, int *count)
3201 {
3202 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3203 	struct aac_mntinforesp *mir;
3204 
3205 	if ((mir = aac_get_container_info(softs, 0)) == NULL)
3206 		return (AACERR);
3207 	*count = ddi_get32(acc, &mir->MntRespCount);
3208 	if (*count > AAC_MAX_LD) {
3209 		AACDB_PRINT(softs, CE_CONT,
3210 		    "container count(%d) > AAC_MAX_LD", *count);
3211 		return (AACERR);
3212 	}
3213 	return (AACOK);
3214 }
3215 
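/*
 * Translate a container id into its 32-bit unique id
 * (VM_ContainerConfig/CT_CID_TO_32BITS_UID)
 */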
3216 static int
3217 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3218 {
3219 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3220 	struct aac_Container *ct = (struct aac_Container *) \
3221 	    &softs->sync_slot.fibp->data[0];
3222 
3223 	bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3224 	ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3225 	ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3226 	ddi_put32(acc, &ct->CTCommand.param[0], cid);
3227 
3228 	if (aac_sync_fib(softs, ContainerCommand,
3229 	    AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3230 		return (AACERR);
3231 	if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3232 		return (AACERR);
3233 
3234 	*uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3235 	return (AACOK);
3236 }
3237 
3238 static int
3239 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3240 {
3241 	struct aac_container *dvp = &softs->containers[cid];
3242 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
3243 	struct aac_mntinforesp *mir;
3244 	uint64_t size;
3245 	uint32_t uid;
3246 
3247 	/* Get container basic info */
3248 	if ((mir = aac_get_container_info(softs, cid)) == NULL)
3249 		return (AACERR);
3250 
3251 	if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3252 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3253 			AACDB_PRINT(softs, CE_NOTE,
3254 			    ">>> Container %d deleted", cid);
3255 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3256 			(void) aac_dr_event(softs, dvp->cid, -1,
3257 			    AAC_EVT_OFFLINE);
3258 		}
3259 	} else {
3260 		size = AAC_MIR_SIZE(softs, acc, mir);
3261 
3262 		/* Get container UID */
3263 		if (aac_get_container_uid(softs, cid, &uid) == AACERR) {
3264 			AACDB_PRINT(softs, CE_CONT,
3265 			    "query container %d uid failed", cid);
3266 			return (AACERR);
3267 		}
3268 		AACDB_PRINT(softs, CE_CONT, "uid=0x%08x", uid);
3269 
3270 		if (AAC_DEV_IS_VALID(&dvp->dev)) {
3271 			if (dvp->uid != uid) {
3272 				AACDB_PRINT(softs, CE_WARN,
3273 				    ">>> Container %u uid changed to %d",
3274 				    cid, uid);
3275 				dvp->uid = uid;
3276 			}
3277 			if (dvp->size != size) {
3278 				AACDB_PRINT(softs, CE_NOTE,
3279 				    ">>> Container %u size changed to %"PRIu64,
3280 				    cid, size);
3281 				dvp->size = size;
3282 			}
3283 		} else { /* Init new container */
3284 			AACDB_PRINT(softs, CE_NOTE,
3285 			    ">>> Container %d added: " \
3286 			    "size=0x%x.%08x, type=%d, name=%s",
3287 			    cid,
3288 			    ddi_get32(acc, &mir->MntObj.CapacityHigh),
3289 			    ddi_get32(acc, &mir->MntObj.Capacity),
3290 			    ddi_get32(acc, &mir->MntObj.VolType),
3291 			    mir->MntObj.FileSystemName);
3292 			dvp->dev.flags |= AAC_DFLAG_VALID;
3293 			dvp->dev.type = AAC_DEV_LD;
3294 
3295 			dvp->cid = cid;
3296 			dvp->uid = uid;
3297 			dvp->size = size;
3298 			dvp->locked = 0;
3299 			dvp->deleted = 0;
3300 			(void) aac_dr_event(softs, dvp->cid, -1,
3301 			    AAC_EVT_ONLINE);
3302 		}
3303 	}
3304 	return (AACOK);
3305 }
3306 
3307 /*
3308  * Do a rescan of all the possible containers and update the container list
3309  * with newly online/offline containers, and prepare for autoconfiguration.
3310  */
3311 static int
3312 aac_probe_containers(struct aac_softstate *softs)
3313 {
3314 	int i, count, total;
3315 
3316 	/* Loop over possible containers */
3317 	count = softs->container_count;
3318 	if (aac_get_container_count(softs, &count) == AACERR)
3319 		return (AACERR);
3320 	for (i = total = 0; i < count; i++) {
3321 		if (aac_probe_container(softs, i) == AACOK)
3322 			total++;
3323 	}
3324 	if (count < softs->container_count) {
3325 		struct aac_container *dvp;
3326 
3327 		for (dvp = &softs->containers[count];
3328 		    dvp < &softs->containers[softs->container_count]; dvp++) {
3329 			if (!AAC_DEV_IS_VALID(&dvp->dev))
3330 				continue;
3331 			AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3332 			    dvp->cid);
3333 			dvp->dev.flags &= ~AAC_DFLAG_VALID;
3334 			(void) aac_dr_event(softs, dvp->cid, -1,
3335 			    AAC_EVT_OFFLINE);
3336 		}
3337 	}
3338 	softs->container_count = count;
3339 	AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3340 	return (AACOK);
3341 }
3342 
3343 static int
3344 aac_alloc_comm_space(struct aac_softstate *softs)
3345 {
3346 	size_t rlen;
3347 	ddi_dma_cookie_t cookie;
3348 	uint_t cookien;
3349 
3350 	/* Allocate DMA for comm. space */
3351 	if (ddi_dma_alloc_handle(
3352 	    softs->devinfo_p,
3353 	    &softs->addr_dma_attr,
3354 	    DDI_DMA_SLEEP,
3355 	    NULL,
3356 	    &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3357 		AACDB_PRINT(softs, CE_WARN,
3358 		    "Cannot alloc dma handle for communication area");
3359 		goto error;
3360 	}
3361 	if (ddi_dma_mem_alloc(
3362 	    softs->comm_space_dma_handle,
3363 	    sizeof (struct aac_comm_space),
3364 	    &softs->acc_attr,
3365 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3366 	    DDI_DMA_SLEEP,
3367 	    NULL,
3368 	    (caddr_t *)&softs->comm_space,
3369 	    &rlen,
3370 	    &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3371 		AACDB_PRINT(softs, CE_WARN,
3372 		    "Cannot alloc mem for communication area");
3373 		goto error;
3374 	}
3375 	if (ddi_dma_addr_bind_handle(
3376 	    softs->comm_space_dma_handle,
3377 	    NULL,
3378 	    (caddr_t)softs->comm_space,
3379 	    sizeof (struct aac_comm_space),
3380 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3381 	    DDI_DMA_SLEEP,
3382 	    NULL,
3383 	    &cookie,
3384 	    &cookien) != DDI_DMA_MAPPED) {
3385 		AACDB_PRINT(softs, CE_WARN,
3386 		    "DMA bind failed for communication area");
3387 		goto error;
3388 	}
3389 	softs->comm_space_phyaddr = cookie.dmac_address;
3390 
3391 	/* Setup sync FIB space */
3392 	softs->sync_slot.fibp = &softs->comm_space->sync_fib;
3393 	softs->sync_slot.fib_phyaddr = softs->comm_space_phyaddr + \
3394 	    offsetof(struct aac_comm_space, sync_fib);
3395 	softs->sync_slot.fib_acc_handle = softs->comm_space_acc_handle;
3396 	softs->sync_slot.fib_dma_handle = softs->comm_space_dma_handle;
3397 
3398 	return (AACOK);
3399 error:
3400 	if (softs->comm_space_acc_handle) {
3401 		ddi_dma_mem_free(&softs->comm_space_acc_handle);
3402 		softs->comm_space_acc_handle = NULL;
3403 	}
3404 	if (softs->comm_space_dma_handle) {
3405 		ddi_dma_free_handle(&softs->comm_space_dma_handle);
3406 		softs->comm_space_dma_handle = NULL;
3407 	}
3408 	return (AACERR);
3409 }
3410 
3411 static void
3412 aac_free_comm_space(struct aac_softstate *softs)
3413 {
3414 	softs->sync_slot.fibp = NULL;
3415 	softs->sync_slot.fib_phyaddr = NULL;
3416 	softs->sync_slot.fib_acc_handle = NULL;
3417 	softs->sync_slot.fib_dma_handle = NULL;
3418 
3419 	(void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3420 	ddi_dma_mem_free(&softs->comm_space_acc_handle);
3421 	softs->comm_space_acc_handle = NULL;
3422 	ddi_dma_free_handle(&softs->comm_space_dma_handle);
3423 	softs->comm_space_dma_handle = NULL;
3424 	softs->comm_space_phyaddr = NULL;
3425 }
3426 
3427 /*
3428  * Initialize the data structures that are required for the communication
3429  * interface to operate
3430  */
3431 static int
3432 aac_setup_comm_space(struct aac_softstate *softs)
3433 {
3434 	ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3435 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3436 	uint32_t comm_space_phyaddr;
3437 	struct aac_adapter_init *initp;
3438 	int qoffset;
3439 
3440 	comm_space_phyaddr = softs->comm_space_phyaddr;
3441 
3442 	/* Setup adapter init struct */
3443 	initp = &softs->comm_space->init_data;
3444 	bzero(initp, sizeof (struct aac_adapter_init));
3445 
3446 	ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3447 	ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3448 
3449 	/* Setup new/old comm. specific data */
3450 	if (softs->flags & AAC_FLAGS_RAW_IO) {
3451 		ddi_put32(acc, &initp->InitStructRevision,
3452 		    AAC_INIT_STRUCT_REVISION_4);
3453 		ddi_put32(acc, &initp->InitFlags,
3454 		    (softs->flags & AAC_FLAGS_NEW_COMM) ?
3455 		    AAC_INIT_FLAGS_NEW_COMM_SUPPORTED : 0);
3456 		/* Setup the preferred settings */
3457 		ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3458 		ddi_put32(acc, &initp->MaxIoSize,
3459 		    (softs->aac_max_sectors << 9));
3460 		ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3461 	} else {
3462 		/*
3463 		 * Tells the adapter about the physical location of various
3464 		 * important shared data structures
3465 		 */
3466 		ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3467 		    comm_space_phyaddr + \
3468 		    offsetof(struct aac_comm_space, adapter_fibs));
3469 		ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3470 		ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3471 		ddi_put32(acc, &initp->AdapterFibsSize,
3472 		    AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3473 		ddi_put32(acc, &initp->PrintfBufferAddress,
3474 		    comm_space_phyaddr + \
3475 		    offsetof(struct aac_comm_space, adapter_print_buf));
3476 		ddi_put32(acc, &initp->PrintfBufferSize,
3477 		    AAC_ADAPTER_PRINT_BUFSIZE);
3478 		ddi_put32(acc, &initp->MiniPortRevision,
3479 		    AAC_INIT_STRUCT_MINIPORT_REVISION);
3480 		ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3481 
3482 		qoffset = (comm_space_phyaddr + \
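		/*
		 * The shared queue table must start on an AAC_QUEUE_ALIGN
		 * boundary.  qoffset is the padding needed to reach the next
		 * boundary; it is applied to both the host pointer and the
		 * physical address handed to the adapter.
		 */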
3483 		    offsetof(struct aac_comm_space, qtable)) % \
3484 		    AAC_QUEUE_ALIGN;
3485 		if (qoffset)
3486 			qoffset = AAC_QUEUE_ALIGN - qoffset;
3487 		softs->qtablep = (struct aac_queue_table *) \
3488 		    ((char *)&softs->comm_space->qtable + qoffset);
3489 		ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3490 		    offsetof(struct aac_comm_space, qtable) + qoffset);
3491 
3492 		/* Init queue table */
3493 		ddi_put32(acc, &softs->qtablep-> \
3494 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3495 		    AAC_HOST_NORM_CMD_ENTRIES);
3496 		ddi_put32(acc, &softs->qtablep-> \
3497 		    qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3498 		    AAC_HOST_NORM_CMD_ENTRIES);
3499 		ddi_put32(acc, &softs->qtablep-> \
3500 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3501 		    AAC_HOST_HIGH_CMD_ENTRIES);
3502 		ddi_put32(acc, &softs->qtablep-> \
3503 		    qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3504 		    AAC_HOST_HIGH_CMD_ENTRIES);
3505 		ddi_put32(acc, &softs->qtablep-> \
3506 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3507 		    AAC_ADAP_NORM_CMD_ENTRIES);
3508 		ddi_put32(acc, &softs->qtablep-> \
3509 		    qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3510 		    AAC_ADAP_NORM_CMD_ENTRIES);
3511 		ddi_put32(acc, &softs->qtablep-> \
3512 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3513 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3514 		ddi_put32(acc, &softs->qtablep-> \
3515 		    qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3516 		    AAC_ADAP_HIGH_CMD_ENTRIES);
3517 		ddi_put32(acc, &softs->qtablep-> \
3518 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3519 		    AAC_HOST_NORM_RESP_ENTRIES);
3520 		ddi_put32(acc, &softs->qtablep-> \
3521 		    qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3522 		    AAC_HOST_NORM_RESP_ENTRIES);
3523 		ddi_put32(acc, &softs->qtablep-> \
3524 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3525 		    AAC_HOST_HIGH_RESP_ENTRIES);
3526 		ddi_put32(acc, &softs->qtablep-> \
3527 		    qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3528 		    AAC_HOST_HIGH_RESP_ENTRIES);
3529 		ddi_put32(acc, &softs->qtablep-> \
3530 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3531 		    AAC_ADAP_NORM_RESP_ENTRIES);
3532 		ddi_put32(acc, &softs->qtablep-> \
3533 		    qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3534 		    AAC_ADAP_NORM_RESP_ENTRIES);
3535 		ddi_put32(acc, &softs->qtablep-> \
3536 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3537 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3538 		ddi_put32(acc, &softs->qtablep-> \
3539 		    qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3540 		    AAC_ADAP_HIGH_RESP_ENTRIES);
3541 
3542 		/* Init queue entries */
3543 		softs->qentries[AAC_HOST_NORM_CMD_Q] =
3544 		    &softs->qtablep->qt_HostNormCmdQueue[0];
3545 		softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3546 		    &softs->qtablep->qt_HostHighCmdQueue[0];
3547 		softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3548 		    &softs->qtablep->qt_AdapNormCmdQueue[0];
3549 		softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3550 		    &softs->qtablep->qt_AdapHighCmdQueue[0];
3551 		softs->qentries[AAC_HOST_NORM_RESP_Q] =
3552 		    &softs->qtablep->qt_HostNormRespQueue[0];
3553 		softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3554 		    &softs->qtablep->qt_HostHighRespQueue[0];
3555 		softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3556 		    &softs->qtablep->qt_AdapNormRespQueue[0];
3557 		softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3558 		    &softs->qtablep->qt_AdapHighRespQueue[0];
3559 	}
3560 	(void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3561 
3562 	/* Send init structure to the card */
3563 	if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3564 	    comm_space_phyaddr + \
3565 	    offsetof(struct aac_comm_space, init_data),
3566 	    0, 0, 0, NULL) == AACERR) {
3567 		AACDB_PRINT(softs, CE_WARN,
3568 		    "Cannot send init structure to adapter");
3569 		return (AACERR);
3570 	}
3571 
3572 	return (AACOK);
3573 }
3574 
3575 static uchar_t *
3576 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3577 {
3578 	(void) memset(buf, ' ', AAC_VENDOR_LEN);
3579 	bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3580 	return (buf + AAC_VENDOR_LEN);
3581 }
3582 
3583 static uchar_t *
3584 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3585 {
3586 	(void) memset(buf, ' ', AAC_PRODUCT_LEN);
3587 	bcopy(softs->product_name, buf, strlen(softs->product_name));
3588 	return (buf + AAC_PRODUCT_LEN);
3589 }
3590 
3591 /*
3592  * Construct unit serial number from container uid
3593  */
3594 static uchar_t *
3595 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3596 {
3597 	int i, d;
3598 	uint32_t uid;
3599 
3600 	ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3601 
3602 	uid = softs->containers[tgt].uid;
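	/*
	 * Render the 32-bit uid as 8 upper-case hex digits, most significant
	 * nibble first, e.g. a uid of 0x0123abcd becomes "0123ABCD".
	 */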
3603 	for (i = 7; i >= 0; i--) {
3604 		d = uid & 0xf;
3605 		buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3606 		uid >>= 4;
3607 	}
3608 	return (buf + 8);
3609 }
3610 
3611 /*
3612  * SPC-3 7.5 INQUIRY command implementation
3613  */
3614 static void
3615 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3616     union scsi_cdb *cdbp, struct buf *bp)
3617 {
3618 	int tgt = pkt->pkt_address.a_target;
3619 	char *b_addr = NULL;
3620 	uchar_t page = cdbp->cdb_opaque[2];
3621 
3622 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3623 		/* Command Support Data is not supported */
3624 		aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3625 		return;
3626 	}
3627 
3628 	if (bp && bp->b_un.b_addr && bp->b_bcount) {
3629 		if (bp->b_flags & (B_PHYS | B_PAGEIO))
3630 			bp_mapin(bp);
3631 		b_addr = bp->b_un.b_addr;
3632 	}
3633 
3634 	if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3635 		uchar_t *vpdp = (uchar_t *)b_addr;
3636 		uchar_t *idp, *sp;
3637 
3638 		/* SPC-3 8.4 Vital product data parameters */
3639 		switch (page) {
3640 		case 0x00:
3641 			/* Supported VPD pages */
3642 			if (vpdp == NULL ||
3643 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3644 				return;
3645 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3646 			vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3647 			vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3648 
3649 			vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3650 			vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3651 			vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3652 
3653 			pkt->pkt_state |= STATE_XFERRED_DATA;
3654 			break;
3655 
3656 		case 0x80:
3657 			/* Unit serial number page */
3658 			if (vpdp == NULL ||
3659 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3660 				return;
3661 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3662 			vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3663 			vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3664 
3665 			sp = &vpdp[AAC_VPD_PAGE_DATA];
3666 			(void) aac_lun_serialno(softs, tgt, sp);
3667 
3668 			pkt->pkt_state |= STATE_XFERRED_DATA;
3669 			break;
3670 
3671 		case 0x83:
3672 			/* Device identification page */
3673 			if (vpdp == NULL ||
3674 			    bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3675 				return;
3676 			bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3677 			vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3678 
3679 			idp = &vpdp[AAC_VPD_PAGE_DATA];
3680 			bzero(idp, AAC_VPD_ID_LENGTH);
3681 			idp[AAC_VPD_ID_CODESET] = 0x02;
3682 			idp[AAC_VPD_ID_TYPE] = 0x01;
3683 
3684 			/*
3685 			 * SPC-3 Table 111 - Identifier type
3686 			 * One recommended method of constructing the remainder
3687 			 * of the identifier field is to concatenate the product
3688 			 * identification field from the standard INQUIRY data
3689 			 * field and the product serial number field from the
3690 			 * unit serial number page.
3691 			 */
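			/*
			 * Here that is the vendor id, the product id and the
			 * 8-character serial number, which together form the
			 * identifier covered by the 32-byte check above.
			 */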
3692 			sp = &idp[AAC_VPD_ID_DATA];
3693 			sp = aac_vendor_id(softs, sp);
3694 			sp = aac_product_id(softs, sp);
3695 			sp = aac_lun_serialno(softs, tgt, sp);
3696 			idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3697 			    (uintptr_t)&idp[AAC_VPD_ID_DATA];
3698 
3699 			vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3700 			    (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3701 			pkt->pkt_state |= STATE_XFERRED_DATA;
3702 			break;
3703 
3704 		default:
3705 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3706 			    0x24, 0x00, 0);
3707 			break;
3708 		}
3709 	} else {
3710 		struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3711 		size_t len = sizeof (struct scsi_inquiry);
3712 
3713 		if (page != 0) {
3714 			aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3715 			    0x24, 0x00, 0);
3716 			return;
3717 		}
3718 		if (inqp == NULL || bp->b_bcount < len)
3719 			return;
3720 
3721 		bzero(inqp, len);
3722 		inqp->inq_len = AAC_ADDITIONAL_LEN;
3723 		inqp->inq_ansi = AAC_ANSI_VER;
3724 		inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3725 		(void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3726 		(void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3727 		bcopy("V1.0", inqp->inq_revision, 4);
3728 		inqp->inq_cmdque = 1; /* enable tagged-queuing */
3729 		/*
3730 		 * Needed for the "sd-max-xfer-size" property, which may impact
3731 		 * performance as the number of IO threads increases.
3732 		 */
3733 		inqp->inq_wbus32 = 1;
3734 
3735 		pkt->pkt_state |= STATE_XFERRED_DATA;
3736 	}
3737 }
3738 
3739 /*
3740  * SPC-3 7.10 MODE SENSE command implementation
3741  */
3742 static void
3743 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3744     union scsi_cdb *cdbp, struct buf *bp, int capacity)
3745 {
3746 	uchar_t pagecode;
3747 	struct mode_header *headerp;
3748 	struct mode_header_g1 *g1_headerp;
3749 	unsigned int ncyl;
3750 	caddr_t sense_data;
3751 	caddr_t next_page;
3752 	size_t sdata_size;
3753 	size_t pages_size;
3754 	int unsupport_page = 0;
3755 
3756 	ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3757 	    cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3758 
3759 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3760 		return;
3761 
3762 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
3763 		bp_mapin(bp);
3764 	pkt->pkt_state |= STATE_XFERRED_DATA;
3765 	pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3766 
3767 	/* calculate the size of needed buffer */
3768 	if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3769 		sdata_size = MODE_HEADER_LENGTH;
3770 	else /* must be SCMD_MODE_SENSE_G1 */
3771 		sdata_size = MODE_HEADER_LENGTH_G1;
3772 
3773 	pages_size = 0;
3774 	switch (pagecode) {
3775 	case SD_MODE_SENSE_PAGE3_CODE:
3776 		pages_size += sizeof (struct mode_format);
3777 		break;
3778 
3779 	case SD_MODE_SENSE_PAGE4_CODE:
3780 		pages_size += sizeof (struct mode_geometry);
3781 		break;
3782 
3783 	case MODEPAGE_CTRL_MODE:
3784 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3785 			pages_size += sizeof (struct mode_control_scsi3);
3786 		} else {
3787 			unsupport_page = 1;
3788 		}
3789 		break;
3790 
3791 	case MODEPAGE_ALLPAGES:
3792 		if (softs->flags & AAC_FLAGS_LBA_64BIT) {
3793 			pages_size += sizeof (struct mode_format) +
3794 			    sizeof (struct mode_geometry) +
3795 			    sizeof (struct mode_control_scsi3);
3796 		} else {
3797 			pages_size += sizeof (struct mode_format) +
3798 			    sizeof (struct mode_geometry);
3799 		}
3800 		break;
3801 
3802 	default:
3803 		/* unsupported pages */
3804 		unsupport_page = 1;
3805 	}
3806 
3807 	/* allocate a buffer to build the sense data to send */
3808 	sdata_size += pages_size;
3809 	sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
3810 
3811 	if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
3812 		headerp = (struct mode_header *)sense_data;
3813 		headerp->length = MODE_HEADER_LENGTH + pages_size -
3814 		    sizeof (headerp->length);
3815 		headerp->bdesc_length = 0;
3816 		next_page = sense_data + sizeof (struct mode_header);
3817 	} else {
3818 		g1_headerp = (void *)sense_data;
3819 		g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
3820 		    sizeof (g1_headerp->length));
3821 		g1_headerp->bdesc_length = 0;
3822 		next_page = sense_data + sizeof (struct mode_header_g1);
3823 	}
3824 
3825 	if (unsupport_page)
3826 		goto finish;
3827 
3828 	if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
3829 	    pagecode == MODEPAGE_ALLPAGES) {
3830 		/* SBC-3 7.1.3.3 Format device page */
3831 		struct mode_format *page3p;
3832 
3833 		page3p = (void *)next_page;
3834 		page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
3835 		page3p->mode_page.length = sizeof (struct mode_format);
3836 		page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
3837 		page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
3838 
3839 		next_page += sizeof (struct mode_format);
3840 	}
3841 
3842 	if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
3843 	    pagecode == MODEPAGE_ALLPAGES) {
3844 		/* SBC-3 7.1.3.8 Rigid disk device geometry page */
3845 		struct mode_geometry *page4p;
3846 
3847 		page4p = (void *)next_page;
3848 		page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
3849 		page4p->mode_page.length = sizeof (struct mode_geometry);
3850 		page4p->heads = AAC_NUMBER_OF_HEADS;
3851 		page4p->rpm = BE_16(AAC_ROTATION_SPEED);
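		/*
		 * Derive a nominal cylinder count from the fixed heads and
		 * sectors-per-track values; the 24-bit result is split across
		 * cyl_lb/cyl_mb/cyl_ub below.
		 */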
3852 		ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
3853 		page4p->cyl_lb = ncyl & 0xff;
3854 		page4p->cyl_mb = (ncyl >> 8) & 0xff;
3855 		page4p->cyl_ub = (ncyl >> 16) & 0xff;
3856 
3857 		next_page += sizeof (struct mode_geometry);
3858 	}
3859 
3860 	if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
3861 	    softs->flags & AAC_FLAGS_LBA_64BIT) {
3862 		/* 64-bit LBA needs large sense data */
3863 		struct mode_control_scsi3 *mctl;
3864 
3865 		mctl = (void *)next_page;
3866 		mctl->mode_page.code = MODEPAGE_CTRL_MODE;
3867 		mctl->mode_page.length =
3868 		    sizeof (struct mode_control_scsi3) -
3869 		    sizeof (struct mode_page);
3870 		mctl->d_sense = 1;
3871 	}
3872 
3873 finish:
3874 	/* copyout the valid data. */
3875 	bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
3876 	kmem_free(sense_data, sdata_size);
3877 }
3878 
3879 static int
3880 aac_name_node(dev_info_t *dip, char *name, int len)
3881 {
3882 	int tgt, lun;
3883 
3884 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3885 	    DDI_PROP_DONTPASS, "target", -1);
3886 	if (tgt == -1)
3887 		return (DDI_FAILURE);
3888 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3889 	    DDI_PROP_DONTPASS, "lun", -1);
3890 	if (lun == -1)
3891 		return (DDI_FAILURE);
3892 
3893 	(void) snprintf(name, len, "%x,%x", tgt, lun);
3894 	return (DDI_SUCCESS);
3895 }
3896 
3897 /*ARGSUSED*/
3898 static int
3899 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3900     scsi_hba_tran_t *tran, struct scsi_device *sd)
3901 {
3902 	struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
3903 #if defined(DEBUG) || defined(__lock_lint)
3904 	int ctl = ddi_get_instance(softs->devinfo_p);
3905 #endif
3906 	uint16_t tgt = sd->sd_address.a_target;
3907 	uint8_t lun = sd->sd_address.a_lun;
3908 	struct aac_device *dvp;
3909 
3910 	DBCALLED(softs, 2);
3911 
3912 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3913 		/*
3914 		 * If no persistent node exists, we don't allow a .conf node
3915 		 * to be created.
3916 		 */
3917 		if (aac_find_child(softs, tgt, lun) != NULL) {
3918 			if (ndi_merge_node(tgt_dip, aac_name_node) !=
3919 			    DDI_SUCCESS)
3920 				/* Create this .conf node */
3921 				return (DDI_SUCCESS);
3922 		}
3923 		return (DDI_FAILURE);
3924 	}
3925 
3926 	/*
3927 	 * Only support containers/physical devices that have been
3928 	 * detected and are valid
3929 	 */
3930 	mutex_enter(&softs->io_lock);
3931 	if (tgt >= AAC_MAX_DEV(softs)) {
3932 		AACDB_PRINT_TRAN(softs,
3933 		    "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
3934 		mutex_exit(&softs->io_lock);
3935 		return (DDI_FAILURE);
3936 	}
3937 
3938 	if (tgt < AAC_MAX_LD) {
3939 		dvp = (struct aac_device *)&softs->containers[tgt];
3940 		if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
3941 			AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
3942 			    ctl, tgt, lun);
3943 			mutex_exit(&softs->io_lock);
3944 			return (DDI_FAILURE);
3945 		}
3946 		/*
3947 		 * Save the tgt_dip for the given target if one doesn't exist
3948 		 * already. Dips for non-existent targets will be cleared in
3949 		 * tgt_free.
3950 		 */
3951 		if (softs->containers[tgt].dev.dip == NULL &&
3952 		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
3953 			softs->containers[tgt].dev.dip = tgt_dip;
3954 	} else {
3955 		dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
3956 	}
3957 
3958 	if (softs->flags & AAC_FLAGS_BRKUP) {
3959 		if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
3960 		    "buf_break", 1) != DDI_PROP_SUCCESS) {
3961 			cmn_err(CE_CONT, "unable to create "
3962 			    "property for t%dL%d (buf_break)", tgt, lun);
3963 		}
3964 	}
3965 
3966 	AACDB_PRINT(softs, CE_NOTE,
3967 	    "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
3968 	    (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
3969 	mutex_exit(&softs->io_lock);
3970 	return (DDI_SUCCESS);
3971 }
3972 
3973 static void
3974 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3975     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3976 {
3977 #ifndef __lock_lint
3978 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
3979 #endif
3980 
3981 	struct aac_softstate *softs = SD2AAC(sd);
3982 	int tgt = sd->sd_address.a_target;
3983 
3984 	mutex_enter(&softs->io_lock);
3985 	if (tgt < AAC_MAX_LD) {
3986 		if (softs->containers[tgt].dev.dip == tgt_dip)
3987 			softs->containers[tgt].dev.dip = NULL;
3988 	} else {
3989 		softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
3990 	}
3991 	mutex_exit(&softs->io_lock);
3992 }
3993 
3994 /*
3995  * Check if the firmware is Up And Running. If it is in the Kernel Panic
3996  * state, (BlinkLED code + 1) is returned.
3997  *    0 -- firmware up and running
3998  *   -1 -- firmware dead
3999  *   >0 -- firmware kernel panic
4000  */
4001 static int
4002 aac_check_adapter_health(struct aac_softstate *softs)
4003 {
4004 	int rval;
4005 
4006 	rval = PCI_MEM_GET32(softs, AAC_OMR0);
4007 
4008 	if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4009 		rval = 0;
4010 	} else if (rval & AAC_KERNEL_PANIC) {
4011 		cmn_err(CE_WARN, "firmware panic");
4012 		rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4013 	} else {
4014 		cmn_err(CE_WARN, "firmware dead");
4015 		rval = -1;
4016 	}
4017 	return (rval);
4018 }
4019 
4020 static void
4021 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4022     uchar_t reason)
4023 {
4024 	acp->flags |= AAC_CMD_ABORT;
4025 
4026 	if (acp->pkt) {
4027 		/*
4028 		 * Each lun should generate a unit attention
4029 		 * condition when reset.
4030 		 * Phys. drives are treated as logical ones
4031 		 * during error recovery.
4032 		 */
4033 		if (acp->slotp) { /* outstanding cmd */
4034 			acp->pkt->pkt_state |= STATE_GOT_STATUS;
4035 			aac_set_arq_data_reset(softs, acp);
4036 		}
4037 
4038 		switch (reason) {
4039 		case CMD_TIMEOUT:
4040 			AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4041 			    acp);
4042 			aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4043 			    STAT_TIMEOUT | STAT_BUS_RESET);
4044 			break;
4045 		case CMD_RESET:
4046 			/* aac supports only RESET_ALL */
4047 			AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4048 			aac_set_pkt_reason(softs, acp, CMD_RESET,
4049 			    STAT_BUS_RESET);
4050 			break;
4051 		case CMD_ABORTED:
4052 			AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4053 			    acp);
4054 			aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4055 			    STAT_ABORTED);
4056 			break;
4057 		}
4058 	}
4059 	aac_end_io(softs, acp);
4060 }
4061 
4062 /*
4063  * Abort all the pending commands of type iocmd or just the command pkt
4064  * corresponding to pkt
4065  */
4066 static void
4067 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4068     int reason)
4069 {
4070 	struct aac_cmd *ac_arg, *acp;
4071 	int i;
4072 
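	/*
	 * With a specific pkt the abort is limited to that command, and its
	 * queue (SYNC or ASYNC) is derived from the command flags.
	 */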
4073 	if (pkt == NULL) {
4074 		ac_arg = NULL;
4075 	} else {
4076 		ac_arg = PKT2AC(pkt);
4077 		iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4078 		    AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4079 	}
4080 
4081 	/*
4082 	 * a) outstanding commands on the controller
4083 	 * Note: outstanding commands should be aborted only after an
4084 	 * IOP reset has been done.
4085 	 */
4086 	if (iocmd & AAC_IOCMD_OUTSTANDING) {
4087 		struct aac_cmd *acp;
4088 
4089 		for (i = 0; i < AAC_MAX_LD; i++) {
4090 			if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4091 				softs->containers[i].reset = 1;
4092 		}
4093 		while ((acp = softs->q_busy.q_head) != NULL)
4094 			aac_abort_iocmd(softs, acp, reason);
4095 	}
4096 
4097 	/* b) commands in the waiting queues */
4098 	for (i = 0; i < AAC_CMDQ_NUM; i++) {
4099 		if (iocmd & (1 << i)) {
4100 			if (ac_arg) {
4101 				aac_abort_iocmd(softs, ac_arg, reason);
4102 			} else {
4103 				while ((acp = softs->q_wait[i].q_head) != NULL)
4104 					aac_abort_iocmd(softs, acp, reason);
4105 			}
4106 		}
4107 	}
4108 }
4109 
4110 /*
4111  * The draining thread is shared among quiesce threads. It terminates
4112  * when the adapter is quiesced or stopped by aac_stop_drain().
4113  */
4114 static void
4115 aac_check_drain(void *arg)
4116 {
4117 	struct aac_softstate *softs = arg;
4118 
4119 	mutex_enter(&softs->io_lock);
4120 	if (softs->ndrains) {
4121 		softs->drain_timeid = 0;
4122 		/*
4123 		 * If both the ASYNC and SYNC bus throttles are held,
4124 		 * wake up waiting threads only when both queues have drained.
4125 		 */
4126 		if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4127 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4128 		    (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4129 		    softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4130 			cv_broadcast(&softs->drain_cv);
4131 		else
4132 			softs->drain_timeid = timeout(aac_check_drain, softs,
4133 			    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4134 	}
4135 	mutex_exit(&softs->io_lock);
4136 }
4137 
4138 /*
4139  * If the outstanding cmds are not already being drained, start draining
4140  * them. Otherwise, only update ndrains.
4141  */
4142 static void
4143 aac_start_drain(struct aac_softstate *softs)
4144 {
4145 	if (softs->ndrains == 0) {
4146 		ASSERT(softs->drain_timeid == 0);
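		/*
		 * drv_usectohz(1000000) is one second in clock ticks, so the
		 * drain check fires every AAC_QUIESCE_TICK seconds.
		 */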
4147 		softs->drain_timeid = timeout(aac_check_drain, softs,
4148 		    AAC_QUIESCE_TICK * drv_usectohz(1000000));
4149 	}
4150 	softs->ndrains++;
4151 }
4152 
4153 /*
4154  * Stop the draining thread when no other threads use it any longer.
4155  * Side effect: io_lock may be released in the middle.
4156  */
4157 static void
4158 aac_stop_drain(struct aac_softstate *softs)
4159 {
4160 	softs->ndrains--;
4161 	if (softs->ndrains == 0) {
4162 		if (softs->drain_timeid != 0) {
4163 			timeout_id_t tid = softs->drain_timeid;
4164 
4165 			softs->drain_timeid = 0;
4166 			mutex_exit(&softs->io_lock);
4167 			(void) untimeout(tid);
4168 			mutex_enter(&softs->io_lock);
4169 		}
4170 	}
4171 }
4172 
4173 /*
4174  * The following function comes from Adaptec:
4175  *
4176  * Once an IOP reset is done, the driver basically has to re-initialize the
4177  * card as if coming up from a cold boot, and it is responsible for any IO
4178  * that was outstanding to the adapter at the time of the IOP RESET. Prepare
4179  * for IOP RESET by keeping the init code modular, with the ability to call
4180  * it from multiple places.
4181  */
4182 static int
4183 aac_reset_adapter(struct aac_softstate *softs)
4184 {
4185 	int health;
4186 	uint32_t status;
4187 	int rval = AAC_IOP_RESET_FAILED;
4188 
4189 	DBCALLED(softs, 1);
4190 
4191 	ASSERT(softs->state & AAC_STATE_RESET);
4192 
4193 	ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4194 	/* Disable interrupt */
4195 	AAC_DISABLE_INTR(softs);
4196 
4197 	health = aac_check_adapter_health(softs);
4198 	if (health == -1) {
4199 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4200 		goto finish;
4201 	}
4202 	if (health == 0) /* flush drives if possible */
4203 		(void) aac_shutdown(softs);
4204 
4205 	/* Execute IOP reset */
4206 	if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4207 	    &status)) != AACOK) {
4208 		ddi_acc_handle_t acc = softs->comm_space_acc_handle;
4209 		struct aac_fib *fibp;
4210 		struct aac_pause_command *pc;
4211 
4212 		if ((status & 0xf) == 0xf) {
4213 			uint32_t wait_count;
4214 
4215 			/*
4216 			 * Sunrise Lake has dual cores and we must drag the
4217 			 * other core with us to reset simultaneously. There
4218 			 * are 2 bits in the Inbound Reset Control and Status
4219 			 * Register (offset 0x38) of the Sunrise Lake to reset
4220 			 * the chip without clearing out the PCI configuration
4221 			 * info (COMMAND & BARS).
4222 			 */
4223 			PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4224 
4225 			/*
4226 			 * We need to wait 5 seconds before accessing the MU
4227 			 * again: 5 * 10000 iterations * 100us = 5,000,000us = 5s
4228 			 */
4229 			wait_count = 5 * 10000;
4230 			while (wait_count) {
4231 				drv_usecwait(100); /* delay 100 microseconds */
4232 				wait_count--;
4233 			}
4234 		} else {
4235 			if (status == SRB_STATUS_INVALID_REQUEST)
4236 				cmn_err(CE_WARN, "!IOP_RESET not supported");
4237 			else /* probably timeout */
4238 				cmn_err(CE_WARN, "!IOP_RESET failed");
4239 
4240 			/* Unwind aac_shutdown() */
4241 			fibp = softs->sync_slot.fibp;
4242 			pc = (struct aac_pause_command *)&fibp->data[0];
4243 
4244 			bzero(pc, sizeof (*pc));
4245 			ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4246 			ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4247 			ddi_put32(acc, &pc->Timeout, 1);
4248 			ddi_put32(acc, &pc->Min, 1);
4249 			ddi_put32(acc, &pc->NoRescan, 1);
4250 
4251 			(void) aac_sync_fib(softs, ContainerCommand,
4252 			    AAC_FIB_SIZEOF(struct aac_pause_command));
4253 
4254 			if (aac_check_adapter_health(softs) != 0)
4255 				ddi_fm_service_impact(softs->devinfo_p,
4256 				    DDI_SERVICE_LOST);
4257 			else
4258 				/*
4259 				 * IOP reset not supported or the IOP was not reset
4260 				 */
4261 				rval = AAC_IOP_RESET_ABNORMAL;
4262 			goto finish;
4263 		}
4264 	}
4265 
4266 	/*
4267 	 * Re-read and renegotiate the FIB parameters, as one of the actions
4268 	 * that can result from an IOP reset is the running of a new firmware
4269 	 * image.
4270 	 */
4271 	if (aac_common_attach(softs) != AACOK)
4272 		goto finish;
4273 
4274 	rval = AAC_IOP_RESET_SUCCEED;
4275 
4276 finish:
4277 	AAC_ENABLE_INTR(softs);
4278 	return (rval);
4279 }
4280 
4281 static void
4282 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4283     int throttle)
4284 {
4285 	/*
4286 	 * If the bus is draining/quiesced, no changes to the throttles
4287 	 * are allowed. All throttles should have been set to 0.
4288 	 */
4289 	if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4290 		return;
4291 	dvp->throttle[q] = throttle;
4292 }
4293 
4294 static void
4295 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4296 {
4297 	int i, q;
4298 
4299 	/* Hold bus by holding every device on the bus */
4300 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4301 		if (iocmds & (1 << q)) {
4302 			softs->bus_throttle[q] = 0;
4303 			for (i = 0; i < AAC_MAX_LD; i++)
4304 				aac_set_throttle(softs,
4305 				    &softs->containers[i].dev, q, 0);
4306 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4307 				aac_set_throttle(softs,
4308 				    &softs->nondasds[i].dev, q, 0);
4309 		}
4310 	}
4311 }
4312 
4313 static void
4314 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4315 {
4316 	int i, q;
4317 
4318 	for (q = 0; q < AAC_CMDQ_NUM; q++) {
4319 		if (iocmds & (1 << q)) {
4320 			/*
4321 			 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been
4322 			 * quiesced or is being drained, possibly by some
4323 			 * quiesce threads.
4324 			 */
4325 			if (q == AAC_CMDQ_ASYNC && ((softs->state &
4326 			    AAC_STATE_QUIESCED) || softs->ndrains))
4327 				continue;
4328 			softs->bus_throttle[q] = softs->total_slots;
4329 			for (i = 0; i < AAC_MAX_LD; i++)
4330 				aac_set_throttle(softs,
4331 				    &softs->containers[i].dev,
4332 				    q, softs->total_slots);
4333 			for (i = 0; i < AAC_MAX_PD(softs); i++)
4334 				aac_set_throttle(softs, &softs->nondasds[i].dev,
4335 				    q, softs->total_slots);
4336 		}
4337 	}
4338 }
4339 
4340 static int
4341 aac_do_reset(struct aac_softstate *softs)
4342 {
4343 	int health;
4344 	int rval;
4345 
4346 	softs->state |= AAC_STATE_RESET;
4347 	health = aac_check_adapter_health(softs);
4348 
4349 	/*
4350 	 * Hold off new io commands and wait for all outstanding io
4351 	 * commands to complete.
4352 	 */
4353 	if (health == 0) {
4354 		int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4355 		int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4356 
4357 		if (sync_cmds == 0 && async_cmds == 0) {
4358 			rval = AAC_IOP_RESET_SUCCEED;
4359 			goto finish;
4360 		}
4361 		/*
4362 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4363 		 * to complete the outstanding io commands
4364 		 */
4365 		int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4366 		int (*intr_handler)(struct aac_softstate *);
4367 
4368 		aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4369 		/*
4370 		 * Poll the adapter ourselves in case interrupts are disabled,
4371 		 * and to avoid releasing the io_lock.
4372 		 */
4373 		intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4374 		    aac_process_intr_new : aac_process_intr_old;
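		/*
		 * Each pass below waits 100us, so the loop polls for at most
		 * AAC_QUIESCE_TIMEOUT seconds (1000 * 10 iterations a second).
		 */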
4375 		while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4376 		    softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4377 			drv_usecwait(100);
4378 			(void) intr_handler(softs);
4379 			timeout--;
4380 		}
4381 		aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4382 
4383 		if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4384 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4385 			/* Cmds drained out */
4386 			rval = AAC_IOP_RESET_SUCCEED;
4387 			goto finish;
4388 		} else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4389 		    softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4390 			/* Cmds not drained out, adapter overloaded */
4391 			rval = AAC_IOP_RESET_ABNORMAL;
4392 			goto finish;
4393 		}
4394 	}
4395 
4396 	/*
4397 	 * If even the longer wait could not drain the outstanding io
4398 	 * commands, do an IOP reset.
4399 	 */
4400 	if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4401 		softs->state |= AAC_STATE_DEAD;
4402 
4403 finish:
4404 	softs->state &= ~AAC_STATE_RESET;
4405 	return (rval);
4406 }
4407 
4408 static int
4409 aac_tran_reset(struct scsi_address *ap, int level)
4410 {
4411 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4412 	int rval;
4413 
4414 	DBCALLED(softs, 1);
4415 
4416 	if (level != RESET_ALL) {
4417 		cmn_err(CE_NOTE, "!reset target/lun not supported");
4418 		return (0);
4419 	}
4420 
4421 	mutex_enter(&softs->io_lock);
4422 	switch (rval = aac_do_reset(softs)) {
4423 	case AAC_IOP_RESET_SUCCEED:
4424 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4425 		    NULL, CMD_RESET);
4426 		aac_start_waiting_io(softs);
4427 		break;
4428 	case AAC_IOP_RESET_FAILED:
4429 		/* Abort IOCTL cmds when adapter is dead */
4430 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4431 		break;
4432 	case AAC_IOP_RESET_ABNORMAL:
4433 		aac_start_waiting_io(softs);
4434 	}
4435 	mutex_exit(&softs->io_lock);
4436 
4437 	aac_drain_comp_q(softs);
4438 	return (rval == 0);
4439 }
4440 
4441 static int
4442 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4443 {
4444 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4445 
4446 	DBCALLED(softs, 1);
4447 
4448 	mutex_enter(&softs->io_lock);
4449 	aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4450 	mutex_exit(&softs->io_lock);
4451 
4452 	aac_drain_comp_q(softs);
4453 	return (1);
4454 }
4455 
4456 void
4457 aac_free_dmamap(struct aac_cmd *acp)
4458 {
4459 	/* Free dma mapping */
4460 	if (acp->flags & AAC_CMD_DMA_VALID) {
4461 		ASSERT(acp->buf_dma_handle);
4462 		(void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4463 		acp->flags &= ~AAC_CMD_DMA_VALID;
4464 	}
4465 
4466 	if (acp->abp != NULL) { /* free non-aligned buf DMA */
4467 		ASSERT(acp->buf_dma_handle);
4468 		if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4469 			ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4470 			    (uint8_t *)acp->abp, acp->bp->b_bcount,
4471 			    DDI_DEV_AUTOINCR);
4472 		ddi_dma_mem_free(&acp->abh);
4473 		acp->abp = NULL;
4474 	}
4475 
4476 	if (acp->buf_dma_handle) {
4477 		ddi_dma_free_handle(&acp->buf_dma_handle);
4478 		acp->buf_dma_handle = NULL;
4479 	}
4480 }
4481 
4482 static void
4483 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4484 {
4485 	AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4486 	    ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4487 	aac_free_dmamap(acp);
4488 	aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4489 	aac_soft_callback(softs, acp);
4490 }
4491 
4492 /*
4493  * Handle command to logical device
4494  */
4495 static int
4496 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4497 {
4498 	struct aac_container *dvp;
4499 	struct scsi_pkt *pkt;
4500 	union scsi_cdb *cdbp;
4501 	struct buf *bp;
4502 	int rval;
4503 
4504 	dvp = (struct aac_container *)acp->dvp;
4505 	pkt = acp->pkt;
4506 	cdbp = (void *)pkt->pkt_cdbp;
4507 	bp = acp->bp;
4508 
4509 	switch (cdbp->scc_cmd) {
4510 	case SCMD_INQUIRY: /* inquiry */
4511 		aac_free_dmamap(acp);
4512 		aac_inquiry(softs, pkt, cdbp, bp);
4513 		aac_soft_callback(softs, acp);
4514 		rval = TRAN_ACCEPT;
4515 		break;
4516 
4517 	case SCMD_READ_CAPACITY: /* read capacity */
4518 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4519 			struct scsi_capacity cap;
4520 			uint64_t last_lba;
4521 
4522 			/* check 64-bit LBA */
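			/*
			 * A last LBA that does not fit in 32 bits is reported
			 * as 0xffffffff, telling the initiator to retry with
			 * READ CAPACITY(16) (SCMD_SVC_ACTION_IN_G4 below).
			 */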
4523 			last_lba = dvp->size - 1;
4524 			if (last_lba > 0xffffffffull) {
4525 				cap.capacity = 0xfffffffful;
4526 			} else {
4527 				cap.capacity = BE_32(last_lba);
4528 			}
4529 			cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4530 
4531 			aac_free_dmamap(acp);
4532 			if (bp->b_flags & (B_PHYS|B_PAGEIO))
4533 				bp_mapin(bp);
4534 			bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4535 			pkt->pkt_state |= STATE_XFERRED_DATA;
4536 		}
4537 		aac_soft_callback(softs, acp);
4538 		rval = TRAN_ACCEPT;
4539 		break;
4540 
4541 	case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4542 		/* Check if containers need 64-bit LBA support */
4543 		if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4544 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
4545 				struct scsi_capacity_16 cap16;
4546 				int cap_len = sizeof (struct scsi_capacity_16);
4547 
4548 				bzero(&cap16, cap_len);
4549 				cap16.sc_capacity = BE_64(dvp->size - 1);
4550 				cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4551 
4552 				aac_free_dmamap(acp);
4553 				if (bp->b_flags & (B_PHYS | B_PAGEIO))
4554 					bp_mapin(bp);
4555 				bcopy(&cap16, bp->b_un.b_addr,
4556 				    min(bp->b_bcount, cap_len));
4557 				pkt->pkt_state |= STATE_XFERRED_DATA;
4558 			}
4559 			aac_soft_callback(softs, acp);
4560 		} else {
4561 			aac_unknown_scmd(softs, acp);
4562 		}
4563 		rval = TRAN_ACCEPT;
4564 		break;
4565 
4566 	case SCMD_READ_G4: /* read_16 */
4567 	case SCMD_WRITE_G4: /* write_16 */
4568 		if (softs->flags & AAC_FLAGS_RAW_IO) {
4569 			/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4570 			acp->blkno = ((uint64_t) \
4571 			    GETG4ADDR(cdbp) << 32) | \
4572 			    (uint32_t)GETG4ADDRTL(cdbp);
4573 			goto do_io;
4574 		}
4575 		AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4576 		aac_unknown_scmd(softs, acp);
4577 		rval = TRAN_ACCEPT;
4578 		break;
4579 
4580 	case SCMD_READ: /* read_6 */
4581 	case SCMD_WRITE: /* write_6 */
4582 		acp->blkno = GETG0ADDR(cdbp);
4583 		goto do_io;
4584 
4585 	case SCMD_READ_G5: /* read_12 */
4586 	case SCMD_WRITE_G5: /* write_12 */
4587 		acp->blkno = GETG5ADDR(cdbp);
4588 		goto do_io;
4589 
4590 	case SCMD_READ_G1: /* read_10 */
4591 	case SCMD_WRITE_G1: /* write_10 */
4592 		acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4593 do_io:
4594 		if (acp->flags & AAC_CMD_DMA_VALID) {
4595 			uint64_t cnt_size = dvp->size;
4596 
4597 			/*
4598 			 * If LBA > array size AND rawio, the
4599 			 * adapter may hang. So check it before
4600 			 * sending.
4601 			 * NOTE: (blkno + blkcnt) may overflow
4602 			 */
4603 			if ((acp->blkno < cnt_size) &&
4604 			    ((acp->blkno + acp->bcount /
4605 			    AAC_BLK_SIZE) <= cnt_size)) {
4606 				rval = aac_do_io(softs, acp);
4607 			} else {
4608 			/*
4609 			 * Request exceeds the capacity of disk,
4610 			 * set error block number to last LBA
4611 			 * + 1.
4612 			 */
4613 				aac_set_arq_data(pkt,
4614 				    KEY_ILLEGAL_REQUEST, 0x21,
4615 				    0x00, cnt_size);
4616 				aac_soft_callback(softs, acp);
4617 				rval = TRAN_ACCEPT;
4618 			}
4619 		} else if (acp->bcount == 0) {
4620 			/* For 0 length IO, just return ok */
4621 			aac_soft_callback(softs, acp);
4622 			rval = TRAN_ACCEPT;
4623 		} else {
4624 			rval = TRAN_BADPKT;
4625 		}
4626 		break;
4627 
4628 	case SCMD_MODE_SENSE: /* mode_sense_6 */
4629 	case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4630 		int capacity;
4631 
4632 		aac_free_dmamap(acp);
4633 		if (dvp->size > 0xffffffffull)
4634 			capacity = 0xfffffffful; /* 64-bit LBA */
4635 		else
4636 			capacity = dvp->size;
4637 		aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4638 		aac_soft_callback(softs, acp);
4639 		rval = TRAN_ACCEPT;
4640 		break;
4641 	}
4642 
4643 	case SCMD_TEST_UNIT_READY:
4644 	case SCMD_REQUEST_SENSE:
4645 	case SCMD_FORMAT:
4646 	case SCMD_START_STOP:
4647 		aac_free_dmamap(acp);
4648 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
4649 			if (acp->flags & AAC_CMD_BUF_READ) {
4650 				if (bp->b_flags & (B_PHYS|B_PAGEIO))
4651 					bp_mapin(bp);
4652 				bzero(bp->b_un.b_addr, bp->b_bcount);
4653 			}
4654 			pkt->pkt_state |= STATE_XFERRED_DATA;
4655 		}
4656 		aac_soft_callback(softs, acp);
4657 		rval = TRAN_ACCEPT;
4658 		break;
4659 
4660 	case SCMD_SYNCHRONIZE_CACHE:
4661 		acp->flags |= AAC_CMD_NTAG;
4662 		acp->aac_cmd_fib = aac_cmd_fib_sync;
4663 		acp->ac_comp = aac_synccache_complete;
4664 		rval = aac_do_io(softs, acp);
4665 		break;
4666 
4667 	case SCMD_DOORLOCK:
4668 		aac_free_dmamap(acp);
4669 		dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4670 		aac_soft_callback(softs, acp);
4671 		rval = TRAN_ACCEPT;
4672 		break;
4673 
4674 	default: /* unknown command */
4675 		aac_unknown_scmd(softs, acp);
4676 		rval = TRAN_ACCEPT;
4677 		break;
4678 	}
4679 
4680 	return (rval);
4681 }
4682 
4683 static int
4684 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4685 {
4686 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4687 	struct aac_cmd *acp = PKT2AC(pkt);
4688 	struct aac_device *dvp = acp->dvp;
4689 	int rval;
4690 
4691 	DBCALLED(softs, 2);
4692 
4693 	/*
4694 	 * Reinitialize some fields of ac and pkt; the packet may
4695 	 * have been resubmitted
4696 	 */
4697 	acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4698 	    AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4699 	acp->timeout = acp->pkt->pkt_time;
4700 	if (pkt->pkt_flags & FLAG_NOINTR)
4701 		acp->flags |= AAC_CMD_NO_INTR;
4702 #ifdef DEBUG
4703 	acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4704 #endif
4705 	pkt->pkt_reason = CMD_CMPLT;
4706 	pkt->pkt_state = 0;
4707 	pkt->pkt_statistics = 0;
4708 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4709 
4710 	if (acp->flags & AAC_CMD_DMA_VALID) {
4711 		pkt->pkt_resid = acp->bcount;
4712 		/* Consistent packets need to be sync'ed first */
4713 		if ((acp->flags & AAC_CMD_CONSISTENT) &&
4714 		    (acp->flags & AAC_CMD_BUF_WRITE))
4715 			if (aac_dma_sync_ac(acp) != AACOK) {
4716 				ddi_fm_service_impact(softs->devinfo_p,
4717 				    DDI_SERVICE_UNAFFECTED);
4718 				return (TRAN_BADPKT);
4719 			}
4720 	} else {
4721 		pkt->pkt_resid = 0;
4722 	}
4723 
4724 	mutex_enter(&softs->io_lock);
4725 	AACDB_PRINT_SCMD(softs, acp);
4726 	if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4727 	    !(softs->state & AAC_STATE_DEAD)) {
4728 		if (dvp->type == AAC_DEV_LD) {
4729 			if (ap->a_lun == 0)
4730 				rval = aac_tran_start_ld(softs, acp);
4731 			else
4732 				goto error;
4733 		} else {
4734 			rval = aac_do_io(softs, acp);
4735 		}
4736 	} else {
4737 error:
4738 #ifdef DEBUG
4739 		if (!(softs->state & AAC_STATE_DEAD)) {
4740 			AACDB_PRINT_TRAN(softs,
4741 			    "Cannot send cmd to target t%dL%d: %s",
4742 			    ap->a_target, ap->a_lun,
4743 			    "target invalid");
4744 		} else {
4745 			AACDB_PRINT(softs, CE_WARN,
4746 			    "Cannot send cmd to target t%dL%d: %s",
4747 			    ap->a_target, ap->a_lun,
4748 			    "adapter dead");
4749 		}
4750 #endif
4751 		rval = TRAN_FATAL_ERROR;
4752 	}
4753 	mutex_exit(&softs->io_lock);
4754 	return (rval);
4755 }
4756 
4757 static int
4758 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4759 {
4760 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4761 	struct aac_device *dvp;
4762 	int rval;
4763 
4764 	DBCALLED(softs, 2);
4765 
4766 	/* We don't allow inquiring about capabilities for other targets */
4767 	if (cap == NULL || whom == 0) {
4768 		AACDB_PRINT(softs, CE_WARN,
4769 		    "GetCap> %s not supported: whom=%d", cap, whom);
4770 		return (-1);
4771 	}
4772 
4773 	mutex_enter(&softs->io_lock);
4774 	dvp = AAC_DEV(softs, ap->a_target);
4775 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4776 		mutex_exit(&softs->io_lock);
4777 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
4778 		    ap->a_target, ap->a_lun);
4779 		return (-1);
4780 	}
4781 
4782 	switch (scsi_hba_lookup_capstr(cap)) {
4783 	case SCSI_CAP_ARQ: /* auto request sense */
4784 		rval = 1;
4785 		break;
4786 	case SCSI_CAP_UNTAGGED_QING:
4787 	case SCSI_CAP_TAGGED_QING:
4788 		rval = 1;
4789 		break;
4790 	case SCSI_CAP_DMA_MAX:
4791 		rval = softs->dma_max;
4792 		break;
4793 	default:
4794 		rval = -1;
4795 		break;
4796 	}
4797 	mutex_exit(&softs->io_lock);
4798 
4799 	AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
4800 	    cap, ap->a_target, ap->a_lun, rval);
4801 	return (rval);
4802 }
4803 
4804 /*ARGSUSED*/
4805 static int
4806 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4807 {
4808 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4809 	struct aac_device *dvp;
4810 	int rval;
4811 
4812 	DBCALLED(softs, 2);
4813 
4814 	/* We don't allow setting capabilities for other targets */
4815 	if (cap == NULL || whom == 0) {
4816 		AACDB_PRINT(softs, CE_WARN,
4817 		    "SetCap> %s not supported: whom=%d", cap, whom);
4818 		return (-1);
4819 	}
4820 
4821 	mutex_enter(&softs->io_lock);
4822 	dvp = AAC_DEV(softs, ap->a_target);
4823 	if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
4824 		mutex_exit(&softs->io_lock);
4825 		AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
4826 		    ap->a_target, ap->a_lun);
4827 		return (-1);
4828 	}
4829 
4830 	switch (scsi_hba_lookup_capstr(cap)) {
4831 	case SCSI_CAP_ARQ:
4832 		/* Force auto request sense */
4833 		rval = (value == 1) ? 1 : 0;
4834 		break;
4835 	case SCSI_CAP_UNTAGGED_QING:
4836 	case SCSI_CAP_TAGGED_QING:
4837 		rval = (value == 1) ? 1 : 0;
4838 		break;
4839 	default:
4840 		rval = -1;
4841 		break;
4842 	}
4843 	mutex_exit(&softs->io_lock);
4844 
4845 	AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
4846 	    cap, ap->a_target, ap->a_lun, value, rval);
4847 	return (rval);
4848 }
4849 
4850 static void
4851 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4852 {
4853 	struct aac_cmd *acp = PKT2AC(pkt);
4854 
4855 	DBCALLED(NULL, 2);
4856 
4857 	if (acp->sgt) {
4858 		kmem_free(acp->sgt, sizeof (struct aac_sge) * \
4859 		    acp->left_cookien);
4860 	}
4861 	aac_free_dmamap(acp);
4862 	ASSERT(acp->slotp == NULL);
4863 	scsi_hba_pkt_free(ap, pkt);
4864 }
4865 
4866 int
4867 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
4868     struct buf *bp, int flags, int (*cb)(), caddr_t arg)
4869 {
4870 	int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
4871 	uint_t oldcookiec;
4872 	int bioerr;
4873 	int rval;
4874 
4875 	oldcookiec = acp->left_cookien;
4876 
4877 	/* Move window to build s/g map */
4878 	if (acp->total_nwin > 0) {
4879 		if (++acp->cur_win < acp->total_nwin) {
4880 			off_t off;
4881 			size_t len;
4882 
4883 			rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
4884 			    &off, &len, &acp->cookie, &acp->left_cookien);
4885 			if (rval == DDI_SUCCESS)
4886 				goto get_dma_cookies;
4887 			AACDB_PRINT(softs, CE_WARN,
4888 			    "ddi_dma_getwin() fail %d", rval);
4889 			return (AACERR);
4890 		}
4891 		AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
4892 		return (AACERR);
4893 	}
4894 
4895 	/* We need to transfer data, so we alloc DMA resources for this pkt */
4896 	if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
4897 		uint_t dma_flags = 0;
4898 		struct aac_sge *sge;
4899 
4900 		/*
4901 		 * We will still use this point to fake some
4902 		 * information in tran_start
4903 		 */
4904 		acp->bp = bp;
4905 
4906 		/* Set dma flags */
4907 		if (BUF_IS_READ(bp)) {
4908 			dma_flags |= DDI_DMA_READ;
4909 			acp->flags |= AAC_CMD_BUF_READ;
4910 		} else {
4911 			dma_flags |= DDI_DMA_WRITE;
4912 			acp->flags |= AAC_CMD_BUF_WRITE;
4913 		}
4914 		if (flags & PKT_CONSISTENT)
4915 			dma_flags |= DDI_DMA_CONSISTENT;
4916 		if (flags & PKT_DMA_PARTIAL)
4917 			dma_flags |= DDI_DMA_PARTIAL;
4918 
4919 		/* Alloc buf dma handle */
4920 		if (!acp->buf_dma_handle) {
4921 			rval = ddi_dma_alloc_handle(softs->devinfo_p,
4922 			    &softs->buf_dma_attr, cb, arg,
4923 			    &acp->buf_dma_handle);
4924 			if (rval != DDI_SUCCESS) {
4925 				AACDB_PRINT(softs, CE_WARN,
4926 				    "Can't allocate DMA handle, errno=%d",
4927 				    rval);
4928 				goto error_out;
4929 			}
4930 		}
4931 
4932 		/* Bind buf */
4933 		if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
4934 			rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
4935 			    bp, dma_flags, cb, arg, &acp->cookie,
4936 			    &acp->left_cookien);
4937 		} else {
4938 			size_t bufsz;
4939 
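			/*
			 * The buffer is not AAC_DMA_ALIGN aligned: allocate an
			 * aligned bounce buffer, copy write data into it and
			 * bind that instead; read data is copied back in
			 * aac_free_dmamap().
			 */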
4940 			AACDB_PRINT_TRAN(softs,
4941 			    "non-aligned buffer: addr=0x%p, cnt=%lu",
4942 			    (void *)bp->b_un.b_addr, bp->b_bcount);
4943 			if (bp->b_flags & (B_PAGEIO|B_PHYS))
4944 				bp_mapin(bp);
4945 
4946 			rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
4947 			    AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
4948 			    &softs->acc_attr, DDI_DMA_STREAMING,
4949 			    cb, arg, &acp->abp, &bufsz, &acp->abh);
4950 
4951 			if (rval != DDI_SUCCESS) {
4952 				AACDB_PRINT(softs, CE_NOTE,
4953 				    "Cannot alloc DMA to non-aligned buf");
4954 				bioerr = 0;
4955 				goto error_out;
4956 			}
4957 
4958 			if (acp->flags & AAC_CMD_BUF_WRITE)
4959 				ddi_rep_put8(acp->abh,
4960 				    (uint8_t *)bp->b_un.b_addr,
4961 				    (uint8_t *)acp->abp, bp->b_bcount,
4962 				    DDI_DEV_AUTOINCR);
4963 
4964 			rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
4965 			    NULL, acp->abp, bufsz, dma_flags, cb, arg,
4966 			    &acp->cookie, &acp->left_cookien);
4967 		}
4968 
4969 		switch (rval) {
4970 		case DDI_DMA_PARTIAL_MAP:
4971 			if (ddi_dma_numwin(acp->buf_dma_handle,
4972 			    &acp->total_nwin) == DDI_FAILURE) {
4973 				AACDB_PRINT(softs, CE_WARN,
4974 				    "Cannot get number of DMA windows");
4975 				bioerr = 0;
4976 				goto error_out;
4977 			}
4978 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4979 			    acp->left_cookien);
4980 			acp->cur_win = 0;
4981 			break;
4982 
4983 		case DDI_DMA_MAPPED:
4984 			AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
4985 			    acp->left_cookien);
4986 			acp->cur_win = 0;
4987 			acp->total_nwin = 1;
4988 			break;
4989 
4990 		case DDI_DMA_NORESOURCES:
4991 			bioerr = 0;
4992 			AACDB_PRINT(softs, CE_WARN,
4993 			    "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
4994 			goto error_out;
4995 		case DDI_DMA_BADATTR:
4996 		case DDI_DMA_NOMAPPING:
4997 			bioerr = EFAULT;
4998 			AACDB_PRINT(softs, CE_WARN,
4999 			    "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5000 			goto error_out;
5001 		case DDI_DMA_TOOBIG:
5002 			bioerr = EINVAL;
5003 			AACDB_PRINT(softs, CE_WARN,
5004 			    "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5005 			    bp->b_bcount);
5006 			goto error_out;
5007 		default:
5008 			bioerr = EINVAL;
5009 			AACDB_PRINT(softs, CE_WARN,
5010 			    "Cannot bind buf for DMA: %d", rval);
5011 			goto error_out;
5012 		}
5013 		acp->flags |= AAC_CMD_DMA_VALID;
5014 
5015 get_dma_cookies:
5016 		ASSERT(acp->left_cookien > 0);
5017 		if (acp->left_cookien > softs->aac_sg_tablesize) {
5018 			AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5019 			    acp->left_cookien);
5020 			bioerr = EINVAL;
5021 			goto error_out;
5022 		}
5023 		if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5024 			kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5025 			    oldcookiec);
5026 			acp->sgt = NULL;
5027 		}
5028 		if (acp->sgt == NULL) {
5029 			acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5030 			    acp->left_cookien, kf);
5031 			if (acp->sgt == NULL) {
5032 				AACDB_PRINT(softs, CE_WARN,
5033 				    "sgt kmem_alloc fail");
5034 				bioerr = ENOMEM;
5035 				goto error_out;
5036 			}
5037 		}
5038 
5039 		sge = &acp->sgt[0];
5040 		sge->bcount = acp->cookie.dmac_size;
5041 		sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5042 		sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5043 		acp->bcount = acp->cookie.dmac_size;
5044 		for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5045 			ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5046 			sge->bcount = acp->cookie.dmac_size;
5047 			sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5048 			sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5049 			acp->bcount += acp->cookie.dmac_size;
5050 		}
5051 
5052 		/*
5053 		 * Note: The old DMA engine does not correctly honor the
5054 		 * dma_attr_maxxfer attribute, so we have to enforce it
5055 		 * ourselves.
5056 		 */
5057 		if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5058 			AACDB_PRINT(softs, CE_NOTE,
5059 			    "large xfer size received %d\n", acp->bcount);
5060 			bioerr = EINVAL;
5061 			goto error_out;
5062 		}
5063 
5064 		acp->total_xfer += acp->bcount;
5065 
5066 		if (acp->pkt) {
5067 			/* Return remaining byte count */
5068 			if (acp->total_xfer <= bp->b_bcount) {
5069 				acp->pkt->pkt_resid = bp->b_bcount - \
5070 				    acp->total_xfer;
5071 			} else {
5072 				/*
5073 				 * The allocated DMA size is greater than the
5074 				 * buf size of bp. This is caused by devices
5075 				 * like tape: extra bytes are allocated, but
5076 				 * the packet residual has to stay correct.
5077 				 */
5078 				acp->pkt->pkt_resid = 0;
5079 			}
5080 			AACDB_PRINT_TRAN(softs,
5081 			    "bp=0x%p, xfered=%d/%d, resid=%d",
5082 			    (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5083 			    (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5084 		}
5085 	}
5086 	return (AACOK);
5087 
5088 error_out:
5089 	bioerror(bp, bioerr);
5090 	return (AACERR);
5091 }
5092 
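/*
 * tran_init_pkt(9E) - allocate a scsi_pkt and DMA resources for a command
 */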
5093 static struct scsi_pkt *
5094 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5095     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5096     int (*callback)(), caddr_t arg)
5097 {
5098 	struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5099 	struct aac_cmd *acp, *new_acp;
5100 
5101 	DBCALLED(softs, 2);
5102 
5103 	/* Allocate pkt */
5104 	if (pkt == NULL) {
5105 		int slen;
5106 
5107 		/* Force auto request sense */
5108 		slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5109 		pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5110 		    slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5111 		if (pkt == NULL) {
5112 			AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5113 			return (NULL);
5114 		}
5115 		acp = new_acp = PKT2AC(pkt);
5116 		acp->pkt = pkt;
5117 		acp->cmdlen = cmdlen;
5118 
5119 		if (ap->a_target < AAC_MAX_LD) {
5120 			acp->dvp = &softs->containers[ap->a_target].dev;
5121 			acp->aac_cmd_fib = softs->aac_cmd_fib;
5122 			acp->ac_comp = aac_ld_complete;
5123 		} else {
5124 			_NOTE(ASSUMING_PROTECTED(softs->nondasds))
5125 
5126 			acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5127 			acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5128 			acp->ac_comp = aac_pd_complete;
5129 		}
5130 	} else {
5131 		acp = PKT2AC(pkt);
5132 		new_acp = NULL;
5133 	}
5134 
5135 	if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5136 		return (pkt);
5137 
5138 	if (new_acp)
5139 		aac_tran_destroy_pkt(ap, pkt);
5140 	return (NULL);
5141 }
5142 
5143 /*
5144  * tran_sync_pkt(9E) - explicit DMA synchronization
5145  */
5146 /*ARGSUSED*/
5147 static void
5148 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5149 {
5150 	struct aac_cmd *acp = PKT2AC(pkt);
5151 
5152 	DBCALLED(NULL, 2);
5153 
5154 	if (aac_dma_sync_ac(acp) != AACOK)
5155 		ddi_fm_service_impact(
5156 		    (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5157 		    DDI_SERVICE_UNAFFECTED);
5158 }
5159 
5160 /*
5161  * tran_dmafree(9E) - deallocate DMA resources allocated for command
5162  */
5163 /*ARGSUSED*/
5164 static void
5165 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5166 {
5167 	struct aac_cmd *acp = PKT2AC(pkt);
5168 
5169 	DBCALLED(NULL, 2);
5170 
5171 	aac_free_dmamap(acp);
5172 }
5173 
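/*
 * Hold the bus for async commands and wait for all outstanding async
 * FIBs to drain before marking the controller quiesced.
 */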
5174 static int
5175 aac_do_quiesce(struct aac_softstate *softs)
5176 {
5177 	aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5178 	if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5179 		aac_start_drain(softs);
5180 		do {
5181 			if (cv_wait_sig(&softs->drain_cv,
5182 			    &softs->io_lock) == 0) {
5183 				/* Quiesce has been interrupted */
5184 				aac_stop_drain(softs);
5185 				aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5186 				aac_start_waiting_io(softs);
5187 				return (AACERR);
5188 			}
5189 		} while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5190 		aac_stop_drain(softs);
5191 	}
5192 
5193 	softs->state |= AAC_STATE_QUIESCED;
5194 	return (AACOK);
5195 }
5196 
5197 static int
5198 aac_tran_quiesce(dev_info_t *dip)
5199 {
5200 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5201 	int rval;
5202 
5203 	DBCALLED(softs, 1);
5204 
5205 	mutex_enter(&softs->io_lock);
5206 	if (aac_do_quiesce(softs) == AACOK)
5207 		rval = 0;
5208 	else
5209 		rval = 1;
5210 	mutex_exit(&softs->io_lock);
5211 	return (rval);
5212 }
5213 
5214 static int
5215 aac_do_unquiesce(struct aac_softstate *softs)
5216 {
5217 	softs->state &= ~AAC_STATE_QUIESCED;
5218 	aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5219 
5220 	aac_start_waiting_io(softs);
5221 	return (AACOK);
5222 }
5223 
5224 static int
5225 aac_tran_unquiesce(dev_info_t *dip)
5226 {
5227 	struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5228 	int rval;
5229 
5230 	DBCALLED(softs, 1);
5231 
5232 	mutex_enter(&softs->io_lock);
5233 	if (aac_do_unquiesce(softs) == AACOK)
5234 		rval = 0;
5235 	else
5236 		rval = 1;
5237 	mutex_exit(&softs->io_lock);
5238 	return (rval);
5239 }
5240 
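/*
 * Allocate the SCSA transport structure, set up the tran_xxx(9E) entry
 * points and attach this instance to the SCSA framework.
 */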
5241 static int
5242 aac_hba_setup(struct aac_softstate *softs)
5243 {
5244 	scsi_hba_tran_t *hba_tran;
5245 	int rval;
5246 
5247 	hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5248 	if (hba_tran == NULL)
5249 		return (AACERR);
5250 	hba_tran->tran_hba_private = softs;
5251 	hba_tran->tran_tgt_init = aac_tran_tgt_init;
5252 	hba_tran->tran_tgt_free = aac_tran_tgt_free;
5253 	hba_tran->tran_tgt_probe = scsi_hba_probe;
5254 	hba_tran->tran_start = aac_tran_start;
5255 	hba_tran->tran_getcap = aac_tran_getcap;
5256 	hba_tran->tran_setcap = aac_tran_setcap;
5257 	hba_tran->tran_init_pkt = aac_tran_init_pkt;
5258 	hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5259 	hba_tran->tran_reset = aac_tran_reset;
5260 	hba_tran->tran_abort = aac_tran_abort;
5261 	hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5262 	hba_tran->tran_dmafree = aac_tran_dmafree;
5263 	hba_tran->tran_quiesce = aac_tran_quiesce;
5264 	hba_tran->tran_unquiesce = aac_tran_unquiesce;
5265 	hba_tran->tran_bus_config = aac_tran_bus_config;
5266 	rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5267 	    hba_tran, 0);
5268 	if (rval != DDI_SUCCESS) {
5269 		scsi_hba_tran_free(hba_tran);
5270 		AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5271 		return (AACERR);
5272 	}
5273 
5274 	softs->hba_tran = hba_tran;
5275 	return (AACOK);
5276 }
5277 
5278 /*
5279  * FIB setup operations
5280  */
5281 
5282 /*
5283  * Init FIB header
5284  */
5285 static void
5286 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_slot *slotp,
5287     uint16_t cmd, uint16_t fib_size)
5288 {
5289 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5290 	struct aac_fib *fibp = slotp->fibp;
5291 	uint32_t xfer_state;
5292 
5293 	xfer_state =
5294 	    AAC_FIBSTATE_HOSTOWNED |
5295 	    AAC_FIBSTATE_INITIALISED |
5296 	    AAC_FIBSTATE_EMPTY |
5297 	    AAC_FIBSTATE_FROMHOST |
5298 	    AAC_FIBSTATE_REXPECTED |
5299 	    AAC_FIBSTATE_NORM;
5300 	if (slotp->acp && !(slotp->acp->flags & AAC_CMD_SYNC)) {
5301 		xfer_state |=
5302 		    AAC_FIBSTATE_ASYNC |
5303 		    AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */;
5304 		ddi_put16(acc, &fibp->Header.SenderSize,
5305 		    softs->aac_max_fib_size);
5306 	} else {
5307 		ddi_put16(acc, &fibp->Header.SenderSize, AAC_FIB_SIZE);
5308 	}
5309 
5310 	ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5311 	ddi_put16(acc, &fibp->Header.Command, cmd);
5312 	ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5313 	ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5314 	ddi_put16(acc, &fibp->Header.Size, fib_size);
5315 	ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5316 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5317 	ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5318 }
5319 
5320 /*
5321  * Init FIB for raw IO command
5322  */
5323 static void
5324 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5325 {
5326 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5327 	struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5328 	struct aac_sg_entryraw *sgp;
5329 	struct aac_sge *sge;
5330 
5331 	/* Calculate FIB size */
5332 	acp->fib_size = sizeof (struct aac_fib_header) + \
5333 	    sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5334 	    sizeof (struct aac_sg_entryraw);
5335 
5336 	aac_cmd_fib_header(softs, acp->slotp, RawIo, acp->fib_size);
5337 
5338 	ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5339 	ddi_put16(acc, &io->BpTotal, 0);
5340 	ddi_put16(acc, &io->BpComplete, 0);
5341 
5342 	ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5343 	ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5344 	ddi_put16(acc, &io->ContainerId,
5345 	    ((struct aac_container *)acp->dvp)->cid);
5346 
5347 	/* Fill SG table */
5348 	ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5349 	ddi_put32(acc, &io->ByteCount, acp->bcount);
5350 
5351 	for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5352 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5353 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5354 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5355 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5356 		sgp->Next = 0;
5357 		sgp->Prev = 0;
5358 		sgp->Flags = 0;
5359 	}
5360 }
5361 
5362 /* Init FIB for 64-bit block IO command */
5363 static void
5364 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5365 {
5366 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5367 	struct aac_blockread64 *br = (struct aac_blockread64 *) \
5368 	    &acp->slotp->fibp->data[0];
5369 	struct aac_sg_entry64 *sgp;
5370 	struct aac_sge *sge;
5371 
5372 	acp->fib_size = sizeof (struct aac_fib_header) + \
5373 	    sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5374 	    sizeof (struct aac_sg_entry64);
5375 
5376 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand64,
5377 	    acp->fib_size);
5378 
5379 	/*
5380 	 * The definitions for aac_blockread64 and aac_blockwrite64
5381 	 * are the same.
5382 	 */
5383 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5384 	ddi_put16(acc, &br->ContainerId,
5385 	    ((struct aac_container *)acp->dvp)->cid);
5386 	ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5387 	    VM_CtHostRead64 : VM_CtHostWrite64);
5388 	ddi_put16(acc, &br->Pad, 0);
5389 	ddi_put16(acc, &br->Flags, 0);
5390 
5391 	/* Fill SG table */
5392 	ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5393 	ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5394 
5395 	for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5396 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5397 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5398 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5399 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5400 	}
5401 }
5402 
5403 /* Init FIB for block IO command */
5404 static void
5405 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5406 {
5407 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5408 	struct aac_blockread *br = (struct aac_blockread *) \
5409 	    &acp->slotp->fibp->data[0];
5410 	struct aac_sg_entry *sgp;
5411 	struct aac_sge *sge = &acp->sgt[0];
5412 
5413 	if (acp->flags & AAC_CMD_BUF_READ) {
5414 		acp->fib_size = sizeof (struct aac_fib_header) + \
5415 		    sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5416 		    sizeof (struct aac_sg_entry);
5417 
5418 		ddi_put32(acc, &br->Command, VM_CtBlockRead);
5419 		ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5420 		sgp = &br->SgMap.SgEntry[0];
5421 	} else {
5422 		struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5423 
5424 		acp->fib_size = sizeof (struct aac_fib_header) + \
5425 		    sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5426 		    sizeof (struct aac_sg_entry);
5427 
5428 		ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5429 		ddi_put32(acc, &bw->Stable, CUNSTABLE);
5430 		ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5431 		sgp = &bw->SgMap.SgEntry[0];
5432 	}
5433 	aac_cmd_fib_header(softs, acp->slotp, ContainerCommand, acp->fib_size);
5434 
5435 	/*
5436 	 * aac_blockread and aac_blockwrite share the same leading
5437 	 * structure layout, so br is used for bw here as well.
5438 	 */
5439 	ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5440 	ddi_put32(acc, &br->ContainerId,
5441 	    ((struct aac_container *)acp->dvp)->cid);
5442 	ddi_put32(acc, &br->ByteCount, acp->bcount);
5443 
5444 	/* Fill SG table */
5445 	for (sge = &acp->sgt[0];
5446 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5447 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5448 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5449 	}
5450 }
5451 
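/*
 * Copy a pre-built FIB (e.g. one passed in through an ioctl request)
 * into the slot FIB area and fix up the sender/receiver FIB addresses.
 */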
5452 /*ARGSUSED*/
5453 void
5454 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5455 {
5456 	struct aac_slot *slotp = acp->slotp;
5457 	struct aac_fib *fibp = slotp->fibp;
5458 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5459 
5460 	ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5461 	    acp->fib_size,   /* only copy data of needed length */
5462 	    DDI_DEV_AUTOINCR);
5463 	ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5464 	ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5465 }
5466 
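/*
 * Init FIB for a container flush (synchronize cache) command
 */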
5467 static void
5468 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5469 {
5470 	struct aac_slot *slotp = acp->slotp;
5471 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5472 	struct aac_synchronize_command *sync =
5473 	    (struct aac_synchronize_command *)&slotp->fibp->data[0];
5474 
5475 	acp->fib_size = sizeof (struct aac_fib_header) + \
5476 	    sizeof (struct aac_synchronize_command);
5477 
5478 	aac_cmd_fib_header(softs, slotp, ContainerCommand, acp->fib_size);
5479 	ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5480 	ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5481 	ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5482 	ddi_put32(acc, &sync->Count,
5483 	    sizeof (((struct aac_synchronize_reply *)0)->Data));
5484 }
5485 
5486 /*
5487  * Init FIB for pass-through SCMD
5488  */
5489 static void
5490 aac_cmd_fib_srb(struct aac_cmd *acp)
5491 {
5492 	struct aac_slot *slotp = acp->slotp;
5493 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
5494 	struct aac_srb *srb = (struct aac_srb *)&slotp->fibp->data[0];
5495 	uint8_t *cdb;
5496 
5497 	ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5498 	ddi_put32(acc, &srb->retry_limit, 0);
5499 	ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5500 	ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5501 	if (acp->fibp == NULL) {
5502 		if (acp->flags & AAC_CMD_BUF_READ)
5503 			ddi_put32(acc, &srb->flags, SRB_DataIn);
5504 		else if (acp->flags & AAC_CMD_BUF_WRITE)
5505 			ddi_put32(acc, &srb->flags, SRB_DataOut);
5506 		ddi_put32(acc, &srb->channel,
5507 		    ((struct aac_nondasd *)acp->dvp)->bus);
5508 		ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5509 		ddi_put32(acc, &srb->lun, 0);
5510 		cdb = acp->pkt->pkt_cdbp;
5511 	} else {
5512 		struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5513 
5514 		ddi_put32(acc, &srb->flags, srb0->flags);
5515 		ddi_put32(acc, &srb->channel, srb0->channel);
5516 		ddi_put32(acc, &srb->id, srb0->id);
5517 		ddi_put32(acc, &srb->lun, srb0->lun);
5518 		cdb = srb0->cdb;
5519 	}
5520 	ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5521 }
5522 
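/*
 * Init FIB for a pass-through SCMD using 32-bit SG entries
 */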
5523 static void
5524 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5525 {
5526 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5527 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5528 	struct aac_sg_entry *sgp;
5529 	struct aac_sge *sge;
5530 
5531 	acp->fib_size = sizeof (struct aac_fib_header) + \
5532 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5533 	    acp->left_cookien * sizeof (struct aac_sg_entry);
5534 
5535 	/* Fill FIB and SRB headers, and copy cdb */
5536 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommand, acp->fib_size);
5537 	aac_cmd_fib_srb(acp);
5538 
5539 	/* Fill SG table */
5540 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5541 	ddi_put32(acc, &srb->count, acp->bcount);
5542 
5543 	for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5544 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5545 		ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5546 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5547 	}
5548 }
5549 
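/*
 * Init FIB for a pass-through SCMD using 64-bit SG entries
 */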
5550 static void
5551 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5552 {
5553 	ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5554 	struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5555 	struct aac_sg_entry64 *sgp;
5556 	struct aac_sge *sge;
5557 
5558 	acp->fib_size = sizeof (struct aac_fib_header) + \
5559 	    sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5560 	    acp->left_cookien * sizeof (struct aac_sg_entry64);
5561 
5562 	/* Fill FIB and SRB headers, and copy cdb */
5563 	aac_cmd_fib_header(softs, acp->slotp, ScsiPortCommandU64,
5564 	    acp->fib_size);
5565 	aac_cmd_fib_srb(acp);
5566 
5567 	/* Fill SG table */
5568 	ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5569 	ddi_put32(acc, &srb->count, acp->bcount);
5570 
5571 	for (sge = &acp->sgt[0],
5572 	    sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5573 	    sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5574 		ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5575 		ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5576 		ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5577 	}
5578 }
5579 
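/*
 * Bind the command to a free slot, build its FIB in the slot FIB area
 * and sync the FIB DMA memory for the device.
 */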
5580 static int
5581 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5582 {
5583 	struct aac_slot *slotp;
5584 
5585 	if (slotp = aac_get_slot(softs)) {
5586 		acp->slotp = slotp;
5587 		slotp->acp = acp;
5588 		acp->aac_cmd_fib(softs, acp);
5589 		(void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5590 		    DDI_DMA_SYNC_FORDEV);
5591 		return (AACOK);
5592 	}
5593 	return (AACERR);
5594 }
5595 
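/*
 * Bind a command to an IO slot if the per-device (or per-bus) throttle
 * permits, otherwise return AACERR so the caller keeps it waiting.
 */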
5596 static int
5597 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5598 {
5599 	struct aac_device *dvp = acp->dvp;
5600 	int q = AAC_CMDQ(acp);
5601 
5602 	if (dvp) {
5603 		if (dvp->ncmds[q] < dvp->throttle[q]) {
5604 			if (!(acp->flags & AAC_CMD_NTAG) ||
5605 			    dvp->ncmds[q] == 0) {
5606 do_bind:
5607 				return (aac_cmd_slot_bind(softs, acp));
5608 			}
5609 			ASSERT(q == AAC_CMDQ_ASYNC);
5610 			aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5611 			    AAC_THROTTLE_DRAIN);
5612 		}
5613 	} else {
5614 		if (softs->bus_ncmds[q] < softs->bus_throttle[q])
5615 			goto do_bind;
5616 	}
5617 	return (AACERR);
5618 }
5619 
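/*
 * Send a slot-bound command to the adapter and put it on the busy queue
 */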
5620 static void
5621 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5622 {
5623 	struct aac_slot *slotp = acp->slotp;
5624 	int q = AAC_CMDQ(acp);
5625 	int rval;
5626 
5627 	/* Set ac and pkt */
5628 	if (acp->pkt) { /* ac from ioctl has no pkt */
5629 		acp->pkt->pkt_state |=
5630 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5631 	}
5632 	if (acp->timeout) /* 0 indicates no timeout */
5633 		acp->timeout += aac_timebase + aac_tick;
5634 
5635 	if (acp->dvp)
5636 		acp->dvp->ncmds[q]++;
5637 	softs->bus_ncmds[q]++;
5638 	aac_cmd_enqueue(&softs->q_busy, acp);
5639 
5640 	AACDB_PRINT_FIB(softs, slotp);
5641 
5642 	if (softs->flags & AAC_FLAGS_NEW_COMM) {
5643 		rval = aac_send_command(softs, slotp);
5644 	} else {
5645 		/*
5646 		 * If the FIB cannot be enqueued, the adapter is in an
5647 		 * abnormal state and will not interrupt us.
5648 		 */
5649 		rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5650 		    slotp->fib_phyaddr, acp->fib_size);
5651 	}
5652 
5653 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5654 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5655 
5656 	/*
5657 	 * NOTE: We send commands only when slots are available, so we
5658 	 * should never reach here.
5659 	 */
5660 	if (rval != AACOK) {
5661 		AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5662 		if (acp->pkt) {
5663 			acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5664 			aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5665 		}
5666 		aac_end_io(softs, acp);
5667 		if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5668 			ddi_trigger_softintr(softs->softint_id);
5669 	}
5670 }
5671 
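/*
 * Dispatch commands from a wait queue while free IO slots remain
 */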
5672 static void
5673 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5674 {
5675 	struct aac_cmd *acp, *next_acp;
5676 
5677 	/* Serve as many waiting io's as possible */
5678 	for (acp = q->q_head; acp; acp = next_acp) {
5679 		next_acp = acp->next;
5680 		if (aac_bind_io(softs, acp) == AACOK) {
5681 			aac_cmd_delete(q, acp);
5682 			aac_start_io(softs, acp);
5683 		}
5684 		if (softs->free_io_slot_head == NULL)
5685 			break;
5686 	}
5687 }
5688 
5689 static void
5690 aac_start_waiting_io(struct aac_softstate *softs)
5691 {
5692 	/*
5693 	 * Sync FIB io is served before async FIB io so that io requests
5694 	 * from interactive userland commands are responded to asap.
5695 	 */
5696 	if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5697 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5698 	if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5699 		aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
5700 }
5701 
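/*
 * Complete all commands on the completion queue, syncing consistent
 * read buffers and checking FMA handles before calling back SCSA.
 */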
5702 static void
5703 aac_drain_comp_q(struct aac_softstate *softs)
5704 {
5705 	struct aac_cmd *acp;
5706 	struct scsi_pkt *pkt;
5707 
5708 	/*CONSTCOND*/
5709 	while (1) {
5710 		mutex_enter(&softs->q_comp_mutex);
5711 		acp = aac_cmd_dequeue(&softs->q_comp);
5712 		mutex_exit(&softs->q_comp_mutex);
5713 		if (acp != NULL) {
5714 			ASSERT(acp->pkt != NULL);
5715 			pkt = acp->pkt;
5716 
5717 			if (pkt->pkt_reason == CMD_CMPLT) {
5718 				/*
5719 				 * Consistent packets need to be sync'ed first
5720 				 */
5721 				if ((acp->flags & AAC_CMD_CONSISTENT) &&
5722 				    (acp->flags & AAC_CMD_BUF_READ)) {
5723 					if (aac_dma_sync_ac(acp) != AACOK) {
5724 						ddi_fm_service_impact(
5725 						    softs->devinfo_p,
5726 						    DDI_SERVICE_UNAFFECTED);
5727 						pkt->pkt_reason = CMD_TRAN_ERR;
5728 						pkt->pkt_statistics = 0;
5729 					}
5730 				}
5731 				if ((aac_check_acc_handle(softs-> \
5732 				    comm_space_acc_handle) != DDI_SUCCESS) ||
5733 				    (aac_check_acc_handle(softs-> \
5734 				    pci_mem_handle) != DDI_SUCCESS)) {
5735 					ddi_fm_service_impact(softs->devinfo_p,
5736 					    DDI_SERVICE_UNAFFECTED);
5737 					ddi_fm_acc_err_clear(softs-> \
5738 					    pci_mem_handle, DDI_FME_VER0);
5739 					pkt->pkt_reason = CMD_TRAN_ERR;
5740 					pkt->pkt_statistics = 0;
5741 				}
5742 				if (aac_check_dma_handle(softs-> \
5743 				    comm_space_dma_handle) != DDI_SUCCESS) {
5744 					ddi_fm_service_impact(softs->devinfo_p,
5745 					    DDI_SERVICE_UNAFFECTED);
5746 					pkt->pkt_reason = CMD_TRAN_ERR;
5747 					pkt->pkt_statistics = 0;
5748 				}
5749 			}
5750 			scsi_hba_pkt_comp(pkt);
5751 		} else {
5752 			break;
5753 		}
5754 	}
5755 }
5756 
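/*
 * Allocate and bind DMA memory for one slot FIB
 */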
5757 static int
5758 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
5759 {
5760 	size_t rlen;
5761 	ddi_dma_cookie_t cookie;
5762 	uint_t cookien;
5763 
5764 	/* Allocate FIB dma resource */
5765 	if (ddi_dma_alloc_handle(
5766 	    softs->devinfo_p,
5767 	    &softs->addr_dma_attr,
5768 	    DDI_DMA_SLEEP,
5769 	    NULL,
5770 	    &slotp->fib_dma_handle) != DDI_SUCCESS) {
5771 		AACDB_PRINT(softs, CE_WARN,
5772 		    "Cannot alloc dma handle for slot fib area");
5773 		goto error;
5774 	}
5775 	if (ddi_dma_mem_alloc(
5776 	    slotp->fib_dma_handle,
5777 	    softs->aac_max_fib_size,
5778 	    &softs->acc_attr,
5779 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5780 	    DDI_DMA_SLEEP,
5781 	    NULL,
5782 	    (caddr_t *)&slotp->fibp,
5783 	    &rlen,
5784 	    &slotp->fib_acc_handle) != DDI_SUCCESS) {
5785 		AACDB_PRINT(softs, CE_WARN,
5786 		    "Cannot alloc mem for slot fib area");
5787 		goto error;
5788 	}
5789 	if (ddi_dma_addr_bind_handle(
5790 	    slotp->fib_dma_handle,
5791 	    NULL,
5792 	    (caddr_t)slotp->fibp,
5793 	    softs->aac_max_fib_size,
5794 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5795 	    DDI_DMA_SLEEP,
5796 	    NULL,
5797 	    &cookie,
5798 	    &cookien) != DDI_DMA_MAPPED) {
5799 		AACDB_PRINT(softs, CE_WARN,
5800 		    "dma bind failed for slot fib area");
5801 		goto error;
5802 	}
5803 
5804 	/* Check dma handles allocated in fib attach */
5805 	if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
5806 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5807 		goto error;
5808 	}
5809 
5810 	/* Check acc handles allocated in fib attach */
5811 	if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
5812 		ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
5813 		goto error;
5814 	}
5815 
5816 	slotp->fib_phyaddr = cookie.dmac_laddress;
5817 	return (AACOK);
5818 
5819 error:
5820 	if (slotp->fib_acc_handle) {
5821 		ddi_dma_mem_free(&slotp->fib_acc_handle);
5822 		slotp->fib_acc_handle = NULL;
5823 	}
5824 	if (slotp->fib_dma_handle) {
5825 		ddi_dma_free_handle(&slotp->fib_dma_handle);
5826 		slotp->fib_dma_handle = NULL;
5827 	}
5828 	return (AACERR);
5829 }
5830 
5831 static void
5832 aac_free_fib(struct aac_slot *slotp)
5833 {
5834 	(void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
5835 	ddi_dma_mem_free(&slotp->fib_acc_handle);
5836 	slotp->fib_acc_handle = NULL;
5837 	ddi_dma_free_handle(&slotp->fib_dma_handle);
5838 	slotp->fib_dma_handle = NULL;
5839 	slotp->fib_phyaddr = 0;
5840 }
5841 
5842 static void
5843 aac_alloc_fibs(struct aac_softstate *softs)
5844 {
5845 	int i;
5846 	struct aac_slot *slotp;
5847 
5848 	for (i = 0; i < softs->total_slots &&
5849 	    softs->total_fibs < softs->total_slots; i++) {
5850 		slotp = &(softs->io_slot[i]);
5851 		if (slotp->fib_phyaddr)
5852 			continue;
5853 		if (aac_alloc_fib(softs, slotp) != AACOK)
5854 			break;
5855 
5856 		/* Insert the slot to the free slot list */
5857 		aac_release_slot(softs, slotp);
5858 		softs->total_fibs++;
5859 	}
5860 }
5861 
5862 static void
5863 aac_destroy_fibs(struct aac_softstate *softs)
5864 {
5865 	struct aac_slot *slotp;
5866 
5867 	while ((slotp = softs->free_io_slot_head) != NULL) {
5868 		ASSERT(slotp->fib_phyaddr);
5869 		softs->free_io_slot_head = slotp->next;
5870 		aac_free_fib(slotp);
5871 		ASSERT(slotp->index == (slotp - softs->io_slot));
5872 		softs->total_fibs--;
5873 	}
5874 	ASSERT(softs->total_fibs == 0);
5875 }
5876 
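/*
 * Allocate the IO slot array; FIB memory for each slot is allocated
 * separately by aac_alloc_fibs().
 */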
5877 static int
5878 aac_create_slots(struct aac_softstate *softs)
5879 {
5880 	int i;
5881 
5882 	softs->total_slots = softs->aac_max_fibs;
5883 	softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
5884 	    softs->total_slots, KM_SLEEP);
5885 	if (softs->io_slot == NULL) {
5886 		AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
5887 		return (AACERR);
5888 	}
5889 	for (i = 0; i < softs->total_slots; i++)
5890 		softs->io_slot[i].index = i;
5891 	softs->free_io_slot_head = NULL;
5892 	softs->total_fibs = 0;
5893 	return (AACOK);
5894 }
5895 
5896 static void
5897 aac_destroy_slots(struct aac_softstate *softs)
5898 {
5899 	ASSERT(softs->free_io_slot_head == NULL);
5900 
5901 	kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
5902 	    softs->total_slots);
5903 	softs->io_slot = NULL;
5904 	softs->total_slots = 0;
5905 }
5906 
5907 struct aac_slot *
5908 aac_get_slot(struct aac_softstate *softs)
5909 {
5910 	struct aac_slot *slotp;
5911 
5912 	if ((slotp = softs->free_io_slot_head) != NULL) {
5913 		softs->free_io_slot_head = slotp->next;
5914 		slotp->next = NULL;
5915 	}
5916 	return (slotp);
5917 }
5918 
5919 static void
5920 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
5921 {
5922 	ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
5923 	ASSERT(slotp == &softs->io_slot[slotp->index]);
5924 
5925 	slotp->acp = NULL;
5926 	slotp->next = softs->free_io_slot_head;
5927 	softs->free_io_slot_head = slotp;
5928 }
5929 
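/*
 * Start an IO command, or queue it if no slot is available; for polled
 * or sync commands, wait for completion before returning.
 */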
5930 int
5931 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
5932 {
5933 	if (aac_bind_io(softs, acp) == AACOK)
5934 		aac_start_io(softs, acp);
5935 	else
5936 		aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
5937 
5938 	if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
5939 		return (TRAN_ACCEPT);
5940 	/*
5941 	 * Because the sync FIB is always 512 bytes and reserved for
5942 	 * critical functions, an async FIB is used for polled IO.
5943 	 */
5944 	if (acp->flags & AAC_CMD_NO_INTR) {
5945 		if (aac_do_poll_io(softs, acp) == AACOK)
5946 			return (TRAN_ACCEPT);
5947 	} else {
5948 		if (aac_do_sync_io(softs, acp) == AACOK)
5949 			return (TRAN_ACCEPT);
5950 	}
5951 	return (TRAN_BADPKT);
5952 }
5953 
5954 static int
5955 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
5956 {
5957 	int (*intr_handler)(struct aac_softstate *);
5958 
5959 	/*
5960 	 * Interrupts are disabled, so we have to poll the adapter ourselves.
5961 	 */
5962 	intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
5963 	    aac_process_intr_new : aac_process_intr_old;
5964 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
5965 		int i = AAC_POLL_TIME * 1000;
5966 
5967 		AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
5968 		if (i == 0)
5969 			aac_cmd_timeout(softs, acp);
5970 	}
5971 
5972 	ddi_trigger_softintr(softs->softint_id);
5973 
5974 	if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
5975 		return (AACOK);
5976 	return (AACERR);
5977 }
5978 
5979 static int
5980 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
5981 {
5982 	ASSERT(softs && acp);
5983 
5984 	while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
5985 		cv_wait(&softs->event, &softs->io_lock);
5986 
5987 	if (acp->flags & AAC_CMD_CMPLT)
5988 		return (AACOK);
5989 	return (AACERR);
5990 }
5991 
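/*
 * Sync the data buffer of a command for the device (write) or for the
 * CPU (read), copying through the aligned bounce buffer if one is used.
 */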
5992 static int
5993 aac_dma_sync_ac(struct aac_cmd *acp)
5994 {
5995 	if (acp->buf_dma_handle) {
5996 		if (acp->flags & AAC_CMD_BUF_WRITE) {
5997 			if (acp->abp != NULL)
5998 				ddi_rep_put8(acp->abh,
5999 				    (uint8_t *)acp->bp->b_un.b_addr,
6000 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
6001 				    DDI_DEV_AUTOINCR);
6002 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6003 			    DDI_DMA_SYNC_FORDEV);
6004 		} else {
6005 			(void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6006 			    DDI_DMA_SYNC_FORCPU);
6007 			if (aac_check_dma_handle(acp->buf_dma_handle) !=
6008 			    DDI_SUCCESS)
6009 				return (AACERR);
6010 			if (acp->abp != NULL)
6011 				ddi_rep_get8(acp->abh,
6012 				    (uint8_t *)acp->bp->b_un.b_addr,
6013 				    (uint8_t *)acp->abp, acp->bp->b_bcount,
6014 				    DDI_DEV_AUTOINCR);
6015 		}
6016 	}
6017 	return (AACOK);
6018 }
6019 
6020 /*
6021  * The following function comes from Adaptec:
6022  *
6023  * When the driver sees a particular event indicating that containers have
6024  * changed, it rescans the containers. However, a change may not be complete
6025  * until some other event is received. For example, creating or deleting an
6026  * array can incur as many as six AifEnConfigChange events, which would
6027  * generate six container rescans. To reduce rescans, the driver sets a flag
6028  * to wait for another particular event, and only rescans when it arrives.
6029  */
6030 static int
6031 aac_handle_aif(struct aac_softstate *softs, struct aac_fib *fibp)
6032 {
6033 	ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6034 	uint16_t fib_command;
6035 	struct aac_aif_command *aif;
6036 	int en_type;
6037 	int devcfg_needed;
6038 	int current, next;
6039 
6040 	fib_command = LE_16(fibp->Header.Command);
6041 	if (fib_command != AifRequest) {
6042 		cmn_err(CE_NOTE, "!Unknown command from controller: 0x%x",
6043 		    fib_command);
6044 		return (AACERR);
6045 	}
6046 
6047 	/* Update internal container state */
6048 	aif = (struct aac_aif_command *)&fibp->data[0];
6049 
6050 	AACDB_PRINT_AIF(softs, aif);
6051 	devcfg_needed = 0;
6052 	en_type = LE_32((uint32_t)aif->data.EN.type);
6053 
6054 	switch (LE_32((uint32_t)aif->command)) {
6055 	case AifCmdDriverNotify: {
6056 		int cid = LE_32(aif->data.EN.data.ECC.container[0]);
6057 
6058 		switch (en_type) {
6059 		case AifDenMorphComplete:
6060 		case AifDenVolumeExtendComplete:
6061 			if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6062 				softs->devcfg_wait_on = AifEnConfigChange;
6063 			break;
6064 		}
6065 		if (softs->devcfg_wait_on == en_type)
6066 			devcfg_needed = 1;
6067 		break;
6068 	}
6069 
6070 	case AifCmdEventNotify:
6071 		switch (en_type) {
6072 		case AifEnAddContainer:
6073 		case AifEnDeleteContainer:
6074 			softs->devcfg_wait_on = AifEnConfigChange;
6075 			break;
6076 		case AifEnContainerChange:
6077 			if (!softs->devcfg_wait_on)
6078 				softs->devcfg_wait_on = AifEnConfigChange;
6079 			break;
6080 		case AifEnContainerEvent:
6081 			if (ddi_get32(acc, &aif-> \
6082 			    data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6083 				devcfg_needed = 1;
6084 			break;
6085 		}
6086 		if (softs->devcfg_wait_on == en_type)
6087 			devcfg_needed = 1;
6088 		break;
6089 
6090 	case AifCmdJobProgress:
6091 		if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6092 			int pr_status;
6093 			uint32_t pr_ftick, pr_ctick;
6094 
6095 			pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6096 			pr_ctick = LE_32(aif->data.PR[0].currentTick);
6097 			pr_ftick = LE_32(aif->data.PR[0].finalTick);
6098 
6099 			if ((pr_ctick == pr_ftick) ||
6100 			    (pr_status == AifJobStsSuccess))
6101 				softs->devcfg_wait_on = AifEnContainerChange;
6102 			else if ((pr_ctick == 0) &&
6103 			    (pr_status == AifJobStsRunning))
6104 				softs->devcfg_wait_on = AifEnContainerChange;
6105 		}
6106 		break;
6107 	}
6108 
6109 	if (devcfg_needed) {
6110 		softs->devcfg_wait_on = 0;
6111 		(void) aac_probe_containers(softs);
6112 	}
6113 
6114 	/* Modify AIF contexts */
6115 	current = softs->aifq_idx;
6116 	next = (current + 1) % AAC_AIFQ_LENGTH;
6117 	if (next == 0) {
6118 		struct aac_fib_context *ctx;
6119 
6120 		softs->aifq_wrap = 1;
6121 		for (ctx = softs->fibctx; ctx; ctx = ctx->next) {
6122 			if (next == ctx->ctx_idx) {
6123 				ctx->ctx_filled = 1;
6124 			} else if (current == ctx->ctx_idx && ctx->ctx_filled) {
6125 				ctx->ctx_idx = next;
6126 				AACDB_PRINT(softs, CE_NOTE,
6127 				    "-- AIF queue(%x) overrun", ctx->unique);
6128 			}
6129 		}
6130 	}
6131 	softs->aifq_idx = next;
6132 
6133 	/* Wakeup applications */
6134 	cv_broadcast(&softs->aifv);
6135 	return (AACOK);
6136 }
6137 
6138 /*
6139  * Timeout recovery
6140  */
6141 /*ARGSUSED*/
6142 static void
6143 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6144 {
6145 #ifdef DEBUG
6146 	acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6147 	AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6148 	AACDB_PRINT_FIB(softs, acp->slotp);
6149 #endif
6150 
6151 	/*
6152 	 * Besides firmware in an unhealthy state, an overloaded
6153 	 * adapter may also incur pkt timeouts.
6154 	 * There is a chance that an adapter with a slower IOP takes
6155 	 * longer than 60 seconds to process commands, for example
6156 	 * while it is doing a build on a RAID-5 volume and IO is
6157 	 * being issued at the same time, so longer completion times
6158 	 * should be tolerated.
6159 	 */
6160 	switch (aac_do_reset(softs)) {
6161 	case AAC_IOP_RESET_SUCCEED:
6162 		aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6163 		aac_start_waiting_io(softs);
6164 		break;
6165 	case AAC_IOP_RESET_FAILED:
6166 		/* Abort all waiting cmds when adapter is dead */
6167 		aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6168 		break;
6169 	case AAC_IOP_RESET_ABNORMAL:
6170 		aac_start_waiting_io(softs);
6171 	}
6172 }
6173 
6174 /*
6175  * The following function comes from Adaptec:
6176  *
6177  * Time sync. command added to synchronize time with firmware every 30
6178  * minutes (required for correct AIF timestamps etc.)
6179  */
6180 static int
6181 aac_sync_tick(struct aac_softstate *softs)
6182 {
6183 	ddi_acc_handle_t acc = softs->sync_slot.fib_acc_handle;
6184 	struct aac_fib *fibp = softs->sync_slot.fibp;
6185 
6186 	ddi_put32(acc, (void *)&fibp->data[0], ddi_get_time());
6187 	return (aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t)));
6188 }
6189 
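/*
 * Periodic timer: check busy commands for timeout and sync time with
 * the firmware every AAC_SYNC_TICK.
 */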
6190 static void
6191 aac_daemon(void *arg)
6192 {
6193 	struct aac_softstate *softs = (struct aac_softstate *)arg;
6194 	struct aac_cmd *acp;
6195 
6196 	DBCALLED(softs, 2);
6197 
6198 	mutex_enter(&softs->io_lock);
6199 	/* Check slot for timeout pkts */
6200 	aac_timebase += aac_tick;
6201 	for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6202 		if (acp->timeout) {
6203 			if (acp->timeout <= aac_timebase) {
6204 				aac_cmd_timeout(softs, acp);
6205 				ddi_trigger_softintr(softs->softint_id);
6206 			}
6207 			break;
6208 		}
6209 	}
6210 
6211 	/* Time sync. with firmware every AAC_SYNC_TICK */
6212 	if (aac_sync_time <= aac_timebase) {
6213 		aac_sync_time = aac_timebase;
6214 		if (aac_sync_tick(softs) != AACOK)
6215 			aac_sync_time += aac_tick << 1; /* retry shortly */
6216 		else
6217 			aac_sync_time += AAC_SYNC_TICK;
6218 	}
6219 
6220 	if ((softs->state & AAC_STATE_RUN) && (softs->timeout_id != 0))
6221 		softs->timeout_id = timeout(aac_daemon, (void *)softs,
6222 		    (aac_tick * drv_usectohz(1000000)));
6223 	mutex_exit(&softs->io_lock);
6224 }
6225 
6226 /*
6227  * Architecture dependent functions
6228  */
6229 static int
6230 aac_rx_get_fwstatus(struct aac_softstate *softs)
6231 {
6232 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6233 }
6234 
6235 static int
6236 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6237 {
6238 	return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6239 }
6240 
6241 static void
6242 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6243     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6244 {
6245 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6246 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6247 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6248 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6249 	PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6250 }
6251 
6252 static int
6253 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6254 {
6255 	return (PCI_MEM_GET32(softs, AAC_OMR0));
6256 }
6257 
6258 static int
6259 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6260 {
6261 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
6262 }
6263 
6264 static void
6265 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6266     uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6267 {
6268 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6269 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6270 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6271 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6272 	PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6273 }
6274 
6275 /*
6276  * cb_ops functions
6277  */
6278 static int
6279 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6280 {
6281 	struct aac_softstate *softs;
6282 	int minor0, minor;
6283 	int instance;
6284 
6285 	DBCALLED(NULL, 2);
6286 
6287 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6288 		return (EINVAL);
6289 
6290 	minor0 = getminor(*devp);
6291 	minor = AAC_SCSA_MINOR(minor0);
6292 
6293 	if (AAC_IS_SCSA_NODE(minor))
6294 		return (scsi_hba_open(devp, flag, otyp, cred));
6295 
6296 	instance = MINOR2INST(minor0);
6297 	if (instance >= AAC_MAX_ADAPTERS)
6298 		return (ENXIO);
6299 
6300 	softs = ddi_get_soft_state(aac_softstatep, instance);
6301 	if (softs == NULL)
6302 		return (ENXIO);
6303 
6304 	return (0);
6305 }
6306 
6307 /*ARGSUSED*/
6308 static int
6309 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6310 {
6311 	int minor0, minor;
6312 	int instance;
6313 
6314 	DBCALLED(NULL, 2);
6315 
6316 	if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6317 		return (EINVAL);
6318 
6319 	minor0 = getminor(dev);
6320 	minor = AAC_SCSA_MINOR(minor0);
6321 
6322 	if (AAC_IS_SCSA_NODE(minor))
6323 		return (scsi_hba_close(dev, flag, otyp, cred));
6324 
6325 	instance = MINOR2INST(minor0);
6326 	if (instance >= AAC_MAX_ADAPTERS)
6327 		return (ENXIO);
6328 
6329 	return (0);
6330 }
6331 
6332 static int
6333 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6334     int *rval_p)
6335 {
6336 	struct aac_softstate *softs;
6337 	int minor0, minor;
6338 	int instance;
6339 
6340 	DBCALLED(NULL, 2);
6341 
6342 	if (drv_priv(cred_p) != 0)
6343 		return (EPERM);
6344 
6345 	minor0 = getminor(dev);
6346 	minor = AAC_SCSA_MINOR(minor0);
6347 
6348 	if (AAC_IS_SCSA_NODE(minor))
6349 		return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6350 
6351 	instance = MINOR2INST(minor0);
6352 	if (instance < AAC_MAX_ADAPTERS) {
6353 		softs = ddi_get_soft_state(aac_softstatep, instance);
6354 		return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6355 	}
6356 	return (ENXIO);
6357 }
6358 
6359 /*
6360  * The IO fault service error handling callback function
6361  */
6362 /*ARGSUSED*/
6363 static int
6364 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6365 {
6366 	/*
6367 	 * As the driver can always deal with an error in any dma or
6368 	 * access handle, we can just return the fme_status value.
6369 	 */
6370 	pci_ereport_post(dip, err, NULL);
6371 	return (err->fme_status);
6372 }
6373 
6374 /*
6375  * aac_fm_init - initialize fma capabilities and register with IO
6376  *               fault services.
6377  */
6378 static void
6379 aac_fm_init(struct aac_softstate *softs)
6380 {
6381 	/*
6382 	 * Need to change iblock to priority for new MSI intr
6383 	 */
6384 	ddi_iblock_cookie_t fm_ibc;
6385 
6386 	softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6387 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6388 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6389 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6390 
6391 	/* Only register with IO Fault Services if we have some capability */
6392 	if (softs->fm_capabilities) {
6393 		/* Adjust access and dma attributes for FMA */
6394 		softs->acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
6395 		softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6396 		softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6397 
6398 		/*
6399 		 * Register capabilities with IO Fault Services.
6400 		 * fm_capabilities will be updated to indicate
6401 		 * capabilities actually supported (not requested.)
6402 		 */
6403 		ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6404 
6405 		/*
6406 		 * Initialize pci ereport capabilities if ereport
6407 		 * capable (should always be.)
6408 		 */
6409 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6410 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6411 			pci_ereport_setup(softs->devinfo_p);
6412 		}
6413 
6414 		/*
6415 		 * Register error callback if error callback capable.
6416 		 */
6417 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6418 			ddi_fm_handler_register(softs->devinfo_p,
6419 			    aac_fm_error_cb, (void *) softs);
6420 		}
6421 	}
6422 }
6423 
6424 /*
6425  * aac_fm_fini - Releases fma capabilities and un-registers with IO
6426  *               fault services.
6427  */
6428 static void
6429 aac_fm_fini(struct aac_softstate *softs)
6430 {
6431 	/* Only unregister FMA capabilities if registered */
6432 	if (softs->fm_capabilities) {
6433 		/*
6434 		 * Un-register error callback if error callback capable.
6435 		 */
6436 		if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6437 			ddi_fm_handler_unregister(softs->devinfo_p);
6438 		}
6439 
6440 		/*
6441 		 * Release any resources allocated by pci_ereport_setup()
6442 		 */
6443 		if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6444 		    DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6445 			pci_ereport_teardown(softs->devinfo_p);
6446 		}
6447 
6448 		/* Unregister from IO Fault Services */
6449 		ddi_fm_fini(softs->devinfo_p);
6450 
6451 		/* Adjust access and dma attributes for FMA */
6452 		softs->acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
6453 		softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6454 		softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6455 	}
6456 }
6457 
6458 int
6459 aac_check_acc_handle(ddi_acc_handle_t handle)
6460 {
6461 	ddi_fm_error_t de;
6462 
6463 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6464 	return (de.fme_status);
6465 }
6466 
6467 int
6468 aac_check_dma_handle(ddi_dma_handle_t handle)
6469 {
6470 	ddi_fm_error_t de;
6471 
6472 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6473 	return (de.fme_status);
6474 }
6475 
6476 void
6477 aac_fm_ereport(struct aac_softstate *softs, char *detail)
6478 {
6479 	uint64_t ena;
6480 	char buf[FM_MAX_CLASS];
6481 
6482 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6483 	ena = fm_ena_generate(0, FM_ENA_FMT1);
6484 	if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
6485 		ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
6486 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
6487 	}
6488 }
6489 
6490 /*
6491  * Autoconfiguration support
6492  */
6493 static int
6494 aac_parse_devname(char *devnm, int *tgt, int *lun)
6495 {
6496 	char devbuf[SCSI_MAXNAMELEN];
6497 	char *addr;
6498 	char *p,  *tp, *lp;
6499 	long num;
6500 
6501 	/* Parse dev name and address */
6502 	(void) strcpy(devbuf, devnm);
6503 	addr = "";
6504 	for (p = devbuf; *p != '\0'; p++) {
6505 		if (*p == '@') {
6506 			addr = p + 1;
6507 			*p = '\0';
6508 		} else if (*p == ':') {
6509 			*p = '\0';
6510 			break;
6511 		}
6512 	}
6513 
6514 	/* Parse target and lun */
6515 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
6516 		if (*p == ',') {
6517 			lp = p + 1;
6518 			*p = '\0';
6519 			break;
6520 		}
6521 	}
6522 	if (tgt && tp) {
6523 		if (ddi_strtol(tp, NULL, 0x10, &num))
6524 			return (AACERR);
6525 		*tgt = (int)num;
6526 	}
6527 	if (lun && lp) {
6528 		if (ddi_strtol(lp, NULL, 0x10, &num))
6529 			return (AACERR);
6530 		*lun = (int)num;
6531 	}
6532 	return (AACOK);
6533 }
6534 
6535 static dev_info_t *
6536 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
6537 {
6538 	dev_info_t *child = NULL;
6539 	char addr[SCSI_MAXNAMELEN];
6540 	char tmp[MAXNAMELEN];
6541 
6542 	if (tgt < AAC_MAX_LD) {
6543 		if (lun == 0) {
6544 			struct aac_device *dvp = &softs->containers[tgt].dev;
6545 
6546 			child = dvp->dip;
6547 		}
6548 	} else {
6549 		(void) sprintf(addr, "%x,%x", tgt, lun);
6550 		for (child = ddi_get_child(softs->devinfo_p);
6551 		    child; child = ddi_get_next_sibling(child)) {
6552 			/* We don't care about non-persistent nodes */
6553 			if (ndi_dev_is_persistent_node(child) == 0)
6554 				continue;
6555 
6556 			if (aac_name_node(child, tmp, MAXNAMELEN) !=
6557 			    DDI_SUCCESS)
6558 				continue;
6559 			if (strcmp(addr, tmp) == 0)
6560 				break;
6561 		}
6562 	}
6563 	return (child);
6564 }
6565 
6566 static int
6567 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
6568     dev_info_t **dipp)
6569 {
6570 	char *nodename = NULL;
6571 	char **compatible = NULL;
6572 	int ncompatible = 0;
6573 	char *childname;
6574 	dev_info_t *ldip = NULL;
6575 	int tgt = sd->sd_address.a_target;
6576 	int lun = sd->sd_address.a_lun;
6577 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6578 	int rval;
6579 
6580 	DBCALLED(softs, 2);
6581 
6582 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
6583 	    NULL, &nodename, &compatible, &ncompatible);
6584 	if (nodename == NULL) {
6585 		AACDB_PRINT(softs, CE_WARN,
6586 		    "found no compatible driver for t%dL%d", tgt, lun);
6587 		rval = NDI_FAILURE;
6588 		goto finish;
6589 	}
6590 	childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
6591 
6592 	/* Create dev node */
6593 	rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
6594 	    &ldip);
6595 	if (rval == NDI_SUCCESS) {
6596 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
6597 		    != DDI_PROP_SUCCESS) {
6598 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6599 			    "property for t%dL%d (target)", tgt, lun);
6600 			rval = NDI_FAILURE;
6601 			goto finish;
6602 		}
6603 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
6604 		    != DDI_PROP_SUCCESS) {
6605 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6606 			    "property for t%dL%d (lun)", tgt, lun);
6607 			rval = NDI_FAILURE;
6608 			goto finish;
6609 		}
6610 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
6611 		    "compatible", compatible, ncompatible)
6612 		    != DDI_PROP_SUCCESS) {
6613 			AACDB_PRINT(softs, CE_WARN, "unable to create "
6614 			    "property for t%dL%d (compatible)", tgt, lun);
6615 			rval = NDI_FAILURE;
6616 			goto finish;
6617 		}
6618 
6619 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
6620 		if (rval != NDI_SUCCESS) {
6621 			AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
6622 			    tgt, lun);
6623 			ndi_prop_remove_all(ldip);
6624 			(void) ndi_devi_free(ldip);
6625 		}
6626 	}
6627 finish:
6628 	if (dipp)
6629 		*dipp = ldip;
6630 
6631 	scsi_hba_nodename_compatible_free(nodename, compatible);
6632 	return (rval);
6633 }
6634 
6635 /*ARGSUSED*/
6636 static int
6637 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
6638 {
6639 	int tgt = sd->sd_address.a_target;
6640 	int lun = sd->sd_address.a_lun;
6641 
6642 	DBCALLED(softs, 2);
6643 
6644 	if (tgt < AAC_MAX_LD) {
6645 		int rval;
6646 
6647 		if (lun == 0) {
6648 			mutex_enter(&softs->io_lock);
6649 			rval = aac_probe_container(softs, tgt);
6650 			mutex_exit(&softs->io_lock);
6651 			if (rval == AACOK) {
6652 				if (scsi_hba_probe(sd, NULL) ==
6653 				    SCSIPROBE_EXISTS)
6654 					return (NDI_SUCCESS);
6655 			}
6656 		}
6657 		return (NDI_FAILURE);
6658 	} else {
6659 		int dtype;
6660 
6661 		if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
6662 			return (NDI_FAILURE);
6663 
6664 		dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
6665 
6666 		AACDB_PRINT(softs, CE_NOTE,
6667 		    "Phys. device found: tgt %d dtype %d: %s",
6668 		    tgt, dtype, sd->sd_inq->inq_vid);
6669 
6670 		/* Only non-DASD exposed */
6671 		if (dtype != DTYPE_RODIRECT /* CDROM */ &&
6672 		    dtype != DTYPE_SEQUENTIAL /* TAPE */ &&
6673 		    dtype != DTYPE_ESI /* SES */)
6674 			return (NDI_FAILURE);
6675 
6676 		AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
6677 		mutex_enter(&softs->io_lock);
6678 		softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
6679 		mutex_exit(&softs->io_lock);
6680 		return (NDI_SUCCESS);
6681 	}
6682 }
6683 
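/*
 * Probe and configure the devinfo node for a single target/lun
 */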
6684 static int
6685 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
6686     dev_info_t **ldip)
6687 {
6688 	struct scsi_device sd;
6689 	dev_info_t *child;
6690 	int rval;
6691 
6692 	DBCALLED(softs, 2);
6693 
6694 	if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
6695 		if (ldip)
6696 			*ldip = child;
6697 		return (NDI_SUCCESS);
6698 	}
6699 
6700 	bzero(&sd, sizeof (struct scsi_device));
6701 	sd.sd_address.a_hba_tran = softs->hba_tran;
6702 	sd.sd_address.a_target = (uint16_t)tgt;
6703 	sd.sd_address.a_lun = (uint8_t)lun;
6704 	if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
6705 		rval = aac_config_child(softs, &sd, ldip);
6706 	scsi_unprobe(&sd);
6707 	return (rval);
6708 }
6709 
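/*
 * Enumerate the luns of a physical target with SCMD_REPORT_LUNS and
 * configure each one; returns the number of luns configured.
 */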
6710 static int
6711 aac_config_tgt(struct aac_softstate *softs, int tgt)
6712 {
6713 	struct scsi_address ap;
6714 	struct buf *bp = NULL;
6715 	int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
6716 	int list_len = 0;
6717 	int lun_total = 0;
6718 	dev_info_t *ldip;
6719 	int i;
6720 
6721 	ap.a_hba_tran = softs->hba_tran;
6722 	ap.a_target = (uint16_t)tgt;
6723 	ap.a_lun = 0;
6724 
6725 	for (i = 0; i < 2; i++) {
6726 		struct scsi_pkt *pkt;
6727 		uchar_t *cdb;
6728 		uchar_t *p;
6729 		uint32_t data;
6730 
6731 		if (bp == NULL) {
6732 			if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
6733 			    buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
6734 				return (AACERR);
6735 		}
6736 		if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
6737 		    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
6738 		    NULL, NULL)) == NULL) {
6739 			scsi_free_consistent_buf(bp);
6740 			return (AACERR);
6741 		}
6742 		cdb = pkt->pkt_cdbp;
6743 		bzero(cdb, CDB_GROUP5);
6744 		cdb[0] = SCMD_REPORT_LUNS;
6745 
6746 		/* Convert buffer len from local to LE_32 */
6747 		data = buf_len;
6748 		for (p = &cdb[9]; p > &cdb[5]; p--) {
6749 			*p = data & 0xff;
6750 			data >>= 8;
6751 		}
6752 
6753 		if (scsi_poll(pkt) < 0 ||
6754 		    ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
6755 			scsi_destroy_pkt(pkt);
6756 			break;
6757 		}
6758 
6759 		/* Convert list_len from LE_32 to local */
6760 		for (p = (uchar_t *)bp->b_un.b_addr;
6761 		    p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
6762 			data <<= 8;
6763 			data |= *p;
6764 		}
6765 		list_len = data;
6766 		if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
6767 			scsi_free_consistent_buf(bp);
6768 			bp = NULL;
6769 			buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
6770 		}
6771 		scsi_destroy_pkt(pkt);
6772 	}
6773 	if (i >= 2) {
6774 		uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
6775 		    AAC_SCSI_RPTLUNS_HEAD_SIZE);
6776 
6777 		for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
6778 			uint16_t lun;
6779 
6780 			/* Determine report luns addressing type */
6781 			switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
6782 			/*
6783 			 * Vendors in the field have been found to be
6784 			 * concatenating bus/target/lun to equal the
6785 			 * complete lun value instead of switching to
6786 			 * flat space addressing
6787 			 */
6788 			case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
6789 			case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
6790 			case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
6791 				lun = ((buf[0] & 0x3f) << 8) | buf[1];
6792 				if (lun > UINT8_MAX) {
6793 					AACDB_PRINT(softs, CE_WARN,
6794 					    "abnormal lun number: %d", lun);
6795 					break;
6796 				}
6797 				if (aac_config_lun(softs, tgt, lun, &ldip) ==
6798 				    NDI_SUCCESS)
6799 					lun_total++;
6800 				break;
6801 			}
6802 
6803 			buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
6804 		}
6805 	} else {
6806 		/* The target may not support SCMD_REPORT_LUNS. */
6807 		if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
6808 			lun_total++;
6809 	}
6810 	scsi_free_consistent_buf(bp);
6811 	return (lun_total);
6812 }
6813 
6814 static void
6815 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
6816 {
6817 	struct aac_device *dvp;
6818 
6819 	mutex_enter(&softs->io_lock);
6820 	dvp = AAC_DEV(softs, tgt);
6821 	if (en)
6822 		dvp->flags |= AAC_DFLAG_CONFIGURING;
6823 	else
6824 		dvp->flags &= ~AAC_DFLAG_CONFIGURING;
6825 	mutex_exit(&softs->io_lock);
6826 }
6827 
6828 static int
6829 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
6830     void *arg, dev_info_t **childp)
6831 {
6832 	struct aac_softstate *softs;
6833 	int circ = 0;
6834 	int rval;
6835 
6836 	if ((softs = ddi_get_soft_state(aac_softstatep,
6837 	    ddi_get_instance(parent))) == NULL)
6838 		return (NDI_FAILURE);
6839 
6840 	/* Commands for bus config should be blocked as the bus is quiesced */
6841 	mutex_enter(&softs->io_lock);
6842 	if (softs->state & AAC_STATE_QUIESCED) {
6843 		AACDB_PRINT(softs, CE_NOTE,
6844 		    "bus_config aborted because bus is quiesced");
6845 		mutex_exit(&softs->io_lock);
6846 		return (NDI_FAILURE);
6847 	}
6848 	mutex_exit(&softs->io_lock);
6849 
6850 	DBCALLED(softs, 1);
6851 
6852 	/* Hold the nexus across the bus_config */
6853 	ndi_devi_enter(parent, &circ);
6854 	switch (op) {
6855 	case BUS_CONFIG_ONE: {
6856 		int tgt, lun;
6857 
6858 		if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
6859 			rval = NDI_FAILURE;
6860 			break;
6861 		}
6862 
6863 		AAC_DEVCFG_BEGIN(softs, tgt);
6864 		rval = aac_config_lun(softs, tgt, lun, childp);
6865 		AAC_DEVCFG_END(softs, tgt);
6866 		break;
6867 	}
6868 
6869 	case BUS_CONFIG_DRIVER:
6870 	case BUS_CONFIG_ALL: {
6871 		uint32_t bus, tgt;
6872 		int index, total;
6873 
6874 		for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
6875 			AAC_DEVCFG_BEGIN(softs, tgt);
6876 			(void) aac_config_lun(softs, tgt, 0, NULL);
6877 			AAC_DEVCFG_END(softs, tgt);
6878 		}
6879 
6880 		/* Config the non-DASD devices connected to the card */
6881 		total = 0;
6882 		index = AAC_MAX_LD;
6883 		for (bus = 0; bus < softs->bus_max; bus++) {
6884 			AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
6885 			for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
6886 				AAC_DEVCFG_BEGIN(softs, index);
6887 				if (aac_config_tgt(softs, index))
6888 					total++;
6889 				AAC_DEVCFG_END(softs, index);
6890 			}
6891 		}
6892 		AACDB_PRINT(softs, CE_CONT,
6893 		    "?Total %d phys. device(s) found", total);
6894 		rval = NDI_SUCCESS;
6895 		break;
6896 	}

	default:
		rval = NDI_FAILURE;
		break;
6897 	}
6898 
6899 	if (rval == NDI_SUCCESS)
6900 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
6901 	ndi_devi_exit(parent, circ);
6902 	return (rval);
6903 }
6904 
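/*
 * Taskq callback that services a dynamic reconfiguration event queued
 * by aac_dr_event(): a device that has become valid but has no devinfo
 * node yet is configured (onlined), while a device that is no longer
 * valid is offlined and its devinfo node removed after an adapter
 * reset.
 */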
6905 static void
6906 aac_handle_dr(struct aac_drinfo *drp)
6907 {
6908 	struct aac_softstate *softs = drp->softs;
6909 	struct aac_device *dvp;
6910 	dev_info_t *dip;
6911 	int valid;
6912 	int circ1 = 0;
6913 
6914 	DBCALLED(softs, 1);
6915 
6916 	/* Snapshot the device validity and devinfo node under io_lock */
6917 	mutex_enter(&softs->io_lock);
6918 	dvp = AAC_DEV(softs, drp->tgt);
6919 	valid = AAC_DEV_IS_VALID(dvp);
6920 	dip = dvp->dip;
6921 	mutex_exit(&softs->io_lock);
6922 
6923 	switch (drp->event) {
6924 	case AAC_EVT_ONLINE:
6925 	case AAC_EVT_OFFLINE:
6926 		/* Device onlined */
6927 		if (dip == NULL && valid) {
6928 			ndi_devi_enter(softs->devinfo_p, &circ1);
6929 			(void) aac_config_lun(softs, drp->tgt, 0, NULL);
6930 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
6931 			    softs->instance, drp->tgt, drp->lun);
6932 			ndi_devi_exit(softs->devinfo_p, circ1);
6933 		}
6934 		/* Device offlined */
6935 		if (dip && !valid) {
6936 			mutex_enter(&softs->io_lock);
6937 			(void) aac_do_reset(softs);
6938 			mutex_exit(&softs->io_lock);
6939 
6940 			(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
6941 			AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
6942 			    softs->instance, drp->tgt, drp->lun);
6943 		}
6944 		break;
6945 	}
6946 	kmem_free(drp, sizeof (struct aac_drinfo));
6947 }
6948 
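/*
 * Queue a dynamic reconfiguration (hotplug) event for aac_handle_dr()
 * to process on the driver taskq; returns AACERR if no taskq exists or
 * the allocation/dispatch fails.
 */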
6949 static int
6950 aac_dr_event(struct aac_softstate *softs, int tgt, int lun, int event)
6951 {
6952 	struct aac_drinfo *drp;
6953 
6954 	DBCALLED(softs, 1);
6955 
6956 	if (softs->taskq == NULL ||
6957 	    (drp = kmem_zalloc(sizeof (struct aac_drinfo), KM_NOSLEEP)) == NULL)
6958 		return (AACERR);
6959 
6960 	drp->softs = softs;
6961 	drp->tgt = tgt;
6962 	drp->lun = lun;
6963 	drp->event = event;
6964 	if ((ddi_taskq_dispatch(softs->taskq, (void (*)(void *))aac_handle_dr,
6965 	    drp, DDI_NOSLEEP)) != DDI_SUCCESS) {
6966 		AACDB_PRINT(softs, CE_WARN, "DR task start failed");
6967 		kmem_free(drp, sizeof (struct aac_drinfo));
6968 		return (AACERR);
6969 	}
6970 	return (AACOK);
6971 }
6972 
6973 #ifdef DEBUG
6974 
6975 /* -------------------------debug aid functions-------------------------- */
6976 
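/*
 * Key/string tables used by aac_cmd_name() to translate FIB command
 * and sub-command codes into printable names for debug output; each
 * table is terminated by a -1 key.
 */
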
6977 #define	AAC_FIB_CMD_KEY_STRINGS \
6978 	TestCommandResponse, "TestCommandResponse", \
6979 	TestAdapterCommand, "TestAdapterCommand", \
6980 	LastTestCommand, "LastTestCommand", \
6981 	ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
6982 	ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
6983 	ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
6984 	ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
6985 	ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
6986 	ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
6987 	ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
6988 	ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
6989 	InterfaceShutdown, "InterfaceShutdown", \
6990 	DmaCommandFib, "DmaCommandFib", \
6991 	StartProfile, "StartProfile", \
6992 	TermProfile, "TermProfile", \
6993 	SpeedTest, "SpeedTest", \
6994 	TakeABreakPt, "TakeABreakPt", \
6995 	RequestPerfData, "RequestPerfData", \
6996 	SetInterruptDefTimer, "SetInterruptDefTimer", \
6997 	SetInterruptDefCount, "SetInterruptDefCount", \
6998 	GetInterruptDefStatus, "GetInterruptDefStatus", \
6999 	LastCommCommand, "LastCommCommand", \
7000 	NuFileSystem, "NuFileSystem", \
7001 	UFS, "UFS", \
7002 	HostFileSystem, "HostFileSystem", \
7003 	LastFileSystemCommand, "LastFileSystemCommand", \
7004 	ContainerCommand, "ContainerCommand", \
7005 	ContainerCommand64, "ContainerCommand64", \
7006 	ClusterCommand, "ClusterCommand", \
7007 	ScsiPortCommand, "ScsiPortCommand", \
7008 	ScsiPortCommandU64, "ScsiPortCommandU64", \
7009 	AifRequest, "AifRequest", \
7010 	CheckRevision, "CheckRevision", \
7011 	FsaHostShutdown, "FsaHostShutdown", \
7012 	RequestAdapterInfo, "RequestAdapterInfo", \
7013 	IsAdapterPaused, "IsAdapterPaused", \
7014 	SendHostTime, "SendHostTime", \
7015 	LastMiscCommand, "LastMiscCommand"
7016 
7017 #define	AAC_CTVM_SUBCMD_KEY_STRINGS \
7018 	VM_Null, "VM_Null", \
7019 	VM_NameServe, "VM_NameServe", \
7020 	VM_ContainerConfig, "VM_ContainerConfig", \
7021 	VM_Ioctl, "VM_Ioctl", \
7022 	VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7023 	VM_CloseAll, "VM_CloseAll", \
7024 	VM_CtBlockRead, "VM_CtBlockRead", \
7025 	VM_CtBlockWrite, "VM_CtBlockWrite", \
7026 	VM_SliceBlockRead, "VM_SliceBlockRead", \
7027 	VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7028 	VM_DriveBlockRead, "VM_DriveBlockRead", \
7029 	VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7030 	VM_EnclosureMgt, "VM_EnclosureMgt", \
7031 	VM_Unused, "VM_Unused", \
7032 	VM_CtBlockVerify, "VM_CtBlockVerify", \
7033 	VM_CtPerf, "VM_CtPerf", \
7034 	VM_CtBlockRead64, "VM_CtBlockRead64", \
7035 	VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7036 	VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7037 	VM_CtHostRead64, "VM_CtHostRead64", \
7038 	VM_CtHostWrite64, "VM_CtHostWrite64", \
7039 	VM_NameServe64, "VM_NameServe64"
7040 
7041 #define	AAC_CT_SUBCMD_KEY_STRINGS \
7042 	CT_Null, "CT_Null", \
7043 	CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7044 	CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7045 	CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7046 	CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7047 	CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7048 	CT_WRITE_MBR, "CT_WRITE_MBR", \
7049 	CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7050 	CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7051 	CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7052 	CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7053 	CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7054 	CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7055 	CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7056 	CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7057 	CT_READ_MBR, "CT_READ_MBR", \
7058 	CT_READ_PARTITION, "CT_READ_PARTITION", \
7059 	CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7060 	CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7061 	CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7062 	CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7063 	CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7064 	CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7065 	CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7066 	CT_UNMIRROR, "CT_UNMIRROR", \
7067 	CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7068 	CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7069 	CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7070 	CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7071 	CT_MOVE2, "CT_MOVE2", \
7072 	CT_SPLIT, "CT_SPLIT", \
7073 	CT_SPLIT2, "CT_SPLIT2", \
7074 	CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7075 	CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7076 	CT_RECONFIG, "CT_RECONFIG", \
7077 	CT_BREAK2, "CT_BREAK2", \
7078 	CT_BREAK, "CT_BREAK", \
7079 	CT_MERGE2, "CT_MERGE2", \
7080 	CT_MERGE, "CT_MERGE", \
7081 	CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7082 	CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7083 	CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7084 	CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7085 	CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7086 	CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7087 	CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7088 	CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7089 	CT_COPY_STATUS, "CT_COPY_STATUS", \
7090 	CT_COPY, "CT_COPY", \
7091 	CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7092 	CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7093 	CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7094 	CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7095 	CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7096 	CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7097 	CT_SET, "CT_SET", \
7098 	CT_GET, "CT_GET", \
7099 	CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7100 	CT_GET_DELAY, "CT_GET_DELAY", \
7101 	CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7102 	CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7103 	CT_SCRUB, "CT_SCRUB", \
7104 	CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7105 	CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7106 	CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7107 	CT_PAUSE_IO, "CT_PAUSE_IO", \
7108 	CT_RELEASE_IO, "CT_RELEASE_IO", \
7109 	CT_SCRUB2, "CT_SCRUB2", \
7110 	CT_MCHECK, "CT_MCHECK", \
7111 	CT_CORRUPT, "CT_CORRUPT", \
7112 	CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7113 	CT_PROMOTE, "CT_PROMOTE", \
7114 	CT_SET_DEAD, "CT_SET_DEAD", \
7115 	CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7116 	CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7117 	CT_GET_PARAM, "CT_GET_PARAM", \
7118 	CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7119 	CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7120 	CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7121 	CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7122 	CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7123 	CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7124 	CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7125 	CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7126 	CT_STOP_DATA, "CT_STOP_DATA", \
7127 	CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7128 	CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7129 	CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7130 	CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7131 	CT_GET_TIME, "CT_GET_TIME", \
7132 	CT_READ_DATA, "CT_READ_DATA", \
7133 	CT_CTR, "CT_CTR", \
7134 	CT_CTL, "CT_CTL", \
7135 	CT_DRAINIO, "CT_DRAINIO", \
7136 	CT_RELEASEIO, "CT_RELEASEIO", \
7137 	CT_GET_NVRAM, "CT_GET_NVRAM", \
7138 	CT_GET_MEMORY, "CT_GET_MEMORY", \
7139 	CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7140 	CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7141 	CT_NV_ZERO, "CT_NV_ZERO", \
7142 	CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7143 	CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7144 	CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7145 	CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7146 	CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7147 	CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7148 	CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7149 	CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7150 	CT_MONITOR, "CT_MONITOR", \
7151 	CT_GEN_MORPH, "CT_GEN_MORPH", \
7152 	CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7153 	CT_CACHE_SET, "CT_CACHE_SET", \
7154 	CT_CACHE_STAT, "CT_CACHE_STAT", \
7155 	CT_TRACE_START, "CT_TRACE_START", \
7156 	CT_TRACE_STOP, "CT_TRACE_STOP", \
7157 	CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7158 	CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7159 	CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7160 	CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7161 	CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7162 	CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7163 	CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7164 	CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7165 	CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7166 	CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7167 	CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7168 	CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7169 	CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7170 	CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7171 	CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7172 	CT_READ_NAME, "CT_READ_NAME", \
7173 	CT_WRITE_NAME, "CT_WRITE_NAME", \
7174 	CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7175 	CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7176 	CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7177 	CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7178 	CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7179 	CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7180 	CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7181 	CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7182 	CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7183 	CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7184 	CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7185 	CT_FLUSH, "CT_FLUSH", \
7186 	CT_REBUILD, "CT_REBUILD", \
7187 	CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7188 	CT_RESTART, "CT_RESTART", \
7189 	CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7190 	CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7191 	CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7192 	CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7193 	CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7194 	CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7195 	CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7196 	CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7197 	CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7198 	CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7199 	CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7200 	CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7201 	CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7202 	CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7203 	CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7204 	CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7205 	CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7206 	CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7207 	CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7208 	CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7209 	CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7210 	CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7211 	CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7212 	CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7213 	CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7214 	CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7215 	CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7216 	CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7217 	CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7218 	CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7219 	CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7220 	CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7221 	CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7222 	CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7223 	CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7224 	CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7225 	CT_IS_CONTAINER_MEATADATA_STANDARD, \
7226 	    "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7227 	CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7228 	CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7229 	CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7230 	CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7231 	CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7232 	CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7233 	CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7234 	CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7235 	CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7236 	CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7237 	CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7238 	CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7239 	CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7240 	CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7241 	CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7242 	CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7243 	CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7244 	CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7245 	CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7246 
7247 #define	AAC_CL_SUBCMD_KEY_STRINGS \
7248 	CL_NULL, "CL_NULL", \
7249 	DS_INIT, "DS_INIT", \
7250 	DS_RESCAN, "DS_RESCAN", \
7251 	DS_CREATE, "DS_CREATE", \
7252 	DS_DELETE, "DS_DELETE", \
7253 	DS_ADD_DISK, "DS_ADD_DISK", \
7254 	DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7255 	DS_MOVE_DISK, "DS_MOVE_DISK", \
7256 	DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7257 	DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7258 	DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7259 	DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7260 	DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7261 	DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7262 	DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7263 	DS_GET_DRIVES, "DS_GET_DRIVES", \
7264 	DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7265 	DS_ONLINE, "DS_ONLINE", \
7266 	DS_OFFLINE, "DS_OFFLINE", \
7267 	DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7268 	DS_FSAPRINT, "DS_FSAPRINT", \
7269 	CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7270 	CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7271 	CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7272 	CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7273 	CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7274 	CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7275 	CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7276 	CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7277 	CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7278 	CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7279 	CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7280 	CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7281 	CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7282 	CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7283 	CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7284 	CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7285 	CQ_QUORUM_OP, "CQ_QUORUM_OP"
7286 
7287 #define	AAC_AIF_SUBCMD_KEY_STRINGS \
7288 	AifCmdEventNotify, "AifCmdEventNotify", \
7289 	AifCmdJobProgress, "AifCmdJobProgress", \
7290 	AifCmdAPIReport, "AifCmdAPIReport", \
7291 	AifCmdDriverNotify, "AifCmdDriverNotify", \
7292 	AifReqJobList, "AifReqJobList", \
7293 	AifReqJobsForCtr, "AifReqJobsForCtr", \
7294 	AifReqJobsForScsi, "AifReqJobsForScsi", \
7295 	AifReqJobReport, "AifReqJobReport", \
7296 	AifReqTerminateJob, "AifReqTerminateJob", \
7297 	AifReqSuspendJob, "AifReqSuspendJob", \
7298 	AifReqResumeJob, "AifReqResumeJob", \
7299 	AifReqSendAPIReport, "AifReqSendAPIReport", \
7300 	AifReqAPIJobStart, "AifReqAPIJobStart", \
7301 	AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7302 	AifReqAPIJobFinish, "AifReqAPIJobFinish"
7303 
7304 #define	AAC_IOCTL_SUBCMD_KEY_STRINGS \
7305 	Reserved_IOCTL, "Reserved_IOCTL", \
7306 	GetDeviceHandle, "GetDeviceHandle", \
7307 	BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7308 	DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7309 	RescanBus, "RescanBus", \
7310 	GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7311 	GetDeviceCapacity, "GetDeviceCapacity", \
7312 	GetContainerProbeInfo, "GetContainerProbeInfo", \
7313 	GetRequestedMemorySize, "GetRequestedMemorySize", \
7314 	GetBusInfo, "GetBusInfo", \
7315 	GetVendorSpecific, "GetVendorSpecific", \
7316 	EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7317 	EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7318 	SetupExtendedCounters, "SetupExtendedCounters", \
7319 	GetPerformanceCounters, "GetPerformanceCounters", \
7320 	ResetPerformanceCounters, "ResetPerformanceCounters", \
7321 	ReadModePage, "ReadModePage", \
7322 	WriteModePage, "WriteModePage", \
7323 	ReadDriveParameter, "ReadDriveParameter", \
7324 	WriteDriveParameter, "WriteDriveParameter", \
7325 	ResetAdapter, "ResetAdapter", \
7326 	ResetBus, "ResetBus", \
7327 	ResetBusDevice, "ResetBusDevice", \
7328 	ExecuteSrb, "ExecuteSrb", \
7329 	Create_IO_Task, "Create_IO_Task", \
7330 	Delete_IO_Task, "Delete_IO_Task", \
7331 	Get_IO_Task_Info, "Get_IO_Task_Info", \
7332 	Check_Task_Progress, "Check_Task_Progress", \
7333 	InjectError, "InjectError", \
7334 	GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7335 	GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7336 	GetDeviceStatus, "GetDeviceStatus", \
7337 	ClearDeviceStatus, "ClearDeviceStatus", \
7338 	DiskSpinControl, "DiskSpinControl", \
7339 	DiskSmartControl, "DiskSmartControl", \
7340 	WriteSame, "WriteSame", \
7341 	ReadWriteLong, "ReadWriteLong", \
7342 	FormatUnit, "FormatUnit", \
7343 	TargetDeviceControl, "TargetDeviceControl", \
7344 	TargetChannelControl, "TargetChannelControl", \
7345 	FlashNewCode, "FlashNewCode", \
7346 	DiskCheck, "DiskCheck", \
7347 	RequestSense, "RequestSense", \
7348 	DiskPERControl, "DiskPERControl", \
7349 	Read10, "Read10", \
7350 	Write10, "Write10"
7351 
7352 #define	AAC_AIFEN_KEY_STRINGS \
7353 	AifEnGeneric, "Generic", \
7354 	AifEnTaskComplete, "TaskComplete", \
7355 	AifEnConfigChange, "Config change", \
7356 	AifEnContainerChange, "Container change", \
7357 	AifEnDeviceFailure, "device failed", \
7358 	AifEnMirrorFailover, "Mirror failover", \
7359 	AifEnContainerEvent, "container event", \
7360 	AifEnFileSystemChange, "File system changed", \
7361 	AifEnConfigPause, "Container pause event", \
7362 	AifEnConfigResume, "Container resume event", \
7363 	AifEnFailoverChange, "Failover space assignment changed", \
7364 	AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7365 	AifEnEnclosureManagement, "Enclosure management event", \
7366 	AifEnBatteryEvent, "battery event", \
7367 	AifEnAddContainer, "Add container", \
7368 	AifEnDeleteContainer, "Delete container", \
7369 	AifEnSMARTEvent, "SMART Event", \
7370 	AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7371 	AifEnClusterEvent, "cluster event", \
7372 	AifEnDiskSetEvent, "disk set event occurred", \
7373 	AifDenMorphComplete, "morph operation completed", \
7374 	AifDenVolumeExtendComplete, "VolumeExtendComplete"
7375 
7376 struct aac_key_strings {
7377 	int key;
7378 	char *message;
7379 };
7380 
7381 extern struct scsi_key_strings scsi_cmds[];
7382 
7383 static struct aac_key_strings aac_fib_cmds[] = {
7384 	AAC_FIB_CMD_KEY_STRINGS,
7385 	-1,			NULL
7386 };
7387 
7388 static struct aac_key_strings aac_ctvm_subcmds[] = {
7389 	AAC_CTVM_SUBCMD_KEY_STRINGS,
7390 	-1,			NULL
7391 };
7392 
7393 static struct aac_key_strings aac_ct_subcmds[] = {
7394 	AAC_CT_SUBCMD_KEY_STRINGS,
7395 	-1,			NULL
7396 };
7397 
7398 static struct aac_key_strings aac_cl_subcmds[] = {
7399 	AAC_CL_SUBCMD_KEY_STRINGS,
7400 	-1,			NULL
7401 };
7402 
7403 static struct aac_key_strings aac_aif_subcmds[] = {
7404 	AAC_AIF_SUBCMD_KEY_STRINGS,
7405 	-1,			NULL
7406 };
7407 
7408 static struct aac_key_strings aac_ioctl_subcmds[] = {
7409 	AAC_IOCTL_SUBCMD_KEY_STRINGS,
7410 	-1,			NULL
7411 };
7412 
7413 static struct aac_key_strings aac_aifens[] = {
7414 	AAC_AIFEN_KEY_STRINGS,
7415 	-1,			NULL
7416 };
7417 
7418 /*
7419  * The following function comes from Adaptec:
7420  *
7421  * Get the firmware print buffer parameters from the firmware;
7422  * if the command was successful, map in the address.
7423  */
7424 static int
7425 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7426 {
7427 	if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7428 	    0, 0, 0, 0, NULL) == AACOK) {
7429 		uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7430 		uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7431 		uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7432 		uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7433 
7434 		if (mondrv_buf_size) {
7435 			uint32_t offset = mondrv_buf_paddrl - \
7436 			    softs->pci_mem_base_paddr;
7437 
7438 			/*
7439 			 * See if the address is already mapped in, and
7440 			 * if so set it up from the base address
7441 			 */
7442 			if ((mondrv_buf_paddrh == 0) &&
7443 			    (offset + mondrv_buf_size < softs->map_size)) {
7444 				mutex_enter(&aac_prt_mutex);
7445 				softs->debug_buf_offset = offset;
7446 				softs->debug_header_size = mondrv_hdr_size;
7447 				softs->debug_buf_size = mondrv_buf_size;
7448 				softs->debug_fw_flags = 0;
7449 				softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7450 				mutex_exit(&aac_prt_mutex);
7451 
7452 				return (AACOK);
7453 			}
7454 		}
7455 	}
7456 	return (AACERR);
7457 }
7458 
7459 int
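/*
 * Return non-zero when debug printing (firmware or kernel) is enabled
 * and the given debug flag is set; uses the per-instance flags when a
 * soft state is supplied, otherwise the global aac_debug_flags.
 */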
7460 aac_dbflag_on(struct aac_softstate *softs, int flag)
7461 {
7462 	int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
7463 
7464 	return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
7465 	    AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
7466 }
7467 
7468 static void
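/*
 * Emit the formatted message in aac_prt_buf through cmn_err(9F),
 * optionally prefixed with a vendor/instance header and with the saved
 * '^'/'!'/'?' routing character restored in front of the format.
 */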
7469 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
7470 {
7471 	if (noheader) {
7472 		if (sl) {
7473 			aac_fmt[0] = sl;
7474 			cmn_err(lev, aac_fmt, aac_prt_buf);
7475 		} else {
7476 			cmn_err(lev, &aac_fmt[1], aac_prt_buf);
7477 		}
7478 	} else {
7479 		if (sl) {
7480 			aac_fmt_header[0] = sl;
7481 			cmn_err(lev, aac_fmt_header,
7482 			    softs->vendor_name, softs->instance,
7483 			    aac_prt_buf);
7484 		} else {
7485 			cmn_err(lev, &aac_fmt_header[1],
7486 			    softs->vendor_name, softs->instance,
7487 			    aac_prt_buf);
7488 		}
7489 	}
7490 }
7491 
7492 /*
7493  * The following function comes from Adaptec:
7494  *
7495  * Format the data passed in and print it to the UART or the console,
7496  * as specified by the debug flags.
7497  */
7498 void
7499 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
7500 {
7501 	va_list args;
7502 	char sl; /* system log character */
7503 
7504 	mutex_enter(&aac_prt_mutex);
7505 	/* Set up parameters and call sprintf function to format the data */
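	/*
	 * A leading '^', '!' or '?' is a cmn_err(9F) output-routing
	 * prefix; remember and strip it here so that aac_cmn_err() can
	 * re-apply it when the message is finally printed.
	 */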
7506 	if (strchr("^!?", fmt[0]) == NULL) {
7507 		sl = 0;
7508 	} else {
7509 		sl = fmt[0];
7510 		fmt++;
7511 	}
7512 	va_start(args, fmt);
7513 	(void) vsprintf(aac_prt_buf, fmt, args);
7514 	va_end(args);
7515 
7516 	/* Make sure the softs structure has been passed in for this section */
7517 	if (softs) {
7518 		if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
7519 		    /* If we are set up for a Firmware print */
7520 		    (softs->debug_buf_size)) {
7521 			uint32_t count, i;
7522 
7523 			/* Make sure the string size is within boundaries */
7524 			count = strlen(aac_prt_buf);
7525 			if (count > softs->debug_buf_size)
7526 				count = (uint16_t)softs->debug_buf_size;
7527 
7528 			/*
7529 			 * Wait for no more than AAC_PRINT_TIMEOUT for the
7530 			 * previous message length to clear (the handshake).
7531 			 */
7532 			for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
7533 				if (!PCI_MEM_GET32(softs,
7534 				    softs->debug_buf_offset + \
7535 				    AAC_FW_DBG_STRLEN_OFFSET))
7536 					break;
7537 
7538 				drv_usecwait(1000);
7539 			}
7540 
7541 			/*
7542 			 * If the length is clear, copy over the message, the
7543 			 * flags, and the length. Make sure the length is the
7544 			 * last because that is the signal for the Firmware to
7545 			 * pick it up.
7546 			 */
7547 			if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
7548 			    AAC_FW_DBG_STRLEN_OFFSET)) {
7549 				PCI_MEM_REP_PUT8(softs,
7550 				    softs->debug_buf_offset + \
7551 				    softs->debug_header_size,
7552 				    aac_prt_buf, count);
7553 				PCI_MEM_PUT32(softs,
7554 				    softs->debug_buf_offset + \
7555 				    AAC_FW_DBG_FLAGS_OFFSET,
7556 				    softs->debug_fw_flags);
7557 				PCI_MEM_PUT32(softs,
7558 				    softs->debug_buf_offset + \
7559 				    AAC_FW_DBG_STRLEN_OFFSET, count);
7560 			} else {
7561 				cmn_err(CE_WARN, "UART output fail");
7562 				cmn_err(CE_WARN, "UART output failed");
7563 			}
7564 		}
7565 
7566 		/*
7567 		 * If the kernel print flag is set, send the message to the
7568 		 * console/system log through cmn_err()
7569 		 */
7570 		if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7571 			aac_cmn_err(softs, lev, sl,
7572 			    (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
7573 	} else {
7574 		/* Driver not initialized yet, no firmware or header output */
7575 		if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
7576 			aac_cmn_err(softs, lev, sl, 1);
7577 	}
7578 	mutex_exit(&aac_prt_mutex);
7579 }
7580 
7581 /*
7582  * Translate command number to description string
7583  */
7584 static char *
7585 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
7586 {
7587 	int i;
7588 
7589 	for (i = 0; cmdlist[i].key != -1; i++) {
7590 		if (cmd == cmdlist[i].key)
7591 			return (cmdlist[i].message);
7592 	}
7593 	return (NULL);
7594 }
7595 
7596 static void
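/*
 * Log a one-line summary of a SCSI command: opcode name, LBA/count for
 * reads and writes, poll/intr mode and the c/t/L address ("(pd)" marks
 * a physical pass-through device).
 */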
7597 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
7598 {
7599 	struct scsi_pkt *pkt = acp->pkt;
7600 	struct scsi_address *ap = &pkt->pkt_address;
7601 	int is_pd = 0;
7602 	int ctl = ddi_get_instance(softs->devinfo_p);
7603 	int tgt = ap->a_target;
7604 	int lun = ap->a_lun;
7605 	union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
7606 	uchar_t cmd = cdbp->scc_cmd;
7607 	char *desc;
7608 
7609 	if (tgt >= AAC_MAX_LD) {
7610 		is_pd = 1;
7611 		ctl = ((struct aac_nondasd *)acp->dvp)->bus;
7612 		tgt = ((struct aac_nondasd *)acp->dvp)->tid;
7613 		lun = 0;
7614 	}
7615 
7616 	if ((desc = aac_cmd_name(cmd,
7617 	    (struct aac_key_strings *)scsi_cmds)) == NULL) {
7618 		aac_printf(softs, CE_NOTE,
7619 		    "SCMD> Unknown(0x%02x) --> c%dt%dL%d %s",
7620 		    cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
7621 		return;
7622 	}
7623 
7624 	switch (cmd) {
7625 	case SCMD_READ:
7626 	case SCMD_WRITE:
7627 		aac_printf(softs, CE_NOTE,
7628 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7629 		    desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
7630 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7631 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7632 		break;
7633 	case SCMD_READ_G1:
7634 	case SCMD_WRITE_G1:
7635 		aac_printf(softs, CE_NOTE,
7636 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7637 		    desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
7638 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7639 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7640 		break;
7641 	case SCMD_READ_G4:
7642 	case SCMD_WRITE_G4:
7643 		aac_printf(softs, CE_NOTE,
7644 		    "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
7645 		    desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
7646 		    GETG4COUNT(cdbp),
7647 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7648 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7649 		break;
7650 	case SCMD_READ_G5:
7651 	case SCMD_WRITE_G5:
7652 		aac_printf(softs, CE_NOTE,
7653 		    "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
7654 		    desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
7655 		    (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
7656 		    ctl, tgt, lun, is_pd ? "(pd)" : "");
7657 		break;
7658 	default:
7659 		aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
7660 		    desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
7661 	}
7662 }
7663 
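/*
 * Decode and log an outgoing FIB for debugging: optionally dump the
 * FIB header, then translate the command/sub-command codes to names
 * and print the interesting payload fields (container, block number,
 * byte count, scatter/gather list).
 */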
7664 void
7665 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
7666 {
7667 	struct aac_cmd *acp = slotp->acp;
7668 	struct aac_fib *fibp = slotp->fibp;
7669 	ddi_acc_handle_t acc = slotp->fib_acc_handle;
7670 	uint16_t fib_size;
7671 	uint32_t fib_cmd, sub_cmd;
7672 	char *cmdstr, *subcmdstr;
7673 	char *caller;
7674 	int i;
7675 
7676 	if (acp) {
7677 		if (!(softs->debug_fib_flags & acp->fib_flags))
7678 			return;
7679 		if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
7680 			caller = "SCMD";
7681 		else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
7682 			caller = "IOCTL";
7683 		else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
7684 			caller = "SRB";
7685 		else
7686 			return;
7687 	} else {
7688 		if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
7689 			return;
7690 		caller = "SYNC";
7691 	}
7692 
7693 	fib_cmd = ddi_get16(acc, &fibp->Header.Command);
7694 	cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
7695 	sub_cmd = (uint32_t)-1;
7696 	subcmdstr = NULL;
7697 
7698 	/* Print FIB header */
7699 	if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
7700 		aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
7701 		aac_printf(softs, CE_NOTE, "     XferState  %d",
7702 		    ddi_get32(acc, &fibp->Header.XferState));
7703 		aac_printf(softs, CE_NOTE, "     Command    %d",
7704 		    ddi_get16(acc, &fibp->Header.Command));
7705 		aac_printf(softs, CE_NOTE, "     StructType %d",
7706 		    ddi_get8(acc, &fibp->Header.StructType));
7707 		aac_printf(softs, CE_NOTE, "     Flags      0x%x",
7708 		    ddi_get8(acc, &fibp->Header.Flags));
7709 		aac_printf(softs, CE_NOTE, "     Size       %d",
7710 		    ddi_get16(acc, &fibp->Header.Size));
7711 		aac_printf(softs, CE_NOTE, "     SenderSize %d",
7712 		    ddi_get16(acc, &fibp->Header.SenderSize));
7713 		aac_printf(softs, CE_NOTE, "     SenderAddr 0x%x",
7714 		    ddi_get32(acc, &fibp->Header.SenderFibAddress));
7715 		aac_printf(softs, CE_NOTE, "     RcvrAddr   0x%x",
7716 		    ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
7717 		aac_printf(softs, CE_NOTE, "     SenderData 0x%x",
7718 		    ddi_get32(acc, &fibp->Header.SenderData));
7719 	}
7720 
7721 	/* Print FIB data */
7722 	switch (fib_cmd) {
7723 	case ContainerCommand:
7724 		sub_cmd = ddi_get32(acc,
7725 		    (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
7726 		subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
7727 		if (subcmdstr == NULL)
7728 			break;
7729 
7730 		switch (sub_cmd) {
7731 		case VM_ContainerConfig: {
7732 			struct aac_Container *pContainer =
7733 			    (struct aac_Container *)fibp->data;
7734 
7735 			fib_cmd = sub_cmd;
7736 			cmdstr = subcmdstr;
7737 			sub_cmd = (uint32_t)-1;
7738 			subcmdstr = NULL;
7739 
7740 			sub_cmd = ddi_get32(acc,
7741 			    &pContainer->CTCommand.command);
7742 			subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
7743 			if (subcmdstr == NULL)
7744 				break;
7745 			aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
7746 			    subcmdstr,
7747 			    ddi_get32(acc, &pContainer->CTCommand.param[0]),
7748 			    ddi_get32(acc, &pContainer->CTCommand.param[1]),
7749 			    ddi_get32(acc, &pContainer->CTCommand.param[2]));
7750 			return;
7751 		}
7752 
7753 		case VM_Ioctl:
7754 			fib_cmd = sub_cmd;
7755 			cmdstr = subcmdstr;
7756 			sub_cmd = (uint32_t)-1;
7757 			subcmdstr = NULL;
7758 
7759 			sub_cmd = ddi_get32(acc,
7760 			    (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
7761 			subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
7762 			break;
7763 
7764 		case VM_CtBlockRead:
7765 		case VM_CtBlockWrite: {
7766 			struct aac_blockread *br =
7767 			    (struct aac_blockread *)fibp->data;
7768 			struct aac_sg_table *sg = &br->SgMap;
7769 			uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7770 
7771 			aac_printf(softs, CE_NOTE,
7772 			    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7773 			    ddi_get32(acc, &br->ContainerId),
7774 			    ddi_get32(acc, &br->BlockNumber),
7775 			    ddi_get32(acc, &br->ByteCount));
7776 			for (i = 0; i < sgcount; i++)
7777 				aac_printf(softs, CE_NOTE,
7778 				    "     %d: 0x%08x/%d", i,
7779 				    ddi_get32(acc, &sg->SgEntry[i].SgAddress),
7780 				    ddi_get32(acc, &sg->SgEntry[i]. \
7781 				    SgByteCount));
7782 			return;
7783 		}
7784 		}
7785 		break;
7786 
7787 	case ContainerCommand64: {
7788 		struct aac_blockread64 *br =
7789 		    (struct aac_blockread64 *)fibp->data;
7790 		struct aac_sg_table64 *sg = &br->SgMap64;
7791 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7792 		uint64_t sgaddr;
7793 
7794 		sub_cmd = br->Command;
7795 		subcmdstr = NULL;
7796 		if (sub_cmd == VM_CtHostRead64)
7797 			subcmdstr = "VM_CtHostRead64";
7798 		else if (sub_cmd == VM_CtHostWrite64)
7799 			subcmdstr = "VM_CtHostWrite64";
7800 		else
7801 			break;
7802 
7803 		aac_printf(softs, CE_NOTE,
7804 		    "FIB> %s Container %d  0x%x/%d", subcmdstr,
7805 		    ddi_get16(acc, &br->ContainerId),
7806 		    ddi_get32(acc, &br->BlockNumber),
7807 		    ddi_get16(acc, &br->SectorCount));
7808 		for (i = 0; i < sgcount; i++) {
7809 			sgaddr = ddi_get64(acc,
7810 			    &sg->SgEntry64[i].SgAddress);
7811 			aac_printf(softs, CE_NOTE,
7812 			    "     %d: 0x%08x.%08x/%d", i,
7813 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7814 			    ddi_get32(acc, &sg->SgEntry64[i]. \
7815 			    SgByteCount));
7816 		}
7817 		return;
7818 	}
7819 
7820 	case RawIo: {
7821 		struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
7822 		struct aac_sg_tableraw *sg = &io->SgMapRaw;
7823 		uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
7824 		uint64_t sgaddr;
7825 
7826 		aac_printf(softs, CE_NOTE,
7827 		    "FIB> RawIo Container %d  0x%llx/%d 0x%x",
7828 		    ddi_get16(acc, &io->ContainerId),
7829 		    ddi_get64(acc, &io->BlockNumber),
7830 		    ddi_get32(acc, &io->ByteCount),
7831 		    ddi_get16(acc, &io->Flags));
7832 		for (i = 0; i < sgcount; i++) {
7833 			sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
7834 			aac_printf(softs, CE_NOTE, "     %d: 0x%08x.%08x/%d", i,
7835 			    AAC_MS32(sgaddr), AAC_LS32(sgaddr),
7836 			    ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
7837 		}
7838 		return;
7839 	}
7840 
7841 	case ClusterCommand:
7842 		sub_cmd = ddi_get32(acc,
7843 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7844 		subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
7845 		break;
7846 
7847 	case AifRequest:
7848 		sub_cmd = ddi_get32(acc,
7849 		    (void *)&(((uint32_t *)(void *)fibp->data)[0]));
7850 		subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
7851 		break;
7852 
7853 	default:
7854 		break;
7855 	}
7856 
7857 	fib_size = ddi_get16(acc, &(fibp->Header.Size));
7858 	if (subcmdstr)
7859 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7860 		    subcmdstr, fib_size);
7861 	else if (cmdstr && sub_cmd == (uint32_t)-1)
7862 		aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
7863 		    cmdstr, fib_size);
7864 	else if (cmdstr)
7865 		aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
7866 		    cmdstr, sub_cmd, fib_size);
7867 	else
7868 		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
7869 		    fib_cmd, fib_size);
7870 }
7871 
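/*
 * Log an adapter-initiated FIB (AIF): event notifications are decoded
 * through aac_aifens[], job progress reports are printed with their
 * status and tick counts, and other AIF types are printed by number.
 */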
7872 static void
7873 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
7874 {
7875 	int aif_command;
7876 	uint32_t aif_seqnumber;
7877 	int aif_en_type;
7878 	char *str;
7879 
7880 	aif_command = LE_32(aif->command);
7881 	aif_seqnumber = LE_32(aif->seqNumber);
7882 	aif_en_type = LE_32(aif->data.EN.type);
7883 
7884 	switch (aif_command) {
7885 	case AifCmdEventNotify:
7886 		str = aac_cmd_name(aif_en_type, aac_aifens);
7887 		if (str)
7888 			aac_printf(softs, CE_NOTE, "AIF! %s", str);
7889 		else
7890 			aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
7891 			    aif_en_type);
7892 		break;
7893 
7894 	case AifCmdJobProgress:
7895 		switch (LE_32(aif->data.PR[0].status)) {
7896 		case AifJobStsSuccess:
7897 			str = "success"; break;
7898 		case AifJobStsFinished:
7899 			str = "finished"; break;
7900 		case AifJobStsAborted:
7901 			str = "aborted"; break;
7902 		case AifJobStsFailed:
7903 			str = "failed"; break;
7904 		case AifJobStsSuspended:
7905 			str = "suspended"; break;
7906 		case AifJobStsRunning:
7907 			str = "running"; break;
7908 		default:
7909 			str = "unknown"; break;
7910 		}
7911 		aac_printf(softs, CE_NOTE,
7912 		    "AIF! JobProgress (%d) - %s (%d, %d)",
7913 		    aif_seqnumber, str,
7914 		    LE_32(aif->data.PR[0].currentTick),
7915 		    LE_32(aif->data.PR[0].finalTick));
7916 		break;
7917 
7918 	case AifCmdAPIReport:
7919 		aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
7920 		    aif_seqnumber);
7921 		break;
7922 
7923 	case AifCmdDriverNotify:
7924 		aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
7925 		    aif_seqnumber);
7926 		break;
7927 
7928 	default:
7929 		aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
7930 		    aif_command, aif_seqnumber);
7931 		break;
7932 	}
7933 }
7934 
7935 #endif /* DEBUG */
7936