/*
 *       O.S   : Solaris
 *  FILE NAME  : arcmsr.c
 *       BY    : Erich Chen, C.L. Huang
 *  Description: SCSI RAID Device Driver for
 *               ARECA RAID Host adapter
 *
 *  Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
 *  Copyright (C) 2002,2010 Erich Chen
 *	    Web site: www.areca.com.tw
 *	      E-mail: erich@areca.com.tw; ching2048@areca.com.tw
 *
 *	Redistribution and use in source and binary forms, with or without
 *	modification, are permitted provided that the following conditions
 *	are met:
 *	1. Redistributions of source code must retain the above copyright
 *	   notice, this list of conditions and the following disclaimer.
 *	2. Redistributions in binary form must reproduce the above copyright
 *	   notice, this list of conditions and the following disclaimer in the
 *	   documentation and/or other materials provided with the distribution.
 *	3. The party using or redistributing the source code and binary forms
 *	   agrees to the disclaimer below and the terms and conditions set forth
 *	   herein.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2023 Oxide Computer Company
 */
#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
    dev_info_t **dipp);

static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
    dev_info_t **ldip);
static uint8_t arcmsr_abort_host_command(struct ACB *acb);
static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
static void arcmsr_hbc_message_isr(struct ACB *acb);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
/*PRINTFLIKE3*/
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
/*PRINTFLIKE2*/
static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
static void arcmsr_mutex_init(struct ACB *acb);
static void arcmsr_remove_intr(struct ACB *acb);
static void arcmsr_ccbs_timeout(void *arg);
static void arcmsr_devMap_monitor(void *arg);
static void arcmsr_iop_message_read(struct ACB *acb);
static void arcmsr_free_ccb(struct CCB *ccb);
static void arcmsr_post_ioctldata2iop(struct ACB *acb);
static void arcmsr_report_sense_info(struct CCB *ccb);
static void arcmsr_init_list_head(struct list_head *list);
static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
static void arcmsr_done4abort_postqueue(struct ACB *acb);
static void arcmsr_list_add_tail(kmutex_t *list_lock,
    struct list_head *new_one, struct list_head *head);
static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
static uint8_t arcmsr_iop_reset(struct ACB *acb);
static uint32_t arcmsr_disable_allintr(struct ACB *acb);
static uint32_t arcmsr_iop_confirm(struct ACB *acb);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_flush_hbc_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hbc_bgrb(struct ACB *acb);
static void arcmsr_mutex_destroy(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);
static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
    uint8_t lun);
static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);

static int arcmsr_add_intr(struct ACB *, int);

static void *arcmsr_soft_state = NULL;

static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffffffffffffull,	/* high DMA address range */
	0x00ffffff,		/* DMA counter upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address+length". The Intel IOP331 cannot use
	 * segments over the 4G boundary due to segment boundary restrictions.
	 */
	0xffffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};


static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
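
/*
 * Note that, unlike arcmsr_dma_attr above, the CCB pool attributes cap the
 * high DMA address at 0xffffffff: the pool presumably has to sit below the
 * 4GB line because CCB physical addresses are handed to the IOP through
 * 32-bit queue registers.
 */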


static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
	D_MP,
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* reference count */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	arcmsr_attach,		/* attach */
	arcmsr_detach,		/* detach */
	arcmsr_reset,		/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL			/* power */
};

static struct modldrv arcmsr_modldrv = {
	&mod_driverops,			/* Type of module. This is a driver. */
	"ARECA RAID Controller",	/* module name, from arcmsr.h */
	&arcmsr_ops,			/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};

int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
	if (ret != 0) {
		return (ret);
	}
	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
		ddi_soft_state_fini(&arcmsr_soft_state);
		return (ret);
	}

	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_fini(void)
{
	int ret;

	ret = mod_remove(&arcmsr_modlinkage);
	if (ret == 0) {
		/* a return of 0 means the driver may be unloaded */
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&arcmsr_modlinkage, modinfop));
}


/*
 *      Function: arcmsr_attach(9E)
 *   Description: Set up all device state and allocate data structures,
 *		  mutexes, condition variables, etc. for device operation.
 *		  Set mt_attr property for driver to indicate MT-safety.
 *		  Add interrupts needed.
 *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 *        Output: Return DDI_SUCCESS if device is ready,
 *		  else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;

	switch (cmd) {
	case DDI_ATTACH:
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
		/*
		 * There is no hardware state to restart and no timeouts to
		 * restart since we didn't DDI_SUSPEND with active cmds or
		 * active timeouts.  We just need to unblock waiting threads
		 * and restart I/O.
		 */
		hba_trans = ddi_get_driver_private(dev_info);
		if (hba_trans == NULL) {
			return (DDI_FAILURE);
		}
		acb = hba_trans->tran_hba_private;
		mutex_enter(&acb->acb_mutex);
		arcmsr_iop_init(acb);

		/* restart ccbs "timeout" watchdog */
		acb->timeout_count = 0;
		acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
		    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
		    (caddr_t)acb,
		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
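		/*
		 * Of the two callbacks rearmed above, arcmsr_ccbs_timeout()
		 * scans for CCBs that have outlived their timeout, while
		 * arcmsr_devMap_monitor() periodically re-reads the
		 * controller's device map so newly created or deleted
		 * volumes are noticed.
		 */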
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 *    Function:	arcmsr_detach(9E)
 * Description: Remove all device allocation and system resources, disable
 *		device interrupts.
 *       Input: dev_info_t *dev_info
 *		ddi_detach_cmd_t cmd
 *      Output:	Return DDI_SUCCESS if done,
 *		else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd)
{
	int instance;
	struct ACB *acb;

	instance = ddi_get_instance(dev_info);
	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		if (acb->timeout_sc_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}
		arcmsr_pcidev_disattach(acb);
		/* Remove interrupt set up by ddi_add_intr */
		arcmsr_remove_intr(acb);
		/* unbind mapping object to handle */
		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
		/* Free ccb pool memory */
		ddi_dma_mem_free(&acb->ccbs_acc_handle);
		/* Free DMA handle */
		ddi_dma_free_handle(&acb->ccbs_pool_handle);
		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
			arcmsr_warn(acb, "Unable to detach instance cleanly "
			    "(should not happen)");
		/* free scsi_hba_transport from scsi_hba_tran_alloc */
		scsi_hba_tran_free(acb->scsi_hba_transport);
		ddi_taskq_destroy(acb->taskq);
		ddi_prop_remove_all(dev_info);
		mutex_exit(&acb->acb_mutex);
		arcmsr_mutex_destroy(acb);
		pci_config_teardown(&acb->pci_acc_handle);
		ddi_set_driver_private(dev_info, NULL);
		ddi_soft_state_free(arcmsr_soft_state, instance);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}

		if (acb->timeout_sc_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_sc_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_sc_id = 0;
		}

		/* disable all outbound interrupts */
		(void) arcmsr_disable_allintr(acb);
		/* stop the adapter's background rebuild */
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_stop_hba_bgrb(acb);
			arcmsr_flush_hba_cache(acb);
			break;

		case ACB_ADAPTER_TYPE_B:
			arcmsr_stop_hbb_bgrb(acb);
			arcmsr_flush_hbb_cache(acb);
			break;

		case ACB_ADAPTER_TYPE_C:
			arcmsr_stop_hbc_bgrb(acb);
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
{
	struct ACB *acb;
	scsi_hba_tran_t *scsi_hba_transport;
	_NOTE(ARGUNUSED(cmd));

	scsi_hba_transport = ddi_get_driver_private(resetdev);
	if (scsi_hba_transport == NULL)
		return (DDI_FAILURE);

	acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
	if (acb == NULL)
		return (DDI_FAILURE);

	arcmsr_pcidev_disattach(acb);

	return (DDI_SUCCESS);
}

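/*
 * A minimal user-space sketch of this ioctl interface (an illustration
 * only, not shipped code; the device path is hypothetical): the caller
 * fills in a CMD_MESSAGE_FIELD carrying the "ARCMSR" signature and issues
 * ioctl(2) on the controller node, e.g.
 *
 *	struct CMD_MESSAGE_FIELD msg;
 *	int fd = open("/dev/...arcmsr...", O_RDWR);
 *	bzero(&msg, sizeof (msg));
 *	bcopy("ARCMSR", msg.cmdmessage.Signature, 6);
 *	if (ioctl(fd, ARCMSR_MESSAGE_READ_RQBUFFER, &msg) == 0) {
 *		... msg.messagedatabuffer now holds up to
 *		... msg.cmdmessage.Length bytes read back from the IOP
 *	}
 */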
static int
arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct ACB *acb;
	struct CMD_MESSAGE_FIELD *pktioctlfld;
	int retvalue = 0;
	int instance = MINOR2INST(getminor(dev));

	if (instance < 0)
		return (ENXIO);

	if (secpolicy_sys_config(credp, B_FALSE) != 0)
		return (EPERM);

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL)
		return (ENXIO);

	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);

	mutex_enter(&acb->ioctl_mutex);
	if (ddi_copyin((void *)arg, pktioctlfld,
	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
		retvalue = ENXIO;
		goto ioctl_out;
	}

	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
		/* validity check */
		retvalue = ENXIO;
		goto ioctl_out;
	}

	switch ((unsigned int)ioctl_cmd) {
	case ARCMSR_MESSAGE_READ_RQBUFFER:
	{
		uint8_t *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
		    (allxfer_len < (MSGDATABUFLEN - 1))) {
			/* copy READ QBUFFER to srb */
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstidx++;
			/* wrap the index at the end of the ring */
			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER *prbuffer;
			uint8_t *pQbuffer;
			uint8_t *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = (uint8_t *)prbuffer->data;
			iop_len = (int32_t)prbuffer->data_len;
			/*
			 * this IOP data has no chance to overflow the ring
			 * again here (it was just drained above), so just
			 * copy it in
			 */
			while (iop_len > 0) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
				(void) memcpy(pQbuffer, iop_data, 1);
				acb->rqbuf_lastidx++;
				/* wrap the index at the end of the ring */
				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			/* let IOP know data has been read */
			arcmsr_iop_message_read(acb);
		}
		(void) memcpy(pktioctlfld->messagedatabuffer,
		    ver_addr, allxfer_len);
		pktioctlfld->cmdmessage.Length = allxfer_len;
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
	{
		uint8_t *ver_addr;
		int32_t my_empty_len, user_len;
		int32_t wqbuf_firstidx, wqbuf_lastidx;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);

		ptmpuserbuffer = ver_addr;
		user_len = min(pktioctlfld->cmdmessage.Length,
		    MSGDATABUFLEN);
		(void) memcpy(ptmpuserbuffer,
		    pktioctlfld->messagedatabuffer, user_len);
		/*
		 * check whether the data transfer length of this request
		 * would overflow our ring buffer
		 */
		wqbuf_lastidx = acb->wqbuf_lastidx;
		wqbuf_firstidx = acb->wqbuf_firstidx;
		if (wqbuf_lastidx != wqbuf_firstidx) {
			arcmsr_post_ioctldata2iop(acb);
			pktioctlfld->cmdmessage.ReturnCode =
			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
		} else {
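			/*
			 * Free space in the ring is (firstidx - lastidx - 1)
			 * modulo the ring size; the bitwise AND below relies
			 * on ARCMSR_MAX_QBUFFER being a power of two.
			 */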
			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
			    & (ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					/* copy srb data to wqbuffer */
					pQbuffer =
					    &acb->wqbuffer[acb->wqbuf_lastidx];
					(void) memcpy(pQbuffer,
					    ptmpuserbuffer, 1);
					acb->wqbuf_lastidx++;
					/* wrap the index at the ring end */
					acb->wqbuf_lastidx %=
					    ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				/* post first Qbuffer */
				if (acb->acb_flags &
				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_OK;
			} else {
				pktioctlfld->cmdmessage.ReturnCode =
				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
			}
		}
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;

		kmem_free(ver_addr, MSGDATABUFLEN);
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
	{
		uint8_t *pQbuffer = acb->rqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
	{
		uint8_t *pQbuffer = acb->wqbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;

		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
	{
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
		    ACB_F_MESSAGE_WQBUFFER_READ);
		acb->rqbuf_firstidx = 0;
		acb->rqbuf_lastidx = 0;
		acb->wqbuf_firstidx = 0;
		acb->wqbuf_lastidx = 0;
		pQbuffer = acb->rqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		bzero(pQbuffer, sizeof (struct QBUFFER));
		/* report success */
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_OK;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;
	}

	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
		pktioctlfld->cmdmessage.ReturnCode =
		    ARCMSR_MESSAGE_RETURNCODE_3F;
		if (ddi_copyout(pktioctlfld, (void *)arg,
		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
			retvalue = ENXIO;
		break;

	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;

	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
			arcmsr_flush_hba_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_B:
			arcmsr_flush_hbb_cache(acb);
			break;
		case ACB_ADAPTER_TYPE_C:
			arcmsr_flush_hbc_cache(acb);
			break;
		}
		break;

	default:
		mutex_exit(&acb->ioctl_mutex);
		kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
		return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
		    rvalp));
	}

ioctl_out:
	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
	mutex_exit(&acb->ioctl_mutex);

	return (retvalue);
}


/*
 *    Function:	arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		no per-target initialization is required, the HBA
 *		may leave tran_tgt_init set to NULL.
 *       Input:
 *		dev_info_t *host_dev_info,
 *		dev_info_t *target_dev_info,
 *		scsi_hba_tran_t *tran,
 *		struct scsi_device *sd
 *
 *      Return: DDI_SUCCESS if success, else return DDI_FAILURE
 *
 *  This entry point enables the HBA to allocate and/or initialize any
 *  per-target resources.
 *  It also enables the HBA to qualify the device's address as valid and
 *  supportable for that particular HBA.
 *  By returning DDI_FAILURE, the instance of the target driver for that
 *  device will not be probed or attached.
 *	This entry point is not required, and if none is supplied,
 *  the framework will attempt to probe and attach all possible instances
 *  of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	uint16_t target;
	uint8_t lun;
	struct ACB *acb = tran->tran_hba_private;

	_NOTE(ARGUNUSED(host_dev_info))

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		return (DDI_FAILURE);
	}

	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
		/*
		 * If no persistent node exists, we don't allow a .conf node
		 * to be created.
		 */
		if (arcmsr_find_child(acb, target, lun) != NULL) {
			if ((ndi_merge_node(target_dev_info,
			    arcmsr_name_node) != DDI_SUCCESS)) {
				return (DDI_SUCCESS);
			}
		}
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 *         Function: arcmsr_tran_getcap(9E)
 *      Description: Get the named capability and return its value.
 *    Return Values: current value of capability, if defined
 *		     -1 if the capability is not defined
 * ------------------------------------------------------
 *         Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 (0xa)
 *	#define	SCSI_CAP_SECTOR_SIZE		11 (0xb)
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 (0xc)
 *	#define	SCSI_CAP_GEOMETRY		13 (0xd)
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 (0xe)
 *	#define	SCSI_CAP_QFULL_RETRIES		15 (0xf)
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 (0x10)
 *	#define	SCSI_CAP_SCSI_VERSION		17 (0x11)
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 (0x12)
 *	#define	SCSI_CAP_LUN_RESET		19 (0x13)
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int capability = 0;
	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(&acb->acb_mutex);
	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
		mutex_exit(&acb->acb_mutex);
		return (-1);
	}
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		capability = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		capability = ARCMSR_DEV_SECTOR_SIZE;
		break;
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		capability = ARCMSR_MAX_XFER_LEN;
		break;
	case SCSI_CAP_INITIATOR_ID:
		capability = ARCMSR_SCSI_INITIATOR_ID;
		break;
	case SCSI_CAP_GEOMETRY:
		/* 255 heads, 63 sectors per track */
		capability = (255 << 16) | 63;
		break;
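		/*
		 * The geometry value above packs the head count into the
		 * upper 16 bits and sectors-per-track into the low 16;
		 * callers derive the cylinder count from
		 * capacity / (heads * sectors).
		 */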
	default:
		capability = -1;
		break;
	}
	mutex_exit(&acb->acb_mutex);
	return (capability);
}

/*
 *      Function: arcmsr_tran_setcap(9E)
 *   Description: Set the specific capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		  0 - capability could not be set to new value
 *		 -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	_NOTE(ARGUNUSED(value))

	int supported = 0;
	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&acb->acb_mutex);
	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
		mutex_exit(&acb->acb_mutex);
		return (-1);
	}
	switch (supported = scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:			/* 9 auto request sense */
	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
	case SCSI_CAP_TAGGED_QING:		/* 8 */
		/* these are always on, and cannot be turned off */
		supported = (value == 1) ? 1 : 0;
		break;
	case SCSI_CAP_TOTAL_SECTORS:		/* c */
		supported = 1;
		break;
	case SCSI_CAP_DISCONNECT:		/* 2 */
	case SCSI_CAP_WIDE_XFER:		/* 4 */
	case SCSI_CAP_INITIATOR_ID:		/* 6 */
	case SCSI_CAP_DMA_MAX:			/* 0 */
	case SCSI_CAP_MSG_OUT:			/* 1 */
	case SCSI_CAP_PARITY:			/* 5 */
	case SCSI_CAP_LINKED_CMDS:		/* a */
	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
	case SCSI_CAP_SECTOR_SIZE:		/* b */
		/* these are not settable */
		supported = 0;
		break;
	default:
		supported = -1;
		break;
	}
	mutex_exit(&acb->acb_mutex);
	return (supported);
}


/*
 *      Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *                DMA resources for that pkt.
 *                Called by kernel on behalf of a target driver
 *		  calling scsi_init_pkt(9F).
 *		  Refer to tran_init_pkt(9E) man page
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag;

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		return (NULL);
	}
	if (pkt == NULL) {
		/* get free CCB */
		(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
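		/*
		 * The sync above makes the IOP's latest writes to the CCB
		 * pool visible to the CPU before a CCB is pulled off the
		 * free list for reuse.
		 */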
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == (struct CCB *)NULL) {
			return (NULL);
		}

		if (statuslen < sizeof (struct scsi_arq_status)) {
			statuslen = sizeof (struct scsi_arq_status);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (void *), callback, arg);
		if (pkt == NULL) {
			arcmsr_warn(acb, "scsi pkt allocation failed");
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg are needed to xfer on this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin = 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * Save the buf pointer; arcmsr_tran_start() still needs it
		 * to fabricate responses (e.g. INQUIRY data) for the
		 * virtual message target.
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb */
		arcmsr_cdb = &ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;
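		/*
		 * Context rides along with the command through the IOP and
		 * back; here it is simply seeded with the CDB's own address
		 * so the originating request can be identified later.
		 */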

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		ccb = pkt->pkt_ha_private;
		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
			if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
				return (NULL);
			}
		}

		/*
		 * you cannot update CdbLength with cmdlen here, it would
		 * cause a data compare error
		 */
		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
		old_pkt_flag = 1;
	}

	/* Second step : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * The system may ask us to transfer anything from a few
		 * bytes to hundreds of kilobytes in one request.
		 * arcmsr_dma_alloc() binds the buf and sets pkt_dma_handle
		 * (non-NULL from then on); for requests that do not fit in
		 * one DMA window, the target driver re-invokes
		 * tran_init_pkt() with the same pkt and arcmsr_dma_move()
		 * advances to the next window, reusing the same CCB until
		 * the whole buf has been transferred by successive READ or
		 * WRITE commands.  The pkt_resid set here, together with
		 * b_bcount, tells the kernel what data length to build into
		 * the CDB for the following arcmsr_tran_start().
		 *
		 * Each transfer should be aligned on a 512 byte boundary.
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
			    DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources, it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				arcmsr_warn(acb, "dma allocation failure");
				if (old_pkt_flag == 0) {
					arcmsr_warn(acb, "freeing scsi pkt "
					    "after dma allocation failure");
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return (NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
				arcmsr_warn(acb, "dma move failed");
				return (NULL);
			}
		}
	} else {
		pkt->pkt_resid = 0;
	}
	return (pkt);
}

/*
 *    Function: arcmsr_tran_start(9E)
 * Description: Transport the command in pktp to the target device.
 *		The command is not finished when this returns, only
 *		sent to the target; arcmsr_intr_handler will call
 *		scsi_hba_pkt_comp(pktp) when the target device is done.
 *
 *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
 *      Output:	TRAN_ACCEPT if pkt is accepted and the driver is not busy
 *		TRAN_BUSY if the driver is busy
 *		TRAN_BADPKT if pkt is invalid
 */
static int
arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct ACB *acb;
	struct CCB *ccb;
	int target = ap->a_target;
	int lun = ap->a_lun;

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
	ccb = pkt->pkt_ha_private;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
	    (ccb->ccb_flags & CCB_FLAG_DMACONSISTENT))
		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

	if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
		arcmsr_build_ccb(ccb);

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		pkt->pkt_reason = CMD_RESET;
		pkt->pkt_statistics |= STAT_BUS_RESET;
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS);
		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
		    (pkt->pkt_state & STATE_XFERRED_DATA))
			(void) ddi_dma_sync(ccb->pkt_dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);

		scsi_hba_pkt_comp(pkt);
		return (TRAN_ACCEPT);
	}

	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
	if (target == 16) {

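		/*
		 * Commands addressed to the virtual target are serviced
		 * entirely in the driver: INQUIRY gets a synthesized
		 * processor-type response, while READ/WRITE BUFFER tunnel
		 * management messages to and from the IOP (the path used by
		 * management tools to talk to the controller).
		 */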
		struct buf *bp = ccb->bp;
		uint8_t scsicmd = pkt->pkt_cdbp[0];

		switch (scsicmd) {
		case SCMD_INQUIRY: {
			if (lun != 0) {
				ccb->pkt->pkt_reason = CMD_TIMEOUT;
				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
				arcmsr_ccb_complete(ccb, 0);
				return (TRAN_ACCEPT);
			}

			if (bp && bp->b_un.b_addr && bp->b_bcount) {
				uint8_t inqdata[36];

				bzero(inqdata, sizeof (inqdata));
				/* EVPD and page code are not supported */
				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
					inqdata[1] = 0xFF;
					inqdata[2] = 0x00;
				} else {
					/* Periph Qualifier & Periph Dev Type */
					inqdata[0] = DTYPE_PROCESSOR;
					/* rem media bit & Dev Type Modifier */
					inqdata[1] = 0;
					/* ISO, ECMA, & ANSI versions */
					inqdata[2] = 0;
					inqdata[3] = 0;
					/* length of additional data */
					inqdata[4] = 31;
					/* Vendor Identification */
					bcopy("Areca   ", &inqdata[8], VIDLEN);
					/* Product Identification */
					bcopy("RAID controller ", &inqdata[16],
					    PIDLEN);
					/* Product Revision */
					bcopy("R001", &inqdata[32], REVLEN);
				}
				if (bp->b_flags & (B_PHYS | B_PAGEIO))
					bp_mapin(bp);

				(void) memcpy(bp->b_un.b_addr, inqdata,
				    sizeof (inqdata));
				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
		case SCMD_WRITE_BUFFER:
		case SCMD_READ_BUFFER: {
			if (arcmsr_iop_message_xfer(acb, pkt)) {
				/* error; leave the command to be retried */
				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
			}
			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
		default:
			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
			arcmsr_ccb_complete(ccb, 0);
			return (TRAN_ACCEPT);
		}
	}

	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;

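		/*
		 * Masking the low nibble of the opcode folds the 6-, 10-,
		 * and 12-byte READ/WRITE variants together (READ is
		 * 0x08/0x28/0xa8, WRITE is 0x0a/0x2a/0xaa), so the 0x08 and
		 * 0x0a tests below match any of them.
		 */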
		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD | STATE_GOT_STATUS);
			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
				(void) ddi_dma_sync(ccb->pkt_dma_handle,
				    ccb->pkt_dma_offset,
				    ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
			}
			scsi_hba_pkt_comp(pkt);
			return (TRAN_ACCEPT);
		}
	}
	mutex_enter(&acb->postq_mutex);
	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
		ccb->ccb_state = ARCMSR_CCB_RETRY;
		mutex_exit(&acb->postq_mutex);
		return (TRAN_BUSY);
	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
		arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
		    acb->ccboutstandingcount);
		mutex_exit(&acb->postq_mutex);
		return (TRAN_FATAL_ERROR);
	}
	mutex_exit(&acb->postq_mutex);
	return (TRAN_ACCEPT);
}

/*
 * Function name: arcmsr_tran_destroy_pkt
 * Return Values: none
 *   Description: Called by kernel on behalf of a target driver
 *		  calling scsi_destroy_pkt(9F).
 *		  Refer to tran_destroy_pkt(9E) man page
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 */
static void
arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb = pkt->pkt_ha_private;
	ddi_dma_handle_t pkt_dma_handle;

	if (ccb == NULL || ccb->pkt != pkt) {
		return;
	}
	/* only dereference the CCB once we know it is ours */
	pkt_dma_handle = ccb->pkt_dma_handle;
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
		if (pkt_dma_handle) {
			(void) ddi_dma_unbind_handle(pkt_dma_handle);
		}
	}
	if (pkt_dma_handle) {
		ddi_dma_free_handle(&pkt_dma_handle);
	}
	pkt->pkt_ha_private = NULL;
	if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
		if (ccb->ccb_state & ARCMSR_CCB_BACK) {
			arcmsr_free_ccb(ccb);
		} else {
			ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
		}
	} else {
		arcmsr_free_ccb(ccb);
	}
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * Function name: arcmsr_tran_dmafree()
 * Return Values: none
 *   Description: free dvma resources
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 */
static void
arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb = pkt->pkt_ha_private;

	if ((ccb == NULL) || (ccb->pkt != pkt)) {
		return;
	}
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
			arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
			    "(target %d lun %d)", ap->a_target, ap->a_lun);
		}
		ddi_dma_free_handle(&ccb->pkt_dma_handle);
		ccb->pkt_dma_handle = NULL;
	}
}

/*
 * Function name: arcmsr_tran_sync_pkt()
 * Return Values: none
 *   Description: sync dma
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 */
static void
arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct CCB *ccb;

	ccb = pkt->pkt_ha_private;
	if ((ccb == NULL) || (ccb->pkt != pkt)) {
		return;
	}
	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
		    DDI_SUCCESS) {
			arcmsr_warn(ccb->acb,
			    "sync pkt failed for target %d lun %d",
			    ap->a_target, ap->a_lun);
		}
	}
}


/*
 * Function: arcmsr_tran_abort(9E)
 *		SCSA interface routine to abort pkt(s) in progress.
 *		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
 * Output:	Return 1 if success
 *		Return 0 if failure
 */
static int
arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
{
	struct ACB *acb;
	int return_code;

	acb = ap->a_hba_tran->tran_hba_private;

	while (acb->ccboutstandingcount != 0) {
		drv_usecwait(10000);
	}

	mutex_enter(&acb->isr_mutex);
	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
	mutex_exit(&acb->isr_mutex);

	if (return_code != DDI_SUCCESS) {
		arcmsr_warn(acb, "abort command failed for target %d lun %d",
		    ap->a_target, ap->a_lun);
		return (0);
	}
	return (1);
}

/*
 * Function: arcmsr_tran_reset(9E)
 *           SCSA interface routine to perform scsi resets on either
 *           a specified target or the bus (default).
 *   Output: Return 1 if success
 *	     Return 0 if failure
 */
static int
arcmsr_tran_reset(struct scsi_address *ap, int level)
{
	struct ACB *acb;
	int return_code = 1;
	int target = ap->a_target;
	int lun = ap->a_lun;

	/* Are we in the middle of dumping core? */
	if (ddi_in_panic())
		return (return_code);

	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
	mutex_enter(&acb->isr_mutex);
	switch (level) {
	case RESET_ALL:		/* 0 */
		acb->num_resets++;
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (acb->timeout_count) {
			if (arcmsr_iop_reset(acb) != 0) {
				arcmsr_handle_iop_bus_hold(acb);
				acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
			}
		}
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		break;
	case RESET_TARGET:	/* 1 */
		if (acb->devstate[target][lun] == ARECA_RAID_GONE)
			return_code = 0;
		break;
	case RESET_BUS:		/* 2 */
		return_code = 0;
		break;
	case RESET_LUN:		/* 3 */
		return_code = 0;
		break;
	default:
		return_code = 0;
	}
	mutex_exit(&acb->isr_mutex);
	return (return_code);
}

static int
arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	struct ACB *acb;
	int rval = NDI_FAILURE;
	int tgt, lun;

	if ((acb = ddi_get_soft_state(arcmsr_soft_state,
	    ddi_get_instance(parent))) == NULL)
		return (NDI_FAILURE);

	ndi_devi_enter(parent);
	switch (op) {
	case BUS_CONFIG_ONE:
		if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
			rval = NDI_FAILURE;
			break;
		}
		if (acb->device_map[tgt] & 1 << lun) {
			acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
			rval = arcmsr_config_lun(acb, tgt, lun, childp);
		}
		break;

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
				if (acb->device_map[tgt] & 1 << lun) {
					acb->devstate[tgt][lun] =
					    ARECA_RAID_GOOD;
					(void) arcmsr_config_lun(acb, tgt,
					    lun, NULL);
				}

		rval = NDI_SUCCESS;
		break;
	}
	if (rval == NDI_SUCCESS)
		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
	ndi_devi_exit(parent);
	return (rval);
}

/*
 * Function name: arcmsr_dma_alloc
 * Return Values: 0 if successful, -1 if failure
 *   Description: allocate DMA resources
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	struct CCB *ccb = pkt->pkt_ha_private;
	int alloc_result, map_method, dma_flags;
	int resid = 0;
	int total_ccb_xferlen = 0;
	int (*cb)(caddr_t);
	uint8_t i;

	/*
	 * at this point the PKT SCSI CDB is empty, and dma xfer length
	 * is bp->b_bcount
	 */

	if (bp->b_flags & B_READ) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_READ;
	} else {
		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;
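	/*
	 * Map the SCSA callback convention onto the DDI DMA wait semantics:
	 * NULL_FUNC means the caller must not block; anything else allows
	 * the bind to sleep for resources.
	 */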
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
	    cb, 0, &ccb->pkt_dma_handle);
	if (alloc_result != DDI_SUCCESS) {
		arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
		return (DDI_FAILURE);
	}

	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
	    bp, dma_flags, cb, 0,
	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
	    &ccb->pkt_ncookies);	/* number of sgl cookies */

	switch (map_method) {
	case DDI_DMA_PARTIAL_MAP:
		/*
		 * When main memory is larger than 4GB, bindings can come
		 * back DDI_DMA_PARTIAL_MAP.
		 *
		 * We've already set DDI_DMA_PARTIAL in dma_flags,
		 * so if it's now missing, there's something screwy
		 * happening. We plow on....
		 */

		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			arcmsr_warn(acb,
			    "dma partial mapping lost ...impossible case!");
		}
		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_numwin() failed");
		}

		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			arcmsr_warn(acb, "ddi_dma_getwin failed");
		}
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
			    (i == ccb->pkt_ncookies) ||
			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
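		/*
		 * Transfers are kept in whole 512-byte sectors (see the
		 * alignment note in arcmsr_tran_init_pkt()): any trailing
		 * partial sector is trimmed off and parked in
		 * resid_dmacookie, and arcmsr_dma_move() prepends it to the
		 * next window.  The DDI_DMA_MAPPED case below repeats this
		 * trimming.
		 */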
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

		return (DDI_SUCCESS);

	case DDI_DMA_MAPPED:
		ccb->pkt_nwin = 1; /* all mapped, so only one window */
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
			    (i == ccb->pkt_ncookies) ||
			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
		return (DDI_SUCCESS);

	case DDI_DMA_NORESOURCES:
		arcmsr_warn(acb, "dma map got 'no resources'");
		bioerror(bp, ENOMEM);
		break;

	case DDI_DMA_NOMAPPING:
		arcmsr_warn(acb, "dma map got 'no mapping'");
		bioerror(bp, EFAULT);
		break;

	case DDI_DMA_TOOBIG:
		arcmsr_warn(acb, "dma map got 'too big'");
		bioerror(bp, EINVAL);
		break;

	case DDI_DMA_INUSE:
		arcmsr_warn(acb, "dma map got 'in use' "
		    "(should not happen)");
		break;
	default:
		arcmsr_warn(acb, "dma map failed (0x%x)", map_method);
		break;
	}

	ddi_dma_free_handle(&ccb->pkt_dma_handle);
	ccb->pkt_dma_handle = NULL;
	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
	return (DDI_FAILURE);
}


/*
 * Function name: arcmsr_dma_move
 * Return Values: 0 if successful, -1 if failure
 *   Description: move DMA resources to next DMA window
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 */
1658 static int
1659 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
1660 {
1661 	struct CCB *ccb = pkt->pkt_ha_private;
1662 	uint8_t i = 0;
1663 	int resid = 0;
1664 	int total_ccb_xferlen = 0;
1665 
1666 	if (ccb->resid_dmacookie.dmac_size != 0) 	{
1667 		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1668 		ccb->pkt_dmacookies[i].dmac_size =
1669 		    ccb->resid_dmacookie.dmac_size;
1670 		ccb->pkt_dmacookies[i].dmac_laddress =
1671 		    ccb->resid_dmacookie.dmac_laddress;
1672 		i++;
1673 		ccb->resid_dmacookie.dmac_size = 0;
1674 	}
1675 	/*
1676 	 * If there are no more cookies remaining in this window,
1677 	 * move to the next window.
1678 	 */
1679 	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1680 		/*
1681 		 * Only DMA map "partial" requests arrive here.
1682 		 */
1683 		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1684 		    (ccb->pkt_nwin == 1)) {
1685 			return (DDI_SUCCESS);
1686 		}
1687 
1688 		/* At last window, cannot move */
1689 		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1690 			arcmsr_warn(acb, "dma partial set, numwin exceeded");
1691 			return (DDI_FAILURE);
1692 		}
1693 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1694 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1695 		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1696 		    DDI_FAILURE) {
1697 			arcmsr_warn(acb, "ddi_dma_getwin failed");
1698 			return (DDI_FAILURE);
1699 		}
1700 		/* reset cookie pointer */
1701 		ccb->pkt_cookie = 0;
1702 	} else {
1703 		/*
1704 		 * Only DMA map "all" requests arrive here.  More cookies
1705 		 * remain in this window, so fetch the next one from
1706 		 * pkt_dma_handle into the ccb->pkt_dmacookies array.
1709 		 */
1710 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1711 		    &ccb->pkt_dmacookies[i]);
1712 	}
1713 
1714 	/* Get remaining cookies in this window, up to our maximum */
1715 	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1716 
1717 	/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
1718 	for (;;) {
1719 		i++;
1720 		/* count of cookies consumed from this window */
1721 		ccb->pkt_cookie++;
1722 		if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1723 		    (ccb->pkt_cookie == ccb->pkt_ncookies) ||
1724 		    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1725 			break;
1726 		}
1727 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1728 		    &ccb->pkt_dmacookies[i]);
1729 		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1730 	}
1731 
1732 	ccb->arcmsr_cdb.sgcount = i;
1733 	if (total_ccb_xferlen > 512) {
1734 		resid = total_ccb_xferlen % 512;
1735 		if (resid != 0) {
1736 			i--;
1737 			total_ccb_xferlen -= resid;
1738 			/* modify last sg length */
1739 			ccb->pkt_dmacookies[i].dmac_size =
1740 			    ccb->pkt_dmacookies[i].dmac_size - resid;
1741 			ccb->resid_dmacookie.dmac_size = resid;
1742 			ccb->resid_dmacookie.dmac_laddress =
1743 			    ccb->pkt_dmacookies[i].dmac_laddress +
1744 			    ccb->pkt_dmacookies[i].dmac_size;
1745 		}
1746 	}
1747 	ccb->total_dmac_size += total_ccb_xferlen;
1748 	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1749 
1750 	return (DDI_SUCCESS);
1751 }
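
/*
 * Both the mapping cases earlier and arcmsr_dma_move() above trim the
 * posted SG list to a whole multiple of 512 bytes; the trimmed tail is
 * parked in resid_dmacookie and becomes the first cookie consumed by
 * the next arcmsr_dma_move() call.  A minimal standalone sketch of the
 * arithmetic (illustrative only, not part of the driver):
 */
#if 0
static uint32_t
trim_to_sector(uint32_t xferlen, uint32_t *residp)
{
	/* keep the transfer a whole number of 512-byte sectors */
	*residp = xferlen % 512;
	return (xferlen - *residp);
}
/* e.g. trim_to_sector(1536 + 300, &r) yields 1536 with r == 300 */
#endif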
1752 
1753 
1754 /*ARGSUSED*/
1755 static void
1756 arcmsr_build_ccb(struct CCB *ccb)
1757 {
1758 	struct scsi_pkt *pkt = ccb->pkt;
1759 	struct ARCMSR_CDB *arcmsr_cdb;
1760 	char *psge;
1761 	uint32_t address_lo, address_hi;
1762 	int arccdbsize = 0x30;
1763 	uint8_t sgcount;
1764 
1765 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1766 	psge = (char *)&arcmsr_cdb->sgu;
1767 
1768 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1769 	sgcount = ccb->arcmsr_cdb.sgcount;
1770 
1771 	if (sgcount != 0) {
1772 		int length, i;
1773 		int cdb_sgcount = 0;
1774 		int total_xfer_length = 0;
1775 
1776 		/* map the transport's SG list to our IOP SG list */
1777 		for (i = 0; i < sgcount; i++) {
1778 			/* Get physaddr of the current data pointer */
1779 			length = ccb->pkt_dmacookies[i].dmac_size;
1780 			total_xfer_length += length;
1781 			address_lo =
1782 			    dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1783 			address_hi =
1784 			    dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1785 
1786 			if (address_hi == 0) {
1787 				struct SG32ENTRY *dma_sg;
1788 
1789 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1790 				dma_sg->address = address_lo;
1791 				dma_sg->length = length;
1792 				psge += sizeof (struct SG32ENTRY);
1793 				arccdbsize += sizeof (struct SG32ENTRY);
1794 			} else {
1795 				struct SG64ENTRY *dma_sg;
1796 
1797 				dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1798 				dma_sg->addresshigh = address_hi;
1799 				dma_sg->address = address_lo;
1800 				dma_sg->length = length | IS_SG64_ADDR;
1801 				psge += sizeof (struct SG64ENTRY);
1802 				arccdbsize += sizeof (struct SG64ENTRY);
1803 			}
1804 			cdb_sgcount++;
1805 		}
1806 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1807 		arcmsr_cdb->DataLength = total_xfer_length;
1808 		if (arccdbsize > 256) {
1809 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1810 		}
1811 	} else {
1812 		arcmsr_cdb->DataLength = 0;
1813 	}
1814 
1815 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1816 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1817 	ccb->arc_cdb_size = arccdbsize;
1818 }
1819 
1820 /*
1821  * arcmsr_post_ccb - post a protocol-specific ARC "postcard" to an AIOC.
1822  *
1823  * acb:	adapter control block of the target ARC adapter
1824  * ccb:	command control block describing the ARC CDB to post
1825  *
1826  * This routine posts an ARC CDB to the request post FIFO of the given
1827  * ARC adapter.
1829  */
1830 static int
1831 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1832 {
1833 	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1834 	struct scsi_pkt *pkt = ccb->pkt;
1835 	struct ARCMSR_CDB *arcmsr_cdb;
1836 	uint_t pkt_flags = pkt->pkt_flags;
1837 
1838 	arcmsr_cdb = &ccb->arcmsr_cdb;
1839 
1840 	/* TODO: offset/size of 0,0 syncs the whole pool; narrow to this CCB? */
1841 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1842 	    DDI_FAILURE)
1843 		return (DDI_FAILURE);
1844 
1845 	atomic_inc_32(&acb->ccboutstandingcount);
1846 	ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1847 
1848 	ccb->ccb_state = ARCMSR_CCB_START;
1849 	switch (acb->adapter_type) {
1850 	case ACB_ADAPTER_TYPE_A:
1851 	{
1852 		struct HBA_msgUnit *phbamu;
1853 
1854 		phbamu = (struct HBA_msgUnit *)acb->pmu;
1855 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1856 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1857 			    &phbamu->inbound_queueport,
1858 			    cdb_phyaddr_pattern |
1859 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1860 		} else {
1861 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1862 			    &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1863 		}
1864 		if (pkt_flags & FLAG_NOINTR)
1865 			arcmsr_polling_hba_ccbdone(acb, ccb);
1866 		break;
1867 	}
1868 
1869 	case ACB_ADAPTER_TYPE_B:
1870 	{
1871 		struct HBB_msgUnit *phbbmu;
1872 		int ending_index, index;
1873 
1874 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
1875 		index = phbbmu->postq_index;
1876 		ending_index = (index + 1) % ARCMSR_MAX_HBB_POSTQUEUE;
1877 		phbbmu->post_qbuffer[ending_index] = 0;
1878 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1879 			phbbmu->post_qbuffer[index] =
1880 			    (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1881 		} else {
1882 			phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1883 		}
1884 		index++;
1885 		/* if last index number set it to 0 */
1886 		/* wrap the post index at the end of the ring */
1887 		phbbmu->postq_index = index;
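		/*
		 * Worked example (illustrative, assuming
		 * ARCMSR_MAX_HBB_POSTQUEUE is 20): posting at index 19
		 * zeroes slot 0 (the ending_index computed above) and
		 * wraps postq_index back around to 0.
		 */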
1888 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1889 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1890 		    ARCMSR_DRV2IOP_CDB_POSTED);
1891 
1892 		if (pkt_flags & FLAG_NOINTR)
1893 			arcmsr_polling_hbb_ccbdone(acb, ccb);
1894 		break;
1895 	}
1896 
1897 	case ACB_ADAPTER_TYPE_C:
1898 	{
1899 		struct HBC_msgUnit *phbcmu;
1900 		uint32_t ccb_post_stamp, arc_cdb_size;
1901 
1902 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
1903 		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1904 		    ccb->arc_cdb_size;
1905 		ccb_post_stamp = (cdb_phyaddr_pattern |
1906 		    ((arc_cdb_size - 1) >> 6) | 1);
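		/*
		 * The low bits of the posted word encode the CDB size in
		 * 64-byte units; e.g. (illustrative) arc_cdb_size 0x300
		 * yields ((0x300 - 1) >> 6) == 0xb, and bit 0 is always
		 * set.
		 */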
1907 		if (acb->cdb_phyaddr_hi32) {
1908 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1909 			    &phbcmu->inbound_queueport_high,
1910 			    acb->cdb_phyaddr_hi32);
1911 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1912 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1913 		} else {
1914 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1915 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1916 		}
1917 		if (pkt_flags & FLAG_NOINTR)
1918 			arcmsr_polling_hbc_ccbdone(acb, ccb);
1919 		break;
1920 	}
1921 
1922 	}
1923 	return (DDI_SUCCESS);
1924 }
1925 
1926 
1927 static void
1928 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1929 {
1930 	struct ACB *acb = ccb->acb;
1931 	struct scsi_pkt *pkt = ccb->pkt;
1932 
1933 	if (pkt == NULL) {
1934 		return;
1935 	}
1936 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1937 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1938 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1939 
1940 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1941 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1942 		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1943 		    DDI_DMA_SYNC_FORCPU);
1944 	}
1945 	/*
1946 	 * TODO: This represents a potential race condition, and is
1947 	 * ultimately a poor design decision.  Revisit this code
1948 	 * and solve the mutex ownership issue correctly.
1949 	 */
1950 	if (mutex_owned(&acb->isr_mutex)) {
1951 		mutex_exit(&acb->isr_mutex);
1952 		scsi_hba_pkt_comp(pkt);
1953 		mutex_enter(&acb->isr_mutex);
1954 	} else {
1955 		scsi_hba_pkt_comp(pkt);
1956 	}
1957 	if (flag == 1) {
1958 		atomic_dec_32(&acb->ccboutstandingcount);
1959 	}
1960 }
1961 
1962 static void
1963 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 {
1965 	int id, lun;
1966 
1967 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1968 	id = ccb->pkt->pkt_address.a_target;
1969 	lun = ccb->pkt->pkt_address.a_lun;
1970 
1971 	if (!error) {
1972 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974 		}
1975 		ccb->pkt->pkt_reason = CMD_CMPLT;
1976 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977 		arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978 		    &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1979 
1980 	} else {
1981 		switch (ccb->arcmsr_cdb.DeviceStatus) {
1982 		case ARCMSR_DEV_SELECT_TIMEOUT:
1983 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1984 				arcmsr_warn(acb,
1985 				    "target %d lun %d selection "
1986 				    "timeout", id, lun);
1987 			}
1988 			acb->devstate[id][lun] = ARECA_RAID_GONE;
1989 			ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1990 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1991 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1992 			    &ccb->complete_queue_pointer,
1993 			    &acb->ccb_complete_list);
1994 			break;
1995 		case ARCMSR_DEV_ABORTED:
1996 		case ARCMSR_DEV_INIT_FAIL:
1997 			arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED' or"
1998 			    " 'ARCMSR_DEV_INIT_FAIL'");
1999 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2000 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2001 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2002 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2003 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2004 			    &ccb->complete_queue_pointer,
2005 			    &acb->ccb_complete_list);
2006 			break;
2007 		case SCSISTAT_CHECK_CONDITION:
2008 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2009 			arcmsr_report_sense_info(ccb);
2010 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2011 			    &ccb->complete_queue_pointer,
2012 			    &acb->ccb_complete_list);
2013 			break;
2014 		default:
2015 			arcmsr_warn(acb,
2016 			    "target %d lun %d isr received CMD_DONE"
2017 			    " with unknown DeviceStatus (0x%x)",
2018 			    id, lun, ccb->arcmsr_cdb.DeviceStatus);
2019 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2020 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2021 			/* unknown or CRC error; flag CMD_TRAN_ERR for retry */
2022 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2023 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2024 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2025 			    &ccb->complete_queue_pointer,
2026 			    &acb->ccb_complete_list);
2027 			break;
2028 		}
2029 	}
2030 }
2031 
2032 
2033 static void
2034 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2035 {
2038 	if (ccb->acb != acb) {
2039 		return;
2040 	}
2041 	if (ccb->ccb_state != ARCMSR_CCB_START) {
2042 		switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2043 		case ARCMSR_CCB_TIMEOUT:
2044 		case ARCMSR_CCB_ABORTED:
2045 		case ARCMSR_CCB_RESET:
2046 			/* identical disposition for every abnormal state */
2047 			if (ccb->ccb_state & ARCMSR_CCB_WAIT4_FREE)
2048 				arcmsr_free_ccb(ccb);
2049 			else
2050 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2051 			return;
2052 		default:
2053 			return;
2054 		}
2068 	}
2069 	arcmsr_report_ccb_state(acb, ccb, error);
2070 }
2071 
2072 static void
2073 arcmsr_report_sense_info(struct CCB *ccb)
2074 {
2075 	struct SENSE_DATA *cdb_sensedata;
2076 	struct scsi_pkt *pkt = ccb->pkt;
2077 	struct scsi_arq_status *arq_status;
2078 	union scsi_cdb *cdbp;
2079 	uint64_t err_blkno;
2080 
2081 	cdbp = (void *)pkt->pkt_cdbp;
2082 	err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2083 
2084 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2085 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2086 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2087 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
2088 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2089 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2090 	arq_status->sts_rqpkt_statistics = 0;
2091 	arq_status->sts_rqpkt_resid = 0;
2092 
2093 	pkt->pkt_reason = CMD_CMPLT;
2094 	/* auto rqsense took place */
2095 	pkt->pkt_state |= STATE_ARQ_DONE;
2096 
2097 	cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2098 	{	/* &sts_sensedata is never NULL; always-true test removed */
2099 		if (err_blkno <= 0xfffffffful) {
2100 			struct scsi_extended_sense *sts_sensedata;
2101 
2102 			sts_sensedata = &arq_status->sts_sensedata;
2103 			sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2104 			/* must eq CLASS_EXTENDED_SENSE (0x07) */
2105 			sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2106 			sts_sensedata->es_valid = cdb_sensedata->Valid;
2107 			sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2108 			sts_sensedata->es_key = cdb_sensedata->SenseKey;
2109 			sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2110 			sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2111 			sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2112 			sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2113 			sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2114 			sts_sensedata->es_info_3 = (err_blkno >>  8) & 0xFF;
2115 			sts_sensedata->es_info_4 = err_blkno & 0xFF;
2116 			sts_sensedata->es_add_len =
2117 			    cdb_sensedata->AdditionalSenseLength;
2118 			sts_sensedata->es_cmd_info[0] =
2119 			    cdb_sensedata->CommandSpecificInformation[0];
2120 			sts_sensedata->es_cmd_info[1] =
2121 			    cdb_sensedata->CommandSpecificInformation[1];
2122 			sts_sensedata->es_cmd_info[2] =
2123 			    cdb_sensedata->CommandSpecificInformation[2];
2124 			sts_sensedata->es_cmd_info[3] =
2125 			    cdb_sensedata->CommandSpecificInformation[3];
2126 			sts_sensedata->es_add_code =
2127 			    cdb_sensedata->AdditionalSenseCode;
2128 			sts_sensedata->es_qual_code =
2129 			    cdb_sensedata->AdditionalSenseCodeQualifier;
2130 			sts_sensedata->es_fru_code =
2131 			    cdb_sensedata->FieldReplaceableUnitCode;
2132 		} else { /* 64-bit LBA */
2133 			struct scsi_descr_sense_hdr *dsp;
2134 			struct scsi_information_sense_descr *isd;
2135 
2136 			dsp = (struct scsi_descr_sense_hdr *)
2137 			    &arq_status->sts_sensedata;
2138 			dsp->ds_class = CLASS_EXTENDED_SENSE;
2139 			dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2140 			dsp->ds_key = cdb_sensedata->SenseKey;
2141 			dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2142 			dsp->ds_qual_code =
2143 			    cdb_sensedata->AdditionalSenseCodeQualifier;
2144 			dsp->ds_addl_sense_length =
2145 			    sizeof (struct scsi_information_sense_descr);
2146 
2147 			isd = (struct scsi_information_sense_descr *)(dsp+1);
2148 			isd->isd_descr_type = DESCR_INFORMATION;
2149 			isd->isd_valid = 1;
2150 			isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2151 			isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2152 			isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2153 			isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2154 			isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2155 			isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2156 			isd->isd_information[6] = (err_blkno >>  8) & 0xFF;
2157 			isd->isd_information[7] = (err_blkno) & 0xFF;
2158 		}
2159 	}
2160 }
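
/*
 * Note on the two branches above: fixed-format sense can carry at most
 * a 32-bit LBA in es_info_1..es_info_4, so any err_blkno above
 * 0xffffffff (e.g. the first block past 2TiB with 512-byte sectors) is
 * reported via descriptor-format sense with an 8-byte information
 * descriptor instead.
 */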
2161 
2162 
2163 static int
2164 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2165 {
2166 	struct CCB *ccb;
2167 	uint32_t intmask_org = 0;
2168 	int i = 0;
2169 
2170 	acb->num_aborts++;
2171 
2172 	if (abortpkt != NULL) {
2173 		/*
2174 		 * We don't support abort of a single packet.  All
2175 		 * callers in our kernel always do a global abort, so
2176 		 * there is no point in having code to support it
2177 		 * here.
2178 		 */
2179 		return (DDI_FAILURE);
2180 	}
2181 
2182 	/*
2183 	 * if abortpkt is NULL, the upper layer needs us
2184 	 * to abort all commands
2185 	 */
2186 	if (acb->ccboutstandingcount != 0) {
2187 		/* disable all outbound interrupt */
2188 		intmask_org = arcmsr_disable_allintr(acb);
2189 		/* clear and abort all outbound posted Q */
2190 		arcmsr_done4abort_postqueue(acb);
2191 		/* tell the IOP 331 its outstanding commands were aborted */
2192 		(void) arcmsr_abort_host_command(acb);
2193 
2194 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2195 			ccb = acb->pccb_pool[i];
2196 			if (ccb->ccb_state == ARCMSR_CCB_START) {
2197 				/*
2198 				 * this ccb will complete at
2199 				 * hwinterrupt
2200 				 */
2201 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2202 				ccb->pkt->pkt_reason = CMD_ABORTED;
2203 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
2204 				arcmsr_ccb_complete(ccb, 1);
2205 			}
2206 		}
2207 		/*
2208 		 * enable outbound Post Queue, outbound
2209 		 * doorbell Interrupt
2210 		 */
2211 		arcmsr_enable_allintr(acb, intmask_org);
2212 	}
2213 	return (DDI_SUCCESS);
2214 }
2215 
2216 
2217 /*
2218  * Autoconfiguration support
2219  */
2220 static int
2221 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2222 {
2223 	char devbuf[SCSI_MAXNAMELEN];
2224 	char *addr;
2225 	char *p,  *tp, *lp;
2226 	long num;
2227 
2228 	/* Parse dev name and address */
2229 	(void) strlcpy(devbuf, devnm, sizeof (devbuf));
2230 	addr = "";
2231 	for (p = devbuf; *p != '\0'; p++) {
2232 		if (*p == '@') {
2233 			addr = p + 1;
2234 			*p = '\0';
2235 		} else if (*p == ':') {
2236 			*p = '\0';
2237 			break;
2238 		}
2239 	}
2240 
2241 	/* Parse target and lun */
2242 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2243 		if (*p == ',') {
2244 			lp = p + 1;
2245 			*p = '\0';
2246 			break;
2247 		}
2248 	}
2249 	if ((tgt != NULL) && (tp != NULL)) {
2250 		if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2251 			return (-1);
2252 		*tgt = (int)num;
2253 	}
2254 	if ((lun != NULL) && (lp != NULL)) {
2255 		if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2256 			return (-1);
2257 		*lun = (int)num;
2258 	}
2259 	return (0);
2260 }
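
/*
 * Example (illustrative): arcmsr_parse_devname("sd@2,1:a", &t, &l)
 * splits the name at '@', truncates the ":a" minor suffix, splits the
 * unit address at ',', and parses both fields as hex, yielding t == 2
 * and l == 1.
 */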
2261 
2262 static int
2263 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2264 {
2265 	int tgt, lun;
2266 
2267 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2268 	    -1);
2269 	if (tgt == -1)
2270 		return (DDI_FAILURE);
2271 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2272 	    -1);
2273 	if (lun == -1)
2274 		return (DDI_FAILURE);
2275 	(void) snprintf(name, len, "%x,%x", tgt, lun);
2276 	return (DDI_SUCCESS);
2277 }
2278 
2279 static dev_info_t *
2280 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2281 {
2282 	dev_info_t *child = NULL;
2283 	char addr[SCSI_MAXNAMELEN];
2284 	char tmp[SCSI_MAXNAMELEN];
2285 
2286 	(void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
2287 
2288 	for (child = ddi_get_child(acb->dev_info);
2289 	    child;
2290 	    child = ddi_get_next_sibling(child)) {
2291 		/* We don't care about non-persistent nodes */
2292 		if (ndi_dev_is_persistent_node(child) == 0)
2293 			continue;
2294 		if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2295 		    DDI_SUCCESS)
2296 			continue;
2297 		if (strcmp(addr, tmp) == 0)
2298 			break;
2299 	}
2300 	return (child);
2301 }
2302 
2303 static int
2304 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2305 {
2306 	char *nodename = NULL;
2307 	char **compatible = NULL;
2308 	int ncompatible = 0;
2309 	dev_info_t *ldip = NULL;
2310 	int tgt = sd->sd_address.a_target;
2311 	int lun = sd->sd_address.a_lun;
2312 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2313 	int rval;
2314 
2315 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2316 	    NULL, &nodename, &compatible, &ncompatible);
2317 	if (nodename == NULL) {
2318 		arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2319 		    tgt, lun);
2320 		rval = NDI_FAILURE;
2321 		goto finish;
2322 	}
2323 	/* Create dev node */
2324 	rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2325 	if (rval == NDI_SUCCESS) {
2326 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2327 		    DDI_PROP_SUCCESS) {
2328 			arcmsr_warn(acb,
2329 			    "unable to create target property for T%dL%d",
2330 			    tgt, lun);
2331 			rval = NDI_FAILURE;
2332 			goto finish;
2333 		}
2334 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2335 		    DDI_PROP_SUCCESS) {
2336 			arcmsr_warn(acb,
2337 			    "unable to create lun property for T%dL%d",
2338 			    tgt, lun);
2339 			rval = NDI_FAILURE;
2340 			goto finish;
2341 		}
2342 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2343 		    "compatible", compatible, ncompatible) !=
2344 		    DDI_PROP_SUCCESS) {
2345 			arcmsr_warn(acb,
2346 			    "unable to create compatible property for T%dL%d",
2347 			    tgt, lun);
2348 			rval = NDI_FAILURE;
2349 			goto finish;
2350 		}
2351 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2352 		if (rval != NDI_SUCCESS) {
2353 			arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2354 			ndi_prop_remove_all(ldip);
2355 			(void) ndi_devi_free(ldip);
2356 		} else {
2357 			arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2358 		}
2359 	}
2360 finish:
2361 	if (dipp)
2362 		*dipp = ldip;
2363 
2364 	scsi_hba_nodename_compatible_free(nodename, compatible);
2365 	return (rval);
2366 }
2367 
2368 static int
2369 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2370 {
2371 	struct scsi_device sd;
2372 	dev_info_t *child;
2373 	int rval;
2374 
2375 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2376 		if (ldip) {
2377 			*ldip = child;
2378 		}
2379 		return (NDI_SUCCESS);
2380 	}
2381 	bzero(&sd, sizeof (struct scsi_device));
2382 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2383 	sd.sd_address.a_target = tgt;
2384 	sd.sd_address.a_lun = lun;
2385 
2386 	rval = scsi_hba_probe(&sd, NULL);
2387 	if (rval == SCSIPROBE_EXISTS)
2388 		rval = arcmsr_config_child(acb, &sd, ldip);
2389 	scsi_unprobe(&sd);
2390 	return (rval);
2391 }
2392 
2393 
2394 static int
2395 arcmsr_add_intr(struct ACB *acb, int intr_type)
2396 {
2397 	int	rc, count;
2398 	dev_info_t *dev_info;
2399 	const char *type_str;
2400 
2401 	switch (intr_type) {
2402 	case DDI_INTR_TYPE_MSI:
2403 		type_str = "MSI";
2404 		break;
2405 	case DDI_INTR_TYPE_MSIX:
2406 		type_str = "MSIX";
2407 		break;
2408 	case DDI_INTR_TYPE_FIXED:
2409 		type_str = "FIXED";
2410 		break;
2411 	default:
2412 		type_str = "unknown";
2413 		break;
2414 	}
2415 
2416 	dev_info = acb->dev_info;
2417 	/* Determine number of supported interrupts */
2418 	rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2419 	if ((rc != DDI_SUCCESS) || (count == 0)) {
2420 		arcmsr_warn(acb,
2421 		    "no interrupts of type %s, rc=0x%x, count=%d",
2422 		    type_str, rc, count);
2423 		return (DDI_FAILURE);
2424 	}
2425 	acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2426 	acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2427 	rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2428 	    count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2429 	if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2430 		arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2431 		    type_str, rc);
2432 		return (DDI_FAILURE);
2433 	}
2434 	if (acb->intr_count < count) {
2435 		arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2436 		    acb->intr_count, count);
2437 	}
2438 	/*
2439 	 * Get priority for first msi, assume remaining are all the same
2440 	 */
2441 	if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2442 		arcmsr_warn(acb, "ddi_intr_get_pri failed");
2443 		return (DDI_FAILURE);
2444 	}
2445 	if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2446 		arcmsr_warn(acb, "high-level interrupt not supported");
2447 		return (DDI_FAILURE);
2448 	}
2449 
2450 	for (int x = 0; x < acb->intr_count; x++) {
2451 		if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2452 		    (caddr_t)acb, NULL) != DDI_SUCCESS) {
2453 			arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2454 			    type_str);
2455 			return (DDI_FAILURE);
2456 		}
2457 	}
2458 	(void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2459 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2460 		/* Call ddi_intr_block_enable() for MSI */
2461 		(void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2462 	} else {
2463 		/* Call ddi_intr_enable() for MSI non block enable */
2464 		for (int x = 0; x < acb->intr_count; x++) {
2465 			(void) ddi_intr_enable(acb->phandle[x]);
2466 		}
2467 	}
2468 	return (DDI_SUCCESS);
2469 }
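
/*
 * The routine above follows the standard DDI interrupt setup sequence:
 * ddi_intr_get_nintrs() -> ddi_intr_alloc() -> ddi_intr_get_pri() ->
 * ddi_intr_add_handler() -> ddi_intr_block_enable() (or per-vector
 * ddi_intr_enable()); arcmsr_remove_intr() below unwinds it in reverse
 * order.
 */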
2470 
2471 static void
2472 arcmsr_remove_intr(struct ACB *acb)
2473 {
2474 	int x;
2475 
2476 	if (acb->phandle == NULL)
2477 		return;
2478 
2479 	/* Disable all interrupts */
2480 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2481 		/* Call ddi_intr_block_disable() */
2482 		(void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2483 	} else {
2484 		for (x = 0; x < acb->intr_count; x++) {
2485 			(void) ddi_intr_disable(acb->phandle[x]);
2486 		}
2487 	}
2488 	/* Call ddi_intr_remove_handler() */
2489 	for (x = 0; x < acb->intr_count; x++) {
2490 		(void) ddi_intr_remove_handler(acb->phandle[x]);
2491 		(void) ddi_intr_free(acb->phandle[x]);
2492 	}
2493 	kmem_free(acb->phandle, acb->intr_size);
2494 	acb->phandle = NULL;
2495 }
2496 
2497 static void
2498 arcmsr_mutex_init(struct ACB *acb)
2499 {
2500 	mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2501 	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2502 	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2503 	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2504 	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2505 }
2506 
2507 static void
2508 arcmsr_mutex_destroy(struct ACB *acb)
2509 {
2510 	mutex_destroy(&acb->isr_mutex);
2511 	mutex_destroy(&acb->acb_mutex);
2512 	mutex_destroy(&acb->postq_mutex);
2513 	mutex_destroy(&acb->workingQ_mutex);
2514 	mutex_destroy(&acb->ioctl_mutex);
2515 }
2516 
2517 static int
2518 arcmsr_initialize(struct ACB *acb)
2519 {
2520 	struct CCB *pccb_tmp;
2521 	size_t allocated_length;
2522 	uint16_t wval;
2523 	uint_t intmask_org, count;
2524 	caddr_t	arcmsr_ccbs_area;
2525 	uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2526 	int32_t dma_sync_size;
2527 	int i, id, lun, instance;
2528 
2529 	instance = ddi_get_instance(acb->dev_info);
2530 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
2531 	wval = (uint16_t)((wlval >> 16) & 0xffff);
2532 	realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2533 	switch (wval) {
2534 	case PCI_DEVICE_ID_ARECA_1880:
2535 	case PCI_DEVICE_ID_ARECA_1882:
2536 	{
2537 		uint32_t *iop_mu_regs_map0;
2538 
2539 		acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2540 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2541 		if (ddi_regs_map_setup(acb->dev_info, 2,
2542 		    (caddr_t *)&iop_mu_regs_map0, 0,
2543 		    sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2544 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2545 			arcmsr_warn(acb, "unable to map registers");
2546 			return (DDI_FAILURE);
2547 		}
2548 
2549 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2550 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2551 		    DDI_SUCCESS) {
2552 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2553 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2554 			return (DDI_FAILURE);
2555 		}
2556 
2557 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2558 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2559 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2560 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2561 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2562 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2563 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2564 			return (DDI_FAILURE);
2565 		}
2566 
2567 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2568 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2569 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2570 		    &count) != DDI_DMA_MAPPED) {
2571 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2572 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2573 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2574 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2575 			return (DDI_FAILURE);
2576 		}
2577 		bzero(arcmsr_ccbs_area, dma_sync_size);
2578 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2579 		    - PtrToNum(arcmsr_ccbs_area));
2580 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
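		/*
		 * e.g. (illustrative) a pool allocated at ...1010 rounds
		 * up to ...1020, so offset == 0x10 and every CCB starts
		 * 32-byte aligned, matching the P2ROUNDUP of
		 * realccb_size above.
		 */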
2581 		/* ioport base */
2582 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2583 		break;
2584 	}
2585 
2586 	case PCI_DEVICE_ID_ARECA_1201:
2587 	{
2588 		uint32_t *iop_mu_regs_map0;
2589 		uint32_t *iop_mu_regs_map1;
2590 		struct HBB_msgUnit *phbbmu;
2591 
2592 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2593 		dma_sync_size =
2594 		    (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2595 		    sizeof (struct HBB_msgUnit);
2596 		/* Allocate memory for the ccb */
2597 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2598 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2599 		    DDI_SUCCESS) {
2600 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2601 			return (DDI_FAILURE);
2602 		}
2603 
2604 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2605 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2606 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2607 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2608 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2609 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2610 			return (DDI_FAILURE);
2611 		}
2612 
2613 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2614 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2615 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2616 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2617 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2618 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2619 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2620 			return (DDI_FAILURE);
2621 		}
2622 		bzero(arcmsr_ccbs_area, dma_sync_size);
2623 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2624 		    - PtrToNum(arcmsr_ccbs_area));
2625 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2626 		acb->pmu = (struct msgUnit *)
2627 		    NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2628 		    (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2629 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2630 
2631 		/* setup device register */
2632 		if (ddi_regs_map_setup(acb->dev_info, 1,
2633 		    (caddr_t *)&iop_mu_regs_map0, 0,
2634 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2635 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2636 			arcmsr_warn(acb, "unable to map base0 registers");
2637 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2638 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2639 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2640 			return (DDI_FAILURE);
2641 		}
2642 
2643 		/* ARCMSR_DRV2IOP_DOORBELL */
2644 		phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2645 		if (ddi_regs_map_setup(acb->dev_info, 2,
2646 		    (caddr_t *)&iop_mu_regs_map1, 0,
2647 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2648 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2649 			arcmsr_warn(acb, "unable to map base1 registers");
2650 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2651 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2652 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2653 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2654 			return (DDI_FAILURE);
2655 		}
2656 
2657 		/* ARCMSR_MSGCODE_RWBUFFER */
2658 		phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2659 		break;
2660 	}
2661 
2662 	case	PCI_DEVICE_ID_ARECA_1110:
2663 	case	PCI_DEVICE_ID_ARECA_1120:
2664 	case	PCI_DEVICE_ID_ARECA_1130:
2665 	case	PCI_DEVICE_ID_ARECA_1160:
2666 	case	PCI_DEVICE_ID_ARECA_1170:
2667 	case	PCI_DEVICE_ID_ARECA_1210:
2668 	case	PCI_DEVICE_ID_ARECA_1220:
2669 	case	PCI_DEVICE_ID_ARECA_1230:
2670 	case	PCI_DEVICE_ID_ARECA_1231:
2671 	case	PCI_DEVICE_ID_ARECA_1260:
2672 	case	PCI_DEVICE_ID_ARECA_1261:
2673 	case	PCI_DEVICE_ID_ARECA_1270:
2674 	case	PCI_DEVICE_ID_ARECA_1280:
2675 	case	PCI_DEVICE_ID_ARECA_1212:
2676 	case	PCI_DEVICE_ID_ARECA_1222:
2677 	case	PCI_DEVICE_ID_ARECA_1380:
2678 	case	PCI_DEVICE_ID_ARECA_1381:
2679 	case	PCI_DEVICE_ID_ARECA_1680:
2680 	case	PCI_DEVICE_ID_ARECA_1681:
2681 	{
2682 		uint32_t *iop_mu_regs_map0;
2683 
2684 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2685 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2686 		if (ddi_regs_map_setup(acb->dev_info, 1,
2687 		    (caddr_t *)&iop_mu_regs_map0, 0,
2688 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2689 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2690 			arcmsr_warn(acb, "unable to map registers");
2691 			return (DDI_FAILURE);
2692 		}
2693 
2694 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2695 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2696 		    DDI_SUCCESS) {
2697 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2698 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2699 			return (DDI_FAILURE);
2700 		}
2701 
2702 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2703 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2704 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2705 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2706 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2707 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2708 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2709 			return (DDI_FAILURE);
2710 		}
2711 
2712 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2713 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2714 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2715 		    &count) != DDI_DMA_MAPPED) {
2716 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2717 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2718 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2719 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2720 			return (DDI_FAILURE);
2721 		}
2722 		bzero(arcmsr_ccbs_area, dma_sync_size);
2723 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2724 		    - PtrToNum(arcmsr_ccbs_area));
2725 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2726 		/* ioport base */
2727 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2728 		break;
2729 	}
2730 
2731 	default:
2732 		arcmsr_warn(acb, "Unknown RAID adapter type!");
2733 		return (DDI_FAILURE);
2734 	}
2735 	arcmsr_init_list_head(&acb->ccb_complete_list);
2736 	/* beyond this point we cannot access PCI configuration space again */
2737 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2738 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2739 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2740 	/* physical address of acb->pccb_pool */
2741 	cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2742 
2743 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2744 
2745 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2746 		pccb_tmp->cdb_phyaddr_pattern =
2747 		    (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2748 		    cdb_phyaddr : (cdb_phyaddr >> 5);
2749 		pccb_tmp->acb = acb;
2750 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2751 		cdb_phyaddr = cdb_phyaddr + realccb_size;
2752 		pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2753 		    realccb_size);
2754 	}
2755 	acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
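	/*
	 * For type A/B the pattern is the CCB's physical address divided
	 * by 32 (the >> 5 above; the pool's 32-byte CCB alignment makes
	 * this lossless), while type C posts the full address.  pccb_tmp
	 * and cdb_phyaddr were advanced in lockstep through the loop, so
	 * their difference is the constant virtual-to-physical offset of
	 * the whole CCB pool.
	 */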
2756 
2757 	/* disable all outbound interrupt */
2758 	intmask_org = arcmsr_disable_allintr(acb);
2759 
2760 	if (!arcmsr_iop_confirm(acb)) {
2761 		arcmsr_warn(acb, "arcmsr_iop_confirm error");
2762 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2763 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2764 		return (DDI_FAILURE);
2765 	}
2766 
2767 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2768 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2769 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2770 		}
2771 	}
2772 
2773 	/* enable outbound Post Queue, outbound doorbell Interrupt */
2774 	arcmsr_enable_allintr(acb, intmask_org);
2775 
2776 	return (DDI_SUCCESS);
2777 }
2778 
2779 static int
2780 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2781 {
2782 	scsi_hba_tran_t *hba_trans;
2783 	ddi_device_acc_attr_t dev_acc_attr;
2784 	struct ACB *acb;
2785 	uint16_t wval;
2786 	int raid6 = 1;
2787 	char *type;
2788 	int intr_types;
2789 
2790 
2791 	/*
2792 	 * Soft State Structure
2793 	 * The driver should allocate the per-device-instance
2794 	 * soft state structure, being careful to clean up properly if
2795 	 * an error occurs. Allocate data structure.
2796 	 */
2797 	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2798 		arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2799 		return (DDI_FAILURE);
2800 	}
2801 
2802 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2803 	ASSERT(acb);
2804 
2805 	arcmsr_mutex_init(acb);
2806 
2807 	/* acb is already zalloc()d so we don't need to bzero() it */
2808 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2809 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2810 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2811 
2812 	acb->dev_info = dev_info;
2813 	acb->dev_acc_attr = dev_acc_attr;
2814 
2815 	/*
2816 	 * The driver, if providing DMA, should also check that its hardware is
2817 	 * installed in a DMA-capable slot
2818 	 */
2819 	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2820 		arcmsr_warn(acb, "hardware is not installed in"
2821 		    " a DMA-capable slot");
2822 		goto error_level_0;
2823 	}
2824 	if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2825 		arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2826 		goto error_level_0;
2827 	}
2828 
2829 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2830 	if (wval != PCI_VENDOR_ID_ARECA) {
2831 		arcmsr_warn(acb,
2832 		    "vendorid (0x%04x) does not match 0x%04x "
2833 		    "(PCI_VENDOR_ID_ARECA)",
2834 		    wval, PCI_VENDOR_ID_ARECA);
2835 		goto error_level_0;
2836 	}
2837 
2838 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2839 	switch (wval) {
2840 	case PCI_DEVICE_ID_ARECA_1110:
2841 	case PCI_DEVICE_ID_ARECA_1210:
2842 	case PCI_DEVICE_ID_ARECA_1201:
2843 		raid6 = 0;
2844 		/*FALLTHRU*/
2845 	case PCI_DEVICE_ID_ARECA_1120:
2846 	case PCI_DEVICE_ID_ARECA_1130:
2847 	case PCI_DEVICE_ID_ARECA_1160:
2848 	case PCI_DEVICE_ID_ARECA_1170:
2849 	case PCI_DEVICE_ID_ARECA_1220:
2850 	case PCI_DEVICE_ID_ARECA_1230:
2851 	case PCI_DEVICE_ID_ARECA_1260:
2852 	case PCI_DEVICE_ID_ARECA_1270:
2853 	case PCI_DEVICE_ID_ARECA_1280:
2854 		type = "SATA 3G";
2855 		break;
2856 	case PCI_DEVICE_ID_ARECA_1380:
2857 	case PCI_DEVICE_ID_ARECA_1381:
2858 	case PCI_DEVICE_ID_ARECA_1680:
2859 	case PCI_DEVICE_ID_ARECA_1681:
2860 		type = "SAS 3G";
2861 		break;
2862 	case PCI_DEVICE_ID_ARECA_1880:
2863 		type = "SAS 6G";
2864 		break;
2865 	default:
2866 		type = "X-TYPE";
2867 		arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2868 		goto error_level_0;
2869 	}
2870 
2871 	arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2872 	    type, raid6 ? " (RAID6 capable)" : "");
2873 
2874 	/* we disable iop interrupt here */
2875 	if (arcmsr_initialize(acb) == DDI_FAILURE) {
2876 		arcmsr_warn(acb, "arcmsr_initialize failed");
2877 		goto error_level_1;
2878 	}
2879 
2880 	/* Allocate a transport structure */
2881 	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2882 	if (hba_trans == NULL) {
2883 		arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2884 		goto error_level_2;
2885 	}
2886 	acb->scsi_hba_transport = hba_trans;
2888 	/* init scsi host adapter transport entry */
2889 	hba_trans->tran_hba_private  = acb;
2890 	hba_trans->tran_tgt_private  = NULL;
2891 	/*
2892 	 * If no per-target initialization is required, the HBA can leave
2893 	 * tran_tgt_init set to NULL.
2894 	 */
2895 	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2896 	hba_trans->tran_tgt_probe = scsi_hba_probe;
2897 	hba_trans->tran_tgt_free = NULL;
2898 	hba_trans->tran_start = arcmsr_tran_start;
2899 	hba_trans->tran_abort = arcmsr_tran_abort;
2900 	hba_trans->tran_reset = arcmsr_tran_reset;
2901 	hba_trans->tran_getcap = arcmsr_tran_getcap;
2902 	hba_trans->tran_setcap = arcmsr_tran_setcap;
2903 	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2904 	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2905 	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2906 	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2907 
2908 	hba_trans->tran_reset_notify = NULL;
2909 	hba_trans->tran_get_bus_addr = NULL;
2910 	hba_trans->tran_get_name = NULL;
2911 	hba_trans->tran_quiesce = NULL;
2912 	hba_trans->tran_unquiesce = NULL;
2913 	hba_trans->tran_bus_reset = NULL;
2914 	hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2915 	hba_trans->tran_add_eventcall = NULL;
2916 	hba_trans->tran_get_eventcookie = NULL;
2917 	hba_trans->tran_post_event = NULL;
2918 	hba_trans->tran_remove_eventcall = NULL;
2919 
2920 	/* iop init and enable interrupt here */
2921 	arcmsr_iop_init(acb);
2922 
2923 	/* Get supported interrupt types */
2924 	if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2925 	    DDI_SUCCESS) {
2926 		arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2927 		goto error_level_3;
2928 	}
2929 	if (intr_types & DDI_INTR_TYPE_FIXED) {
2930 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2931 			goto error_level_5;
2932 	} else if (intr_types & DDI_INTR_TYPE_MSI) {
2933 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2934 			goto error_level_5;
2935 	}
2936 
2937 	/*
2938 	 * The driver should attach this instance of the device, and
2939 	 * perform error cleanup if necessary
2940 	 */
2941 	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 		arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 		goto error_level_5;
2945 	}
2946 
2947 	/* Create a taskq for dealing with dr events */
2948 	if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 		arcmsr_warn(acb, "ddi_taskq_create failed");
2951 		goto error_level_8;
2952 	}
2953 
2954 	acb->timeout_count = 0;
2955 	/* active ccbs "timeout" watchdog */
2956 	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 	    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2958 	acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 	    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2960 
2961 	/* report device info */
2962 	ddi_report_dev(dev_info);
2963 
2964 	return (DDI_SUCCESS);
2965 
2966 error_level_8:
2967 	(void) scsi_hba_detach(dev_info);
2971 
2972 error_level_5:
2973 	arcmsr_remove_intr(acb);
2974 
2975 error_level_3:
2977 	if (acb->scsi_hba_transport)
2978 		scsi_hba_tran_free(acb->scsi_hba_transport);
2979 
2980 error_level_2:
2981 	if (acb->ccbs_acc_handle)
2982 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2983 	if (acb->ccbs_pool_handle)
2984 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2985 
2986 error_level_1:
2987 	if (acb->pci_acc_handle)
2988 		pci_config_teardown(&acb->pci_acc_handle);
2989 	arcmsr_mutex_destroy(acb);
2990 	ddi_soft_state_free(arcmsr_soft_state, instance);
2991 
2992 error_level_0:
2993 	return (DDI_FAILURE);
2994 }
2995 
2996 
2997 static void
2998 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2999 {
3000 	char	buf[256];
3001 
3002 	if (acb != NULL) {
3003 		(void) snprintf(buf, sizeof (buf), "%s%d: %s",
3004 		    ddi_driver_name(acb->dev_info),
3005 		    ddi_get_instance(acb->dev_info), fmt);
3006 		fmt = buf;
3007 	}
3008 	vcmn_err(level, fmt, ap);
3009 }
3010 
3011 static void
3012 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3013 {
3014 	va_list ap;
3015 
3016 	va_start(ap, fmt);
3017 	arcmsr_vlog(acb, level, fmt, ap);
3018 	va_end(ap);
3019 }
3020 
3021 static void
3022 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3023 {
3024 	va_list ap;
3025 
3026 	va_start(ap, fmt);
3027 	arcmsr_vlog(acb, CE_WARN, fmt, ap);
3028 	va_end(ap);
3029 }
3030 
3031 static void
3032 arcmsr_init_list_head(struct list_head *list)
3033 {
3034 	list->next = list;
3035 	list->prev = list;
3036 }
3037 
3038 static void
3039 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3040 {
3041 	next->prev = prev;
3042 	prev->next = next;
3043 }
3044 
3045 static void
3046 arcmsr_x_list_add(struct list_head *new_one, struct list_head *prev,
3047     struct list_head *next)
3048 {
3049 	next->prev = new_one;
3050 	new_one->next = next;
3051 	new_one->prev = prev;
3052 	prev->next = new_one;
3053 }
3054 
3055 static void
3056 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3057     struct list_head *head)
3058 {
3059 	mutex_enter(list_lock);
3060 	arcmsr_x_list_add(new_one, head->prev, head);
3061 	mutex_exit(list_lock);
3062 }
3063 
3064 static struct list_head *
3065 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3066 {
3067 	struct list_head *one = NULL;
3068 
3069 	mutex_enter(list_lock);
3070 	if (head->next == head) {
3071 		mutex_exit(list_lock);
3072 		return (NULL);
3073 	}
3074 	one = head->next;
3075 	arcmsr_x_list_del(one->prev, one->next);
3076 	arcmsr_init_list_head(one);
3077 	mutex_exit(list_lock);
3078 	return (one);
3079 }
3080 
3081 static struct CCB *
3082 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3083 {
3084 	struct list_head *first_complete_ccb_list = NULL;
3085 	struct CCB *ccb;
3086 
3087 	first_complete_ccb_list =
3088 	    arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3089 	    &acb->ccb_complete_list);
3090 	if (first_complete_ccb_list == NULL) {
3091 		return (NULL);
3092 	}
3093 	ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3094 	    offsetof(struct CCB, complete_queue_pointer));
3095 	return (ccb);
3096 }
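
/*
 * The offsetof() arithmetic above is the classic intrusive-list
 * "container of" recovery: complete_queue_pointer is embedded in the
 * CCB, so subtracting its offset from the list node pointer yields the
 * enclosing CCB.  A self-contained sketch of the technique
 * (illustrative only, not part of the driver):
 */
#if 0
#include <stddef.h>

struct node { struct node *next, *prev; };
struct item { int payload; struct node link; };

static struct item *
item_of(struct node *n)
{
	return ((struct item *)((char *)n - offsetof(struct item, link)));
}
#endif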
3097 
3098 static struct CCB *
3099 arcmsr_get_freeccb(struct ACB *acb)
3100 {
3101 	struct CCB *ccb;
3102 	int ccb_get_index, ccb_put_index;
3103 
3104 	mutex_enter(&acb->workingQ_mutex);
3105 	ccb_put_index = acb->ccb_put_index;
3106 	ccb_get_index = acb->ccb_get_index;
3107 	ccb = acb->ccbworkingQ[ccb_get_index];
3108 	ccb_get_index++;
3109 	if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3110 		ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3111 	if (ccb_put_index != ccb_get_index) {
3112 		acb->ccb_get_index = ccb_get_index;
3113 		arcmsr_init_list_head(&ccb->complete_queue_pointer);
3114 		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3115 	} else {
3116 		ccb = NULL;
3117 	}
3118 	mutex_exit(&acb->workingQ_mutex);
3119 	return (ccb);
3120 }
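
/*
 * ccbworkingQ is a ring indexed by ccb_get_index/ccb_put_index.  The
 * pool is exhausted when the advanced get index catches the put index,
 * in which case NULL is returned and ccb_get_index is left unchanged.
 * E.g. (illustrative, with a pool of 4): get == 3 and put == 0 means
 * the get index would advance to 0 == put, so no free CCB remains.
 */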
3121 
3122 
3123 static void
3124 arcmsr_free_ccb(struct CCB *ccb)
3125 {
3126 	struct ACB *acb = ccb->acb;
3127 
3128 	if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3129 		return;
3130 	}
3131 	mutex_enter(&acb->workingQ_mutex);
3132 	ccb->ccb_state = ARCMSR_CCB_FREE;
3133 	ccb->pkt = NULL;
3134 	ccb->pkt_dma_handle = NULL;
3135 	ccb->ccb_flags = 0;
3136 	acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3137 	acb->ccb_put_index++;
3138 	if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3139 		acb->ccb_put_index =
3140 		    acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3141 	mutex_exit(&acb->workingQ_mutex);
3142 }
3143 
3144 
3145 static void
3146 arcmsr_ccbs_timeout(void *arg)
3147 {
3148 	struct ACB *acb = (struct ACB *)arg;
3149 	struct CCB *ccb;
3150 	int i, timeout_count = 0;
3151 	uint32_t intmask_org;
3152 	time_t current_time = ddi_get_time();
3153 
3154 	intmask_org = arcmsr_disable_allintr(acb);
3155 	mutex_enter(&acb->isr_mutex);
3156 	if (acb->ccboutstandingcount != 0) {
3157 		/* check each ccb */
3158 		i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 		    DDI_DMA_SYNC_FORKERNEL);
3160 		if (i != DDI_SUCCESS) {
3161 			if ((acb->timeout_id != 0) &&
3162 			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 				/* do pkt timeout check each 60 secs */
3164 				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 				    (void *)acb, (ARCMSR_TIMEOUT_WATCH *
3166 				    drv_usectohz(1000000)));
3167 			}
3168 			mutex_exit(&acb->isr_mutex);
3169 			arcmsr_enable_allintr(acb, intmask_org);
3170 			return;
3171 		}
3173 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3174 			ccb = acb->pccb_pool[i];
3175 			if (ccb->acb != acb) {
3176 				break;
3177 			}
3178 			if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3179 				continue;
3180 			}
3181 			if (ccb->pkt == NULL) {
3182 				continue;
3183 			}
3184 			if (ccb->pkt->pkt_time == 0) {
3185 				continue;
3186 			}
3187 			if (ccb->ccb_time >= current_time) {
3188 				continue;
3189 			}
3190 			int id = ccb->pkt->pkt_address.a_target;
3191 			int lun = ccb->pkt->pkt_address.a_lun;
3192 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3193 				uint8_t *cdb = (uint8_t *)&ccb->arcmsr_cdb.Cdb;
3194 
3195 				timeout_count++;
3196 				arcmsr_warn(acb,
3197 				    "scsi target %d lun %d cmd=0x%x "
3198 				    "command timeout, ccb=0x%p",
3199 				    id, lun, *cdb, (void *)ccb);
3200 				ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3201 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3202 				ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3203 				/* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3204 				arcmsr_ccb_complete(ccb, 1);
3205 				continue;
3206 			} else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3207 			    ARCMSR_CCB_CAN_BE_FREE) {
3208 				arcmsr_free_ccb(ccb);
3209 			}
3210 		}
3211 	}
3212 	if ((acb->timeout_id != 0) &&
3213 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3214 		/* do pkt timeout check each 60 secs */
3215 		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3216 		    (void *)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3217 	}
3218 	mutex_exit(&acb->isr_mutex);
3219 	arcmsr_enable_allintr(acb, intmask_org);
3220 }
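
/*
 * Both watchdogs re-arm themselves with timeout(9F) rather than using a
 * cyclic; the chain stops once ACB_F_SCSISTOPADAPTER is set or
 * timeout_id is cleared (presumably by detach).  A minimal sketch of
 * the self-rearming idiom (illustrative only, not part of the driver):
 */
#if 0
struct wstate {
	int running;
	timeout_id_t tid;
};

static void
watchdog(void *arg)
{
	struct wstate *sp = arg;

	/* ... periodic work ... */
	if (sp->running)
		sp->tid = timeout(watchdog, sp, drv_usectohz(1000000));
}
#endif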
3221 
3222 static void
3223 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3224 {
3225 	struct CCB *ccb;
3226 	uint32_t intmask_org;
3227 	int i;
3228 
3229 	/* disable all outbound interrupts */
3230 	intmask_org = arcmsr_disable_allintr(acb);
3231 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3232 		ccb = acb->pccb_pool[i];
3233 		if (ccb->ccb_state == ARCMSR_CCB_START) {
3234 			if ((target == ccb->pkt->pkt_address.a_target) &&
3235 			    (lun == ccb->pkt->pkt_address.a_lun)) {
3236 				ccb->ccb_state = ARCMSR_CCB_ABORTED;
3237 				ccb->pkt->pkt_reason = CMD_ABORTED;
3238 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3239 				arcmsr_ccb_complete(ccb, 1);
3240 				arcmsr_log(acb, CE_NOTE,
3241 				    "abort T%dL%d ccb", target, lun);
3242 			}
3243 		}
3244 	}
3245 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3246 	arcmsr_enable_allintr(acb, intmask_org);
3247 }
3248 
3249 static int
3250 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3251 {
3252 	struct scsi_device sd;
3253 	dev_info_t *child;
3254 	int rval;
3255 
3256 	bzero(&sd, sizeof (struct scsi_device));
3257 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3258 	sd.sd_address.a_target = (uint16_t)tgt;
3259 	sd.sd_address.a_lun = (uint8_t)lun;
3260 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3261 		rval = scsi_hba_probe(&sd, NULL);
3262 		if (rval == SCSIPROBE_EXISTS) {
3263 			rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3264 			if (rval != NDI_SUCCESS) {
3265 				arcmsr_warn(acb, "unable to online T%dL%d",
3266 				    tgt, lun);
3267 			} else {
3268 				arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3269 				    tgt, lun);
3270 			}
3271 		}
3272 	} else {
3273 		rval = scsi_hba_probe(&sd, NULL);
3274 		if (rval == SCSIPROBE_EXISTS)
3275 			rval = arcmsr_config_child(acb, &sd, NULL);
3276 	}
3277 	scsi_unprobe(&sd);
3278 	return (rval);
3279 }
3280 
3281 static void
3282 arcmsr_dr_handle(struct ACB *acb)
3283 {
3284 	char *acb_dev_map = (char *)acb->device_map;
3285 	char *devicemap;
3286 	char temp;
3287 	uint16_t target;
3288 	uint8_t lun;
3289 	char diff;
3290 	dev_info_t *dip;
3291 	ddi_acc_handle_t reg;
3292 
3293 	switch (acb->adapter_type) {
3294 	case ACB_ADAPTER_TYPE_A:
3295 	{
3296 		struct HBA_msgUnit *phbamu;
3297 
3298 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3299 		devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3300 		reg = acb->reg_mu_acc_handle0;
3301 		break;
3302 	}
3303 
3304 	case ACB_ADAPTER_TYPE_B:
3305 	{
3306 		struct HBB_msgUnit *phbbmu;
3307 
3308 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3309 		devicemap = (char *)
3310 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3311 		reg = acb->reg_mu_acc_handle1;
3312 		break;
3313 	}
3314 
3315 	case ACB_ADAPTER_TYPE_C:
3316 	{
3317 		struct HBC_msgUnit *phbcmu;
3318 
3319 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3320 		devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3321 		reg = acb->reg_mu_acc_handle0;
3322 		break;
3323 	}
3324 
3325 	}
3326 
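	/*
	 * Each byte of the firmware device map is a bitmask of the LUNs
	 * present behind one target; XOR with the cached copy yields the
	 * LUNs whose state changed.  Worked example (illustrative):
	 * cached 0b0011, new 0b0110 -> diff 0b0101, so as the bits are
	 * shifted through below, LUN 0 goes off-line ((temp & 1) == 0)
	 * and LUN 2 comes on-line ((temp & 1) == 1).
	 */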
3327 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3328 		temp = CHIP_REG_READ8(reg, devicemap);
3329 		diff = (*acb_dev_map) ^ temp;
3330 		if (diff != 0) {
3331 			*acb_dev_map = temp;
3332 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3333 				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3334 					ndi_devi_enter(acb->dev_info);
3335 					acb->devstate[target][lun] =
3336 					    ARECA_RAID_GOOD;
3337 					(void) arcmsr_scsi_device_probe(acb,
3338 					    target, lun);
3339 					ndi_devi_exit(acb->dev_info);
3340 					arcmsr_log(acb, CE_NOTE,
3341 					    "T%dL%d on-line", target, lun);
3342 				} else if ((temp & 0x01) == 0 &&
3343 				    (diff & 0x01) == 1) {
3344 					dip = arcmsr_find_child(acb, target,
3345 					    lun);
3346 					if (dip != NULL) {
3347 						acb->devstate[target][lun] =
3348 						    ARECA_RAID_GONE;
3349 						if (mutex_owned(&acb->
3350 						    isr_mutex)) {
3351 							arcmsr_abort_dr_ccbs(
3352 							    acb, target, lun);
3353 							(void)
3354 							    ndi_devi_offline(
3355 							    dip,
3356 							    NDI_DEVI_REMOVE |
3357 							    NDI_DEVI_OFFLINE);
3358 						} else {
3359 							mutex_enter(&acb->
3360 							    isr_mutex);
3361 							arcmsr_abort_dr_ccbs(
3362 							    acb, target, lun);
3363 							(void)
3364 							    ndi_devi_offline(
3365 							    dip,
3366 							    NDI_DEVI_REMOVE |
3367 							    NDI_DEVI_OFFLINE);
3368 							mutex_exit(&acb->
3369 							    isr_mutex);
3370 						}
3371 					}
3372 					arcmsr_log(acb, CE_NOTE,
3373 					    "T%dL%d off-line", target, lun);
3374 				}
3375 				temp >>= 1;
3376 				diff >>= 1;
3377 			}
3378 		}
3379 		devicemap++;
3380 		acb_dev_map++;
3381 	}
3382 }
3383 
3384 
3385 static void
3386 arcmsr_devMap_monitor(void *arg)
3387 {
3388 	struct ACB *acb = (struct ACB *)arg;
3389 
3390 	switch (acb->adapter_type) {
3391 	case ACB_ADAPTER_TYPE_A:
3392 	{
3393 		struct HBA_msgUnit *phbamu;
3394 
3395 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3396 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3397 		    &phbamu->inbound_msgaddr0,
3398 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3399 		break;
3400 	}
3401 
3402 	case ACB_ADAPTER_TYPE_B:
3403 	{
3404 		struct HBB_msgUnit *phbbmu;
3405 
3406 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3407 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3408 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3409 		    ARCMSR_MESSAGE_GET_CONFIG);
3410 		break;
3411 	}
3412 
3413 	case ACB_ADAPTER_TYPE_C:
3414 	{
3415 		struct HBC_msgUnit *phbcmu;
3416 
3417 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3418 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3419 		    &phbcmu->inbound_msgaddr0,
3420 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3421 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3422 		    &phbcmu->inbound_doorbell,
3423 		    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3424 		break;
3425 	}
3426 
3427 	}
3428 
3429 	if ((acb->timeout_id != 0) &&
3430 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3431 		/* rearm the device map monitor every ARCMSR_DEV_MAP_WATCH secs */
3432 		acb->timeout_id = timeout(arcmsr_devMap_monitor, (void *)acb,
3433 		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3434 	}
3435 }
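
/*
 * The rearm at the tail above is the stock self-renewing timeout(9F)
 * idiom; a minimal sketch follows (my_state and my_watch are
 * hypothetical names).  drv_usectohz(1000000) converts one second to
 * clock ticks, so the interval below is five seconds' worth of ticks.
 */
struct my_state {
	timeout_id_t	timeout_id;
};

static void
my_watch(void *arg)
{
	struct my_state *sp = arg;

	/* ... periodic work goes here ... */

	/* schedule ourselves to run again in five seconds */
	sp->timeout_id = timeout(my_watch, sp, 5 * drv_usectohz(1000000));
}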
3436 
3437 
3438 static uint32_t
3439 arcmsr_disable_allintr(struct ACB *acb)
3440 {
3441 	uint32_t intmask_org;
3442 
3443 	switch (acb->adapter_type) {
3444 	case ACB_ADAPTER_TYPE_A:
3445 	{
3446 		struct HBA_msgUnit *phbamu;
3447 
3448 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3449 		/* disable all outbound interrupt */
3450 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3451 		    &phbamu->outbound_intmask);
3452 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3453 		    &phbamu->outbound_intmask,
3454 		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3455 		break;
3456 	}
3457 
3458 	case ACB_ADAPTER_TYPE_B:
3459 	{
3460 		struct HBB_msgUnit *phbbmu;
3461 
3462 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3463 		/* disable all outbound interrupt */
3464 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3465 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3466 		/* disable all interrupts */
3467 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3468 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3469 		break;
3470 	}
3471 
3472 	case ACB_ADAPTER_TYPE_C:
3473 	{
3474 		struct HBC_msgUnit *phbcmu;
3475 
3476 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3477 		/* disable all outbound interrupt */
3478 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3479 		    &phbcmu->host_int_mask); /* disable outbound message0 int */
3480 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3481 		    &phbcmu->host_int_mask,
3482 		    intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3483 		break;
3484 	}
3485 
3486 	}
3487 	return (intmask_org);
3488 }
3489 
3490 
3491 static void
3492 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3493 {
3494 	int mask;
3495 
3496 	switch (acb->adapter_type) {
3497 	case ACB_ADAPTER_TYPE_A:
3498 	{
3499 		struct HBA_msgUnit *phbamu;
3500 
3501 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3502 		/*
3503 		 * enable outbound Post Queue, outbound doorbell message0
3504 		 * Interrupt
3505 		 */
3506 		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3507 		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3508 		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3509 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3510 		    &phbamu->outbound_intmask, intmask_org & mask);
3511 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3512 		break;
3513 	}
3514 
3515 	case ACB_ADAPTER_TYPE_B:
3516 	{
3517 		struct HBB_msgUnit *phbbmu;
3518 
3519 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3520 		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3521 		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3522 		    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3523 		/* 1=interrupt enable, 0=interrupt disable */
3524 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3525 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3526 		    intmask_org | mask);
3527 		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3528 		break;
3529 	}
3530 
3531 	case ACB_ADAPTER_TYPE_C:
3532 	{
3533 		struct HBC_msgUnit *phbcmu;
3534 
3535 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3536 		/* enable outbound Post Queue,outbound doorbell Interrupt */
3537 		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3538 		    ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3539 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3540 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3541 		    &phbcmu->host_int_mask, intmask_org & mask);
3542 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3543 		break;
3544 	}
3545 
3546 	}
3547 }
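
/*
 * The two routines above are used as a bracket: save-and-mask, do work
 * that must not race the interrupt handler, then restore.  A minimal
 * sketch of the calling pattern (the function name is illustrative):
 */
static void
arcmsr_with_intr_masked(struct ACB *acb)
{
	uint32_t intmask_org;

	/* mask all adapter interrupts, remembering the previous mask */
	intmask_org = arcmsr_disable_allintr(acb);

	/* ... manipulate adapter state without ISR interference ... */

	/* re-enable the interrupt sources the driver cares about */
	arcmsr_enable_allintr(acb, intmask_org);
}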
3548 
3549 
3550 static void
3551 arcmsr_iop_parking(struct ACB *acb)
3552 {
3553 	/* stop adapter background rebuild */
3554 	if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3555 		uint32_t intmask_org;
3556 
3557 		acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3558 		/* disable all outbound interrupt */
3559 		intmask_org = arcmsr_disable_allintr(acb);
3560 		switch (acb->adapter_type) {
3561 		case ACB_ADAPTER_TYPE_A:
3562 			arcmsr_stop_hba_bgrb(acb);
3563 			arcmsr_flush_hba_cache(acb);
3564 			break;
3565 
3566 		case ACB_ADAPTER_TYPE_B:
3567 			arcmsr_stop_hbb_bgrb(acb);
3568 			arcmsr_flush_hbb_cache(acb);
3569 			break;
3570 
3571 		case ACB_ADAPTER_TYPE_C:
3572 			arcmsr_stop_hbc_bgrb(acb);
3573 			arcmsr_flush_hbc_cache(acb);
3574 			break;
3575 		}
3576 		/*
3577 		 * enable outbound Post Queue
3578 		 * enable outbound doorbell Interrupt
3579 		 */
3580 		arcmsr_enable_allintr(acb, intmask_org);
3581 	}
3582 }
3583 
3584 
3585 static uint8_t
3586 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3587 {
3588 	uint32_t i;
3589 	uint8_t retries = 0x00;
3590 	struct HBA_msgUnit *phbamu;
3591 
3592 
3593 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3594 
3595 	do {
3596 		for (i = 0; i < 100; i++) {
3597 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3598 			    &phbamu->outbound_intstatus) &
3599 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3600 				/* clear interrupt */
3601 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3602 				    &phbamu->outbound_intstatus,
3603 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3604 				return (TRUE);
3605 			}
3606 			drv_usecwait(10000);
3607 			if (ddi_in_panic()) {
3608 				/* clear interrupts */
3609 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3610 				    &phbamu->outbound_intstatus,
3611 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3612 				return (TRUE);
3613 			}
3614 		} /* max 1 second */
3615 	} while (retries++ < 20); /* max 20 seconds */
3616 	return (FALSE);
3617 }
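
/*
 * A note on the timing above, which applies equally to the HBB and HBC
 * variants below: drv_usecwait(10000) spins for 10 ms, so one inner
 * pass is bounded at roughly 100 x 10 ms = 1 s and the 20 outer passes
 * give the 20 second budget.  The ddi_in_panic() check returns early so
 * that a panic dump is never held hostage to an unresponsive adapter.
 */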
3618 
3619 
3620 static uint8_t
3621 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3622 {
3623 	struct HBB_msgUnit *phbbmu;
3624 	uint32_t i;
3625 	uint8_t retries = 0x00;
3626 
3627 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3628 
3629 	do {
3630 		for (i = 0; i < 100; i++) {
3631 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3632 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3633 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3634 				/* clear interrupt */
3635 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3636 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3637 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3638 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3639 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3640 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3641 				return (TRUE);
3642 			}
3643 			drv_usecwait(10000);
3644 			if (ddi_in_panic()) {
3645 				/* clear interrupts */
3646 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3647 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3648 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3649 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3650 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3651 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3652 				return (TRUE);
3653 			}
3654 		} /* max 1 second */
3655 	} while (retries++ < 20); /* max 20 seconds */
3656 
3657 	return (FALSE);
3658 }
3659 
3660 
3661 static uint8_t
3662 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3663 {
3664 	uint32_t i;
3665 	uint8_t retries = 0x00;
3666 	struct HBC_msgUnit *phbcmu;
3667 	uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3668 
3669 
3670 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3671 
3672 	do {
3673 		for (i = 0; i < 100; i++) {
3674 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3675 			    &phbcmu->outbound_doorbell) &
3676 			    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3677 				/* clear interrupt */
3678 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3679 				    &phbcmu->outbound_doorbell_clear, c);
3680 				return (TRUE);
3681 			}
3682 			drv_usecwait(10000);
3683 			if (ddi_in_panic()) {
3684 				/* clear interrupts */
3685 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3686 				    &phbcmu->outbound_doorbell_clear, c);
3687 				return (TRUE);
3688 			}
3689 		} /* max 1 second */
3690 	} while (retries++ < 20); /* max 20 seconds */
3691 	return (FALSE);
3692 }
3693 
3694 static void
3695 arcmsr_flush_hba_cache(struct ACB *acb)
3696 {
3697 	struct HBA_msgUnit *phbamu;
3698 	int retry_count = 30;
3699 
3700 	/* enlarge wait flush adapter cache time: 10 minutes */
3701 	/* allow the cache flush up to 10 minutes (30 retries x 20 s) */
3702 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3703 
3704 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3705 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3706 	do {
3707 		if (arcmsr_hba_wait_msgint_ready(acb)) {
3708 			break;
3709 		} else {
3710 			retry_count--;
3711 		}
3712 	} while (retry_count != 0);
3713 }
3714 
3715 
3716 
3717 static void
3718 arcmsr_flush_hbb_cache(struct ACB *acb)
3719 {
3720 	struct HBB_msgUnit *phbbmu;
3721 	int retry_count = 30;
3722 
3723 	/* enlarge wait flush adapter cache time: 10 minutes */
3724 	/* allow the cache flush up to 10 minutes (30 retries x 20 s) */
3725 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3726 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3727 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3728 	    ARCMSR_MESSAGE_FLUSH_CACHE);
3729 	do {
3730 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
3731 			break;
3732 		} else {
3733 			retry_count--;
3734 		}
3735 	} while (retry_count != 0);
3736 }
3737 
3738 
3739 static void
3740 arcmsr_flush_hbc_cache(struct ACB *acb)
3741 {
3742 	struct HBC_msgUnit *phbcmu;
3743 	int retry_count = 30;
3744 
3745 	/* allow the cache flush up to 10 minutes (30 retries x 20 s) */
3746 
3747 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3748 
3749 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3750 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3751 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3752 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3753 	do {
3754 		if (arcmsr_hbc_wait_msgint_ready(acb)) {
3755 			break;
3756 		} else {
3757 			retry_count--;
3758 		}
3759 	} while (retry_count != 0);
3760 }
3761 
3762 
3763 
3764 static uint8_t
3765 arcmsr_abort_hba_allcmd(struct ACB *acb)
3766 {
3767 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3768 
3769 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3770 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3771 
3772 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
3773 		arcmsr_warn(acb,
3774 		    "timeout while waiting for 'abort all "
3775 		    "outstanding commands'");
3776 		return (0xff);
3777 	}
3778 	return (0x00);
3779 }
3780 
3781 
3782 
3783 static uint8_t
3784 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3785 {
3786 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3787 
3788 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3789 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3790 
3791 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3792 		arcmsr_warn(acb,
3793 		    "timeout while waiting for 'abort all "
3794 		    "outstanding commands'");
3795 		return (0xff);
3796 	}
3797 	return (0x00);
3798 }
3799 
3800 
3801 static uint8_t
3802 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3803 {
3804 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3805 
3806 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3807 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3808 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3809 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3810 
3811 	if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3812 		arcmsr_warn(acb,
3813 		    "timeout while waiting for 'abort all "
3814 		    "outstanding commands'");
3815 		return (0xff);
3816 	}
3817 	return (0x00);
3818 }
3819 
3820 
3821 static void
3822 arcmsr_done4abort_postqueue(struct ACB *acb)
3823 {
3824 
3825 	struct CCB *ccb;
3826 	uint32_t flag_ccb;
3827 	int i = 0;
3828 	boolean_t error;
3829 
3830 	switch (acb->adapter_type) {
3831 	case ACB_ADAPTER_TYPE_A:
3832 	{
3833 		struct HBA_msgUnit *phbamu;
3834 		uint32_t outbound_intstatus;
3835 
3836 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3837 		/* clear and abort all outbound posted Q */
3838 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3839 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3840 		/* clear interrupt */
3841 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3842 		    &phbamu->outbound_intstatus, outbound_intstatus);
3843 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3844 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3845 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3846 			/* frame must be 32 bytes aligned */
3847 			/* the CDB is the first field of the CCB */
3848 			ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3849 			/* check if command done with no error */
3850 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3851 			    B_TRUE : B_FALSE;
3852 			arcmsr_drain_donequeue(acb, ccb, error);
3853 		}
3854 		break;
3855 	}
3856 
3857 	case ACB_ADAPTER_TYPE_B:
3858 	{
3859 		struct HBB_msgUnit *phbbmu;
3860 
3861 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3862 		/* clear all outbound posted Q */
3863 		/* clear doorbell interrupt */
3864 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3865 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3866 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3867 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3868 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3869 				phbbmu->done_qbuffer[i] = 0;
3870 				/* frame must be 32 bytes aligned */
3871 				ccb = NumToPtr((acb->vir2phy_offset +
3872 				    (flag_ccb << 5)));
3873 				/* check if command done with no error */
3874 				error =
3875 				    (flag_ccb &
3876 				    ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3877 				    B_TRUE : B_FALSE;
3878 				arcmsr_drain_donequeue(acb, ccb, error);
3879 			}
3880 			phbbmu->post_qbuffer[i] = 0;
3881 		}	/* drain reply FIFO */
3882 		phbbmu->doneq_index = 0;
3883 		phbbmu->postq_index = 0;
3884 		break;
3885 	}
3886 
3887 	case ACB_ADAPTER_TYPE_C:
3888 	{
3889 		struct HBC_msgUnit *phbcmu;
3890 		uint32_t ccb_cdb_phy;
3891 
3892 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3893 		while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3894 		    &phbcmu->host_int_status) &
3895 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3896 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3897 			/* pop one completed CCB from the outbound queue */
3898 			flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3899 			    &phbcmu->outbound_queueport_low);
3900 			/* frame must be 32 bytes aligned */
3901 			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3902 			ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3903 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
3904 			    B_TRUE : B_FALSE;
3905 			arcmsr_drain_donequeue(acb, ccb, error);
3906 		}
3907 		break;
3908 	}
3909 
3910 	}
3911 }
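
/*
 * Decoding the reply descriptors above, in brief: type A and B adapters
 * report a CCB's physical address in 32-byte units (frames are 32-byte
 * aligned), so `flag_ccb << 5` plus vir2phy_offset recovers the
 * driver's virtual CCB.  Type C reports the address directly, reusing
 * the low four bits for status, hence the `& 0xFFFFFFF0` mask.  In both
 * encodings the error indication lives in the bits named by
 * ARCMSR_CCBREPLY_FLAG_ERROR_MODE0/MODE1.
 */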
3912 /*
3913  * Routine Description: try to get an echo from the iop.
3914  *           Arguments: acb
3915  *        Return Value: 0 on success, 0xFF if the iop did not respond.
3916  */
3917 static uint8_t
3918 arcmsr_get_echo_from_iop(struct ACB *acb)
3919 {
3920 	uint32_t intmask_org;
3921 	uint8_t rtnval = 0;
3922 
3923 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3924 		struct HBA_msgUnit *phbamu;
3925 
3926 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3927 		intmask_org = arcmsr_disable_allintr(acb);
3928 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3929 		    &phbamu->inbound_msgaddr0,
3930 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3931 		if (!arcmsr_hba_wait_msgint_ready(acb)) {
3932 			arcmsr_warn(acb, "timeout while waiting for "
3933 			    "echo from iop");
3934 			acb->acb_flags |= ACB_F_BUS_HANG_ON;
3935 			rtnval = 0xFF;
3936 		}
3937 		/* enable all outbound interrupt */
3938 		arcmsr_enable_allintr(acb, intmask_org);
3939 	}
3940 	return (rtnval);
3941 }
3942 
3943 /*
3944  * Routine Description: Reset 80331 iop.
3945  *           Arguments: acb
3946  *        Return Value: 0 on success, 0xFF on failure.
3947  */
3948 static uint8_t
3949 arcmsr_iop_reset(struct ACB *acb)
3950 {
3951 	struct CCB *ccb;
3952 	uint32_t intmask_org;
3953 	uint8_t rtnval = 0;
3954 	int i = 0;
3955 
3956 	if (acb->ccboutstandingcount > 0) {
3957 		/* disable all outbound interrupt */
3958 		intmask_org = arcmsr_disable_allintr(acb);
3959 		/* clear and abort all outbound posted Q */
3960 		arcmsr_done4abort_postqueue(acb);
3961 		/* talk to iop 331 outstanding command aborted */
3962 		rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3963 		    0xFF : arcmsr_abort_host_command(acb);
3964 
3965 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3966 			ccb = acb->pccb_pool[i];
3967 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3968 				/* ccb->ccb_state = ARCMSR_CCB_RESET; */
3969 				ccb->pkt->pkt_reason = CMD_RESET;
3970 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3971 				arcmsr_ccb_complete(ccb, 1);
3972 			}
3973 		}
3974 		atomic_and_32(&acb->ccboutstandingcount, 0);
3975 		/* enable all outbound interrupt */
3976 		arcmsr_enable_allintr(acb, intmask_org);
3977 	} else {
3978 		rtnval = arcmsr_get_echo_from_iop(acb);
3979 	}
3980 	return (rtnval);
3981 }
3982 
3983 
3984 static struct QBUFFER *
3985 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3986 {
3987 	struct QBUFFER *qb;
3988 
3989 	switch (acb->adapter_type) {
3990 	case ACB_ADAPTER_TYPE_A:
3991 	{
3992 		struct HBA_msgUnit *phbamu;
3993 
3994 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3995 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3996 		break;
3997 	}
3998 
3999 	case ACB_ADAPTER_TYPE_B:
4000 	{
4001 		struct HBB_msgUnit *phbbmu;
4002 
4003 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4004 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4005 		break;
4006 	}
4007 
4008 	case ACB_ADAPTER_TYPE_C:
4009 	{
4010 		struct HBC_msgUnit *phbcmu;
4011 
4012 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4013 		qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4014 		break;
4015 	}
4016 
4017 	}
4018 	return (qb);
4019 }
4020 
4021 
4022 static struct QBUFFER *
4023 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4024 {
4025 	struct QBUFFER *qbuffer = NULL;
4026 
4027 	switch (acb->adapter_type) {
4028 	case ACB_ADAPTER_TYPE_A:
4029 	{
4030 		struct HBA_msgUnit *phbamu;
4031 
4032 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4033 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4034 		break;
4035 	}
4036 
4037 	case ACB_ADAPTER_TYPE_B:
4038 	{
4039 		struct HBB_msgUnit *phbbmu;
4040 
4041 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4042 		qbuffer = (struct QBUFFER *)
4043 		    &phbbmu->hbb_rwbuffer->message_wbuffer;
4044 		break;
4045 	}
4046 
4047 	case ACB_ADAPTER_TYPE_C:
4048 	{
4049 		struct HBC_msgUnit *phbcmu;
4050 
4051 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4052 		qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4053 		break;
4054 	}
4055 
4056 	}
4057 	return (qbuffer);
4058 }
4059 
4060 
4061 
4062 static void
4063 arcmsr_iop_message_read(struct ACB *acb)
4064 {
4065 	switch (acb->adapter_type) {
4066 	case ACB_ADAPTER_TYPE_A:
4067 	{
4068 		struct HBA_msgUnit *phbamu;
4069 
4070 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4071 		/* let IOP know the data has been read */
4072 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4073 		    &phbamu->inbound_doorbell,
4074 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4075 		break;
4076 	}
4077 
4078 	case ACB_ADAPTER_TYPE_B:
4079 	{
4080 		struct HBB_msgUnit *phbbmu;
4081 
4082 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4083 		/* let IOP know the data has been read */
4084 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4085 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4086 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4087 		break;
4088 	}
4089 
4090 	case ACB_ADAPTER_TYPE_C:
4091 	{
4092 		struct HBC_msgUnit *phbcmu;
4093 
4094 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4095 		/* let IOP know data has been read */
4096 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4097 		    &phbcmu->inbound_doorbell,
4098 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4099 		break;
4100 	}
4101 
4102 	}
4103 }
4104 
4105 
4106 
4107 static void
4108 arcmsr_iop_message_wrote(struct ACB *acb)
4109 {
4110 	switch (acb->adapter_type) {
4111 	case ACB_ADAPTER_TYPE_A: {
4112 		struct HBA_msgUnit *phbamu;
4113 
4114 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4115 		/*
4116 		 * push inbound doorbell tell iop, driver data write ok
4117 		 * and wait reply on next hwinterrupt for next Qbuffer post
4118 		 */
4119 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4120 		    &phbamu->inbound_doorbell,
4121 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4122 		break;
4123 	}
4124 
4125 	case ACB_ADAPTER_TYPE_B:
4126 	{
4127 		struct HBB_msgUnit *phbbmu;
4128 
4129 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4130 		/*
4131 		 * push the inbound doorbell to tell the iop driver data was written
4132 		 * successfully, then await reply on next hwinterrupt for
4133 		 * next Qbuffer post
4134 		 */
4135 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4136 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4137 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
4138 		break;
4139 	}
4140 
4141 	case ACB_ADAPTER_TYPE_C:
4142 	{
4143 		struct HBC_msgUnit *phbcmu;
4144 
4145 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4146 		/*
4147 		 * push inbound doorbell tell iop, driver data write ok
4148 		 * and wait reply on next hwinterrupt for next Qbuffer post
4149 		 */
4150 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4151 		    &phbcmu->inbound_doorbell,
4152 		    ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4153 		break;
4154 	}
4155 
4156 	}
4157 }
4158 
4159 
4160 
4161 static void
4162 arcmsr_post_ioctldata2iop(struct ACB *acb)
4163 {
4164 	uint8_t *pQbuffer;
4165 	struct QBUFFER *pwbuffer;
4166 	uint8_t *iop_data;
4167 	int32_t allxfer_len = 0;
4168 
4169 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4170 	iop_data = (uint8_t *)pwbuffer->data;
4171 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4172 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4173 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4174 		    (allxfer_len < 124)) {
4175 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4176 			(void) memcpy(iop_data, pQbuffer, 1);
4177 			acb->wqbuf_firstidx++;
4178 			/* if last index number set it to 0 */
4179 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4180 			iop_data++;
4181 			allxfer_len++;
4182 		}
4183 		pwbuffer->data_len = allxfer_len;
4184 		/*
4185 		 * push inbound doorbell and wait reply at hwinterrupt
4186 		 * routine for next Qbuffer post
4187 		 */
4188 		arcmsr_iop_message_wrote(acb);
4189 	}
4190 }
4191 
4192 
4193 
4194 static void
4195 arcmsr_stop_hba_bgrb(struct ACB *acb)
4196 {
4197 	struct HBA_msgUnit *phbamu;
4198 
4199 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4200 
4201 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4202 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4203 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4204 	if (!arcmsr_hba_wait_msgint_ready(acb))
4205 		arcmsr_warn(acb,
4206 		    "timeout while waiting for background rebuild completion");
4207 }
4208 
4209 
4210 static void
4211 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4212 {
4213 	struct HBB_msgUnit *phbbmu;
4214 
4215 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4216 
4217 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4218 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4219 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4220 
4221 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4222 		arcmsr_warn(acb,
4223 		    "timeout while waiting for background rebuild completion");
4224 }
4225 
4226 
4227 static void
4228 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4229 {
4230 	struct HBC_msgUnit *phbcmu;
4231 
4232 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4233 
4234 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4235 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4236 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4237 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4238 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4239 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4240 		arcmsr_warn(acb,
4241 		    "timeout while waiting for background rebuild completion");
4242 }
4243 
4244 
4245 static int
4246 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4247 {
4248 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4249 	struct CCB *ccb = pkt->pkt_ha_private;
4250 	struct buf *bp = ccb->bp;
4251 	uint8_t *pQbuffer;
4252 	int retvalue = 0, transfer_len = 0;
4253 	char *buffer;
4254 	uint32_t controlcode;
4255 
4256 
4257 	/* 4 bytes: Areca io control code */
4258 	controlcode =
4259 	    (uint32_t)pkt->pkt_cdbp[5] << 24 |
4260 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
4261 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
4262 	    (uint32_t)pkt->pkt_cdbp[8];
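	/* e.g. cdbp[5..8] = 0x01,0x23,0x45,0x67 assembles to 0x01234567 */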
4263 
4264 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
4265 		bp_mapin(bp);
4266 
4267 	buffer = bp->b_un.b_addr;
4268 	transfer_len = bp->b_bcount;
4269 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4270 		retvalue = ARCMSR_MESSAGE_FAIL;
4271 		goto message_out;
4272 	}
4273 
4274 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4275 	switch (controlcode) {
4276 	case ARCMSR_MESSAGE_READ_RQBUFFER:
4277 	{
4278 		unsigned long *ver_addr;
4279 		uint8_t *ptmpQbuffer;
4280 		int32_t allxfer_len = 0;
4281 
4282 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4283 
4284 		ptmpQbuffer = (uint8_t *)ver_addr;
4285 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4286 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
4287 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4288 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
4289 			acb->rqbuf_firstidx++;
4290 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4291 			ptmpQbuffer++;
4292 			allxfer_len++;
4293 		}
4294 
4295 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4296 			struct QBUFFER *prbuffer;
4297 			uint8_t  *iop_data;
4298 			int32_t iop_len;
4299 
4300 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4301 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
4302 			iop_data = (uint8_t *)prbuffer->data;
4303 			iop_len = (int32_t)prbuffer->data_len;
4304 
4305 			while (iop_len > 0) {
4306 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4307 				(void) memcpy(pQbuffer, iop_data, 1);
4308 				acb->rqbuf_lastidx++;
4309 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4310 				iop_data++;
4311 				iop_len--;
4312 			}
4313 			arcmsr_iop_message_read(acb);
4314 		}
4315 
4316 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
4317 		    (uint8_t *)ver_addr, allxfer_len);
4318 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
4319 		pcmdmessagefld->cmdmessage.ReturnCode =
4320 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4321 		kmem_free(ver_addr, MSGDATABUFLEN);
4322 		break;
4323 	}
4324 
4325 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4326 	{
4327 		uint8_t *ver_addr;
4328 		int32_t my_empty_len, user_len, wqbuf_firstidx,
4329 		    wqbuf_lastidx;
4330 		uint8_t *ptmpuserbuffer;
4331 
4332 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4333 
4334 		ptmpuserbuffer = ver_addr;
4335 		user_len = min(pcmdmessagefld->cmdmessage.Length,
4336 		    MSGDATABUFLEN);
4337 		(void) memcpy(ptmpuserbuffer,
4338 		    pcmdmessagefld->messagedatabuffer, user_len);
4339 		wqbuf_lastidx = acb->wqbuf_lastidx;
4340 		wqbuf_firstidx = acb->wqbuf_firstidx;
4341 		if (wqbuf_lastidx != wqbuf_firstidx) {
4342 			struct scsi_arq_status *arq_status;
4343 
4344 			arcmsr_post_ioctldata2iop(acb);
4345 			arq_status = (struct scsi_arq_status *)
4346 			    (intptr_t)(pkt->pkt_scbp);
4347 			bzero((caddr_t)arq_status,
4348 			    sizeof (struct scsi_arq_status));
4349 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
4350 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4351 			    STATE_GOT_TARGET | STATE_SENT_CMD |
4352 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4353 
4354 			arq_status->sts_rqpkt_statistics =
4355 			    pkt->pkt_statistics;
4356 			arq_status->sts_rqpkt_resid = 0;
4357 			{	/* sense data is embedded; always fill it in */
4358 				struct scsi_extended_sense *sts_sensedata;
4359 
4360 				sts_sensedata = &arq_status->sts_sensedata;
4361 
4362 				/* has error report sensedata */
4363 				sts_sensedata->es_code = 0x0;
4364 				sts_sensedata->es_valid = 0x01;
4365 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4366 				/* AdditionalSenseLength */
4367 				sts_sensedata->es_add_len = 0x0A;
4368 				/* AdditionalSenseCode */
4369 				sts_sensedata->es_add_code = 0x20;
4370 			}
4371 			retvalue = ARCMSR_MESSAGE_FAIL;
4372 		} else {
4373 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
4374 			    (ARCMSR_MAX_QBUFFER - 1);
4375 			if (my_empty_len >= user_len) {
4376 				while (user_len > 0) {
4377 					pQbuffer = &acb->wqbuffer[
4378 					    acb->wqbuf_lastidx];
4379 					(void) memcpy(pQbuffer,
4380 					    ptmpuserbuffer, 1);
4381 					acb->wqbuf_lastidx++;
4382 					acb->wqbuf_lastidx %=
4383 					    ARCMSR_MAX_QBUFFER;
4384 					ptmpuserbuffer++;
4385 					user_len--;
4386 				}
4387 				if (acb->acb_flags &
4388 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4389 					acb->acb_flags &=
4390 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4391 					arcmsr_post_ioctldata2iop(acb);
4392 				}
4393 			} else {
4394 				struct scsi_arq_status *arq_status;
4395 
4396 				/* has error report sensedata */
4397 				arq_status = (struct scsi_arq_status *)
4398 				    (intptr_t)(pkt->pkt_scbp);
4399 				bzero((caddr_t)arq_status,
4400 				    sizeof (struct scsi_arq_status));
4401 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
4402 				arq_status->sts_rqpkt_state =
4403 				    (STATE_GOT_BUS |
4404 				    STATE_GOT_TARGET | STATE_SENT_CMD |
4405 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4406 				arq_status->sts_rqpkt_statistics =
4407 				    pkt->pkt_statistics;
4408 				arq_status->sts_rqpkt_resid = 0;
4409 				{	/* sense data is embedded; always fill it in */
4410 					struct scsi_extended_sense *
4411 					    sts_sensedata;
4412 
4413 					sts_sensedata =
4414 					    &arq_status->sts_sensedata;
4415 
4416 					/* has error report sensedata */
4417 					sts_sensedata->es_code  = 0x0;
4418 					sts_sensedata->es_valid = 0x01;
4419 					sts_sensedata->es_key =
4420 					    KEY_ILLEGAL_REQUEST;
4421 					/* AdditionalSenseLength */
4422 					sts_sensedata->es_add_len = 0x0A;
4423 					/* AdditionalSenseCode */
4424 					sts_sensedata->es_add_code = 0x20;
4425 				}
4426 				retvalue = ARCMSR_MESSAGE_FAIL;
4427 			}
4428 		}
4429 		kmem_free(ver_addr, MSGDATABUFLEN);
4430 		break;
4431 	}
4432 
4433 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4434 		pQbuffer = acb->rqbuffer;
4435 
4436 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4437 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4438 			arcmsr_iop_message_read(acb);
4439 		}
4440 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4441 		acb->rqbuf_firstidx = 0;
4442 		acb->rqbuf_lastidx = 0;
4443 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4444 		pcmdmessagefld->cmdmessage.ReturnCode =
4445 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4446 		break;
4447 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4448 		pQbuffer = acb->wqbuffer;
4449 
4450 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4451 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4452 			arcmsr_iop_message_read(acb);
4453 		}
4454 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4455 		    ACB_F_MESSAGE_WQBUFFER_READ);
4456 		acb->wqbuf_firstidx = 0;
4457 		acb->wqbuf_lastidx = 0;
4458 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4459 		pcmdmessagefld->cmdmessage.ReturnCode =
4460 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4461 		break;
4462 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4463 
4464 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4465 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4466 			arcmsr_iop_message_read(acb);
4467 		}
4468 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4469 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
4470 		    ACB_F_MESSAGE_WQBUFFER_READ);
4471 		acb->rqbuf_firstidx = 0;
4472 		acb->rqbuf_lastidx = 0;
4473 		acb->wqbuf_firstidx = 0;
4474 		acb->wqbuf_lastidx = 0;
4475 		pQbuffer = acb->rqbuffer;
4476 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4477 		pQbuffer = acb->wqbuffer;
4478 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4479 		pcmdmessagefld->cmdmessage.ReturnCode =
4480 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4481 		break;
4482 
4483 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4484 		pcmdmessagefld->cmdmessage.ReturnCode =
4485 		    ARCMSR_MESSAGE_RETURNCODE_3F;
4486 		break;
4487 	/*
4488 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4489 	 */
4490 	case ARCMSR_MESSAGE_SAY_GOODBYE:
4491 		arcmsr_iop_parking(acb);
4492 		break;
4493 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4494 		switch (acb->adapter_type) {
4495 		case ACB_ADAPTER_TYPE_A:
4496 			arcmsr_flush_hba_cache(acb);
4497 			break;
4498 		case ACB_ADAPTER_TYPE_B:
4499 			arcmsr_flush_hbb_cache(acb);
4500 			break;
4501 		case ACB_ADAPTER_TYPE_C:
4502 			arcmsr_flush_hbc_cache(acb);
4503 			break;
4504 		}
4505 		break;
4506 	default:
4507 		retvalue = ARCMSR_MESSAGE_FAIL;
4508 	}
4509 
4510 message_out:
4511 
4512 	return (retvalue);
4513 }
4514 
4515 
4516 
4517 
4518 static void
4519 arcmsr_pcidev_disattach(struct ACB *acb)
4520 {
4521 	struct CCB *ccb;
4522 	int i = 0;
4523 
4524 	/* disable all outbound interrupts */
4525 	(void) arcmsr_disable_allintr(acb);
4526 	/* stop adapter background rebuild */
4527 	switch (acb->adapter_type) {
4528 	case ACB_ADAPTER_TYPE_A:
4529 		arcmsr_stop_hba_bgrb(acb);
4530 		arcmsr_flush_hba_cache(acb);
4531 		break;
4532 	case ACB_ADAPTER_TYPE_B:
4533 		arcmsr_stop_hbb_bgrb(acb);
4534 		arcmsr_flush_hbb_cache(acb);
4535 		break;
4536 	case ACB_ADAPTER_TYPE_C:
4537 		arcmsr_stop_hbc_bgrb(acb);
4538 		arcmsr_flush_hbc_cache(acb);
4539 		break;
4540 	}
4541 	/* abort all outstanding commands */
4542 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4543 	acb->acb_flags &= ~ACB_F_IOP_INITED;
4544 
4545 	if (acb->ccboutstandingcount != 0) {
4546 		/* clear and abort all outbound posted Q */
4547 		arcmsr_done4abort_postqueue(acb);
4548 		/* talk to iop outstanding command aborted */
4549 		(void) arcmsr_abort_host_command(acb);
4550 
4551 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4552 			ccb = acb->pccb_pool[i];
4553 			if (ccb->ccb_state == ARCMSR_CCB_START) {
4554 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4555 				ccb->pkt->pkt_reason = CMD_ABORTED;
4556 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4557 				arcmsr_ccb_complete(ccb, 1);
4558 			}
4559 		}
4560 	}
4561 }
4562 
4563 /* get firmware miscellaneous data */
4564 static void
4565 arcmsr_get_hba_config(struct ACB *acb)
4566 {
4567 	struct HBA_msgUnit *phbamu;
4568 
4569 	char *acb_firm_model;
4570 	char *acb_firm_version;
4571 	char *acb_device_map;
4572 	char *iop_firm_model;
4573 	char *iop_firm_version;
4574 	char *iop_device_map;
4575 	int count;
4576 
4577 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4578 	acb_firm_model = acb->firm_model;
4579 	acb_firm_version = acb->firm_version;
4580 	acb_device_map = acb->device_map;
4581 	/* firm_model, 15 */
4582 	iop_firm_model =
4583 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4584 	/* firm_version, 17 */
4585 	iop_firm_version =
4586 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4587 
4588 	/* device_map, 21 */
4589 	iop_device_map =
4590 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4591 
4592 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4593 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4594 
4595 	if (!arcmsr_hba_wait_msgint_ready(acb))
4596 		arcmsr_warn(acb,
4597 		    "timeout while waiting for adapter firmware "
4598 		    "miscellaneous data");
4599 
4600 	count = 8;
4601 	while (count) {
4602 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4603 		    iop_firm_model);
4604 		acb_firm_model++;
4605 		iop_firm_model++;
4606 		count--;
4607 	}
4608 
4609 	count = 16;
4610 	while (count) {
4611 		*acb_firm_version =
4612 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4613 		acb_firm_version++;
4614 		iop_firm_version++;
4615 		count--;
4616 	}
4617 
4618 	count = 16;
4619 	while (count) {
4620 		*acb_device_map =
4621 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4622 		acb_device_map++;
4623 		iop_device_map++;
4624 		count--;
4625 	}
4626 
4627 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4628 	    acb->firm_version);
4629 
4630 	/* firm_request_len, 1 */
4631 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4632 	    &phbamu->msgcode_rwbuffer[1]);
4633 	/* firm_numbers_queue, 2 */
4634 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4635 	    &phbamu->msgcode_rwbuffer[2]);
4636 	/* firm_sdram_size, 3 */
4637 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4638 	    &phbamu->msgcode_rwbuffer[3]);
4639 	/* firm_ide_channels, 4 */
4640 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4641 	    &phbamu->msgcode_rwbuffer[4]);
4642 }
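
/*
 * For reference, the msgcode_rwbuffer[] layout implied above and reused
 * by the HBB/HBC variants below (32-bit word index: contents):
 *
 *	[1]  firm_request_len		[2]  firm_numbers_queue
 *	[3]  firm_sdram_size		[4]  firm_ide_channels
 *	[15] firmware model, 8 bytes	[17] firmware version, 16 bytes
 *	[21] device map, 16 bytes	[25] firm_cfg_version (type C only)
 */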
4643 
4644 /* get firmware miscellaneous data */
4645 static void
4646 arcmsr_get_hbb_config(struct ACB *acb)
4647 {
4648 	struct HBB_msgUnit *phbbmu;
4649 	char *acb_firm_model;
4650 	char *acb_firm_version;
4651 	char *acb_device_map;
4652 	char *iop_firm_model;
4653 	char *iop_firm_version;
4654 	char *iop_device_map;
4655 	int count;
4656 
4657 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4658 	acb_firm_model = acb->firm_model;
4659 	acb_firm_version = acb->firm_version;
4660 	acb_device_map = acb->device_map;
4661 	/* firm_model, 15 */
4662 	iop_firm_model = (char *)
4663 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4664 	/* firm_version, 17 */
4665 	iop_firm_version = (char *)
4666 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4667 	/* device_map, 21 */
4668 	iop_device_map = (char *)
4669 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4670 
4671 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4672 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4673 
4674 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4675 		arcmsr_warn(acb,
4676 		    "timeout while waiting for adapter firmware "
4677 		    "miscellaneous data");
4678 
4679 	count = 8;
4680 	while (count) {
4681 		*acb_firm_model =
4682 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4683 		acb_firm_model++;
4684 		iop_firm_model++;
4685 		count--;
4686 	}
4687 	count = 16;
4688 	while (count) {
4689 		*acb_firm_version =
4690 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4691 		acb_firm_version++;
4692 		iop_firm_version++;
4693 		count--;
4694 	}
4695 	count = 16;
4696 	while (count) {
4697 		*acb_device_map =
4698 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4699 		acb_device_map++;
4700 		iop_device_map++;
4701 		count--;
4702 	}
4703 
4704 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4705 	    acb->firm_version);
4706 
4707 	/* firm_request_len, 1 */
4708 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4709 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4710 	/* firm_numbers_queue, 2 */
4711 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4712 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4713 	/* firm_sdram_size, 3 */
4714 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4715 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4716 	/* firm_ide_channels, 4 */
4717 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4718 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4719 }
4720 
4721 
4722 /* get firmware miscellaneous data */
4723 static void
4724 arcmsr_get_hbc_config(struct ACB *acb)
4725 {
4726 	struct HBC_msgUnit *phbcmu;
4727 
4728 	char *acb_firm_model;
4729 	char *acb_firm_version;
4730 	char *acb_device_map;
4731 	char *iop_firm_model;
4732 	char *iop_firm_version;
4733 	char *iop_device_map;
4734 	int count;
4735 
4736 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4737 	acb_firm_model = acb->firm_model;
4738 	acb_firm_version = acb->firm_version;
4739 	acb_device_map = acb->device_map;
4740 	/* firm_model, 15 */
4741 	iop_firm_model =
4742 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4743 	/* firm_version, 17 */
4744 	iop_firm_version =
4745 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4746 	/* device_map, 21 */
4747 	iop_device_map =
4748 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4749 	/* post "get config" instruction */
4750 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4751 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4752 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4753 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4754 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4755 		arcmsr_warn(acb,
4756 		    "timeout while waiting for adapter firmware "
4757 		    "miscellaneous data");
4758 	count = 8;
4759 	while (count) {
4760 		*acb_firm_model =
4761 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4762 		acb_firm_model++;
4763 		iop_firm_model++;
4764 		count--;
4765 	}
4766 
4767 	count = 16;
4768 	while (count) {
4769 		*acb_firm_version =
4770 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4771 		acb_firm_version++;
4772 		iop_firm_version++;
4773 		count--;
4774 	}
4775 
4776 	count = 16;
4777 	while (count) {
4778 		*acb_device_map =
4779 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4780 		acb_device_map++;
4781 		iop_device_map++;
4782 		count--;
4783 	}
4784 
4785 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4786 	    acb->firm_version);
4787 
4788 	/* firm_request_len, 1, 04-07 */
4789 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4790 	    &phbcmu->msgcode_rwbuffer[1]);
4791 	/* firm_numbers_queue, 2, 08-11 */
4792 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4793 	    &phbcmu->msgcode_rwbuffer[2]);
4794 	/* firm_sdram_size, 3, 12-15 */
4795 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4796 	    &phbcmu->msgcode_rwbuffer[3]);
4797 	/* firm_ide_channels, 4, 16-19 */
4798 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4799 	    &phbcmu->msgcode_rwbuffer[4]);
4800 	/* firm_cfg_version, 25, 100-103 */
4801 	acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4802 	    &phbcmu->msgcode_rwbuffer[25]);
4803 }
4804 
4805 
4806 /* start background rebuild */
4807 static void
4808 arcmsr_start_hba_bgrb(struct ACB *acb)
4809 {
4810 	struct HBA_msgUnit *phbamu;
4811 
4812 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4813 
4814 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4815 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4816 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4817 
4818 	if (!arcmsr_hba_wait_msgint_ready(acb))
4819 		arcmsr_warn(acb,
4820 		    "timeout while waiting for background rebuild to start");
4821 }
4822 
4823 
4824 static void
4825 arcmsr_start_hbb_bgrb(struct ACB *acb)
4826 {
4827 	struct HBB_msgUnit *phbbmu;
4828 
4829 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4830 
4831 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4832 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4833 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4834 	    ARCMSR_MESSAGE_START_BGRB);
4835 
4836 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4837 		arcmsr_warn(acb,
4838 		    "timeout while waiting for background rebuild to start");
4839 }
4840 
4841 
4842 static void
4843 arcmsr_start_hbc_bgrb(struct ACB *acb)
4844 {
4845 	struct HBC_msgUnit *phbcmu;
4846 
4847 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4848 
4849 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4850 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4851 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4852 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4853 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4854 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4855 		arcmsr_warn(acb,
4856 		    "timeout while waiting for background rebuild to start");
4857 }
4858 
4859 static void
4860 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4861 {
4862 	struct HBA_msgUnit *phbamu;
4863 	struct CCB *ccb;
4864 	boolean_t error;
4865 	uint32_t flag_ccb, outbound_intstatus, intmask_org;
4866 	boolean_t poll_ccb_done = B_FALSE;
4867 	uint32_t poll_count = 0;
4868 
4869 
4870 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4871 
4872 polling_ccb_retry:
4873 	/* TODO: Use correct offset and size for syncing? */
4874 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4875 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4876 		return;
4877 	intmask_org = arcmsr_disable_allintr(acb);
4878 
4879 	for (;;) {
4880 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4881 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4882 			if (poll_ccb_done) {
4883 				/* chip FIFO no ccb for completion already */
4884 				break;
4885 			} else {
4886 				drv_usecwait(25000);
4887 				if ((poll_count > 100) && (poll_ccb != NULL)) {
4888 					break;
4889 				}
4890 				if (acb->ccboutstandingcount == 0) {
4891 					break;
4892 				}
4893 				poll_count++;
4894 				outbound_intstatus =
4895 				    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4896 				    &phbamu->outbound_intstatus) &
4897 				    acb->outbound_int_enable;
4898 
4899 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4900 				    &phbamu->outbound_intstatus,
4901 				    outbound_intstatus); /* clear interrupt */
4902 			}
4903 		}
4904 
4905 		/* frame must be 32 bytes aligned */
4906 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4907 
4908 		/* check if command done with no error */
4909 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4910 		    B_TRUE : B_FALSE;
4911 		if (poll_ccb != NULL)
4912 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4913 
4914 		if (ccb->acb != acb) {
4915 			arcmsr_warn(acb, "ccb got a wrong acb!");
4916 			continue;
4917 		}
4918 		if (ccb->ccb_state != ARCMSR_CCB_START) {
4919 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4920 				ccb->ccb_state |= ARCMSR_CCB_BACK;
4921 				ccb->pkt->pkt_reason = CMD_ABORTED;
4922 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4923 				arcmsr_ccb_complete(ccb, 1);
4924 				continue;
4925 			}
4926 			arcmsr_report_ccb_state(acb, ccb, error);
4927 			arcmsr_warn(acb,
4928 			    "polling op got unexpected ccb command done");
4929 			continue;
4930 		}
4931 		arcmsr_report_ccb_state(acb, ccb, error);
4932 	}	/* drain reply FIFO */
4933 	arcmsr_enable_allintr(acb, intmask_org);
4934 }
4935 
4936 
4937 static void
4938 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4939 {
4940 	struct HBB_msgUnit *phbbmu;
4941 	struct CCB *ccb;
4942 	uint32_t flag_ccb, intmask_org;
4943 	boolean_t error;
4944 	uint32_t poll_count = 0;
4945 	int index;
4946 	boolean_t poll_ccb_done = B_FALSE;
4947 
4948 
4949 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4950 
4951 
4952 polling_ccb_retry:
4953 	/* Use correct offset and size for syncing */
4954 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4955 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4956 		return;
4957 
4958 	intmask_org = arcmsr_disable_allintr(acb);
4959 
4960 	for (;;) {
4961 		index = phbbmu->doneq_index;
4962 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4963 			if (poll_ccb_done) {
4964 				/* chip FIFO no ccb for completion already */
4965 				break;
4966 			} else {
4967 				drv_usecwait(25000);
4968 				if ((poll_count > 100) && (poll_ccb != NULL))
4969 					break;
4970 				if (acb->ccboutstandingcount == 0)
4971 					break;
4972 				poll_count++;
4973 				/* clear doorbell interrupt */
4974 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4975 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4976 				    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4977 			}
4978 		}
4979 
4980 		phbbmu->done_qbuffer[index] = 0;
4981 		index++;
4982 		/* if last index number set it to 0 */
4983 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4984 		phbbmu->doneq_index = index;
4985 
4986 		/* frame must be 32 bytes aligned */
4987 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4988 
4989 		/* check if command done with no error */
4990 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4991 		    B_TRUE : B_FALSE;
4992 
4993 		if (poll_ccb != NULL)
4994 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4995 		if (ccb->acb != acb) {
4996 			arcmsr_warn(acb, "ccb got a wrong acb!");
4997 			continue;
4998 		}
4999 		if (ccb->ccb_state != ARCMSR_CCB_START) {
5000 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5001 				ccb->ccb_state |= ARCMSR_CCB_BACK;
5002 				ccb->pkt->pkt_reason = CMD_ABORTED;
5003 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
5004 				arcmsr_ccb_complete(ccb, 1);
5005 				continue;
5006 			}
5007 			arcmsr_report_ccb_state(acb, ccb, error);
5008 			arcmsr_warn(acb,
5009 			    "polling op got unexpected ccb command done");
5010 			continue;
5011 		}
5012 		arcmsr_report_ccb_state(acb, ccb, error);
5013 	}	/* drain reply FIFO */
5014 	arcmsr_enable_allintr(acb, intmask_org);
5015 }
5016 
5017 
5018 static void
5019 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5020 {
5021 
5022 	struct HBC_msgUnit *phbcmu;
5023 	struct CCB *ccb;
5024 	boolean_t error;
5025 	uint32_t ccb_cdb_phy;
5026 	uint32_t flag_ccb, intmask_org;
5027 	boolean_t poll_ccb_done = B_FALSE;
5028 	uint32_t poll_count = 0;
5029 
5030 
5031 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5032 
5033 polling_ccb_retry:
5034 
5035 	/* Use correct offset and size for syncing */
5036 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5037 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5038 		return;
5039 
5040 	intmask_org = arcmsr_disable_allintr(acb);
5041 
5042 	for (;;) {
5043 		if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5044 		    &phbcmu->host_int_status) &
5045 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5046 
5047 			if (poll_ccb_done) {
5048 				/* chip FIFO no ccb for completion already */
5049 				break;
5050 			} else {
5051 				drv_usecwait(25000);
5052 				if ((poll_count > 100) && (poll_ccb != NULL)) {
5053 					break;
5054 				}
5055 				if (acb->ccboutstandingcount == 0) {
5056 					break;
5057 				}
5058 				poll_count++;
5059 			}
5060 		}
5061 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5062 		    &phbcmu->outbound_queueport_low);
5063 		/* frame must be 32 bytes aligned */
5064 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5065 		/* the CDB is the first field of the CCB */
5066 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5067 
5068 		/* check if command done with no error */
5069 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5070 		    B_TRUE : B_FALSE;
5071 		if (poll_ccb != NULL)
5072 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5073 
5074 		if (ccb->acb != acb) {
5075 			arcmsr_warn(acb, "ccb got a wrong acb!");
5076 			continue;
5077 		}
5078 		if (ccb->ccb_state != ARCMSR_CCB_START) {
5079 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5080 				ccb->ccb_state |= ARCMSR_CCB_BACK;
5081 				ccb->pkt->pkt_reason = CMD_ABORTED;
5082 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
5083 				arcmsr_ccb_complete(ccb, 1);
5084 				continue;
5085 			}
5086 			arcmsr_report_ccb_state(acb, ccb, error);
5087 			arcmsr_warn(acb,
5088 			    "polling op got unexpected ccb command done");
5089 			continue;
5090 		}
5091 		arcmsr_report_ccb_state(acb, ccb, error);
5092 	}	/* drain reply FIFO */
5093 	arcmsr_enable_allintr(acb, intmask_org);
5094 }
5095 
5096 
5097 /*
5098  * Function: arcmsr_hba_hardware_reset()
5099  *           Bug Fix for Intel IOP cause firmware hang on.
5100  *           Works around an Intel IOP issue that can cause the
5101  *           firmware to hang and the kernel to panic.
5102 static void
5103 arcmsr_hba_hardware_reset(struct ACB *acb)
5104 {
5105 	struct HBA_msgUnit *phbamu;
5106 	uint8_t value[64];
5107 	int i;
5108 
5109 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5110 	/* backup pci config data */
5111 	for (i = 0; i < 64; i++) {
5112 		value[i] = pci_config_get8(acb->pci_acc_handle, i);
5113 	}
5114 	/* hardware reset signal */
5115 	if ((PCI_DEVICE_ID_ARECA_1680 ==
5116 	    pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5117 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5118 		    &phbamu->reserved1[0], 0x00000003);
5119 	} else {
5120 		pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5121 	}
5122 	drv_usecwait(1000000);
5123 	/* write back pci config data */
5124 	for (i = 0; i < 64; i++) {
5125 		pci_config_put8(acb->pci_acc_handle, i, value[i]);
5126 	}
5127 	drv_usecwait(1000000);
5128 }
5129 
5130 /*
5131  * Function: arcmsr_abort_host_command
5132  */
5133 static uint8_t
5134 arcmsr_abort_host_command(struct ACB *acb)
5135 {
5136 	uint8_t rtnval = 0;
5137 
5138 	switch (acb->adapter_type) {
5139 	case ACB_ADAPTER_TYPE_A:
5140 		rtnval = arcmsr_abort_hba_allcmd(acb);
5141 		break;
5142 	case ACB_ADAPTER_TYPE_B:
5143 		rtnval = arcmsr_abort_hbb_allcmd(acb);
5144 		break;
5145 	case ACB_ADAPTER_TYPE_C:
5146 		rtnval = arcmsr_abort_hbc_allcmd(acb);
5147 		break;
5148 	}
5149 	return (rtnval);
5150 }
5151 
5152 /*
5153  * Function: arcmsr_handle_iop_bus_hold
5154  */
5155 static void
5156 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5157 {
5158 
5159 	switch (acb->adapter_type) {
5160 	case ACB_ADAPTER_TYPE_A:
5161 	{
5162 		struct HBA_msgUnit *phbamu;
5163 		int retry_count = 0;
5164 
5165 		acb->timeout_count = 0;
5166 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5167 		arcmsr_hba_hardware_reset(acb);
5168 		acb->acb_flags &= ~ACB_F_IOP_INITED;
5169 	sleep_again:
5170 		drv_usecwait(1000000);
5171 		if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5172 		    &phbamu->outbound_msgaddr1) &
5173 		    ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5174 			if (retry_count > 60) {
5175 				arcmsr_warn(acb,
5176 				    "timeout waiting for hardware "
5177 				    "bus reset to complete, RETRY TERMINATED!!");
5178 				return;
5179 			}
5180 			retry_count++;
5181 			goto sleep_again;
5182 		}
5183 		arcmsr_iop_init(acb);
5184 		break;
5185 	}
5186 
5187 	}
5188 }
5189 
5190 static void
5191 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb)
5192 {
5193 	struct QBUFFER *prbuffer;
5194 	uint8_t *pQbuffer;
5195 	uint8_t *iop_data;
5196 	int my_empty_len, iop_len;
5197 	int rqbuf_firstidx, rqbuf_lastidx;
5198 
5199 	/* check whether this iop data would overflow our rqbuffer */
5200 	rqbuf_lastidx = acb->rqbuf_lastidx;
5201 	rqbuf_firstidx = acb->rqbuf_firstidx;
5202 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
5203 	iop_data = (uint8_t *)prbuffer->data;
5204 	iop_len = prbuffer->data_len;
5205 	my_empty_len = (rqbuf_firstidx - rqbuf_lastidx - 1) &
5206 	    (ARCMSR_MAX_QBUFFER - 1);
5207 
5208 	if (my_empty_len >= iop_len) {
5209 		while (iop_len > 0) {
5210 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5211 			(void) memcpy(pQbuffer, iop_data, 1);
5212 			rqbuf_lastidx++;
5213 			/* if last index number set it to 0 */
5214 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5215 			iop_data++;
5216 			iop_len--;
5217 		}
5218 		acb->rqbuf_lastidx = rqbuf_lastidx;
5219 		arcmsr_iop_message_read(acb);
5220 		/* signature, let IOP know data has been read */
5221 	} else {
5222 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5223 	}
5224 }
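
/*
 * The free-space test above is standard power-of-two ring arithmetic:
 * one slot is deliberately kept empty to distinguish "full" from
 * "empty", so
 *
 *	free = (firstidx - lastidx - 1) & (ARCMSR_MAX_QBUFFER - 1)
 *
 * Worked example, assuming ARCMSR_MAX_QBUFFER is 4096: firstidx = 10
 * and lastidx = 4090 give (10 - 4090 - 1) & 4095 = 15 free slots.
 */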
5225 
5226 
5227 
5228 static void
5229 arcmsr_iop2drv_data_read_handle(struct ACB *acb)
5230 {
5231 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5232 	/*
5233 	 * check whether the user space program has left any mail packages
5234 	 * in our post bag; now is the time to send them to Areca's firmware
5235 	 */
5236 
5237 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5238 
5239 		uint8_t *pQbuffer;
5240 		struct QBUFFER *pwbuffer;
5241 		uint8_t *iop_data;
5242 		int allxfer_len = 0;
5243 
5244 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5245 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5246 		iop_data = (uint8_t *)pwbuffer->data;
5247 
5248 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5249 		    (allxfer_len < 124)) {
5250 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5251 			(void) memcpy(iop_data, pQbuffer, 1);
5252 			acb->wqbuf_firstidx++;
5253 			/* wrap the index at the end of the ring */
5254 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5255 			iop_data++;
5256 			allxfer_len++;
5257 		}
5258 		pwbuffer->data_len = allxfer_len;
5259 		/*
5260 		 * Ring the inbound doorbell to tell the IOP the driver's data
5261 		 * is ready; its reply on the next interrupt drives the next post.
5262 		 */
5263 		arcmsr_iop_message_wrote(acb);
5264 	}
5265 
5266 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5267 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5268 }
5269 
5270 
5271 static void
5272 arcmsr_hba_doorbell_isr(struct ACB *acb)
5273 {
5274 	uint32_t outbound_doorbell;
5275 	struct HBA_msgUnit *phbamu;
5276 
5277 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5278 
5279 	/*
5280 	 * DOORBELL: ding! dong!
5281 	 * Check whether the firmware has posted any data for us to
5282 	 * collect.  (It may be worth verifying wrqbuffer_lock here.)
5283 	 */
5284 
5285 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5286 	    &phbamu->outbound_doorbell);
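	/*
	 * Writing the value just read back acknowledges exactly the bits
	 * we observed, so a doorbell rung between the read and the write
	 * is not lost.
	 */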
5287 	/* clear doorbell interrupt */
5288 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5289 	    &phbamu->outbound_doorbell, outbound_doorbell);
5290 
5291 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5292 		arcmsr_iop2drv_data_wrote_handle(acb);
5293 
5295 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5296 		arcmsr_iop2drv_data_read_handle(acb);
5297 }
5298 
5299 
5300 
5301 static void
5302 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5303 {
5304 	uint32_t outbound_doorbell;
5305 	struct HBC_msgUnit *phbcmu;
5306 
5307 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5308 
5309 	/*
5310 	 * DOORBELL: ding! dong!
5311 	 * Check whether the firmware has posted any data for us to
5312 	 * collect.  (It may be worth verifying wrqbuffer_lock here.)
5313 	 */
5314 
5315 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5316 	    &phbcmu->outbound_doorbell);
5317 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5318 	    &phbcmu->outbound_doorbell_clear,
5319 	    outbound_doorbell); /* clear interrupt */
5320 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5321 		arcmsr_iop2drv_data_wrote_handle(acb);
5322 	}
5323 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5324 		arcmsr_iop2drv_data_read_handle(acb);
5325 	}
5326 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5327 		/* messenger of "driver to iop commands" */
5328 		arcmsr_hbc_message_isr(acb);
5329 	}
5330 }
5331 
5332 
5333 static void
5334 arcmsr_hba_message_isr(struct ACB *acb)
5335 {
5336 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5337 	uint32_t *signature = &phbamu->msgcode_rwbuffer[0];
5338 	uint32_t outbound_message;
5339 
5340 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5341 	    &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5342 
5343 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5344 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5345 		if ((ddi_taskq_dispatch(acb->taskq,
5346 		    (void (*)(void *))arcmsr_dr_handle,
5347 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5348 			arcmsr_warn(acb, "DR task start failed");
5349 		}
5350 }
5351 
5352 static void
5353 arcmsr_hbb_message_isr(struct ACB *acb)
5354 {
5355 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5356 	uint32_t *signature = &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0];
5357 	uint32_t outbound_message;
5358 
5359 	/* clear interrupts */
5360 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5361 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5362 	    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5363 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5364 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5365 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5366 
5367 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5368 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5369 		if ((ddi_taskq_dispatch(acb->taskq,
5370 		    (void (*)(void *))arcmsr_dr_handle,
5371 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5372 			arcmsr_warn(acb, "DR task start failed");
5373 		}
5374 }
5375 
5376 static void
5377 arcmsr_hbc_message_isr(struct ACB *acb)
5378 {
5379 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5380 	uint32_t *signature = &phbcmu->msgcode_rwbuffer[0];
5381 	uint32_t outbound_message;
5382 
5383 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5384 	    &phbcmu->outbound_doorbell_clear,
5385 	    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5386 
5387 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5388 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5389 		if ((ddi_taskq_dispatch(acb->taskq,
5390 		    (void (*)(void *))arcmsr_dr_handle,
5391 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5392 			arcmsr_warn(acb, "DR task start failed");
5393 		}
5394 }
5395 
5396 
5397 static void
5398 arcmsr_hba_postqueue_isr(struct ACB *acb)
5399 {
5400 
5401 	struct HBA_msgUnit *phbamu;
5402 	struct CCB *ccb;
5403 	uint32_t flag_ccb;
5404 	boolean_t error;
5405 
5406 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5407 
5408 	/* areca cdb command done */
5409 	/* an offset and length of zero sync the entire CCB pool */
5410 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5411 	    DDI_DMA_SYNC_FORKERNEL);
5412 
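	/*
	 * Each reply word is the CCB's bus address shifted right by 5
	 * (frames are 32-byte aligned); shifting it back and adding
	 * vir2phy_offset, the CCB pool's virtual-to-physical delta,
	 * recovers the driver's virtual pointer to the completed CCB.
	 */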
5413 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5414 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5415 		/* frame must be 32 bytes aligned */
5416 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5417 		/* check if command done with no error */
5418 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5419 		    B_TRUE : B_FALSE;
5420 		arcmsr_drain_donequeue(acb, ccb, error);
5421 	}	/* drain reply FIFO */
5422 }
5423 
5424 
5425 static void
5426 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5427 {
5428 	struct HBB_msgUnit *phbbmu;
5429 	struct CCB *ccb;
5430 	uint32_t flag_ccb;
5431 	boolean_t error;
5432 	int index;
5433 
5434 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5435 
5436 	/* areca cdb command done */
5437 	index = phbbmu->doneq_index;
5438 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5439 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5440 		return;
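	/*
	 * A zero entry marks an empty done_qbuffer slot; each consumed
	 * entry is cleared before the index advances so the IOP can
	 * reuse the slot on its next pass around the ring.
	 */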
5441 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5442 		phbbmu->done_qbuffer[index] = 0;
5443 		/* frame must be 32 bytes aligned */
5444 
5445 		/* the CDB is the first field of the CCB */
5446 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5447 
5448 		/* check if command done with no error */
5449 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5450 		    B_TRUE : B_FALSE;
5451 		arcmsr_drain_donequeue(acb, ccb, error);
5452 		index++;
5453 		/* wrap the index at the end of the ring */
5454 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
5455 		phbbmu->doneq_index = index;
5456 	}	/* drain reply FIFO */
5457 }
5458 
5459 
5460 static void
5461 arcmsr_hbc_postqueue_isr(struct ACB *acb)
5462 {
5463 
5464 	struct HBC_msgUnit *phbcmu;
5465 	struct CCB *ccb;
5466 	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
5467 	boolean_t error;
5468 
5469 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5470 	/* areca cdb command done */
5471 	/* an offset and length of zero sync the entire CCB pool */
5472 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5473 	    DDI_DMA_SYNC_FORKERNEL);
5474 
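	/*
	 * The low four bits of outbound_queueport_low carry completion
	 * flags (including ARCMSR_CCBREPLY_FLAG_ERROR_MODE1); the
	 * 0xFFFFFFF0 mask below strips them to leave the CCB's bus
	 * address.  The throttling counter bounds time spent in the ISR:
	 * after ARCMSR_HBC_ISR_THROTTLING_LEVEL completions we ask the
	 * IOP to throttle its post queue and exit.
	 */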
5475 	while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5476 	    &phbcmu->host_int_status) &
5477 	    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5478 		/* fetch the completion token from the reply queue */
5479 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5480 		    &phbcmu->outbound_queueport_low);
5481 		/* frame must be 32 bytes aligned */
5482 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5483 
5484 		/* the CDB is the first field of the CCB */
5485 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5486 
5487 		/* check if command done with no error */
5488 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5489 		    B_TRUE : B_FALSE;
5490 		arcmsr_drain_donequeue(acb, ccb, error);
5491 		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
5492 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5493 			    &phbcmu->inbound_doorbell,
5494 			    ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
5495 			break;
5496 		}
5497 		throttling++;
5498 	}	/* drain reply FIFO */
5499 }
5500 
5501 
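/*
 * Per-adapter-type interrupt service routines.  Each reads its masked
 * interrupt status; zero means some other device on a shared interrupt
 * line raised the interrupt, so DDI_INTR_UNCLAIMED is returned.
 * Otherwise the status is acknowledged and the doorbell, post queue
 * and message sub-handlers are dispatched as flagged.
 */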
5502 static uint_t
5503 arcmsr_handle_hba_isr(struct ACB *acb)
5504 {
5505 	uint32_t outbound_intstatus;
5506 	struct HBA_msgUnit *phbamu;
5507 
5508 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5509 
5510 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5511 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5512 
5513 	if (outbound_intstatus == 0)	/* it must be a shared irq */
5514 		return (DDI_INTR_UNCLAIMED);
5515 
5516 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5517 	    outbound_intstatus); /* clear interrupt */
5518 
5519 	/* MU doorbell interrupts */
5520 
5521 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5522 		arcmsr_hba_doorbell_isr(acb);
5523 
5524 	/* MU post queue interrupts */
5525 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5526 		arcmsr_hba_postqueue_isr(acb);
5527 
5528 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5529 		arcmsr_hba_message_isr(acb);
5530 	}
5531 
5532 	return (DDI_INTR_CLAIMED);
5533 }
5534 
5535 
5536 static uint_t
5537 arcmsr_handle_hbb_isr(struct ACB *acb)
5538 {
5539 	uint32_t outbound_doorbell;
5540 	struct HBB_msgUnit *phbbmu;
5541 
5543 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5544 
5545 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5546 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5547 
5548 	if (outbound_doorbell == 0)		/* it must be a shared irq */
5549 		return (DDI_INTR_UNCLAIMED);
5550 
5551 	/* clear doorbell interrupt */
5552 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5553 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5554 	/* read back to flush the posted write (wait a cycle) */
5555 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5556 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
5557 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5558 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5559 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5560 
5561 	/* MU ioctl transfer doorbell interrupts */
5562 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5563 		arcmsr_iop2drv_data_wrote_handle(acb);
5564 
5565 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5566 		arcmsr_iop2drv_data_read_handle(acb);
5567 
5568 	/* MU post queue interrupts */
5569 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5570 		arcmsr_hbb_postqueue_isr(acb);
5571 
5572 	/* MU message interrupt */
5573 
5574 	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5575 		arcmsr_hbb_message_isr(acb);
5576 	}
5577 
5578 	return (DDI_INTR_CLAIMED);
5579 }
5580 
5581 static uint_t
5582 arcmsr_handle_hbc_isr(struct ACB *acb)
5583 {
5584 	uint32_t host_interrupt_status;
5585 	struct HBC_msgUnit *phbcmu;
5586 
5587 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5588 	/*  check outbound intstatus */
5589 	host_interrupt_status =
5590 	    CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5591 	if (host_interrupt_status == 0)	/* it must be a shared irq */
5592 		return (DDI_INTR_UNCLAIMED);
5593 	/* MU ioctl transfer doorbell interrupts */
5594 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5595 		/* messenger of "ioctl message read write" */
5596 		arcmsr_hbc_doorbell_isr(acb);
5597 	}
5598 	/* MU post queue interrupts */
5599 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5600 		/* messenger of "scsi commands" */
5601 		arcmsr_hbc_postqueue_isr(acb);
5602 	}
5603 	return (DDI_INTR_CLAIMED);
5604 }
5605 
5606 static uint_t
5607 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5608 {
5609 	struct ACB *acb = (void *)arg;
5610 	struct CCB *ccb;
5611 	uint_t retrn = DDI_INTR_UNCLAIMED;
5612 	_NOTE(ARGUNUSED(arg2))
5613 
5614 	mutex_enter(&acb->isr_mutex);
5615 	switch (acb->adapter_type) {
5616 	case ACB_ADAPTER_TYPE_A:
5617 		retrn = arcmsr_handle_hba_isr(acb);
5618 		break;
5619 
5620 	case ACB_ADAPTER_TYPE_B:
5621 		retrn = arcmsr_handle_hbb_isr(acb);
5622 		break;
5623 
5624 	case ACB_ADAPTER_TYPE_C:
5625 		retrn = arcmsr_handle_hbc_isr(acb);
5626 		break;
5627 
5628 	default:
5629 		/* We should never be here */
5630 		ASSERT(0);
5631 		break;
5632 	}
5633 	mutex_exit(&acb->isr_mutex);
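	/*
	 * Complete CCBs outside isr_mutex so completion routines may
	 * re-enter the driver without risk of deadlock.
	 */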
5634 	while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5635 		arcmsr_ccb_complete(ccb, 1);
5636 	}
5637 	return (retrn);
5638 }
5639 
5640 
5641 static void
5642 arcmsr_wait_firmware_ready(struct ACB *acb)
5643 {
5644 	uint32_t firmware_state;
5645 
5646 	firmware_state = 0;
5647 
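	/*
	 * These loops spin on the firmware-OK bit with neither delay nor
	 * timeout; this routine is used only on the init and reset paths,
	 * where the firmware is expected to respond.
	 */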
5648 	switch (acb->adapter_type) {
5649 	case ACB_ADAPTER_TYPE_A:
5650 	{
5651 		struct HBA_msgUnit *phbamu;
5652 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5653 		do {
5654 			firmware_state =
5655 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5656 			    &phbamu->outbound_msgaddr1);
5657 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5658 		    == 0);
5659 		break;
5660 	}
5661 
5662 	case ACB_ADAPTER_TYPE_B:
5663 	{
5664 		struct HBB_msgUnit *phbbmu;
5665 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5666 		do {
5667 			firmware_state =
5668 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5669 				&phbbmu->hbb_doorbell->iop2drv_doorbell);
5670 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5671 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5672 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5673 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5674 		break;
5675 	}
5676 
5677 	case ACB_ADAPTER_TYPE_C:
5678 	{
5679 		struct HBC_msgUnit *phbcmu;
5680 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5681 		do {
5682 			firmware_state =
5683 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5684 				&phbcmu->outbound_msgaddr1);
5685 		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5686 		    == 0);
5687 		break;
5688 	}
5689 
5690 	}
5691 }
5692 
5693 static void
5694 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
5695 {
5696 	switch (acb->adapter_type) {
5697 	case ACB_ADAPTER_TYPE_A: {
5698 		struct HBA_msgUnit *phbamu;
5699 		uint32_t outbound_doorbell;
5700 
5701 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5702 		/* empty doorbell Qbuffer if door bell rung */
5703 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5704 		    &phbamu->outbound_doorbell);
5705 		/* clear doorbell interrupt */
5706 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5707 		    &phbamu->outbound_doorbell, outbound_doorbell);
5708 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5709 		    &phbamu->inbound_doorbell,
5710 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
5711 		break;
5712 	}
5713 
5714 	case ACB_ADAPTER_TYPE_B: {
5715 		struct HBB_msgUnit *phbbmu;
5716 
5717 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5718 		/* clear interrupt and message state */
5719 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5720 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5721 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5722 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5723 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5724 		    ARCMSR_DRV2IOP_DATA_READ_OK);
5725 		/* let IOP know data has been read */
5726 		break;
5727 	}
5728 
5729 	case ACB_ADAPTER_TYPE_C: {
5730 		struct HBC_msgUnit *phbcmu;
5731 		uint32_t outbound_doorbell;
5732 
5733 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5734 		/* empty doorbell Qbuffer if the doorbell has rung */
5735 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5736 		    &phbcmu->outbound_doorbell);
5737 		/* clear outbound doorbell interrupt */
5738 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5739 		    &phbcmu->outbound_doorbell_clear, outbound_doorbell);
5740 		/* let IOP know data has been read */
5741 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5742 		    &phbcmu->inbound_doorbell,
5743 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
5744 		break;
5745 	}
5746 
5747 	}
5748 }
5749 
5750 
5751 static uint32_t
5752 arcmsr_iop_confirm(struct ACB *acb)
5753 {
5754 	uint64_t cdb_phyaddr;
5755 	uint32_t cdb_phyaddr_hi32;
5756 
5757 	/*
5758 	 * If the CCB pool sits above 4GB, i.e. the upper 32 bits of its
5759 	 * bus address are non-zero, the IOP 331 must be told about them.
5760 	 */
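	/*
	 * The split 16-bit shifts extract the upper 32 bits of the bus
	 * address; the idiom, shared with other arcmsr ports, stays
	 * well-defined even where the address type is only 32 bits wide.
	 */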
5761 	cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
5762 	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
5763 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
5764 	switch (acb->adapter_type) {
5765 	case ACB_ADAPTER_TYPE_A:
5766 		if (cdb_phyaddr_hi32 != 0) {
5767 			struct HBA_msgUnit *phbamu;
5768 
5769 			phbamu = (struct HBA_msgUnit *)acb->pmu;
5770 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5771 			    &phbamu->msgcode_rwbuffer[0],
5772 			    ARCMSR_SIGNATURE_SET_CONFIG);
5773 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5774 			    &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5775 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5776 			    &phbamu->inbound_msgaddr0,
5777 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5778 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
5779 				arcmsr_warn(acb,
5780 				    "timeout setting ccb "
5781 				    "high physical address");
5782 				return (FALSE);
5783 			}
5784 		}
5785 		break;
5786 
5787 	/* if adapter is type B, set window of "post command queue" */
5788 	case ACB_ADAPTER_TYPE_B: {
5789 		uint32_t post_queue_phyaddr;
5790 		struct HBB_msgUnit *phbbmu;
5791 
5792 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5793 		phbbmu->postq_index = 0;
5794 		phbbmu->doneq_index = 0;
5795 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5796 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5797 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
5798 
5799 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5800 			arcmsr_warn(acb, "timeout setting post command "
5801 			    "queue window");
5802 			return (FALSE);
5803 		}
5804 
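		/*
		 * "Set config" message layout: msgcode_rwbuffer[0] carries
		 * the signature, [1] the high 32 bits of the CCB pool
		 * address, [2] the post queue address, [3] the done queue
		 * address and [4] the queue size in bytes, (256+8)*4 = 1056.
		 */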
5805 		post_queue_phyaddr = (uint32_t)cdb_phyaddr +
5806 		    ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
5807 		    + offsetof(struct HBB_msgUnit, post_qbuffer);
5808 		/* driver "set config" signature */
5809 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5810 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
5811 		    ARCMSR_SIGNATURE_SET_CONFIG);
5812 		/* high 32 bits of the CCB pool address, normally zero */
5813 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5814 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
5815 		    cdb_phyaddr_hi32);
5816 		/* post queue base address */
5817 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5818 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
5819 		    post_queue_phyaddr);
5820 		/* done queue base address: postQ + (256+8)*4 bytes */
5821 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5822 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
5823 		    post_queue_phyaddr + 1056);
5824 		/* queue size in bytes: (256+8)*4 = 1056 */
5825 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5826 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
5827 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5828 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5829 		    ARCMSR_MESSAGE_SET_CONFIG);
5830 
5831 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5832 			arcmsr_warn(acb,
5833 			    "timeout setting command queue window");
5834 			return (FALSE);
5835 		}
5836 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5837 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5838 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
5839 
5840 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5841 			arcmsr_warn(acb, "timeout in 'start driver mode'");
5842 			return (FALSE);
5843 		}
5844 		break;
5845 	}
5846 
5847 	case ACB_ADAPTER_TYPE_C:
5848 		if (cdb_phyaddr_hi32 != 0) {
5849 			struct HBC_msgUnit *phbcmu;
5850 
5851 			phbcmu = (struct HBC_msgUnit *)acb->pmu;
5852 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5853 			    &phbcmu->msgcode_rwbuffer[0],
5854 			    ARCMSR_SIGNATURE_SET_CONFIG);
5855 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5856 			    &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5857 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5858 			    &phbcmu->inbound_msgaddr0,
5859 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5860 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5861 			    &phbcmu->inbound_doorbell,
5862 			    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
5863 			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
5864 				arcmsr_warn(acb, "'set ccb "
5865 				    "high part physical address' timeout");
5866 				return (FALSE);
5867 			}
5868 		}
5869 		break;
5870 	}
5871 	return (TRUE);
5872 }
5873 
5874 
5875 /*
5876  * ONLY used for Adapter type B
5877  */
5878 static void
5879 arcmsr_enable_eoi_mode(struct ACB *acb)
5880 {
5881 	struct HBB_msgUnit *phbbmu;
5882 
5883 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5884 
5885 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5886 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5887 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5888 
5889 	if (!arcmsr_hbb_wait_msgint_ready(acb))
5890 		arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5891 }
5892 
5893 /* start background rebuild */
5894 static void
5895 arcmsr_iop_init(struct ACB *acb)
5896 {
5897 	uint32_t intmask_org;
5898 
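	/*
	 * Bring the IOP on line: mask interrupts, wait for the firmware,
	 * confirm the CCB pool address, fetch the adapter configuration
	 * and start background rebuild, then drain stale doorbell data
	 * and re-enable interrupts before marking the IOP initialized.
	 */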
5899 	/* disable all outbound interrupts */
5900 	intmask_org = arcmsr_disable_allintr(acb);
5901 	arcmsr_wait_firmware_ready(acb);
5902 	(void) arcmsr_iop_confirm(acb);
5903 
5904 	/* start background rebuild */
5905 	switch (acb->adapter_type) {
5906 	case ACB_ADAPTER_TYPE_A:
5907 		arcmsr_get_hba_config(acb);
5908 		arcmsr_start_hba_bgrb(acb);
5909 		break;
5910 	case ACB_ADAPTER_TYPE_B:
5911 		arcmsr_get_hbb_config(acb);
5912 		arcmsr_start_hbb_bgrb(acb);
5913 		break;
5914 	case ACB_ADAPTER_TYPE_C:
5915 		arcmsr_get_hbc_config(acb);
5916 		arcmsr_start_hbc_bgrb(acb);
5917 		break;
5918 	}
5919 	/* empty doorbell Qbuffer if door bell rang */
5920 	arcmsr_clear_doorbell_queue_buffer(acb);
5921 
5922 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5923 		arcmsr_enable_eoi_mode(acb);
5924 
5925 	/* enable outbound Post Queue, outbound doorbell Interrupt */
5926 	arcmsr_enable_allintr(acb, intmask_org);
5927 	acb->acb_flags |= ACB_F_IOP_INITED;
5928 }
5929