xref: /illumos-gate/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c (revision a38ddfee9c8c6b6c5a2947ff52fd2338362a4444)
/*
 *       O.S   : Solaris
 *  FILE NAME  : arcmsr.c
 *       BY    : Erich Chen
 *  Description: SCSI RAID Device Driver for
 *               ARECA RAID Host adapter
 *
 *  Copyright (C) 2002,2007 Areca Technology Corporation All rights reserved.
 *  Copyright (C) 2002,2007 Erich Chen
 *	    Web site: www.areca.com.tw
 *	      E-mail: erich@areca.com.tw
 *
 *	Redistribution and use in source and binary forms, with or without
 *	modification, are permitted provided that the following conditions
 *	are met:
 *	1. Redistributions of source code must retain the above copyright
 *	   notice, this list of conditions and the following disclaimer.
 *	2. Redistributions in binary form must reproduce the above copyright
 *	   notice, this list of conditions and the following disclaimer in the
 *	   documentation and/or other materials provided with the distribution.
 *	3. The party using or redistributing the source code and binary forms
 *	   agrees to the disclaimer below and the terms and conditions set
 *	   forth herein.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>

#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);

static uint_t arcmsr_interrupt(caddr_t arg);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);


static struct ACB *ArcMSRHBA[ARCMSR_MAX_ADAPTER];
static int arcmsr_hba_count;
static void *arcmsr_soft_state = NULL;
static kmutex_t arcmsr_global_mutex;

static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffff,		/* high DMA address range */
	0x00ffffff,		/* DMA counter register upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address + length". The Intel IOP331 cannot use
	 * segments that cross the 4G boundary due to segment boundary
	 * restrictions.
	 */
	0x00ffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* bus-specific DMA flags */
};
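
/*
 * Illustrative only (not part of the original driver): a minimal sketch
 * of how an attribute structure like the one above constrains a DMA
 * binding.  Assuming a buf "bp" and devinfo "dip", the bind below would
 * return at most ARCMSR_MAX_SG_ENTRIES cookies, each honoring the
 * dma_attr_seg "address + length" limit:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ncookies;
 *
 *	(void) ddi_dma_alloc_handle(dip, &arcmsr_dma_attr,
 *	    DDI_DMA_SLEEP, NULL, &h);
 *	(void) ddi_dma_buf_bind_handle(h, bp,
 *	    DDI_DMA_READ | DDI_DMA_PARTIAL, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ncookies);
 *
 * arcmsr_dma_alloc() below performs the real version of this sequence.
 */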

static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter register upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* bus-specific DMA flags */
};

static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
#ifdef _LP64
	/*
	 * cb_ops cb_flag:
	 *	D_NEW | D_MP	compatibility flags, see conf.h
	 *	D_MP		flag indicates that the driver is safe for
	 *			multi-threaded operation
	 *	D_64BIT		flag driver properly handles 64-bit offsets
	 */
	D_HOTPLUG | D_MP | D_64BIT,
#else
	D_HOTPLUG | D_MP,
#endif
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* reference count */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	arcmsr_attach,		/* attach */
	arcmsr_detach,		/* detach */
	arcmsr_reset,		/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nulldev			/* power */
};

char _depends_on[] = "misc/scsi";

static struct modldrv arcmsr_modldrv = {
	&mod_driverops,		/* type of module: this is a driver */
	ARCMSR_DRIVER_VERSION,	/* module description string, from arcmsr.h */
	&arcmsr_ops,		/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};


int
_init(void) {
	int ret;


	mutex_init(&arcmsr_global_mutex, "arcmsr global mutex",
	    MUTEX_DRIVER, NULL);
	ret = ddi_soft_state_init(&arcmsr_soft_state,
	    sizeof (struct ACB), ARCMSR_MAX_ADAPTER);
	if (ret != 0) {
		/* undo the mutex_init() we just performed */
		mutex_destroy(&arcmsr_global_mutex);
		return (ret);
	}
	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		ddi_soft_state_fini(&arcmsr_soft_state);
		return (ret);
	}

	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}
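
/*
 * A note on ordering (a sketch of the load-time contract, per the SCSA
 * and module framework man pages, not anything this driver defines):
 * scsi_hba_init(9F) must succeed before mod_install(9F) is attempted,
 * and each failure path must unwind whatever has been set up so far:
 *
 *	ddi_soft_state_init() --> scsi_hba_init() --> mod_install()
 *	        ^                       ^                   |
 *	        +---- fini on failure --+--- fini/destroy --+
 *
 * _fini() below performs the mirror-image teardown after a successful
 * mod_remove().
 */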


int
_fini(void) {
	int ret;

	ret = mod_remove(&arcmsr_modlinkage);
	if (ret == 0) {
		/* a return of 0 means the driver can be removed */
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_info(struct modinfo *modinfop) {
	return (mod_info(&arcmsr_modlinkage, modinfop));
}


#if defined(ARCMSR_DEBUG)
static void
arcmsr_dump_scsi_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) {

	static char hex[] = "0123456789abcdef";
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;
	struct CCB *ccb =
	    (struct CCB *)pkt->pkt_ha_private;
	uint8_t	*cdb = pkt->pkt_cdbp;
	char buf[256];
	char *p;
	int i;


	(void) sprintf(buf, "arcmsr%d: sgcount=%d <%d, %d> cdb ",
	    ddi_get_instance(acb->dev_info), ccb->arcmsr_cdb.sgcount,
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);
	*p++ = '[';

	for (i = 0; i < ccb->arcmsr_cdb.CdbLength; i++, cdb++) {
		if (i != 0) {
			*p++ = ' ';
		}
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '.';
	*p = '\0';
	/* pass buf as an argument, not as the format string */
	cmn_err(CE_CONT, "%s", buf);
}
#endif  /* ARCMSR_DEBUG */

static void
arcmsr_ccbs_timeout(void *arg) {

	struct ACB *acb = (struct ACB *)arg;
	struct CCB *ccb;
	int i;
	int current_time = ddi_get_time();


	if (acb->ccboutstandingcount != 0) {
		/* check each ccb */
		i = ddi_dma_sync(acb->ccbs_pool_handle, 0,
		    acb->dma_sync_size, DDI_DMA_SYNC_FORKERNEL);
		if (i != DDI_SUCCESS) {
			if ((acb->timeout_id != 0) &&
			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
				/* do pkt timeout check each 60 secs */
				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
				    (void *)acb,
				    (60 * drv_usectohz(1000000)));
			}
			return;
		}
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->acb != acb) {
				break;
			}
			if (ccb->startdone == ARCMSR_CCB_DONE) {
				continue;
			}
			if (ccb->pkt == NULL) {
				continue;
			}
			if (ccb->pkt->pkt_time == 0) {
				continue;
			}
			if ((int)ccb->ccb_time >= current_time) {
				continue;
			}
			if (ccb->startdone == ARCMSR_CCB_START) {
				int id = ccb->pkt->pkt_address.a_target;
				int lun = ccb->pkt->pkt_address.a_lun;

				/*
				 * handle outstanding command of timeout ccb
				 */
				ccb->pkt->pkt_reason = CMD_TIMEOUT;
				ccb->pkt->pkt_statistics = STAT_TIMEOUT;

				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "outstanding command timeout",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "fatal error on target, device is gone",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			ccb->ccb_time = (time_t)(ccb->pkt->pkt_time +
			    current_time); /* adjust ccb_time of pending ccb */
		}
	}
	if ((acb->timeout_id != 0) &&
	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
		/* do pkt timeout check each 60 secs */
		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		    (void *)acb, (60 * drv_usectohz(1000000)));
	}
}

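/*
 * For illustration (the arming side lives in arcmsr_tran_start(), which
 * is outside this part of the file, so this is an inference from the
 * adjustment above): a CCB's deadline is expected to be set when the
 * command is issued, roughly
 *
 *	ccb->ccb_time = (time_t)(ccb->pkt->pkt_time + ddi_get_time());
 *
 * so the watchdog above simply fails commands whose deadline has passed
 * and pushes the deadline forward for those still pending.
 */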

static uint32_t
arcmsr_disable_allintr(struct ACB *acb) {

	uint32_t intmask_org;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/* disable all outbound interrupt */
		/* disable outbound message0 int */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask) |
		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask,
		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		/* disable all outbound interrupt */
		/* disable outbound message0 int */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask) &
		    (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		/* disable all interrupts */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
		}
		break;
	}
	return (intmask_org);
}

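/*
 * arcmsr_disable_allintr() and arcmsr_enable_allintr() are used as a
 * save/restore pair around code that must not race with the IOP, e.g.
 * (the pattern arcmsr_iop_parking() below follows):
 *
 *	uint32_t intmask_org;
 *
 *	intmask_org = arcmsr_disable_allintr(acb);
 *	... talk to the IOP with interrupts masked ...
 *	arcmsr_enable_allintr(acb, intmask_org);
 *
 * The returned value is the pre-disable mask, which the enable side
 * merges with the interrupt sources it wants active.
 */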

static void
arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org) {

	int mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		/* disable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
		/* 1=interrupt enable, 0=interrupt disable */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
		    intmask_org | mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	}
}


static void
arcmsr_iop_parking(struct ACB *acb) {

	if (acb != NULL) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;

			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_allintr(acb);
			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
				arcmsr_stop_hba_bgrb(acb);
				arcmsr_flush_hba_cache(acb);
			} else {
				arcmsr_stop_hbb_bgrb(acb);
				arcmsr_flush_hbb_cache(acb);
			}
			/*
			 * enable outbound Post Queue
			 * enable outbound doorbell Interrupt
			 */
			arcmsr_enable_allintr(acb, intmask_org);
		}
	}
}


static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd) {

	struct ACB *acb;
	scsi_hba_tran_t *scsi_hba_transport;

	scsi_hba_transport = (scsi_hba_tran_t *)
	    ddi_get_driver_private(resetdev);

	if (!scsi_hba_transport)
		return (DDI_FAILURE);

	acb = (struct ACB *)
	    scsi_hba_transport->tran_hba_private;

	if (!acb)
		return (DDI_FAILURE);

	if ((cmd == RESET_LUN) ||
	    (cmd == RESET_BUS) ||
	    (cmd == RESET_TARGET))
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: reset op (%d) not supported",
		    ddi_get_instance(resetdev), cmd);

	arcmsr_pcidev_disattach(acb);

	return (DDI_SUCCESS);
}

static int
arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance) {

	scsi_hba_tran_t *hba_trans;
	ddi_device_acc_attr_t dev_acc_attr;
	struct ACB *acb;
	static char buf[256];
	uint16_t wval;
	int raid6 = 1;
	char *type;

	/*
	 * Soft State Structure
	 * The driver should allocate the per-device-instance
	 * soft state structure, being careful to clean up properly if
	 * an error occurs. Allocate data structure.
	 */
	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_soft_state_zalloc failed",
		    instance);
		return (DDI_FAILURE);
	}

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_get_soft_state failed",
		    instance);
		goto error_level_1;
	}

	/* acb is already zalloc()d so we don't need to bzero() it */
	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	acb->dev_info = dev_info;
	acb->dev_acc_attr = dev_acc_attr;

	/*
	 * The driver, if providing DMA, should also check that its hardware is
	 * installed in a DMA-capable slot
	 */
	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: hardware is not installed in a "
		    "DMA-capable slot",
		    instance);
		goto error_level_0;
	}
	/* We do not support adapter drivers with high-level interrupts */
	if (ddi_intr_hilevel(dev_info, 0) != 0) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: high-level interrupt not supported",
		    instance);
		goto error_level_0;
	}

	if (pci_config_setup(dev_info, &acb->pci_acc_handle)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: pci_config_setup() failed, attach failed",
		    instance);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
	if (wval != PCI_VENDOR_ID_ARECA) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: failing attach: vendor id (0x%04x) "
		    "does not match 0x%04x (PCI_VENDOR_ID_ARECA)",
		    instance, wval, PCI_VENDOR_ID_ARECA);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
	switch (wval) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1210:
	case PCI_DEVICE_ID_ARECA_1201:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}

	(void) sprintf(buf, "Areca %s Host Adapter RAID Controller%s",
	    type, raid6 ? " (RAID6 capable)" : "");
	cmn_err(CE_CONT, "arcmsr%d:%s ", instance, buf);
	cmn_err(CE_CONT, "arcmsr%d:%s ", instance, ARCMSR_DRIVER_VERSION);


	/* we disable iop interrupt here */
	if (arcmsr_initialize(acb) == DDI_FAILURE) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: arcmsr_initialize "
		    "failed", instance);
		goto error_level_1;
	}

	/*
	 * The driver must first obtain the iblock cookie to initialize
	 * mutexes used in the driver handler. Only after those mutexes
	 * have been initialized can the interrupt handler be added.
	 */
	if (ddi_get_iblock_cookie(dev_info, 0, &acb->iblock_cookie)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: "
		    "ddi_get_iblock_cookie failed", instance);
		goto error_level_2;
	}
	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);

	/* Allocate a transport structure */
	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
	if (hba_trans == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_tran_alloc failed",
		    instance);
		goto error_level_3;
	}
	acb->scsi_hba_transport = hba_trans;
	acb->dev_info = dev_info;
	/* init scsi host adapter transport entry */
	hba_trans->tran_hba_private  = acb;
	hba_trans->tran_tgt_private  = NULL;
	/*
	 * If no per-target initialization is required, the HBA can leave
	 * tran_tgt_init set to NULL.
	 */
	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
	hba_trans->tran_tgt_probe = scsi_hba_probe;
	hba_trans->tran_tgt_free = NULL;
	hba_trans->tran_start = arcmsr_tran_start;
	hba_trans->tran_abort = arcmsr_tran_abort;
	hba_trans->tran_reset = arcmsr_tran_reset;
	hba_trans->tran_getcap = arcmsr_tran_getcap;
	hba_trans->tran_setcap = arcmsr_tran_setcap;
	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;

	hba_trans->tran_reset_notify = NULL;
	hba_trans->tran_get_bus_addr = NULL;
	hba_trans->tran_get_name = NULL;
	hba_trans->tran_quiesce = NULL;
	hba_trans->tran_unquiesce = NULL;
	hba_trans->tran_bus_reset = NULL;
	hba_trans->tran_add_eventcall = NULL;
	hba_trans->tran_get_eventcookie = NULL;
	hba_trans->tran_post_event = NULL;
	hba_trans->tran_remove_eventcall = NULL;


	/* Adding an Interrupt Handler */
	if (ddi_add_intr(dev_info, 0, &acb->iblock_cookie, 0,
	    arcmsr_interrupt, (caddr_t)acb) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: failed to add interrupt handler",
		    instance);
		goto error_level_4;
	}
	/*
	 * The driver should attach this instance of the device, and
	 * perform error cleanup if necessary
	 */
	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_attach_setup failed",
		    instance);
		goto error_level_5;
	}

	/* iop init and enable interrupt here */
	mutex_enter(&arcmsr_global_mutex);
	arcmsr_iop_init(acb);
	mutex_exit(&arcmsr_global_mutex);

	/* Initialize power management bookkeeping. */
	if (pm_create_components(dev_info, 1) == DDI_SUCCESS) {
		if (pm_idle_component(dev_info, 0) == DDI_FAILURE) {
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: pm_idle_component fail",
			    instance);
			goto error_level_8;
		}
		pm_set_normal_power(dev_info, 0, 1);
		/* acb->power_level = 1; */
	} else {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: pm_create_components fail",
		    instance);
		goto error_level_7;
	}

	/*
	 * Since this driver manages devices with "remote" hardware,
	 * i.e. the devices themselves have no "reg" property, the SUSPEND/
	 * RESUME commands in detach/attach will not be called by the power
	 * management framework unless we request it by creating a
	 * "pm-hardware-state" property and setting it to the value
	 * "needs-suspend-resume".
	 */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dev_info,
	    "pm-hardware-state", "needs-suspend-resume")
	    != DDI_PROP_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_prop_update(\"pm-hardware-state\") failed",
		    instance);
		goto error_level_8;
	}

	/* active ccbs "timeout" watchdog */
	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
	    (60 * drv_usectohz(1000000)));
	/* report device info */
	ddi_report_dev(dev_info);
	ArcMSRHBA[arcmsr_hba_count] = acb;
	arcmsr_hba_count++;

	return (DDI_SUCCESS);

error_level_8:
	pm_destroy_components(dev_info);

error_level_7:
	/* Remove any previously allocated minor nodes */
	ddi_remove_minor_node(dev_info, NULL);

error_level_5:
	ddi_remove_intr(dev_info, 0, (void *)acb->iblock_cookie);

error_level_4:
	scsi_hba_tran_free(hba_trans);

error_level_3:
	mutex_destroy(&acb->acb_mutex);
	mutex_destroy(&acb->postq_mutex);
	mutex_destroy(&acb->workingQ_mutex);
	mutex_destroy(&acb->ioctl_mutex);

error_level_2:
	ddi_dma_mem_free(&acb->ccbs_acc_handle);
	ddi_dma_free_handle(&acb->ccbs_pool_handle);

error_level_1:
	ddi_soft_state_free(arcmsr_soft_state, instance);

error_level_0:
	return (DDI_FAILURE);
}


/*
 *      Function: arcmsr_attach(9E)
 *   Description: Set up all device state and allocate data structures,
 *		  mutexes, condition variables, etc., for device operation.
 *		  Set the mt_attr property for the driver to indicate
 *		  MT-safety. Add any interrupts needed.
 *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 *        Output: Return DDI_SUCCESS if the device is ready,
 *		  else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd) {

	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;


#if defined(ARCMSR_DEBUG)
	arcmsr_log(NULL, CE_NOTE,
	    "arcmsr_attach called for device %lx (instance %d)",
	    &dev_info, ddi_get_instance(dev_info));
#endif
	switch (cmd) {
	case DDI_ATTACH:
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
	case DDI_PM_RESUME:
		/*
		 * There is no hardware state to restart and no timeouts to
		 * restart, since we didn't PM_SUSPEND with active commands
		 * or active timeouts. We just need to unblock waiting
		 * threads and restart I/O. The code for DDI_RESUME is
		 * almost identical, except that it uses the suspend flag
		 * rather than the pm_suspend flag.
		 */
		hba_trans =
		    (scsi_hba_tran_t *)ddi_get_driver_private(dev_info);
		if (!hba_trans) {
			return (DDI_FAILURE);
		}
		acb = (struct ACB *)hba_trans->tran_hba_private;
		mutex_enter(&acb->acb_mutex);
		arcmsr_iop_init(acb);

		/* restart ccbs "timeout" watchdog */
		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		    (void *)acb, (60 * drv_usectohz(1000000)));
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);

	default:
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi attach cmd (%d) unsupported",
		    ddi_get_instance(dev_info), cmd);
		return (DDI_FAILURE);
	}
}

/*
 *    Function:	arcmsr_detach(9E)
 * Description: Remove all device allocations and system resources, and
 *		disable the device interrupt.
 *       Input: dev_info_t *dev_info
 *		ddi_detach_cmd_t cmd
 *      Output:	Return DDI_SUCCESS if done,
 *		else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {

	int instance;
	struct ACB *acb;


	instance = ddi_get_instance(dev_info);
	acb = (struct ACB *)ddi_get_soft_state(arcmsr_soft_state,
	    instance);
	if (!acb) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		arcmsr_pcidev_disattach(acb);
		/* Remove interrupt set up by ddi_add_intr */
		ddi_remove_intr(dev_info, 0, acb->iblock_cookie);
		/* unbind mapping object to handle */
		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
		/* Free ccb pool memory */
		ddi_dma_mem_free(&acb->ccbs_acc_handle);
		/* Free DMA handle */
		ddi_dma_free_handle(&acb->ccbs_pool_handle);
		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: Unable to detach instance cleanly "
			    "(should not happen)",
			    ddi_get_instance(dev_info));
		/* free scsi_hba_transport from scsi_hba_tran_alloc */
		scsi_hba_tran_free(acb->scsi_hba_transport);
		ddi_remove_minor_node(dev_info, NULL);
		ddi_prop_remove_all(dev_info);
		mutex_exit(&acb->acb_mutex);
		mutex_destroy(&acb->acb_mutex);
		mutex_destroy(&acb->postq_mutex);
		mutex_destroy(&acb->workingQ_mutex);
		mutex_destroy(&acb->ioctl_mutex);
		pci_config_teardown(&acb->pci_acc_handle);
		ddi_set_driver_private(dev_info, NULL);
		ddi_soft_state_free(arcmsr_soft_state, instance);
		pm_destroy_components(dev_info);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		/* disable all outbound interrupt */
		(void) arcmsr_disable_allintr(acb);
		/* stop adapter background rebuild */
		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
			arcmsr_stop_hba_bgrb(acb);
			arcmsr_flush_hba_cache(acb);
		} else {
			arcmsr_stop_hbb_bgrb(acb);
			arcmsr_flush_hbb_cache(acb);
		}
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}


/*
 *    Function:	arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		no per-target initialization is required, the HBA
 *		may leave tran_tgt_init set to NULL
 *       Input:
 *		dev_info_t *host_dev_info,
 *		dev_info_t *target_dev_info,
 *		scsi_hba_tran_t *tran,
 *		struct scsi_device *sd
 *
 *      Return: DDI_SUCCESS on success, else return DDI_FAILURE
 *
 *  This entry point enables the HBA to allocate and/or initialize any
 *  per-target resources.
 *  It also enables the HBA to qualify the device's address as valid and
 *  supportable for that particular HBA.
 *  By returning DDI_FAILURE, the instance of the target driver for that
 *  device is not probed or attached.
 *  This entry point is not required, and if none is supplied,
 *  the framework will attempt to probe and attach all possible instances
 *  of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *hosttran, struct scsi_device *sd) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hosttran, target_dev_info))
#endif


	uint16_t target;
	uint8_t lun;

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		cmn_err(CE_WARN,
		    "arcmsr%d: (target %d, lun %d) exceeds "
		    "maximum supported values (%d, %d)",
		    ddi_get_instance(host_dev_info),
		    target, lun, ARCMSR_MAX_TARGETID, ARCMSR_MAX_TARGETLUN);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 *         Function: arcmsr_tran_getcap(9E)
 *      Description: Get the capability named, and return its value.
 *    Return Values: current value of capability, if defined
 *		     -1 if capability is not defined
 * ------------------------------------------------------
 *         Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 a
 *	#define	SCSI_CAP_SECTOR_SIZE		11 b
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
 *	#define	SCSI_CAP_GEOMETRY		13 d
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
 *	#define	SCSI_CAP_QFULL_RETRIES		15 f
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
 *	#define	SCSI_CAP_SCSI_VERSION		17 11
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
 *	#define	SCSI_CAP_LUN_RESET		19 13
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom) {

	int capability = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;


	if (cap == NULL || whom == 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		capability = acb->tgt_scsi_opts[ap->a_target];
		break;
	case SCSI_CAP_SECTOR_SIZE:
		capability = ARCMSR_DEV_SECTOR_SIZE;
		break;
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		capability = ARCMSR_MAX_XFER_LEN;
		break;
	case SCSI_CAP_INITIATOR_ID:
		capability = ARCMSR_SCSI_INITIATOR_ID;
		break;
	case SCSI_CAP_GEOMETRY:
		/* 255 heads, 63 sectors per track */
		capability = (255 << 16) | 63;
		break;
	default:
		capability = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (capability);
}

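/*
 * Worked example for SCSI_CAP_GEOMETRY above: the returned value packs
 * heads into the upper 16 bits and sectors per track into the lower 16,
 * so (255 << 16) | 63 advertises a 255-head, 63-sector geometry. A
 * target driver would typically decode it along these lines (a sketch
 * of the common SCSA convention, not something this driver defines):
 *
 *	int geom = scsi_ifgetcap(ap, "geometry", 1);
 *	int heads = (geom >> 16) & 0xffff;	(-> 255)
 *	int sectors = geom & 0xffff;		(-> 63)
 *	cylinders = total_blocks / (heads * sectors);
 */
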
/*
 *      Function: arcmsr_tran_setcap(9E)
 *   Description: Set the specific capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		  0 - capability could not be set to new value
 *		 -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(value))
#endif


	int supported = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;


	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (supported = scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DISCONNECT:		/* 2 */
	case SCSI_CAP_SYNCHRONOUS:		/* 3 */
	case SCSI_CAP_TAGGED_QING:		/* 8 */
	case SCSI_CAP_WIDE_XFER:		/* 4 */
	case SCSI_CAP_ARQ:			/* 9 auto request sense */
	case SCSI_CAP_TOTAL_SECTORS:		/* c */
		acb->tgt_scsi_opts[ap->a_target] |= supported;
		supported = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
	case SCSI_CAP_INITIATOR_ID:		/* 6 */
	case SCSI_CAP_DMA_MAX:			/* 0 */
	case SCSI_CAP_MSG_OUT:			/* 1 */
	case SCSI_CAP_PARITY:			/* 5 */
	case SCSI_CAP_LINKED_CMDS:		/* a */
	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
	case SCSI_CAP_SECTOR_SIZE:		/* b */
		supported = 0;
		break;
	default:
		supported = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (supported);
}


static void
arcmsr_free_ccb(struct CCB *ccb) {

	struct ACB *acb = ccb->acb;

	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->pkt = NULL;
	ccb->ccb_flags = 0;
	mutex_enter(&acb->workingQ_mutex);
	acb->ccbworkingQ[acb->workingccb_doneindex] = ccb;
	acb->workingccb_doneindex++;
	acb->workingccb_doneindex %= ARCMSR_MAX_FREECCB_NUM;
	mutex_exit(&acb->workingQ_mutex);
}

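/*
 * The CCB free list is a fixed-size circular queue: arcmsr_free_ccb()
 * above returns a CCB at the done index, and the allocation side
 * (arcmsr_get_freeccb(), defined elsewhere in this file) presumably
 * dequeues at a matching start index, along the lines of this sketch
 * (workingccb_startindex is an assumption; only the done index appears
 * in this part of the file):
 *
 *	mutex_enter(&acb->workingQ_mutex);
 *	ccb = acb->ccbworkingQ[acb->workingccb_startindex];
 *	acb->ccbworkingQ[acb->workingccb_startindex] = NULL;
 *	acb->workingccb_startindex++;
 *	acb->workingccb_startindex %= ARCMSR_MAX_FREECCB_NUM;
 *	mutex_exit(&acb->workingQ_mutex);
 */
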
/*
 *      Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *                DMA resources for that pkt.
 *                Called by the kernel on behalf of a target driver
 *		  calling scsi_init_pkt(9F).
 *		  Refer to the tran_init_pkt(9E) man page.
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by an interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg) {

	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag = 1;


	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (pkt == NULL) {
		/* get free CCB */
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == (struct CCB *)NULL) {
			return (NULL);
		}

		if (ccb->pkt != NULL) {
			/*
			 * If kmem_flags are turned on, expect to
			 * see a message
			 */
			cmn_err(CE_WARN, "arcmsr%d: invalid pkt",
			    ddi_get_instance(acb->dev_info));
			return (NULL);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct scsi_pkt),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN,
			    "arcmsr%d: scsi pkt allocation failed",
			    ddi_get_instance(acb->dev_info));
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg entries are needed to xfer this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg entries we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have a partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin = 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * keep the buf pointer; tran_start uses it to fake up
		 * some of the information it needs
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb */
		arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		ccb = (struct CCB *)pkt->pkt_ha_private;
		/*
		 * you cannot update CdbLength with cmdlen here; it would
		 * cause a data compare error
		 */
		ccb->startdone = ARCMSR_CCB_UNBUILD;
	}

	/* Second step: DMA allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * The system may hand us a large run of data to transfer,
		 * anywhere from about 20 bytes up to 819200 bytes, in
		 * pieces.  arcmsr_dma_alloc() obtains a (non-NULL)
		 * pkt_dma_handle that lives until the whole run has been
		 * transferred; the run is carried out by a series of
		 * continuing READ or WRITE SCSI commands until it
		 * completes.  arcmsr_dma_move() performs the repeated
		 * moves, reusing the same ccb until the run is done.
		 * After arcmsr_tran_init_pkt() returns, the kernel uses
		 * pkt_resid and b_bcount to decide which type of SCSI
		 * command descriptor (data length) the following
		 * arcmsr_tran_start() should issue.
		 *
		 * Each transfer should be aligned on a 512-byte boundary.
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources; it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				cmn_err(CE_WARN, "arcmsr%d: dma allocation "
				    "failure ",
				    ddi_get_instance(acb->dev_info));
				if (old_pkt_flag == 0) {
					cmn_err(CE_WARN, "arcmsr%d: dma "
					    "allocation failed to free scsi "
					    "hba pkt ",
					    ddi_get_instance(acb->dev_info));
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == -1) {
				cmn_err(CE_WARN, "arcmsr%d: dma move "
				    "failed ",
				    ddi_get_instance(acb->dev_info));
				return ((struct scsi_pkt *)NULL);
			}
		}
	} else {
		pkt->pkt_resid = 0;
	}
	return (pkt);
}

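/*
 * From the target driver's side this entry point is reached through
 * scsi_init_pkt(9F); a hedged sketch of a caller that honors the
 * partial-DMA windows set up above (parameter values are illustrative
 * assumptions, not taken from this driver):
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
 *	    SLEEP_FUNC, NULL);
 *	while (pkt->pkt_resid != 0) {
 *		issue the command for this window, then call
 *		scsi_init_pkt() again with the same pkt to advance
 *		to the next window (the arcmsr_dma_move() path)
 *	}
 *
 * pkt_resid is the number of bytes not covered by the current window,
 * which is exactly what the DMA code below computes.
 */
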
/*
 * Function name: arcmsr_dma_alloc
 * Return Values: 0 if successful, -1 if failure
 *   Description: allocate DMA resources
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 *		  (the scsi_address is available as &pkt->pkt_address)
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)()) {

	struct CCB *ccb = pkt->pkt_ha_private;
	int alloc_result, map_method, dma_flags;
	int resid = 0;
	int total_ccb_xferlen = 0;
	int (*cb)(caddr_t);
	uint8_t i;

	/*
	 * at this point the PKT SCSI CDB is empty, and dma xfer length
	 * is bp->b_bcount
	 */

	if (bp->b_flags & B_READ) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_READ;
	} else {
		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	if ((alloc_result = ddi_dma_alloc_handle(acb->dev_info,
	    &arcmsr_dma_attr, cb, 0, &ccb->pkt_dma_handle))
	    != DDI_SUCCESS) {
		switch (alloc_result) {
		case DDI_DMA_BADATTR:
			/*
			 * If the system does not support physical DMA,
			 * the return value from ddi_dma_alloc_handle
			 * will be DDI_DMA_BADATTR
			 */
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'bad attribute'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		case DDI_DMA_NORESOURCES:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'no resources'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, 0);
			return (DDI_FAILURE);
		default:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'unknown failure'",
			    ddi_get_instance(acb->dev_info));
			return (DDI_FAILURE);
		}
	}

	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle, bp,
	    dma_flags, cb, 0,
	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
	    &ccb->pkt_ncookies);	/* number of sgl cookies */

	switch (map_method) {
	case DDI_DMA_PARTIAL_MAP:
		/*
		 * When main memory is larger than 4G, bindings can come
		 * back as DDI_DMA_PARTIAL_MAP.
		 *
		 * We've already set DDI_DMA_PARTIAL in dma_flags,
		 * so if it's now missing, something screwy is
		 * happening. We plow on....
		 */

		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			cmn_err(CE_WARN, "arcmsr%d: dma partial mapping lost "
			    "...impossible case!",
			    ddi_get_instance(acb->dev_info));
		}
		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_numwin() failed",
			    ddi_get_instance(acb->dev_info));
		}

		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_getwin failed",
			    ddi_get_instance(acb->dev_info));
		}

		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

		return (DDI_SUCCESS);

	case DDI_DMA_MAPPED:
		ccb->pkt_nwin = 1; /* all mapped, so only one window */
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
		return (DDI_SUCCESS);

	case DDI_DMA_NORESOURCES:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no resources'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, ENOMEM);
		break;

	case DDI_DMA_NOMAPPING:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no mapping'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EFAULT);
		break;

	case DDI_DMA_TOOBIG:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'too big'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EINVAL);
		break;

	case DDI_DMA_INUSE:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'in use' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info));
		break;
	default:
		cmn_err(CE_WARN,
		    "arcmsr%d: dma map got 'unknown failure 0x%x' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info), map_method);
#ifdef ARCMSR_DEBUG
		arcmsr_dump_scsi_cdb(&pkt->pkt_address, pkt);
#endif
		break;
	}

	ddi_dma_free_handle(&ccb->pkt_dma_handle);
	ccb->pkt_dma_handle = NULL;
	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
	return (DDI_FAILURE);
}

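/*
 * Worked example of the 512-byte trimming performed in both mapping
 * cases above: suppose the cookies gathered so far total 1300 bytes.
 * Then
 *
 *	resid = 1300 % 512 = 276
 *	total_ccb_xferlen = 1300 - 276 = 1024
 *
 * and the last cookie is shortened by 276 bytes, with the cut-off
 * piece remembered in ccb->resid_dmacookie (size 276, address at the
 * end of the shortened cookie).  arcmsr_dma_move() below replays that
 * residue as the first cookie of the next pass, so no bytes are lost
 * and every transfer handed to the IOP stays a multiple of 512 bytes.
 */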

/*
 * Function name: arcmsr_dma_move
 * Return Values: 0 if successful, -1 if failure
 *   Description: move DMA resources to next DMA window
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 */
static int
arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp) {

	struct CCB *ccb = pkt->pkt_ha_private;
	uint8_t i = 0;
	int resid = 0;
	int total_ccb_xferlen = 0;

	if (ccb->resid_dmacookie.dmac_size != 0) {
		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_size =
		    ccb->resid_dmacookie.dmac_size;
		ccb->pkt_dmacookies[i].dmac_laddress =
		    ccb->resid_dmacookie.dmac_laddress;
		i++;
		ccb->resid_dmacookie.dmac_size = 0;
	}
	/*
	 * If there are no more cookies remaining in this window,
	 * move to the next window.
	 */
	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
		/*
		 * only a "partial" dma mapping arrives here
		 */
		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
		    (ccb->pkt_nwin == 1)) {
			cmn_err(CE_CONT,
			    "arcmsr%d: dma partial set, but only "
			    "one window allocated",
			    ddi_get_instance(acb->dev_info));
			return (DDI_SUCCESS);
		}

		/* At last window, cannot move */
		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
			cmn_err(CE_WARN,
			    "arcmsr%d: dma partial set, numwin exceeded",
			    ddi_get_instance(acb->dev_info));
			return (DDI_FAILURE);
		}
		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN,
			    "arcmsr%d: dma partial set, "
			    "ddi_dma_getwin failure",
			    ddi_get_instance(acb->dev_info));
			return (DDI_FAILURE);
		}
		/* reset cookie pointer */
		ccb->pkt_cookie = 0;
	} else {
		/*
		 * Only a fully-mapped ("all") dma binding arrives here.
		 * We still have more cookies in this window; get the next
		 * one. The pkt_dma_handle's remaining cookies are recorded
		 * in the ccb->pkt_dmacookies array.
		 */
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
	}

	/* Get remaining cookies in this window, up to our maximum */
	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;

	/* retrieve and store cookies; start at ccb->pkt_dmacookies[0] */
	for (;;) {
		i++;
		/* handled cookies count level indicator */
		ccb->pkt_cookie++;
		if (i == ARCMSR_MAX_SG_ENTRIES ||
		    ccb->pkt_cookie == ccb->pkt_ncookies ||
		    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
			break;
		}
		ddi_dma_nextcookie(ccb->pkt_dma_handle,
		    &ccb->pkt_dmacookies[i]);
		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
	}

	ccb->arcmsr_cdb.sgcount = i;
	if (total_ccb_xferlen > 512) {
		resid = total_ccb_xferlen % 512;
		if (resid != 0) {
			i--;
			total_ccb_xferlen -= resid;
			/* modify last sg length */
			ccb->pkt_dmacookies[i].dmac_size =
			    ccb->pkt_dmacookies[i].dmac_size - resid;
			ccb->resid_dmacookie.dmac_size = resid;
			ccb->resid_dmacookie.dmac_laddress =
			    ccb->pkt_dmacookies[i].dmac_laddress +
			    ccb->pkt_dmacookies[i].dmac_size;
		}
	}
	ccb->total_dmac_size += total_ccb_xferlen;
	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

	return (DDI_SUCCESS);
}

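/*
 * A short trace of the window logic above, using hypothetical numbers
 * for illustration: assume a bind produced ncookies = 70 and the
 * per-CCB limit ARCMSR_MAX_SG_ENTRIES is 38.
 *
 *	1st init_pkt: cookies 0..37 consumed, pkt_cookie = 38
 *	1st dma_move: resumes at cookie 38 via ddi_dma_nextcookie(),
 *	              consumes 38..69, pkt_cookie = 70 (== ncookies)
 *	2nd dma_move: no cookies left, so ddi_dma_getwin() advances to
 *	              the next window, if DDI_DMA_PARTIAL produced one
 *
 * Each pass also replays any 512-byte residue left by the previous
 * pass (the resid_dmacookie block at the top of the function).
 */
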
/*
 * Function name: arcmsr_tran_destroy_pkt
 * Return Values: none
 *   Description: Called by kernel on behalf of a target driver
 *	          calling scsi_destroy_pkt(9F).
 *	          Refer to tran_destroy_pkt(9E) man page
 *       Context: Can be called from different kernel process threads.
 *	          Can be called by interrupt thread.
 */
static void
arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {

	struct CCB *ccb = pkt->pkt_ha_private;

	if ((ccb != NULL) && (ccb->pkt == pkt)) {
		struct ACB *acb = ccb->acb;
		if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
			if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
			    != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "arcmsr%d: ddi_dma_unbind_handle() failed",
				    ddi_get_instance(acb->dev_info));
			}
			ddi_dma_free_handle(&ccb->pkt_dma_handle);
			ccb->pkt_dma_handle = NULL;
		}
		arcmsr_free_ccb(ccb);
	}

	scsi_hba_pkt_free(ap, pkt);
}

/*
 * Function name: arcmsr_tran_dmafree()
 * Return Values: none
 *   Description: free dvma resources
 *       Context: Can be called from different kernel process threads.
 *	          Can be called by interrupt thread.
 */
static void
arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) {

	struct CCB *ccb = pkt->pkt_ha_private;

	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "arcmsr%d: ddi_dma_unbind_handle() failed "
			    "(target %d lun %d)",
			    ddi_get_instance(ccb->acb->dev_info),
			    ap->a_target, ap->a_lun);
		}
		ddi_dma_free_handle(&ccb->pkt_dma_handle);
		ccb->pkt_dma_handle = NULL;
	}
}

/*
 * Function name: arcmsr_tran_sync_pkt()
 * Return Values: none
 *   Description: sync dma
 *       Context: Can be called from different kernel process threads.
 *		  Can be called by interrupt thread.
 */
static void
arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {

	struct CCB *ccb;

	ccb = pkt->pkt_ha_private;

	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
		if (ddi_dma_sync(ccb->pkt_dma_handle,
		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "arcmsr%d: sync pkt failed "
			    "for target %d lun %d",
			    ddi_get_instance(ccb->acb->dev_info),
			    ap->a_target, ap->a_lun);
		}
	}
}

1715 
1716 static uint8_t
1717 arcmsr_hba_wait_msgint_ready(struct ACB *acb) {
1718 
1719 	uint32_t i;
1720 	uint8_t retries = 0x00;
1721 	struct HBA_msgUnit *phbamu;
1722 
1723 
1724 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1725 
1726 	do {
1727 		for (i = 0; i < 100; i++) {
1728 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1729 			    &phbamu->outbound_intstatus) &
1730 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1731 				/* clear interrupt */
1732 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1733 				    &phbamu->outbound_intstatus,
1734 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1735 				return (TRUE);
1736 			}
1737 			drv_usecwait(10000);
1738 			if (ddi_in_panic()) {
1739 				/* clear interrupts */
1740 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1741 				    &phbamu->outbound_intstatus,
1742 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1743 				return (TRUE);
1744 			}
1745 		} /* max 1 second */
1746 	} while (retries++ < 20); /* max 20 seconds */
1747 	return (FALSE);
1748 }
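
/*
 * Timing sketch for the loop above: 100 polls of drv_usecwait(10000)
 * is roughly 1 second per pass, and up to 20 passes gives the
 * ~20 second budget noted in the comments.
 */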
1749 
1750 
1751 
1752 static uint8_t
1753 arcmsr_hbb_wait_msgint_ready(struct ACB *acb) {
1754 
1755 	struct HBB_msgUnit *phbbmu;
1756 	uint32_t i;
1757 	uint8_t retries = 0x00;
1758 
1759 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1760 
1761 	do {
1762 		for (i = 0; i < 100; i++) {
1763 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1764 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
1765 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1766 				/* clear interrupt */
1767 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1768 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1769 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1770 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1771 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1772 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1773 				return (TRUE);
1774 			}
1775 			drv_usecwait(10000);
1776 			if (ddi_in_panic()) {
1777 				/* clear interrupts */
1778 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1779 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1780 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1781 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1782 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1783 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1784 				return (TRUE);
1785 			}
1786 		} /* max 1 second */
1787 	} while (retries++ < 20); /* max 20 seconds */
1788 
1789 	return (FALSE);
1790 }
1791 
1792 
1793 static void
1794 arcmsr_flush_hba_cache(struct ACB *acb) {
1795 
1796 	struct HBA_msgUnit *phbamu;
1797 	int retry_count = 30;
1798 
1799 	/* wait up to 10 minutes (30 retries x 20 s) for the cache flush */
1800 
1801 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1802 
1803 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
1804 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
1805 
1806 	do {
1807 		if (arcmsr_hba_wait_msgint_ready(acb)) {
1808 			break;
1809 		} else {
1810 			retry_count--;
1811 		}
1812 	} while (retry_count != 0);
1813 }
1814 
1815 
1816 
1817 static void
1818 arcmsr_flush_hbb_cache(struct ACB *acb) {
1819 
1820 	struct HBB_msgUnit *phbbmu;
1821 	int retry_count = 30;
1822 
1823 	/* wait up to 10 minutes (30 retries x 20 s) for the cache flush */
1824 
1825 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1826 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1827 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1828 	    ARCMSR_MESSAGE_FLUSH_CACHE);
1829 
1830 	do {
1831 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
1832 			break;
1833 		} else {
1834 			retry_count--;
1835 		}
1836 	} while (retry_count != 0);
1837 }
1838 
1839 
1840 static void
1841 arcmsr_ccb_complete(struct CCB *ccb, int flag) {
1842 
1843 	struct ACB *acb = ccb->acb;
1844 	struct scsi_pkt *pkt = ccb->pkt;
1845 
1846 	if (flag == 1) {
1847 		acb->ccboutstandingcount--;
1848 	}
1849 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1850 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1851 
1852 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1853 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1854 		(void) ddi_dma_sync(ccb->pkt_dma_handle,
1855 		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
1856 		    DDI_DMA_SYNC_FORCPU);
1857 	}
1858 
1859 	if (pkt->pkt_comp) {
1860 		(*pkt->pkt_comp)(pkt);
1861 	}
1862 }
1863 
1864 
1865 static void
1866 arcmsr_report_sense_info(struct CCB *ccb) {
1867 
1868 	struct scsi_pkt *pkt = ccb->pkt;
1869 	struct scsi_arq_status *arq_status;
1870 
1871 
1872 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
1873 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
1874 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
1875 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
1876 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
1877 	arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
1878 	arq_status->sts_rqpkt_resid = 0;
1879 
1880 	pkt->pkt_reason = CMD_CMPLT;
1881 	/* auto rqsense took place */
1882 	pkt->pkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
1883 	    STATE_GOT_STATUS | STATE_ARQ_DONE);
1884 
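	/* note: the test below is always true; &member is never NULL */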
1885 	if (&arq_status->sts_sensedata != NULL) {
1886 		struct SENSE_DATA *cdb_sensedata;
1887 		struct scsi_extended_sense *sts_sensedata;
1888 
1889 		cdb_sensedata =
1890 		    (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
1891 		sts_sensedata = &arq_status->sts_sensedata;
1892 
1893 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
1894 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
1895 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
1896 		sts_sensedata->es_valid = cdb_sensedata->Valid;
1897 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
1898 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
1899 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
1900 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
1901 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
1902 		sts_sensedata->es_info_1 = cdb_sensedata->Information[0];
1903 		sts_sensedata->es_info_2 = cdb_sensedata->Information[1];
1904 		sts_sensedata->es_info_3 = cdb_sensedata->Information[2];
1905 		sts_sensedata->es_info_4 = cdb_sensedata->Information[3];
1906 		sts_sensedata->es_add_len =
1907 		    cdb_sensedata->AdditionalSenseLength;
1908 		sts_sensedata->es_cmd_info[0] =
1909 		    cdb_sensedata->CommandSpecificInformation[0];
1910 		sts_sensedata->es_cmd_info[1] =
1911 		    cdb_sensedata->CommandSpecificInformation[1];
1912 		sts_sensedata->es_cmd_info[2] =
1913 		    cdb_sensedata->CommandSpecificInformation[2];
1914 		sts_sensedata->es_cmd_info[3] =
1915 		    cdb_sensedata->CommandSpecificInformation[3];
1916 		sts_sensedata->es_add_code =
1917 		    cdb_sensedata->AdditionalSenseCode;
1918 		sts_sensedata->es_qual_code =
1919 		    cdb_sensedata->AdditionalSenseCodeQualifier;
1920 		sts_sensedata->es_fru_code =
1921 		    cdb_sensedata->FieldReplaceableUnitCode;
1922 	}
1923 }
1924 
1925 
1926 
1927 static void
1928 arcmsr_abort_hba_allcmd(struct ACB *acb) {
1929 
1930 	struct HBA_msgUnit *phbamu;
1931 
1932 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1933 
1934 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1935 	    &phbamu->inbound_msgaddr0,
1936 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
1937 
1938 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
1939 		cmn_err(CE_WARN,
1940 		    "arcmsr%d: timeout while waiting for 'abort all "
1941 		    "outstanding commands'",
1942 		    ddi_get_instance(acb->dev_info));
1943 	}
1944 }
1945 
1946 
1947 
1948 static void
1949 arcmsr_abort_hbb_allcmd(struct ACB *acb) {
1950 
1951 	struct HBB_msgUnit *phbbmu =
1952 	    (struct HBB_msgUnit *)acb->pmu;
1953 
1954 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1955 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1956 	    ARCMSR_MESSAGE_ABORT_CMD);
1957 
1958 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
1959 		cmn_err(CE_WARN,
1960 		    "arcmsr%d: timeout while waiting for 'abort all "
1961 		    "outstanding commands'",
1962 		    ddi_get_instance(acb->dev_info));
1963 	}
1964 }
1965 
1966 static void
1967 arcmsr_report_ccb_state(struct ACB *acb,
1968     struct CCB *ccb, uint32_t flag_ccb) {
1969 
1970 	int id, lun;
1971 
1972 	id = ccb->pkt->pkt_address.a_target;
1973 	lun = ccb->pkt->pkt_address.a_lun;
1974 
1975 	if ((flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR) == 0) {
1976 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1977 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
1978 		}
1979 		ccb->pkt->pkt_reason = CMD_CMPLT;
1980 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1981 		arcmsr_ccb_complete(ccb, 1);
1982 	} else {
1983 		switch (ccb->arcmsr_cdb.DeviceStatus) {
1984 		case ARCMSR_DEV_SELECT_TIMEOUT:
1985 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1986 				cmn_err(CE_CONT,
1987 				    "arcmsr%d: raid volume was kicked out",
1988 				    ddi_get_instance(acb->dev_info));
1989 			}
1990 			acb->devstate[id][lun] = ARECA_RAID_GONE;
1991 			ccb->pkt->pkt_reason = CMD_TIMEOUT;
1992 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1993 			arcmsr_ccb_complete(ccb, 1);
1994 			break;
1995 		case ARCMSR_DEV_ABORTED:
1996 		case ARCMSR_DEV_INIT_FAIL:
1997 			cmn_err(CE_CONT,
1998 			    "arcmsr%d: isr got "
1999 			    "'ARCMSR_DEV_ABORTED' or 'ARCMSR_DEV_INIT_FAIL'",
2000 			    ddi_get_instance(acb->dev_info));
2001 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2002 			    "out", ddi_get_instance(acb->dev_info));
2003 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2004 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2005 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2006 			arcmsr_ccb_complete(ccb, 1);
2007 			break;
2008 		case SCSISTAT_CHECK_CONDITION:
2009 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2010 			arcmsr_report_sense_info(ccb);
2011 			arcmsr_ccb_complete(ccb, 1);
2012 			break;
2013 		default:
2014 			cmn_err(CE_WARN, "arcmsr%d: target %d lun %d "
2015 			    "isr received CMD_DONE with unknown "
2016 			    "DeviceStatus (0x%x)",
2017 			    ddi_get_instance(acb->dev_info), id, lun,
2018 			    ccb->arcmsr_cdb.DeviceStatus);
2019 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2020 			    "out", ddi_get_instance(acb->dev_info));
2021 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2022 			/* unknown error or crc error just for retry */
2023 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2024 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2025 			arcmsr_ccb_complete(ccb, 1);
2026 			break;
2027 		}
2028 	}
2029 }
2030 
2031 
2032 static void
2033 arcmsr_drain_donequeue(struct ACB *acb, uint32_t flag_ccb) {
2034 
2035 	struct CCB *ccb;
2036 
2037 	/* map the reply descriptor back to its ccb */
2038 	ccb = (struct CCB *)(acb->vir2phy_offset +
2039 	    (flag_ccb << 5)); /* frame must be aligned on a 32-byte boundary */
2040 
2041 	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2042 		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
2043 			cmn_err(CE_CONT,
2044 			    "arcmsr%d: isr got aborted command "
2045 			    "while draining doneq",
2046 			    ddi_get_instance(acb->dev_info));
2047 			ccb->pkt->pkt_reason = CMD_ABORTED;
2048 			ccb->pkt->pkt_statistics |= STAT_ABORTED;
2049 			arcmsr_ccb_complete(ccb, 1);
2050 			return;
2051 		}
2052 
2053 		if (ccb->startdone == ARCMSR_CCB_RESET) {
2054 			cmn_err(CE_CONT,
2055 			    "arcmsr%d: isr got command reset "
2056 			    "while draining doneq",
2057 			    ddi_get_instance(acb->dev_info));
2058 			ccb->pkt->pkt_reason = CMD_RESET;
2059 			ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2060 			arcmsr_ccb_complete(ccb, 1);
2061 			return;
2062 		}
2063 
2064 		cmn_err(CE_WARN, "arcmsr%d: isr got an illegal ccb command "
2065 		    "done while draining doneq",
2066 		    ddi_get_instance(acb->dev_info));
2067 		return;
2068 	}
2069 	arcmsr_report_ccb_state(acb, ccb, flag_ccb);
2070 }
2071 
2072 
2073 static void
2074 arcmsr_done4abort_postqueue(struct ACB *acb) {
2075 
2076 	int i = 0;
2077 	uint32_t flag_ccb;
2078 
2079 	switch (acb->adapter_type) {
2080 	case ACB_ADAPTER_TYPE_A:
2081 	{
2082 		struct HBA_msgUnit *phbamu;
2083 		uint32_t outbound_intstatus;
2084 
2085 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2086 		/* clear and abort all outbound posted Q */
2087 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2088 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
2089 		/* clear interrupt */
2090 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2091 		    &phbamu->outbound_intstatus, outbound_intstatus);
2092 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2093 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
2094 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
2095 			arcmsr_drain_donequeue(acb, flag_ccb);
2096 		}
2097 	}
2098 		break;
2099 
2100 	case ACB_ADAPTER_TYPE_B:
2101 	{
2102 		struct HBB_msgUnit *phbbmu;
2103 
2104 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2105 
2106 		/* clear all outbound posted Q */
2107 		/* clear doorbell interrupt */
2108 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2109 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
2110 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
2111 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
2112 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
2113 				phbbmu->done_qbuffer[i] = 0;
2114 				arcmsr_drain_donequeue(acb, flag_ccb);
2115 			}
2116 			phbbmu->post_qbuffer[i] = 0;
2117 		}	/* drain reply FIFO */
2118 		phbbmu->doneq_index = 0;
2119 		phbbmu->postq_index = 0;
2120 		break;
2121 	}
2122 	}
2123 }
2124 
2125 /*
2126  * Routine Description: Reset 80331 iop.
2127  *           Arguments: struct ACB *acb
2128  *        Return Value: Nothing.
2129  */
2130 static void
2131 arcmsr_iop_reset(struct ACB *acb) {
2132 
2133 	struct CCB *ccb;
2134 	uint32_t intmask_org;
2135 	int i = 0;
2136 
2137 	if (acb->ccboutstandingcount > 0) {
2138 		/* disable all outbound interrupt */
2139 		intmask_org = arcmsr_disable_allintr(acb);
2140 		/* talk to iop 331 outstanding command aborted */
2141 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2142 			arcmsr_abort_hba_allcmd(acb);
2143 		} else {
2144 			arcmsr_abort_hbb_allcmd(acb);
2145 		}
2146 		/* clear and abort all outbound posted Q */
2147 		arcmsr_done4abort_postqueue(acb);
2148 
2149 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2150 			ccb = acb->pccb_pool[i];
2151 			if (ccb->startdone == ARCMSR_CCB_START) {
2152 				ccb->startdone = ARCMSR_CCB_RESET;
2153 				ccb->pkt->pkt_reason = CMD_RESET;
2154 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2155 				arcmsr_ccb_complete(ccb, 1);
2156 			}
2157 		}
2158 		/* enable all outbound interrupt */
2159 		arcmsr_enable_allintr(acb, intmask_org);
2160 	}
2161 }
2162 
2163 /*
2164  * You can access the DMA address through the #defines:
2165  * dmac_address for 32-bit addresses and dmac_laddress for 64-bit addresses.
2166  *	These macros are defined as follows:
2167  *
2168  *	#define dmac_laddress   _dmu._dmac_ll
2169  *	#ifdef _LONG_LONG_HTOL
2170  *		#define dmac_notused    _dmu._dmac_la[0]
2171  *		#define dmac_address    _dmu._dmac_la[1]
2172  *	#else
2173  *		#define dmac_address    _dmu._dmac_la[0]
2174  *		#define dmac_notused    _dmu._dmac_la[1]
2175  *	#endif
2176  */
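
/*
 * A minimal sketch of the 64-bit split (editor's example; it assumes
 * the dma_addr_lo32()/dma_addr_hi32() macros used below reduce to
 * this):
 *
 *	uint64_t laddr = cookie.dmac_laddress;
 *	uint32_t lo = (uint32_t)(laddr & 0xFFFFFFFFULL);
 *	uint32_t hi = (uint32_t)(laddr >> 32);
 */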
2177 /*ARGSUSED*/
2178 static void
2179 arcmsr_build_ccb(struct CCB *ccb) {
2180 
2181 	struct scsi_pkt *pkt = ccb->pkt;
2182 	struct ARCMSR_CDB *arcmsr_cdb;
2183 	char *psge;
2184 	uint32_t address_lo, address_hi;
2185 	int arccdbsize = 0x30;
2186 	uint8_t sgcount;
2187 
2188 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2189 	psge = (char *)&arcmsr_cdb->sgu;
2190 
2191 	/* record the command deadline: timeout plus current time, in seconds */
2192 	ccb->ccb_time = (time_t)(pkt->pkt_time + ddi_get_time());
2193 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb,
2194 	    arcmsr_cdb->CdbLength);
2195 	sgcount = ccb->arcmsr_cdb.sgcount;
2196 
2197 	if (sgcount) {
2198 		int length, i;
2199 		int cdb_sgcount = 0;
2200 		int total_xfer_length = 0;
2201 
2202 		/* map stor port SG list to our iop SG List. */
2203 		for (i = 0; i < sgcount; i++) {
2204 			/* Get physaddr of the current data pointer */
2205 			length = ccb->pkt_dmacookies[i].dmac_size;
2206 			total_xfer_length += length;
2207 			address_lo = dma_addr_lo32(
2208 			    ccb->pkt_dmacookies[i].dmac_laddress);
2209 			address_hi = dma_addr_hi32(
2210 			    ccb->pkt_dmacookies[i].dmac_laddress);
2211 
2212 			if (address_hi == 0) {
2213 				struct SG32ENTRY *dma_sg;
2214 
2215 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
2216 
2217 				dma_sg->address = address_lo;
2218 				dma_sg->length = length;
2219 				psge += sizeof (struct SG32ENTRY);
2220 				arccdbsize += sizeof (struct SG32ENTRY);
2221 			} else {
2222 				int sg64s_size = 0;
2223 				int tmplength = length;
2224 				int64_t span4G, length0;
2225 				struct SG64ENTRY *dma_sg;
2226 
2227 				/*LINTED*/
2228 				while (1) {
2229 					dma_sg =
2230 					    (struct SG64ENTRY *)(intptr_t)psge;
2231 					span4G =
2232 					    (int64_t)address_lo + tmplength;
2233 
2234 					dma_sg->addresshigh = address_hi;
2235 					dma_sg->address = address_lo;
2236 					if (span4G > 0x100000000ULL) {
2237 						/* see if we cross 4G */
2238 						length0 = 0x100000000ULL -
2239 						    address_lo;
2240 						dma_sg->length =
2241 						    (uint32_t)length0 |
2242 						    IS_SG64_ADDR;
2243 						address_hi = address_hi + 1;
2244 						address_lo = 0;
2245 						tmplength = tmplength -
2246 						    (int32_t)length0;
2247 						sg64s_size +=
2248 						    sizeof (struct SG64ENTRY);
2249 						psge +=
2250 						    sizeof (struct SG64ENTRY);
2251 						cdb_sgcount++;
2252 					} else {
2253 						dma_sg->length = tmplength |
2254 						    IS_SG64_ADDR;
2255 						sg64s_size +=
2256 						    sizeof (struct SG64ENTRY);
2257 						psge +=
2258 						    sizeof (struct SG64ENTRY);
2259 						break;
2260 					}
2261 				}
2262 				arccdbsize += sg64s_size;
2263 			}
2264 			cdb_sgcount++;
2265 		}
2266 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
2267 		arcmsr_cdb->DataLength = total_xfer_length;
2268 		if (arccdbsize > 256) {
2269 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
2270 		}
2271 	} else {
2272 		arcmsr_cdb->DataLength = 0;
2273 	}
2274 
2275 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
2276 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
2277 }
2278 
2279 /*
2280  * arcmsr_post_ccb - Send a protocol-specific ARC send postcard to an AIOC.
2281  *
2282  * handle:		Handle of registered ARC protocol driver
2283  * adapter_id:		AIOC unique identifier (integer)
2284  * pPOSTCARD_SEND:	Pointer to ARC send postcard
2285  *
2286  * This routine posts an ARC send postcard to the request post FIFO of a
2287  * specific ARC adapter.
2288  */
2289 static int
2290 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb) {
2291 
2292 	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
2293 	struct scsi_pkt *pkt = ccb->pkt;
2294 	struct ARCMSR_CDB *arcmsr_cdb;
2295 
2296 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2297 
2298 	/* Use correct offset and size for syncing */
2299 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
2300 	    DDI_DMA_SYNC_FORDEV) == DDI_FAILURE)
2301 		return (DDI_FAILURE);
2302 
2303 	acb->ccboutstandingcount++;
2304 	ccb->startdone = ARCMSR_CCB_START;
2305 
2306 	switch (acb->adapter_type) {
2307 	case ACB_ADAPTER_TYPE_A:
2308 	{
2309 		struct HBA_msgUnit *phbamu;
2310 
2311 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2312 
2313 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2314 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2315 			    &phbamu->inbound_queueport,
2316 			    cdb_shifted_phyaddr |
2317 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2318 		} else {
2319 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2320 			    &phbamu->inbound_queueport, cdb_shifted_phyaddr);
2321 		}
2322 		if (pkt->pkt_flags & FLAG_NOINTR)
2323 			arcmsr_polling_hba_ccbdone(acb, ccb);
2324 	}
2325 		break;
2326 	case ACB_ADAPTER_TYPE_B:
2327 	{
2328 		struct HBB_msgUnit *phbbmu;
2329 		int ending_index, index;
2330 
2331 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2332 		mutex_enter(&acb->postq_mutex);
2333 		index = phbbmu->postq_index;
2334 		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
2335 		phbbmu->post_qbuffer[ending_index] = 0;
2336 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2337 			phbbmu->post_qbuffer[index] =
2338 			    (cdb_shifted_phyaddr|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2339 		} else {
2340 			phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
2341 		}
2342 		index++;
2343 		/* wrap to 0 past the last index */
2344 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
2345 		phbbmu->postq_index = index;
2346 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2347 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2348 		    ARCMSR_DRV2IOP_CDB_POSTED);
2349 		mutex_exit(&acb->postq_mutex);
2350 		if (pkt->pkt_flags & FLAG_NOINTR)
2351 			arcmsr_polling_hbb_ccbdone(acb, ccb);
2352 	}
2353 	break;
2354 	}
2355 
2356 	return (DDI_SUCCESS);
2357 }
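
/*
 * Encoding sketch for the posting above (an assumption drawn from the
 * code, not from Areca documentation): CCB frames are 32-byte aligned,
 * so the low five bits of the frame's physical address are free to
 * carry flags:
 *
 *	post = (cdb_phyaddr >> 5) | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
 *
 * and the reply path recovers the frame with (flag_ccb << 5), as in
 * arcmsr_drain_donequeue() above.
 */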
2358 
2359 
2360 
2361 
2362 static struct QBUFFER *
2363 arcmsr_get_iop_rqbuffer(struct ACB *acb) {
2364 
2365 	struct QBUFFER *qb;
2366 
2367 	switch (acb->adapter_type) {
2368 	case ACB_ADAPTER_TYPE_A:
2369 	{
2370 		struct HBA_msgUnit *phbamu;
2371 
2372 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2373 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
2374 	}
2375 		break;
2376 	case ACB_ADAPTER_TYPE_B:
2377 	{
2378 		struct HBB_msgUnit *phbbmu;
2379 
2380 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2381 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
2382 	}
2383 		break;
2384 	}
2385 
2386 	return (qb);
2387 }
2388 
2389 
2390 
2391 static struct QBUFFER *
2392 arcmsr_get_iop_wqbuffer(struct ACB *acb) {
2393 
2394 	struct QBUFFER *qbuffer = NULL;
2395 
2396 	switch (acb->adapter_type) {
2397 	case ACB_ADAPTER_TYPE_A:
2398 	{
2399 		struct HBA_msgUnit *phbamu;
2400 
2401 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2402 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
2403 	}
2404 	break;
2405 	case ACB_ADAPTER_TYPE_B:
2406 	{
2407 		struct HBB_msgUnit *phbbmu;
2408 
2409 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2410 		qbuffer =
2411 		    (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
2412 	}
2413 	break;
2414 	}
2415 	return (qbuffer);
2416 }
2417 
2418 
2419 
2420 static void
2421 arcmsr_iop_message_read(struct ACB *acb) {
2422 
2423 	switch (acb->adapter_type) {
2424 	case ACB_ADAPTER_TYPE_A:
2425 	{
2426 		struct HBA_msgUnit *phbamu;
2427 
2428 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2429 		/* let IOP know the data has been read */
2430 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2431 		    &phbamu->inbound_doorbell,
2432 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
2433 	}
2434 	break;
2435 	case ACB_ADAPTER_TYPE_B:
2436 	{
2437 		struct HBB_msgUnit *phbbmu;
2438 
2439 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2440 		/* let IOP know the data has been read */
2441 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2442 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2443 		    ARCMSR_DRV2IOP_DATA_READ_OK);
2444 	}
2445 	break;
2446 	}
2447 }
2448 
2449 
2450 
2451 static void
2452 arcmsr_iop_message_wrote(struct ACB *acb) {
2453 
2454 	switch (acb->adapter_type) {
2455 	case ACB_ADAPTER_TYPE_A:
2456 	{
2457 		struct HBA_msgUnit *phbamu;
2458 
2459 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2460 		/*
2461 		 * ring the inbound doorbell: driver data write is done; await
2462 		 * the reply on the next hwinterrupt for the next Qbuffer post
2463 		 */
2464 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2465 		    &phbamu->inbound_doorbell,
2466 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
2467 	}
2468 	break;
2469 	case ACB_ADAPTER_TYPE_B:
2470 	{
2471 		struct HBB_msgUnit *phbbmu;
2472 
2473 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2474 		/*
2475 		 * ring the inbound doorbell: the driver data was written
2476 		 * successfully; await the reply on the next hwinterrupt for
2477 		 * the next Qbuffer post
2478 		 */
2479 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2480 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2481 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
2482 	}
2483 	break;
2484 	}
2485 }
2486 
2487 
2488 
2489 static void
2490 arcmsr_post_ioctldata2iop(struct ACB *acb) {
2491 
2492 	uint8_t *pQbuffer;
2493 	struct QBUFFER *pwbuffer;
2494 	uint8_t *iop_data;
2495 	int32_t allxfer_len = 0;
2496 
2497 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2498 	iop_data = (uint8_t *)pwbuffer->data;
2499 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
2500 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
2501 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
2502 		    (allxfer_len < 124)) {
2503 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
2504 			(void) memcpy(iop_data, pQbuffer, 1);
2505 			acb->wqbuf_firstidx++;
2506 			/* wrap to 0 past the last index */
2507 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2508 			iop_data++;
2509 			allxfer_len++;
2510 		}
2511 		pwbuffer->data_len = allxfer_len;
2512 		/*
2513 		 * push inbound doorbell and wait reply at hwinterrupt
2514 		 * routine for next Qbuffer post
2515 		 */
2516 		arcmsr_iop_message_wrote(acb);
2517 	}
2518 }
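
/*
 * The wqbuffer drained above is a ring: wqbuf_firstidx chases
 * wqbuf_lastidx, both wrapping modulo ARCMSR_MAX_QBUFFER.  A sketch of
 * the free-space computation used by the writers below (it assumes
 * ARCMSR_MAX_QBUFFER is a power of two):
 *
 *	empty = (firstidx - lastidx - 1) & (ARCMSR_MAX_QBUFFER - 1);
 *
 * so an empty ring reports ARCMSR_MAX_QBUFFER - 1 free bytes; one slot
 * is always sacrificed to tell a full ring from an empty one.
 */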
2519 
2520 
2521 
2522 static void
2523 arcmsr_stop_hba_bgrb(struct ACB *acb) {
2524 
2525 	struct HBA_msgUnit *phbamu;
2526 
2527 	phbamu = (struct HBA_msgUnit *)acb->pmu;
2528 
2529 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2530 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2531 	    &phbamu->inbound_msgaddr0,
2532 	    ARCMSR_INBOUND_MESG0_STOP_BGRB);
2533 	if (!arcmsr_hba_wait_msgint_ready(acb))
2534 		cmn_err(CE_WARN,
2535 		    "arcmsr%d: timeout while waiting for background "
2536 		    "rebuild completion",
2537 		    ddi_get_instance(acb->dev_info));
2538 }
2539 
2540 
2541 static void
2542 arcmsr_stop_hbb_bgrb(struct ACB *acb) {
2543 
2544 	struct HBB_msgUnit *phbbmu;
2545 
2546 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
2547 
2548 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2549 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2550 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2551 	    ARCMSR_MESSAGE_STOP_BGRB);
2552 
2553 	if (!arcmsr_hbb_wait_msgint_ready(acb))
2554 		cmn_err(CE_WARN,
2555 		    "arcmsr%d: timeout while waiting for background "
2556 		    "rebuild completion",
2557 		    ddi_get_instance(acb->dev_info));
2558 }
2559 
2560 static int
2561 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt) {
2562 
2563 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2564 	struct CCB *ccb = pkt->pkt_ha_private;
2565 	struct buf *bp = ccb->bp;
2566 	uint8_t *pQbuffer;
2567 	int retvalue = 0, transfer_len = 0;
2568 	char *buffer;
2569 	uint32_t controlcode;
2570 
2571 
2572 	/* 4 bytes: Areca io control code */
2573 	controlcode = (uint32_t)pkt->pkt_cdbp[5] << 24 |
2574 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
2575 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
2576 	    (uint32_t)pkt->pkt_cdbp[8];
2577 
2578 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2579 		bp_mapin(bp);
2580 
2581 
2582 	buffer = bp->b_un.b_addr;
2583 	transfer_len = bp->b_bcount;
2584 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
2585 		retvalue = ARCMSR_MESSAGE_FAIL;
2586 		goto message_out;
2587 	}
2588 
2589 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
2590 
2591 	switch (controlcode) {
2592 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2593 	{
2594 		unsigned long *ver_addr;
2595 		uint8_t *ptmpQbuffer;
2596 		int32_t allxfer_len = 0;
2597 
2598 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2599 		if (!ver_addr) {
2600 			retvalue = ARCMSR_MESSAGE_FAIL;
2601 			goto message_out;
2602 		}
2603 
2604 		ptmpQbuffer = (uint8_t *)ver_addr;
2605 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2606 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2607 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2608 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2609 			acb->rqbuf_firstidx++;
2610 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2611 			ptmpQbuffer++;
2612 			allxfer_len++;
2613 		}
2614 
2615 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2616 			struct QBUFFER *prbuffer;
2617 			uint8_t  *iop_data;
2618 			int32_t iop_len;
2619 
2620 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2621 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
2622 			iop_data = (uint8_t *)prbuffer->data;
2623 			iop_len = (int32_t)prbuffer->data_len;
2624 
2625 			while (iop_len > 0) {
2626 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2627 				(void) memcpy(pQbuffer, iop_data, 1);
2628 				acb->rqbuf_lastidx++;
2629 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2630 				iop_data++;
2631 				iop_len--;
2632 			}
2633 			arcmsr_iop_message_read(acb);
2634 		}
2635 
2636 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
2637 		    (uint8_t *)ver_addr, allxfer_len);
2638 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
2639 		pcmdmessagefld->cmdmessage.ReturnCode =
2640 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2641 		kmem_free(ver_addr, MSGDATABUFLEN);
2642 	}
2643 	break;
2644 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2645 	{
2646 		unsigned long *ver_addr;
2647 		int32_t my_empty_len, user_len, wqbuf_firstidx, wqbuf_lastidx;
2648 		uint8_t *ptmpuserbuffer;
2649 
2650 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2651 		if (!ver_addr) {
2652 			retvalue = ARCMSR_MESSAGE_FAIL;
2653 			goto message_out;
2654 		}
2655 		ptmpuserbuffer = (uint8_t *)ver_addr;
2656 		user_len = pcmdmessagefld->cmdmessage.Length;
2657 		(void) memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer,
2658 		    user_len);
2659 		wqbuf_lastidx = acb->wqbuf_lastidx;
2660 		wqbuf_firstidx = acb->wqbuf_firstidx;
2661 		if (wqbuf_lastidx != wqbuf_firstidx) {
2662 			struct scsi_arq_status *arq_status;
2663 
2664 			arcmsr_post_ioctldata2iop(acb);
2665 			arq_status =
2666 			    (struct scsi_arq_status *)(intptr_t)
2667 			    (pkt->pkt_scbp);
2668 			bzero((caddr_t)arq_status,
2669 			    sizeof (struct scsi_arq_status));
2670 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
2671 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2672 			    STATE_GOT_TARGET | STATE_SENT_CMD |
2673 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2674 
2675 			arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
2676 			arq_status->sts_rqpkt_resid = 0;
2677 			if (&arq_status->sts_sensedata != NULL) {
2678 				struct scsi_extended_sense *sts_sensedata;
2679 
2680 				sts_sensedata = &arq_status->sts_sensedata;
2681 
2682 				/* on error, report sense data */
2683 				sts_sensedata->es_code = 0x0;
2684 				sts_sensedata->es_valid = 0x01;
2685 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
2686 				/* AdditionalSenseLength */
2687 				sts_sensedata->es_add_len = 0x0A;
2688 				/* AdditionalSenseCode */
2689 				sts_sensedata->es_add_code = 0x20;
2690 			}
2691 			retvalue = ARCMSR_MESSAGE_FAIL;
2692 		} else {
2693 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
2694 			    (ARCMSR_MAX_QBUFFER - 1);
2695 			if (my_empty_len >= user_len) {
2696 				while (user_len > 0) {
2697 					pQbuffer =
2698 					    &acb->wqbuffer[acb->wqbuf_lastidx];
2699 					(void) memcpy(pQbuffer,
2700 					    ptmpuserbuffer, 1);
2701 					acb->wqbuf_lastidx++;
2702 					acb->wqbuf_lastidx %=
2703 					    ARCMSR_MAX_QBUFFER;
2704 					ptmpuserbuffer++;
2705 					user_len--;
2706 				}
2707 				if (acb->acb_flags &
2708 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2709 					acb->acb_flags &=
2710 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2711 					arcmsr_post_ioctldata2iop(acb);
2712 				}
2713 			} else {
2714 				struct scsi_arq_status *arq_status;
2715 
2716 				/* on error, report sense data */
2717 				arq_status =
2718 				    (struct scsi_arq_status *)
2719 				    (intptr_t)(pkt->pkt_scbp);
2720 				bzero((caddr_t)arq_status,
2721 				    sizeof (struct scsi_arq_status));
2722 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
2723 				arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2724 				    STATE_GOT_TARGET | STATE_SENT_CMD |
2725 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2726 				arq_status->sts_rqpkt_statistics =
2727 				    pkt->pkt_statistics;
2728 				arq_status->sts_rqpkt_resid = 0;
2729 				if (&arq_status->sts_sensedata != NULL) {
2730 					struct scsi_extended_sense
2731 					    *sts_sensedata;
2732 
2733 					sts_sensedata =
2734 					    &arq_status->sts_sensedata;
2735 
2736 					/* on error, report sense data */
2737 					sts_sensedata->es_code  = 0x0;
2738 					sts_sensedata->es_valid = 0x01;
2739 					sts_sensedata->es_key =
2740 					    KEY_ILLEGAL_REQUEST;
2741 					/* AdditionalSenseLength */
2742 					sts_sensedata->es_add_len = 0x0A;
2743 					/* AdditionalSenseCode */
2744 					sts_sensedata->es_add_code = 0x20;
2745 				}
2746 				retvalue = ARCMSR_MESSAGE_FAIL;
2747 			}
2748 		}
2749 		kmem_free(ver_addr, MSGDATABUFLEN);
2750 	}
2751 	break;
2752 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
2753 	{
2754 		pQbuffer = acb->rqbuffer;
2755 
2756 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2757 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2758 			arcmsr_iop_message_read(acb);
2759 		}
2760 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2761 		acb->rqbuf_firstidx = 0;
2762 		acb->rqbuf_lastidx = 0;
2763 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2764 		pcmdmessagefld->cmdmessage.ReturnCode =
2765 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2766 	}
2767 	break;
2768 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2769 	{
2770 		pQbuffer = acb->wqbuffer;
2771 
2772 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2773 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2774 			arcmsr_iop_message_read(acb);
2775 		}
2776 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2777 		    ACB_F_MESSAGE_WQBUFFER_READ);
2778 		acb->wqbuf_firstidx = 0;
2779 		acb->wqbuf_lastidx = 0;
2780 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2781 		pcmdmessagefld->cmdmessage.ReturnCode =
2782 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2783 	}
2784 	break;
2785 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
2786 	{
2787 
2788 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2789 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2790 			arcmsr_iop_message_read(acb);
2791 		}
2792 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2793 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
2794 		    ACB_F_MESSAGE_WQBUFFER_READ);
2795 		acb->rqbuf_firstidx = 0;
2796 		acb->rqbuf_lastidx = 0;
2797 		acb->wqbuf_firstidx = 0;
2798 		acb->wqbuf_lastidx = 0;
2799 		pQbuffer = acb->rqbuffer;
2800 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2801 		pQbuffer = acb->wqbuffer;
2802 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2803 		pcmdmessagefld->cmdmessage.ReturnCode =
2804 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2805 	}
2806 	break;
2807 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
2808 		pcmdmessagefld->cmdmessage.ReturnCode =
2809 		    ARCMSR_MESSAGE_RETURNCODE_3F;
2810 		break;
2811 	/*
2812 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
2813 	 */
2814 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2815 		arcmsr_iop_parking(acb);
2816 		break;
2817 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2818 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2819 			arcmsr_flush_hba_cache(acb);
2820 		} else {
2821 			arcmsr_flush_hbb_cache(acb);
2822 		}
2823 		break;
2824 	default:
2825 		retvalue = ARCMSR_MESSAGE_FAIL;
2826 	}
2827 
2828 message_out:
2829 
2830 	return (retvalue);
2831 }
2832 
2833 
2834 
2835 static int
2836 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
2837     cred_t *credp, int *rvalp) {
2838 #ifndef __lock_lint
2839 	_NOTE(ARGUNUSED(rvalp))
2840 #endif
2841 
2842 	struct ACB *acb;
2843 	struct CMD_MESSAGE_FIELD *pktioctlfld;
2844 	int retvalue = 0;
2845 	int instance = getminor(dev);
2846 
2847 
2848 	if (instance < 0)
2849 		return (ENXIO);
2850 
2851 	if (secpolicy_sys_config(credp, B_FALSE) != 0)
2852 		return (EPERM);
2853 
2854 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2855 	if (acb == NULL)
2856 		return (ENXIO);
2857 
2858 	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD),
2859 	    KM_SLEEP);
2860 	if (pktioctlfld == NULL)
2861 		return (ENXIO);
2862 
2863 	/*
2864 	 * if we got here, we either are a 64-bit app in a 64-bit kernel
2865 	 * or a 32-bit app in a 32-bit kernel. Either way, we can just
2866 	 * copy in the args without any special conversions.
2867 	 */
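	/*
	 * Editor's note: if mixed 32-/64-bit models ever had to be
	 * supported, the usual DDI pattern is a sketch along these lines:
	 *
	 *	switch (ddi_model_convert_from(mode & FMODELS)) {
	 *	case DDI_MODEL_ILP32:
	 *		... copy in and convert an ILP32 layout ...
	 *		break;
	 *	default:
	 *		... copy in the native layout ...
	 *		break;
	 *	}
	 */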
2868 
2869 	mutex_enter(&acb->ioctl_mutex);
2870 	if (ddi_copyin((void *)arg, pktioctlfld,
2871 	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
2872 		retvalue = ENXIO;
2873 		goto ioctl_out;
2874 	}
2875 
2876 	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
2877 		/* validity check */
2878 		retvalue = ENXIO;
2879 		goto ioctl_out;
2880 	}
2881 
2882 	switch (ioctl_cmd) {
2883 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2884 	{
2885 		unsigned long *ver_addr;
2886 		uint8_t *pQbuffer, *ptmpQbuffer;
2887 		int32_t allxfer_len = 0;
2888 
2889 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2890 		if (ver_addr == NULL) {
2891 			retvalue = ENXIO;
2892 			goto ioctl_out;
2893 		}
2894 
2895 		ptmpQbuffer = (uint8_t *)ver_addr;
2896 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2897 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2898 			/* copy a byte of the read qbuffer to the staging buffer */
2899 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2900 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2901 			acb->rqbuf_firstidx++;
2902 			/* wrap to 0 past the last index */
2903 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2904 			ptmpQbuffer++;
2905 			allxfer_len++;
2906 		}
2907 
2908 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2909 			struct QBUFFER *prbuffer;
2910 			uint8_t *pQbuffer;
2911 			uint8_t *iop_data;
2912 			int32_t iop_len;
2913 
2914 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2915 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
2916 			iop_data = (uint8_t *)prbuffer->data;
2917 			iop_len = (int32_t)prbuffer->data_len;
2918 			/*
2919 			 * this iop data cannot overflow the ring buffer
2920 			 * again at this point, so copy it all
2921 			 */
2922 			while (iop_len > 0) {
2923 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2924 				(void) memcpy(pQbuffer, iop_data, 1);
2925 				acb->rqbuf_lastidx++;
2926 				/* wrap to 0 past the last index */
2927 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2928 				iop_data++;
2929 				iop_len--;
2930 			}
2931 			/* let IOP know data has been read */
2932 			arcmsr_iop_message_read(acb);
2933 		}
2934 		(void) memcpy(pktioctlfld->messagedatabuffer,
2935 		    (uint8_t *)ver_addr, allxfer_len);
2936 		pktioctlfld->cmdmessage.Length = allxfer_len;
2937 		pktioctlfld->cmdmessage.ReturnCode =
2938 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2939 
2940 		if (ddi_copyout(pktioctlfld, (void *)arg,
2941 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
2942 			retvalue = ENXIO;
2943 
2944 		kmem_free(ver_addr, MSGDATABUFLEN);
2945 	}
2946 	break;
2947 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2948 	{
2949 		unsigned long *ver_addr;
2950 		int32_t my_empty_len, user_len;
2951 		int32_t wqbuf_firstidx, wqbuf_lastidx;
2952 		uint8_t *pQbuffer, *ptmpuserbuffer;
2953 
2954 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2955 
2956 		if (ver_addr == NULL) {
2957 			retvalue = ENXIO;
2958 			goto ioctl_out;
2959 		}
2960 
2961 		ptmpuserbuffer = (uint8_t *)ver_addr;
2962 		user_len = pktioctlfld->cmdmessage.Length;
2963 		(void) memcpy(ptmpuserbuffer,
2964 		    pktioctlfld->messagedatabuffer, user_len);
2965 		/*
2966 		 * check if the data transfer length of this request would
2967 		 * overflow the qbuffer array
2968 		 */
2969 		wqbuf_lastidx = acb->wqbuf_lastidx;
2970 		wqbuf_firstidx = acb->wqbuf_firstidx;
2971 		if (wqbuf_lastidx != wqbuf_firstidx) {
2972 			arcmsr_post_ioctldata2iop(acb);
2973 			pktioctlfld->cmdmessage.ReturnCode =
2974 			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
2975 		} else {
2976 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
2977 			    & (ARCMSR_MAX_QBUFFER - 1);
2978 			if (my_empty_len >= user_len) {
2979 				while (user_len > 0) {
2980 					/* copy user data to the wqbuffer */
2981 					pQbuffer =
2982 					    &acb->wqbuffer[acb->wqbuf_lastidx];
2983 					(void) memcpy(pQbuffer,
2984 					    ptmpuserbuffer, 1);
2985 					acb->wqbuf_lastidx++;
2986 					/* wrap to 0 past the last index */
2987 					acb->wqbuf_lastidx %=
2988 					    ARCMSR_MAX_QBUFFER;
2989 					ptmpuserbuffer++;
2990 					user_len--;
2991 				}
2992 				/* post first Qbuffer */
2993 				if (acb->acb_flags &
2994 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2995 					acb->acb_flags &=
2996 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2997 					arcmsr_post_ioctldata2iop(acb);
2998 				}
2999 				pktioctlfld->cmdmessage.ReturnCode =
3000 				    ARCMSR_MESSAGE_RETURNCODE_OK;
3001 			} else {
3002 				pktioctlfld->cmdmessage.ReturnCode =
3003 				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
3004 			}
3005 		}
3006 		if (ddi_copyout(pktioctlfld, (void *)arg,
3007 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3008 			retvalue = ENXIO;
3009 
3010 		kmem_free(ver_addr, MSGDATABUFLEN);
3011 	}
3012 	break;
3013 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
3014 	{
3015 		uint8_t *pQbuffer = acb->rqbuffer;
3016 
3017 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3018 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3019 			arcmsr_iop_message_read(acb);
3020 		}
3021 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3022 		acb->rqbuf_firstidx = 0;
3023 		acb->rqbuf_lastidx = 0;
3024 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3025 		/* report success */
3026 		pktioctlfld->cmdmessage.ReturnCode =
3027 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3028 		if (ddi_copyout(pktioctlfld, (void *)arg,
3029 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3030 			retvalue = ENXIO;
3031 
3032 	}
3033 	break;
3034 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
3035 	{
3036 		uint8_t *pQbuffer = acb->wqbuffer;
3037 
3038 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3039 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3040 			arcmsr_iop_message_read(acb);
3041 		}
3042 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3043 		    ACB_F_MESSAGE_WQBUFFER_READ);
3044 		acb->wqbuf_firstidx = 0;
3045 		acb->wqbuf_lastidx = 0;
3046 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3047 		/* report success */
3048 		pktioctlfld->cmdmessage.ReturnCode =
3049 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3050 		if (ddi_copyout(pktioctlfld, (void *)arg,
3051 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3052 			retvalue = ENXIO;
3053 
3054 	}
3055 	break;
3056 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
3057 	{
3058 		uint8_t *pQbuffer;
3059 
3060 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3061 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3062 			arcmsr_iop_message_read(acb);
3063 		}
3064 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3065 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
3066 		    ACB_F_MESSAGE_WQBUFFER_READ);
3067 		acb->rqbuf_firstidx = 0;
3068 		acb->rqbuf_lastidx = 0;
3069 		acb->wqbuf_firstidx = 0;
3070 		acb->wqbuf_lastidx = 0;
3071 		pQbuffer = acb->rqbuffer;
3072 		bzero(pQbuffer, sizeof (struct QBUFFER));
3073 		pQbuffer = acb->wqbuffer;
3074 		bzero(pQbuffer, sizeof (struct QBUFFER));
3075 		/* report success */
3076 		pktioctlfld->cmdmessage.ReturnCode =
3077 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3078 		if (ddi_copyout(pktioctlfld, (void *)arg,
3079 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3080 			retvalue = ENXIO;
3081 
3082 	}
3083 	break;
3084 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
3085 	{
3086 		pktioctlfld->cmdmessage.ReturnCode =
3087 		    ARCMSR_MESSAGE_RETURNCODE_3F;
3088 		if (ddi_copyout(pktioctlfld, (void *)arg,
3089 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3090 			retvalue = ENXIO;
3091 	}
3092 	break;
3093 	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
3094 	case ARCMSR_MESSAGE_SAY_GOODBYE:
3095 		arcmsr_iop_parking(acb);
3096 		break;
3097 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
3098 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3099 			arcmsr_flush_hba_cache(acb);
3100 		} else {
3101 			arcmsr_flush_hbb_cache(acb);
3102 		}
3103 		break;
3104 	default:
3105 		retvalue = ENOTTY;
3106 	}
3107 
3108 ioctl_out:
3109 	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
3110 	mutex_exit(&acb->ioctl_mutex);
3111 
3112 	return (retvalue);
3113 }
3114 
3115 
3116 
3117 static struct CCB *
3118 arcmsr_get_freeccb(struct ACB *acb) {
3119 
3120 	struct CCB *ccb;
3121 	int workingccb_startindex, workingccb_doneindex;
3122 
3123 
3124 	mutex_enter(&acb->workingQ_mutex);
3125 	workingccb_doneindex = acb->workingccb_doneindex;
3126 	workingccb_startindex = acb->workingccb_startindex;
3127 	ccb = acb->ccbworkingQ[workingccb_startindex];
3128 	workingccb_startindex++;
3129 	workingccb_startindex %= ARCMSR_MAX_FREECCB_NUM;
3130 	if (workingccb_doneindex != workingccb_startindex) {
3131 		acb->workingccb_startindex = workingccb_startindex;
3132 	} else {
3133 		ccb = NULL;
3134 	}
3135 
3136 	mutex_exit(&acb->workingQ_mutex);
3137 	return (ccb);
3138 }
3139 
3140 
3141 
3142 static int
3143 arcmsr_seek_cmd2abort(struct ACB *acb,
3144     struct scsi_pkt *abortpkt) {
3145 
3146 	struct CCB *ccb;
3147 	uint32_t intmask_org = 0;
3148 	int i = 0;
3149 
3150 	acb->num_aborts++;
3151 
3152 	if (abortpkt == NULL) {
3153 		/*
3154 		 * if abortpkt is NULL, the upper layer needs us
3155 		 * to abort all commands
3156 		 */
3157 		if (acb->ccboutstandingcount != 0) {
3158 			/* disable all outbound interrupt */
3159 			intmask_org = arcmsr_disable_allintr(acb);
3160 			/* clear and abort all outbound posted Q */
3161 			arcmsr_done4abort_postqueue(acb);
3162 			/* talk to iop 331 outstanding command aborted */
3163 			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3164 				arcmsr_abort_hba_allcmd(acb);
3165 			} else {
3166 				arcmsr_abort_hbb_allcmd(acb);
3167 			}
3168 
3169 			for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3170 				ccb = acb->pccb_pool[i];
3171 				if (ccb->startdone == ARCMSR_CCB_START) {
3172 					/*
3173 					 * this ccb will complete at
3174 					 * hwinterrupt
3175 					 */
3176 					ccb->startdone = ARCMSR_CCB_ABORTED;
3177 					ccb->pkt->pkt_reason = CMD_ABORTED;
3178 					ccb->pkt->pkt_statistics |=
3179 					    STAT_ABORTED;
3180 					arcmsr_ccb_complete(ccb, 1);
3181 				}
3182 			}
3183 			/*
3184 			 * enable outbound Post Queue, outbound
3185 			 * doorbell Interrupt
3186 			 */
3187 			arcmsr_enable_allintr(acb, intmask_org);
3188 		}
3189 		return (DDI_SUCCESS);
3190 	}
3191 
3192 	/*
3193 	 * The upper layer acquired the abort lock just prior to
3194 	 * calling us.
3195 	 * First determine whether we currently own this command:
3196 	 * search the outstanding ccb pool. If the command is not
3197 	 * found at all, it is not ours to abort, so return
3198 	 * failure.
3199 	 */
3200 
3201 	if (acb->ccboutstandingcount != 0) {
3202 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3203 			ccb = acb->pccb_pool[i];
3204 			if (ccb->startdone == ARCMSR_CCB_START) {
3205 				if (ccb->pkt == abortpkt) {
3206 					ccb->startdone =
3207 					    ARCMSR_CCB_ABORTED;
3208 					goto abort_outstanding_cmd;
3209 				}
3210 			}
3211 		}
3212 	}
3213 
3214 	return (DDI_FAILURE);
3215 
3216 abort_outstanding_cmd:
3217 	/* disable all outbound interrupts */
3218 	intmask_org = arcmsr_disable_allintr(acb);
3219 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3220 		arcmsr_polling_hba_ccbdone(acb, ccb);
3221 	} else {
3222 		arcmsr_polling_hbb_ccbdone(acb, ccb);
3223 	}
3224 
3225 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3226 	arcmsr_enable_allintr(acb, intmask_org);
3227 	return (DDI_SUCCESS);
3228 }
3229 
3230 
3231 
3232 static void
3233 arcmsr_pcidev_disattach(struct ACB *acb) {
3234 
3235 	struct CCB *ccb;
3236 	int i = 0;
3237 
3238 	/* disable all outbound interrupts */
3239 	(void) arcmsr_disable_allintr(acb);
3240 	/* stop adapter background rebuild */
3241 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3242 		arcmsr_stop_hba_bgrb(acb);
3243 		arcmsr_flush_hba_cache(acb);
3244 	} else {
3245 		arcmsr_stop_hbb_bgrb(acb);
3246 		arcmsr_flush_hbb_cache(acb);
3247 	}
3248 	/* abort all outstanding commands */
3249 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3250 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3251 
3252 	if (acb->ccboutstandingcount != 0) {
3253 		/* clear and abort all outbound posted Q */
3254 		arcmsr_done4abort_postqueue(acb);
3255 		/* talk to iop 331 outstanding command aborted */
3256 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3257 			arcmsr_abort_hba_allcmd(acb);
3258 		} else {
3259 			arcmsr_abort_hbb_allcmd(acb);
3260 		}
3261 
3262 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3263 			ccb = acb->pccb_pool[i];
3264 			if (ccb->startdone == ARCMSR_CCB_START) {
3265 				ccb->startdone = ARCMSR_CCB_ABORTED;
3266 				ccb->pkt->pkt_reason = CMD_ABORTED;
3267 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3268 				arcmsr_ccb_complete(ccb, 1);
3269 			}
3270 		}
3271 	}
3272 }
3273 
3274 /* get firmware miscellaneous data */
3275 static void
3276 arcmsr_get_hba_config(struct ACB *acb) {
3277 
3278 	struct HBA_msgUnit *phbamu;
3279 
3280 	char *acb_firm_model;
3281 	char *acb_firm_version;
3282 	char *iop_firm_model;
3283 	char *iop_firm_version;
3284 	int count;
3285 
3286 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3287 	acb_firm_model = acb->firm_model;
3288 	acb_firm_version = acb->firm_version;
3289 	/* firm_model, 15 */
3290 	iop_firm_model = (char *)
3291 	    (&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3292 	/* firm_version, 17 */
3293 	iop_firm_version =
3294 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3295 
3296 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3297 	    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3298 
3299 	if (!arcmsr_hba_wait_msgint_ready(acb))
3300 		cmn_err(CE_CONT,
3301 		    "arcmsr%d: timeout while waiting for adapter firmware "
3302 		    "miscellaneous data",
3303 		    ddi_get_instance(acb->dev_info));
3304 
3305 	count = 8;
3306 	while (count) {
3307 		*acb_firm_model =
3308 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
3309 		acb_firm_model++;
3310 		iop_firm_model++;
3311 		count--;
3312 	}
3313 
3314 	count = 16;
3315 	while (count) {
3316 		*acb_firm_version =
3317 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
3318 		acb_firm_version++;
3319 		iop_firm_version++;
3320 		count--;
3321 	}
3322 
3323 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3324 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3325 
3326 	/* firm_request_len, 1 */
3327 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3328 	    &phbamu->msgcode_rwbuffer[1]);
3329 	/* firm_numbers_queue, 2 */
3330 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3331 	    &phbamu->msgcode_rwbuffer[2]);
3332 	/* firm_sdram_size, 3 */
3333 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3334 	    &phbamu->msgcode_rwbuffer[3]);
3335 	/* firm_ide_channels, 4 */
3336 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3337 	    &phbamu->msgcode_rwbuffer[4]);
3338 }
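
/*
 * msgcode_rwbuffer layout assumed by the reads above (32-bit word
 * offsets; the model/version offsets are per the comments in the code):
 *	[1]  firm_request_len      [2]  firm_numbers_queue
 *	[3]  firm_sdram_size       [4]  firm_ide_channels
 *	[15] firm_model (8 bytes)  [17] firm_version (16 bytes)
 */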
3339 
3340 /* get firmware miscellaneous data */
3341 static void
3342 arcmsr_get_hbb_config(struct ACB *acb) {
3343 
3344 	struct HBB_msgUnit *phbbmu;
3345 	char *acb_firm_model;
3346 	char *acb_firm_version;
3347 	char *iop_firm_model;
3348 	char *iop_firm_version;
3349 	int count;
3350 
3351 
3352 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3353 	acb_firm_model = acb->firm_model;
3354 	acb_firm_version = acb->firm_version;
3355 	/* firm_model, 15 */
3356 	iop_firm_model = (char *)
3357 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3358 	/* firm_version, 17 */
3359 	iop_firm_version = (char *)
3360 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3361 
3362 
3363 
3364 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3365 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3366 	    ARCMSR_MESSAGE_GET_CONFIG);
3367 
3368 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3369 		cmn_err(CE_CONT,
3370 		    "arcmsr%d: timeout while waiting for adapter firmware "
3371 		    "miscellaneous data",
3372 		    ddi_get_instance(acb->dev_info));
3373 
3374 	count = 8;
3375 	while (count) {
3376 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3377 		    iop_firm_model);
3378 		acb_firm_model++;
3379 		iop_firm_model++;
3380 		count--;
3381 	}
3382 
3383 	count = 16;
3384 	while (count) {
3385 		*acb_firm_version = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3386 		    iop_firm_version);
3387 		acb_firm_version++;
3388 		iop_firm_version++;
3389 		count--;
3390 	}
3391 
3392 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3393 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3394 
3395 	/* firm_request_len, 1 */
3396 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3397 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
3398 	/* firm_numbers_queue, 2 */
3399 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3400 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
3401 	/* firm_sdram_size, 3 */
3402 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3403 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
3404 	/* firm_ide_channels, 4 */
3405 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3406 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
3407 }
3408 
3409 
3410 
3411 /* start background rebuild */
3412 static void
3413 arcmsr_start_hba_bgrb(struct ACB *acb) {
3414 
3415 	struct HBA_msgUnit *phbamu;
3416 
3417 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3418 
3419 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3420 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3421 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3422 
3423 	if (!arcmsr_hba_wait_msgint_ready(acb))
3424 		cmn_err(CE_WARN,
3425 		    "arcmsr%d: timeout while waiting for background "
3426 		    "rebuild to start",
3427 		    ddi_get_instance(acb->dev_info));
3428 }
3429 
3430 
3431 static void
3432 arcmsr_start_hbb_bgrb(struct ACB *acb) {
3433 
3434 	struct HBB_msgUnit *phbbmu;
3435 
3436 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3437 
3438 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3439 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3440 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3441 	    ARCMSR_MESSAGE_START_BGRB);
3442 
3443 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3444 		cmn_err(CE_WARN,
3445 		    "arcmsr%d: timeout while waiting for background "
3446 		    "rebuild to start",
3447 		    ddi_get_instance(acb->dev_info));
3448 }
3449 
3450 
3451 static void
3452 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb) {
3453 
3454 	struct HBA_msgUnit *phbamu;
3455 	struct CCB *ccb;
3456 	uint32_t flag_ccb, outbound_intstatus;
3457 	uint32_t poll_ccb_done = 0;
3458 	uint32_t poll_count = 0;
3459 
3460 
3461 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3462 
3463 polling_ccb_retry:
3464 	poll_count++;
3465 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3466 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3467 
3468 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
3469 	    outbound_intstatus); /* clear interrupt */
3470 
3471 	/* Use correct offset and size for syncing */
3472 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3473 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3474 		return;
3475 
3476 	/*LINTED*/
3477 	while (1) {
3478 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3479 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
3480 			if (poll_ccb_done) {
3481 				/* no more completed ccbs in the chip FIFO */
3482 				break;
3483 			} else {
3484 				drv_usecwait(25000);
3485 				if (poll_count > 100) {
3486 					break;
3487 				}
3488 				goto polling_ccb_retry;
3489 			}
3490 		}
3491 
3492 		/* check if the command completed without error */
3493 		ccb = (struct CCB *)(acb->vir2phy_offset  +
3494 		    (flag_ccb << 5)); /* frame must be 32-byte aligned */
3495 		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3496 
3497 		if ((ccb->acb != acb) ||
3498 		    (ccb->startdone != ARCMSR_CCB_START)) {
3499 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3500 				ccb->pkt->pkt_reason = CMD_ABORTED;
3501 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3502 				arcmsr_ccb_complete(ccb, 1);
3503 				continue;
3504 			}
3505 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3506 			    "unexpected ccb command done",
3507 			    ddi_get_instance(acb->dev_info));
3508 			continue;
3509 		}
3510 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3511 	}	/* drain reply FIFO */
3512 }
3513 
3514 
3515 static void
3516 arcmsr_polling_hbb_ccbdone(struct ACB *acb,
3517     struct CCB *poll_ccb) {
3518 
3519 	struct HBB_msgUnit *phbbmu;
3520 	struct CCB *ccb;
3521 	uint32_t flag_ccb;
3522 	uint32_t poll_ccb_done = 0;
3523 	uint32_t poll_count = 0;
3524 	int index;
3525 
3526 
3527 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3528 
3529 
3530 polling_ccb_retry:
3531 	poll_count++;
3532 	/* clear doorbell interrupt */
3533 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3534 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3535 	    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3536 
3537 	/* Use correct offset and size for syncing */
3538 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3539 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3540 		return;
3541 
3542 
3543 	/*LINTED*/
3544 	while (1) {
3545 		index = phbbmu->doneq_index;
3546 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
3547 			if (poll_ccb_done) {
3548 				/* no more completed ccbs in the chip FIFO */
3549 				break;
3550 			} else {
3551 				drv_usecwait(25000);
3552 				if (poll_count > 100)
3553 					break;
3554 
3555 				goto polling_ccb_retry;
3556 			}
3557 		}
3558 
3559 		phbbmu->done_qbuffer[index] = 0;
3560 		index++;
3561 		/* wrap the index to 0 at the end of the ring */
3562 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
3563 		phbbmu->doneq_index = index;
3564 		/* check if command done with no error */
3565 		/* frame must be 32 bytes aligned */
3566 		ccb = (struct CCB *)(acb->vir2phy_offset +
3567 		    (flag_ccb << 5));
3568 		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3569 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3570 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3571 				ccb->pkt->pkt_reason = CMD_ABORTED;
3572 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3573 				arcmsr_ccb_complete(ccb, 1);
3574 				continue;
3575 			}
3576 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3577 			    "unexpected ccb command done",
3578 			    ddi_get_instance(acb->dev_info));
3579 			continue;
3580 		}
3581 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3582 	}	/* drain reply FIFO */
3583 }
3584 
3585 
3586 /*
3587  *    Function: arcmsr_tran_start(9E)
3588  * Description: Transport the command in pktp to the target device.
3589  *		The command is not finished when this returns, only
3590  *		sent to the target; arcmsr_interrupt will call
3591  *		(*pktp->pkt_comp)(pktp) when the target device is done.
3592  *
3593  *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
3594  *      Output:	TRAN_ACCEPT if pkt is OK and the driver is not busy
3595  *		TRAN_BUSY if the driver is busy
3596  *		TRAN_BADPKT if pkt is invalid
3597  */
3598 static int
3599 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) {
3600 
3601 	struct ACB *acb;
3602 	struct CCB *ccb;
3603 	int target = ap->a_target;
3604 	int lun = ap->a_lun;
3605 
3606 
3607 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3608 	ccb = pkt->pkt_ha_private;
3609 
3610 	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
3611 	    (ccb->ccb_flags & CCB_FLAG_DMACONSISTENT))
3612 		(void) ddi_dma_sync(ccb->pkt_dma_handle, ccb->pkt_dma_offset,
3613 		    ccb->pkt_dma_len, DDI_DMA_SYNC_FORDEV);
3614 
3615 
3616 	if (ccb->startdone == ARCMSR_CCB_UNBUILD)
3617 		arcmsr_build_ccb(ccb);
3618 
3619 
3620 	if (acb->acb_flags & ACB_F_BUS_RESET) {
3621 		cmn_err(CE_CONT,
3622 		    "arcmsr%d: bus reset in progress, returning CMD_RESET",
3623 		    ddi_get_instance(acb->dev_info));
3624 		pkt->pkt_reason = CMD_RESET;
3625 		pkt->pkt_statistics |= STAT_BUS_RESET;
3626 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3627 		    STATE_SENT_CMD | STATE_GOT_STATUS);
3628 		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3629 		    (pkt->pkt_state & STATE_XFERRED_DATA))
3630 			(void) ddi_dma_sync(ccb->pkt_dma_handle,
3631 			    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3632 			    DDI_DMA_SYNC_FORCPU);
3633 
3634 		if (pkt->pkt_comp)
3635 			(*pkt->pkt_comp)(pkt);
3636 
3637 
3638 		return (TRAN_ACCEPT);
3639 	}
3640 
3641 	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
3642 		uint8_t block_cmd;
3643 
3644 		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
3645 
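		/*
		 * Masking the opcode's low nibble matches every block read
		 * against 0x08 (READ(6)/(10)/(12)/(16) are 0x08, 0x28, 0xa8,
		 * 0x88) and every block write against 0x0a (WRITE(6)/(10)/
		 * (12)/(16) are 0x0a, 0x2a, 0xaa, 0x8a).
		 */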
3646 		if (block_cmd == 0x08 || block_cmd == 0x0a) {
3647 			cmn_err(CE_CONT,
3648 			    "arcmsr%d: block read/write command while raid "
3649 			    "volume missing (cmd %02x for target %d lun %d)",
3650 			    ddi_get_instance(acb->dev_info),
3651 			    block_cmd, target, lun);
3652 			pkt->pkt_reason = CMD_TIMEOUT;
3653 			pkt->pkt_statistics |= STAT_TIMEOUT;
3654 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3655 			    STATE_SENT_CMD | STATE_GOT_STATUS);
3656 
3657 			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3658 			    (pkt->pkt_state & STATE_XFERRED_DATA))
3659 				(void) ddi_dma_sync(ccb->pkt_dma_handle,
3660 				    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3661 				    DDI_DMA_SYNC_FORCPU);
3662 
3663 
3664 			if (pkt->pkt_comp)
3665 				(*pkt->pkt_comp)(pkt);
3666 
3667 
3668 			return (TRAN_ACCEPT);
3669 		}
3670 	}
3671 
3672 
3673 	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
3674 	if (target == 16) {
3675 
3676 		struct buf *bp = ccb->bp;
3677 		uint8_t scsicmd = pkt->pkt_cdbp[0];
3678 
3679 		switch (scsicmd) {
3680 		case SCMD_INQUIRY: {
3681 			if (lun != 0) {
3682 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3683 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
3684 				arcmsr_ccb_complete(ccb, 0);
3685 				return (TRAN_ACCEPT);
3686 			}
3687 
3688 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
3689 				uint8_t inqdata[36] = { 0 };
3690 
3691 				/* The EVPD and page code are not supported */
3692 				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
3693 					inqdata[1] = 0xFF;
3694 					inqdata[2] = 0x00;
3695 				} else {
3696 					/* Periph Qualifier & Periph Dev Type */
3697 					inqdata[0] = DTYPE_PROCESSOR;
3698 					/* rem media bit & Dev Type Modifier */
3699 					inqdata[1] = 0;
3700 					/* ISO, ECMA, & ANSI versions */
3701 					inqdata[2] = 0;
3702 					/* length of additional data */
3703 					inqdata[4] = 31;
3704 					/* Vendor Identification */
3705 					bcopy("Areca   ",
3706 					    &inqdata[8], VIDLEN);
3707 					/* Product Identification */
3708 					bcopy("RAID controller ",
3709 					    &inqdata[16], PIDLEN);
3710 					/* Product Revision */
3711 					bcopy("R001",
3712 					    &inqdata[32], REVLEN);
3713 					if (bp->b_flags & (B_PHYS | B_PAGEIO))
3714 						bp_mapin(bp);
3715 
3716 					(void) memcpy(bp->b_un.b_addr,
3717 					    inqdata, sizeof (inqdata));
3718 				}
3719 				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3720 			}
3721 			arcmsr_ccb_complete(ccb, 0);
3722 			return (TRAN_ACCEPT);
3723 		}
3724 		case SCMD_WRITE_BUFFER:
3725 		case SCMD_READ_BUFFER: {
3726 			if (arcmsr_iop_message_xfer(acb, pkt)) {
3727 				/* error just for retry */
3728 				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
3729 				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
3730 			}
3731 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3732 			arcmsr_ccb_complete(ccb, 0);
3733 			return (TRAN_ACCEPT);
3734 		}
3735 		default:
3736 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3737 			arcmsr_ccb_complete(ccb, 0);
3738 			return (TRAN_ACCEPT);
3739 		}
3740 	}
3741 
3742 	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
3743 		cmn_err(CE_CONT,
3744 		    "arcmsr%d: too many outstanding commands (%d >= %d)",
3745 		    ddi_get_instance(acb->dev_info),
3746 		    acb->ccboutstandingcount,
3747 		    ARCMSR_MAX_OUTSTANDING_CMD);
3748 		return (TRAN_BUSY);
3749 	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
3750 		cmn_err(CE_CONT,
3751 		    "arcmsr%d: post failure, ccboutstandingcount = %d",
3752 		    ddi_get_instance(acb->dev_info),
3753 		    acb->ccboutstandingcount);
3754 		return (TRAN_BUSY);
3755 	}
3756 
3757 	return (TRAN_ACCEPT);
3758 }
3759 
3760 /*
3761  * Function: arcmsr_tran_abort(9E)
3762  * 		SCSA interface routine to abort pkt(s) in progress.
3763  * 		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
3764  * Output:	Return 1 on success
3765  *		Return 0 on failure
3766  */
3767 static int
3768 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt) {
3769 
3770 	struct ACB *acb;
3771 	int return_code;
3772 
3773 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3774 
3775 
3776 	cmn_err(CE_WARN,
3777 	    "arcmsr%d: tran_abort called for target %d lun %d",
3778 	    ddi_get_instance(acb->dev_info), ap->a_target, ap->a_lun);
3779 
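	/* let any commands still on the adapter drain before the abort */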
3780 	while (acb->ccboutstandingcount != 0) {
3781 		drv_usecwait(10000);
3782 	}
3783 
3784 	mutex_enter(&acb->acb_mutex);
3785 	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
3786 	mutex_exit(&acb->acb_mutex);
3787 
3788 	if (return_code != DDI_SUCCESS) {
3789 		cmn_err(CE_WARN,
3790 		    "arcmsr%d: abort command failed for target %d lun %d",
3791 		    ddi_get_instance(acb->dev_info),
3792 		    ap->a_target, ap->a_lun);
3793 		return (0);
3794 	}
3795 
3796 	return (1);
3797 }
3798 
3799 /*
3800  * Function: arcmsr_tran_reset(9E)
3801  *           SCSA interface routine to perform scsi resets on either
3802  *           a specified target or the bus (default).
3803  *   Output: Return 1 on success
3804  *	     Return 0 on failure
3805  */
3806 static int
3807 arcmsr_tran_reset(struct scsi_address *ap, int level) {
3808 
3809 	struct ACB *acb;
3810 	int return_code = 1;
3811 	int retry = 0;
3812 
3813 
3814 	/* Are we in the middle of dumping core? */
3815 	if (ddi_in_panic())
3816 		return (return_code);
3817 
3818 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3819 
3820 	cmn_err(CE_WARN, "arcmsr%d: tran reset (level 0x%x) called "
3821 	    "for target %d lun %d",
3822 	    ddi_get_instance(acb->dev_info), level,
3823 	    ap->a_target, ap->a_lun);
3824 	mutex_enter(&acb->acb_mutex);
3825 
3826 	while ((acb->ccboutstandingcount > 0) && (retry < 400)) {
3827 		(void) arcmsr_interrupt((caddr_t)acb);
3828 		drv_usecwait(25000);
3829 		retry++;
3830 	}
3831 
3832 	switch (level) {
3833 	case RESET_ALL:		/* level 1 */
3834 		acb->num_resets++;
3835 		acb->acb_flags |= ACB_F_BUS_RESET;
3836 		arcmsr_iop_reset(acb);
3837 		acb->acb_flags &= ~ACB_F_BUS_RESET;
3838 		return_code = 0;
3839 		break;
3840 	case RESET_TARGET:	/* level 0 */
3841 		cmn_err(CE_WARN, "arcmsr%d: target reset not supported",
3842 		    ddi_get_instance(acb->dev_info));
3843 		return_code = 0;
3844 		break;
3845 	default:
3846 		return_code = 0;
3847 	}
3848 
3849 	mutex_exit(&acb->acb_mutex);
3850 	return (return_code);
3851 }
3852 
3853 
3854 static void
3855 arcmsr_log(struct ACB *acb, int level, char *fmt, ...) {
3856 
3857 	char	buf[256];
3858 	va_list ap;
3859 
3860 	va_start(ap, fmt);
3861 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
3862 	va_end(ap);
3863 	scsi_log(acb ? acb->dev_info : NULL, "arcmsr", level, "%s", buf);
3864 }
3865 
3866 
3867 static void
3868 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
3869 
3870 	struct QBUFFER *prbuffer;
3871 	uint8_t *pQbuffer;
3872 	uint8_t *iop_data;
3873 	int my_empty_len, iop_len;
3874 	int rqbuf_firstidx, rqbuf_lastidx;
3875 
3876 	/* check whether this iop data would overflow our rqbuffer */
3877 	rqbuf_lastidx = acb->rqbuf_lastidx;
3878 	rqbuf_firstidx = acb->rqbuf_firstidx;
3879 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
3880 	iop_data = (uint8_t *)prbuffer->data;
3881 	iop_len = prbuffer->data_len;
3882 	my_empty_len = (rqbuf_firstidx - rqbuf_lastidx - 1) &
3883 	    (ARCMSR_MAX_QBUFFER - 1);
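	/*
	 * Standard power-of-two ring arithmetic (this relies on
	 * ARCMSR_MAX_QBUFFER being a power of two): mask (first - last - 1)
	 * into range to get the free space, keeping one byte unused so that
	 * firstidx == lastidx always means "empty".  For example, if
	 * ARCMSR_MAX_QBUFFER were 4096, an empty ring would report 4095
	 * free bytes.
	 */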
3884 
3885 	if (my_empty_len >= iop_len) {
3886 		while (iop_len > 0) {
3887 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
3888 			(void) memcpy(pQbuffer, iop_data, 1);
3889 			rqbuf_lastidx++;
3890 			/* wrap the index to 0 at the end of the ring */
3891 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
3892 			iop_data++;
3893 			iop_len--;
3894 		}
3895 		acb->rqbuf_lastidx = rqbuf_lastidx;
3896 		arcmsr_iop_message_read(acb);
3897 		/* signature, let IOP know data has been read */
3898 	} else {
3899 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
3900 	}
3901 }
3902 
3903 
3904 
3905 static void
3906 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
3907 
3908 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
3909 	/*
3910 	 * check if any mail packages from the user-space program are in
3911 	 * my post bag; now is the time to send them to the Areca firmware
3912 	 */
3913 
3914 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
3915 
3916 		uint8_t *pQbuffer;
3917 		struct QBUFFER *pwbuffer;
3918 		uint8_t *iop_data;
3919 		int allxfer_len = 0;
3920 
3921 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
3922 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
3923 		iop_data = (uint8_t *)pwbuffer->data;
3924 
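		/*
		 * a QBUFFER message appears to carry at most 124 data bytes,
		 * hence the limit on allxfer_len in the loop below
		 */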
3925 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
3926 		    (allxfer_len < 124)) {
3927 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
3928 			(void) memcpy(iop_data, pQbuffer, 1);
3929 			acb->wqbuf_firstidx++;
3930 			/* wrap the index to 0 at the end of the ring */
3931 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
3932 			iop_data++;
3933 			allxfer_len++;
3934 		}
3935 		pwbuffer->data_len = allxfer_len;
3936 		/*
3937 		 * push the inbound doorbell to tell the iop the data write
3938 		 * is ok; await a reply interrupt before the next Qbuffer post
3939 		 */
3940 		arcmsr_iop_message_wrote(acb);
3941 	}
3942 
3943 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
3944 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
3945 }
3946 
3947 
3948 static void
3949 arcmsr_hba_doorbell_isr(struct ACB *acb) {
3950 
3951 	uint32_t outbound_doorbell;
3952 	struct HBA_msgUnit *phbamu;
3953 
3954 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3955 
3956 	/*
3957 	 *  We may need to check here whether wrqbuffer_lock is held.
3958 	 *  DOORBELL: ding! dong!
3959 	 *  Check if there is any mail to collect from the firmware.
3960 	 */
3961 
3962 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3963 	    &phbamu->outbound_doorbell);
3964 	/* clear doorbell interrupt */
3965 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3966 	    &phbamu->outbound_doorbell, outbound_doorbell);
3967 
3968 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
3969 		arcmsr_iop2drv_data_wrote_handle(acb);
3970 
3971 
3972 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
3973 		arcmsr_iop2drv_data_read_handle(acb);
3974 }
3975 
3976 
3977 
3978 static void
3979 arcmsr_hba_postqueue_isr(struct ACB *acb) {
3980 
3981 	uint32_t flag_ccb;
3982 	struct HBA_msgUnit *phbamu;
3983 
3984 
3985 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3986 
3987 	/* areca cdb command done */
3988 	/* Use correct offset and size for syncing */
3989 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3990 	    DDI_DMA_SYNC_FORKERNEL);
3991 
3992 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3993 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
3994 		/* check if command done with no error */
3995 		arcmsr_drain_donequeue(acb, flag_ccb);
3996 	}	/* drain reply FIFO */
3997 }
3998 
3999 
4000 
4001 static void
4002 arcmsr_hbb_postqueue_isr(struct ACB *acb) {
4003 
4004 	int index;
4005 	uint32_t flag_ccb;
4006 	struct HBB_msgUnit *phbbmu;
4007 
4008 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4009 
4010 
4011 	/* areca cdb command done */
4012 	index = phbbmu->doneq_index;
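	/*
	 * done_qbuffer is a ring: the iop posts completed ccb references
	 * and the driver consumes them at doneq_index, zeroing each slot
	 * as it goes, so a zero slot marks the end of the pending
	 * completions.
	 */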
4013 
4014 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
4015 		phbbmu->done_qbuffer[index] = 0;
4016 		index++;
4017 		/* wrap the index to 0 at the end of the ring */
4018 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4019 		phbbmu->doneq_index = index;
4020 		/* check if command done with no error */
4021 		arcmsr_drain_donequeue(acb, flag_ccb);
4022 	}	/* drain reply FIFO */
4023 }
4024 
4025 
4026 static uint_t
4027 arcmsr_handle_hba_isr(struct ACB *acb) {
4028 
4029 	uint32_t outbound_intstatus;
4030 	struct HBA_msgUnit *phbamu;
4031 
4032 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4033 
4034 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4035 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
4036 
4037 	if (!outbound_intstatus)
4038 		/* it must be a shared irq */
4039 		return (DDI_INTR_UNCLAIMED);
4040 
4041 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
4042 	    outbound_intstatus); /* clear interrupt */
4043 
4044 
4045 	/* MU doorbell interrupts */
4046 
4047 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
4048 		arcmsr_hba_doorbell_isr(acb);
4049 
4050 	/* MU post queue interrupts */
4051 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
4052 		arcmsr_hba_postqueue_isr(acb);
4053 
4054 	/*
4055 	 * The following block is commented out pending confirmation from
4056 	 * Areca as to whether it is truly required
4057 	 */
4058 	/* MU message interrupt */
4059 	/*
4060 	 * if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
4061 	 *	arcmsr_hba_message_isr(acb);
4062 	 * }
4063 	 */
4064 	return (DDI_INTR_CLAIMED);
4065 }
4066 
4067 
4068 static uint_t
4069 arcmsr_handle_hbb_isr(struct ACB *acb) {
4070 
4071 	uint32_t outbound_doorbell;
4072 	struct HBB_msgUnit *phbbmu;
4073 
4074 
4075 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4076 
4077 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4078 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
4079 
4080 	if (!outbound_doorbell)
4081 		/* it must be a shared irq */
4082 		return (DDI_INTR_UNCLAIMED);
4083 
4084 	/* clear doorbell interrupt */
4085 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4086 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
4087 	/* wait a cycle */
4088 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4089 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
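	/*
	 * the read-back above should flush the posted doorbell write to
	 * the hardware before end-of-interrupt is signalled below
	 */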
4090 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4091 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4092 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4093 
4094 	/* MU ioctl transfer doorbell interrupts */
4095 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
4096 		arcmsr_iop2drv_data_wrote_handle(acb);
4097 
4098 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
4099 		arcmsr_iop2drv_data_read_handle(acb);
4100 
4101 	/* MU post queue interrupts */
4102 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
4103 		arcmsr_hbb_postqueue_isr(acb);
4104 
4105 	/*
4106 	 * The following block is commented out pending confirmation from
4107 	 * Areca as to whether it is truly required
4108 	 */
4109 	/* MU message interrupt */
4110 	/*
4111 	 * if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
4112 	 *		arcmsr_hbb_message_isr(acb);
4113 	 *	}
4114 	 */
4115 	return (DDI_INTR_CLAIMED);
4116 }
4117 
4118 
4119 static uint_t
4120 arcmsr_interrupt(caddr_t arg) {
4121 
4122 
4123 	struct ACB *acb = (struct ACB *)(intptr_t)arg;
4124 
4125 	switch (acb->adapter_type) {
4126 	case ACB_ADAPTER_TYPE_A:
4127 		return (arcmsr_handle_hba_isr(acb));
4128 	case ACB_ADAPTER_TYPE_B:
4129 		return (arcmsr_handle_hbb_isr(acb));
4130 	default:
4131 		cmn_err(CE_WARN, "arcmsr%d: unknown adapter type (%d)",
4132 		    ddi_get_instance(acb->dev_info), acb->adapter_type);
4133 		return (DDI_INTR_UNCLAIMED);
4134 	}
4135 }
4136 
4137 
4138 static void
4139 arcmsr_wait_firmware_ready(struct ACB *acb) {
4140 
4141 	uint32_t firmware_state;
4142 
4143 	firmware_state = 0;
4144 
4145 	switch (acb->adapter_type) {
4146 	case ACB_ADAPTER_TYPE_A:
4147 	{
4148 		struct HBA_msgUnit *phbamu;
4149 
4150 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4151 		do {
4152 			firmware_state =
4153 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4154 			    &phbamu->outbound_msgaddr1);
4155 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
4156 		    == 0);
4157 	}
4158 	break;
4159 	case ACB_ADAPTER_TYPE_B:
4160 	{
4161 		struct HBB_msgUnit *phbbmu;
4162 
4163 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4164 		do {
4165 			firmware_state =
4166 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4167 			    &phbbmu->hbb_doorbell->iop2drv_doorbell);
4168 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
4169 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4170 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4171 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4172 	}
4173 	break;
4174 	}
4175 }
4176 
4177 static void
4178 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb) {
4179 
4180 	switch (acb->adapter_type) {
4181 	case ACB_ADAPTER_TYPE_A:
4182 	{
4183 		struct HBA_msgUnit *phbamu;
4184 		uint32_t outbound_doorbell;
4185 
4186 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4187 		/* empty the doorbell Qbuffer if the doorbell rang */
4188 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4189 		    &phbamu->outbound_doorbell);
4190 		/* clear doorbell interrupt */
4191 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4192 		    &phbamu->outbound_doorbell, outbound_doorbell);
4193 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4194 		    &phbamu->inbound_doorbell,
4195 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4196 	}
4197 	break;
4198 	case ACB_ADAPTER_TYPE_B:
4199 	{
4200 		struct HBB_msgUnit *phbbmu;
4201 
4202 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4203 
4204 		/* clear interrupt and message state */
4205 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4206 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4207 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
4208 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4209 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4210 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4211 		/* let IOP know data has been read */
4212 	}
4213 	break;
4214 	}
4215 }
4216 
4217 
4218 static uint32_t
4219 arcmsr_iop_confirm(struct ACB *acb) {
4220 
4221 	unsigned long ccb_phyaddr;
4222 	uint32_t ccb_phyaddr_hi32;
4223 
4224 	/*
4225 	 * here we need to tell iop 331 about our freeccb.HighPart
4226 	 * if freeccb.HighPart is non-zero
4227 	 */
4228 	ccb_phyaddr = (unsigned long)acb->ccb_cookie.dmac_address;
4229 	ccb_phyaddr_hi32 = (uint32_t)((ccb_phyaddr >> 16) >> 16);
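	/*
	 * The double 16-bit shift rather than a single ">> 32" keeps this
	 * well-defined even where unsigned long is only 32 bits wide
	 * (shifting by the full width of a type is undefined in C); in
	 * that case the high part is simply zero.
	 */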
4230 
4231 	switch (acb->adapter_type) {
4232 	case ACB_ADAPTER_TYPE_A:
4233 	{
4234 		if (ccb_phyaddr_hi32 != 0) {
4235 			struct HBA_msgUnit *phbamu;
4236 
4237 			phbamu = (struct HBA_msgUnit *)acb->pmu;
4238 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4239 			    &phbamu->msgcode_rwbuffer[0],
4240 			    ARCMSR_SIGNATURE_SET_CONFIG);
4241 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4242 			    &phbamu->msgcode_rwbuffer[1], ccb_phyaddr_hi32);
4243 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4244 			    &phbamu->inbound_msgaddr0,
4245 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
4246 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
4247 				cmn_err(CE_WARN,
4248 				    "arcmsr%d: timeout setting ccb high "
4249 				    "physical address",
4250 				    ddi_get_instance(acb->dev_info));
4251 				return (FALSE);
4252 			}
4253 		}
4254 	}
4255 	break;
4256 
4257 	/* if adapter is type B, set window of "post command queue" */
4258 
4259 	case ACB_ADAPTER_TYPE_B:
4260 	{
4261 		uint32_t post_queue_phyaddr;
4262 		struct HBB_msgUnit *phbbmu;
4263 
4264 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4265 		phbbmu->postq_index = 0;
4266 		phbbmu->doneq_index = 0;
4267 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4268 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4269 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
4270 
4271 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4272 			cmn_err(CE_WARN,
4273 			    "arcmsr%d: timeout setting post command "
4274 			    "queue window",
4275 			    ddi_get_instance(acb->dev_info));
4276 			return (FALSE);
4277 		}
4278 
4279 		post_queue_phyaddr = ccb_phyaddr +
4280 		    ARCMSR_MAX_FREECCB_NUM *
4281 		    sizeof (struct CCB) +
4282 		    ARCOFFSET(struct HBB_msgUnit, post_qbuffer);
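		/*
		 * the post queue lives inside the HBB_msgUnit that was laid
		 * out in DMA memory directly after the ccb pool (see
		 * arcmsr_initialize()), so its physical address is the pool
		 * base plus the pool size plus the offset of post_qbuffer
		 * within the message unit
		 */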
4283 		/* driver "set config" signature */
4284 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4285 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
4286 		    ARCMSR_SIGNATURE_SET_CONFIG);
4287 		/* normally this will be zero */
4288 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4289 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
4290 		    ccb_phyaddr_hi32);
4291 		/* postQ base; the postQ occupies (256+8)*4 = 1056 bytes */
4292 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4293 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
4294 		    post_queue_phyaddr);
4295 		/* doneQ base, which starts 1056 bytes after the postQ */
4296 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4297 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
4298 		    post_queue_phyaddr+1056);
4299 		/* ccb maxQ size; must be (256+8)*4 = 1056 */
4300 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4301 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
4302 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4303 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4304 		    ARCMSR_MESSAGE_SET_CONFIG);
4305 
4306 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4307 			cmn_err(CE_WARN,
4308 			    "arcmsr%d: timeout setting command queue window",
4309 			    ddi_get_instance(acb->dev_info));
4310 			return (FALSE);
4311 		}
4312 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4313 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4314 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
4315 
4316 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4317 			cmn_err(CE_WARN,
4318 			    "arcmsr%d: timeout in 'start driver mode'",
4319 			    ddi_get_instance(acb->dev_info));
4320 			return (FALSE);
4321 		}
4322 	}
4323 	break;
4324 	}
4325 	return (TRUE);
4326 }
4327 
4328 
4329 /*
4330  * ONLY used for Adapter type B
4331  */
4332 static void
4333 arcmsr_enable_eoi_mode(struct ACB *acb) {
4334 
4335 	struct HBB_msgUnit *phbbmu;
4336 
4337 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4338 
4339 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4340 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4341 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
4342 
4343 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4344 		cmn_err(CE_WARN,
4345 		    "arcmsr%d (Adapter type B): "
4346 		    "'iop enable eoi mode' timeout",
4347 		    ddi_get_instance(acb->dev_info));
4348 
4349 }
4350 
4351 /* start background rebuild */
4352 static void
4353 arcmsr_iop_init(struct ACB *acb) {
4354 
4355 	uint32_t intmask_org;
4356 
4357 	/* disable all outbound interrupt */
4358 	intmask_org = arcmsr_disable_allintr(acb);
4359 	arcmsr_wait_firmware_ready(acb);
4360 	(void) arcmsr_iop_confirm(acb);
4361 
4362 	/* start background rebuild */
4363 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
4364 		arcmsr_get_hba_config(acb);
4365 		arcmsr_start_hba_bgrb(acb);
4366 	} else {
4367 		arcmsr_get_hbb_config(acb);
4368 		arcmsr_start_hbb_bgrb(acb);
4369 	}
4370 
4371 	/* empty the doorbell Qbuffer if the doorbell rang */
4372 	arcmsr_clear_doorbell_queue_buffer(acb);
4373 
4374 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
4375 		arcmsr_enable_eoi_mode(acb);
4376 
4377 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4378 	arcmsr_enable_allintr(acb, intmask_org);
4379 	acb->acb_flags |= ACB_F_IOP_INITED;
4380 }
4381 
4382 
4383 static int
4384 arcmsr_initialize(struct ACB *acb) {
4385 
4386 	struct CCB *pccb_tmp;
4387 	size_t allocated_length;
4388 	uint16_t wval;
4389 	uint32_t wlval;
4390 	uint_t intmask_org, count;
4391 	caddr_t	arcmsr_ccbs_area;
4392 	unsigned long ccb_phyaddr;
4393 	int32_t dma_sync_size;
4394 	int i, id, lun;
4395 
4396 	acb->irq = pci_config_get8(acb->pci_acc_handle,
4397 	    ARCMSR_PCI2PCI_PRIMARY_INTERRUPT_LINE_REG);
4398 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
4399 	wval = (uint16_t)((wlval >> 16) & 0xffff);
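	/*
	 * config dword 0 holds the vendor id in its low 16 bits and the
	 * device id in its high 16 bits, so wval is the PCI device id
	 */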
4400 
4401 	if (wval == PCI_DEVICE_ID_ARECA_1201) {
4402 		uint32_t *iop_mu_regs_map0;
4403 		uint32_t *iop_mu_regs_map1;
4404 		struct CCB *freeccb;
4405 		struct HBB_msgUnit *phbbmu;
4406 
4407 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
4408 		dma_sync_size = (ARCMSR_MAX_FREECCB_NUM *
4409 		    sizeof (struct CCB) + 0x20) +
4410 		    sizeof (struct HBB_msgUnit);
4411 
4412 
4413 		/* Allocate memory for the ccb */
4414 		if ((i = ddi_dma_alloc_handle(acb->dev_info,
4415 		    &arcmsr_ccb_attr, DDI_DMA_SLEEP, NULL,
4416 		    &acb->ccbs_pool_handle)) != DDI_SUCCESS) {
4417 			switch (i) {
4418 			case DDI_DMA_BADATTR:
4419 				cmn_err(CE_WARN,
4420 				    "arcmsr%d: ddi_dma_alloc_handle got "
4421 				    "DDI_DMA_BADATTR",
4422 				    ddi_get_instance(acb->dev_info));
4423 				return (DDI_FAILURE);
4424 
4425 			case DDI_DMA_NORESOURCES:
4426 				cmn_err(CE_WARN, "arcmsr%d: "
4427 				    "ddi_dma_alloc_handle got "
4428 				    "DDI_DMA_NORESOURCES",
4429 				    ddi_get_instance(acb->dev_info));
4430 				return (DDI_FAILURE);
4431 			}
4432 			cmn_err(CE_WARN,
4433 			    "arcmsr%d: ddi_dma_alloc_handle got DDI_FAILURE",
4434 			    ddi_get_instance(acb->dev_info));
4435 			return (DDI_FAILURE);
4436 		}
4437 
4438 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4439 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4440 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4441 		    &allocated_length, &acb->ccbs_acc_handle)
4442 		    != DDI_SUCCESS) {
4443 			cmn_err(CE_WARN,
4444 			    "arcmsr%d: ddi_dma_mem_alloc failed",
4445 			    ddi_get_instance(acb->dev_info));
4446 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4447 			return (DDI_FAILURE);
4448 		}
4449 
4450 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4451 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
4452 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
4453 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
4454 			cmn_err(CE_WARN,
4455 			    "arcmsr%d: ddi_dma_addr_bind_handle failed",
4456 			    ddi_get_instance(acb->dev_info));
4457 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4458 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4459 			return (DDI_FAILURE);
4460 		}
4461 		bzero(arcmsr_ccbs_area, dma_sync_size);
4462 		freeccb = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4463 		acb->pmu = (struct msgUnit *)
4464 		    &freeccb[ARCMSR_MAX_FREECCB_NUM];
4465 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4466 
4467 		/* setup device register */
4468 		if (ddi_regs_map_setup(acb->dev_info, 1,
4469 		    (caddr_t *)&iop_mu_regs_map0, 0,
4470 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
4471 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4472 			arcmsr_log(NULL, CE_WARN,
4473 			    "arcmsr%d: unable to map PCI device "
4474 			    "base0 address registers",
4475 			    ddi_get_instance(acb->dev_info));
4476 			return (DDI_FAILURE);
4477 		}
4478 
4479 		/* ARCMSR_DRV2IOP_DOORBELL */
4480 		phbbmu->hbb_doorbell =
4481 		    (struct HBB_DOORBELL *)iop_mu_regs_map0;
4482 		if (ddi_regs_map_setup(acb->dev_info, 2,
4483 		    (caddr_t *)&iop_mu_regs_map1, 0,
4484 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
4485 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
4486 			arcmsr_log(NULL, CE_WARN,
4487 			    "arcmsr%d: unable to map PCI device "
4488 			    "base1 address registers",
4489 			    ddi_get_instance(acb->dev_info));
4490 			return (DDI_FAILURE);
4491 		}
4492 
4493 		/* ARCMSR_MSGCODE_RWBUFFER */
4494 		phbbmu->hbb_rwbuffer =
4495 		    (struct HBB_RWBUFFER *)iop_mu_regs_map1;
4496 	} else {
4497 		uint32_t *iop_mu_regs_map0;
4498 
4499 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
4500 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM *
4501 		    sizeof (struct CCB) + 0x20;
4502 		if (ddi_regs_map_setup(acb->dev_info, 1,
4503 		    (caddr_t *)&iop_mu_regs_map0, 0,
4504 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
4505 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4506 			arcmsr_log(NULL, CE_WARN,
4507 			    "arcmsr%d: unable to map registers",
4508 			    ddi_get_instance(acb->dev_info));
4509 			return (DDI_FAILURE);
4510 		}
4511 
4512 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
4513 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
4514 		    DDI_SUCCESS) {
4515 			switch (i) {
4516 			case DDI_DMA_BADATTR:
4517 				cmn_err(CE_WARN,
4518 				    "arcmsr%d: ddi_dma_alloc_handle "
4519 				    "got DDI_DMA_BADATTR",
4520 				    ddi_get_instance(acb->dev_info));
4521 				return (DDI_FAILURE);
4522 			case DDI_DMA_NORESOURCES:
4523 				cmn_err(CE_WARN, "arcmsr%d: "
4524 				    "ddi_dma_alloc_handle got "
4525 				    "DDI_DMA_NORESOURCES",
4526 				    ddi_get_instance(acb->dev_info));
4527 				return (DDI_FAILURE);
4528 			}
4529 			cmn_err(CE_WARN,
4530 			    "arcmsr%d: ddi_dma_alloc_handle failed",
4531 			    ddi_get_instance(acb->dev_info));
4532 			return (DDI_FAILURE);
4533 		}
4534 
4535 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4536 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4537 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4538 		    &allocated_length, &acb->ccbs_acc_handle)
4539 		    != DDI_SUCCESS) {
4540 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_mem_alloc failed",
4541 			    ddi_get_instance(acb->dev_info));
4542 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4543 			return (DDI_FAILURE);
4544 		}
4545 
4546 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4547 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
4548 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
4549 		    &count) != DDI_DMA_MAPPED) {
4550 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_addr_bind_handle "
4551 			    "failed",
4552 			    ddi_get_instance(acb->dev_info));
4553 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4554 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4555 			return (DDI_FAILURE);
4556 		}
4557 		bzero(arcmsr_ccbs_area, dma_sync_size);
4558 		/* ioport base */
4559 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
4560 	}
4561 
4562 	/* from here on we cannot access pci configuration space again */
4563 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4564 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
4565 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4566 	/* physical address of acb->pccb_pool */
4567 	ccb_phyaddr = acb->ccb_cookie.dmac_address;
4568 
4569 	if (((unsigned long)arcmsr_ccbs_area & 0x1F) != 0) {
4570 		/* the ccb address must be on a 32-byte (0x20) boundary */
4571 		arcmsr_ccbs_area = (caddr_t)((unsigned long)arcmsr_ccbs_area +
4572 		    (0x20 - ((unsigned long)arcmsr_ccbs_area & 0x1F)));
4573 		ccb_phyaddr = (unsigned long)ccb_phyaddr +
4574 		    (0x20 - ((unsigned long)ccb_phyaddr & 0x1F));
4575 	}
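	/*
	 * both the virtual and the physical address have now been rounded
	 * up to the same 32-byte boundary, so the shifted physical
	 * addresses (cdb_shifted_phyaddr) computed below are exact
	 */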
4576 
4577 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4578 
4579 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4580 		pccb_tmp->cdb_shifted_phyaddr = ccb_phyaddr >> 5;
4581 		pccb_tmp->acb = acb;
4582 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
4583 		ccb_phyaddr = ccb_phyaddr + sizeof (struct CCB);
4584 		pccb_tmp++;
4585 	}
4586 
4587 	acb->vir2phy_offset = (unsigned long)pccb_tmp -
4588 	    (unsigned long)ccb_phyaddr;
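	/*
	 * pccb_tmp and ccb_phyaddr were advanced in lockstep in the loop
	 * above, so their difference is the constant virtual-to-physical
	 * offset of the ccb pool; it is used later to translate the
	 * physical ccb references the iop hands back
	 */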
4589 
4590 	/* disable all outbound interrupt */
4591 	intmask_org = arcmsr_disable_allintr(acb);
4592 
4593 	if (!arcmsr_iop_confirm(acb)) {
4594 		cmn_err(CE_WARN, "arcmsr%d: arcmsr_iop_confirm error",
4595 		    ddi_get_instance(acb->dev_info));
4596 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
4597 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
4598 		return (DDI_FAILURE);
4599 	}
4600 
4601 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
4602 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
4603 			acb->devstate[id][lun] = ARECA_RAID_GONE;
4604 		}
4605 	}
4606 
4607 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4608 	arcmsr_enable_allintr(acb, intmask_org);
4609 
4610 	return (DDI_SUCCESS);
4611 }
4612