xref: /illumos-gate/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c (revision ffb5616e59d0fbdc1ee94070050f240a6a4ac8e2)
/*
 *       O.S   : Solaris
 *  FILE NAME  : arcmsr.c
 *       BY    : Erich Chen
 *  Description: SCSI RAID Device Driver for
 *               ARECA RAID Host adapter
 *
 *  Copyright (C) 2002,2007 Areca Technology Corporation All rights reserved.
 *  Copyright (C) 2002,2007 Erich Chen
 *	    Web site: www.areca.com.tw
 *	      E-mail: erich@areca.com.tw
 *
 *	Redistribution and use in source and binary forms, with or without
 *	modification, are permitted provided that the following conditions
 *	are met:
 *	1. Redistributions of source code must retain the above copyright
 *	   notice, this list of conditions and the following disclaimer.
 *	2. Redistributions in binary form must reproduce the above copyright
 *	   notice, this list of conditions and the following disclaimer in the
 *	   documentation and/or other materials provided with the distribution.
 *  3. The party using or redistributing the source code and binary forms
 *     agrees to the disclaimer below and the terms and conditions set forth
 *     herein.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/ddidmareq.h>
#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/disp.h>
#include <sys/signal.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/policy.h>

#include "arcmsr.h"

static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp);
static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int arcmsr_tran_reset(struct scsi_address *ap, int level);
static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
    dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
    struct scsi_device *sd);
static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt);
static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);

static uint_t arcmsr_interrupt(caddr_t arg);
static int arcmsr_initialize(struct ACB *acb);
static int arcmsr_dma_alloc(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
static int arcmsr_dma_move(struct ACB *acb,
    struct scsi_pkt *pkt, struct buf *bp);
static void arcmsr_pcidev_disattach(struct ACB *acb);
static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
static void arcmsr_iop_init(struct ACB *acb);
static void arcmsr_iop_parking(struct ACB *acb);
static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
static void arcmsr_flush_hba_cache(struct ACB *acb);
static void arcmsr_flush_hbb_cache(struct ACB *acb);
static void arcmsr_stop_hba_bgrb(struct ACB *acb);
static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
static void arcmsr_start_hba_bgrb(struct ACB *acb);
static void arcmsr_start_hbb_bgrb(struct ACB *acb);
static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
static void arcmsr_build_ccb(struct CCB *ccb);


static struct ACB *ArcMSRHBA[ARCMSR_MAX_ADAPTER];
static int arcmsr_hba_count;
static void *arcmsr_soft_state = NULL;
static kmutex_t arcmsr_global_mutex;

static ddi_dma_attr_t arcmsr_dma_attr = {
	DMA_ATTR_V0,		/* ddi_dma_attr version */
	0,			/* low DMA address range */
	0xffffffff,		/* high DMA address range */
	0x00ffffff,		/* DMA counter register upper bound */
	1,			/* DMA address alignment requirements */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
	1,			/* minimum effective DMA size */
	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
	/*
	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
	 * list element's "address+length". The Intel IOP331 cannot use
	 * segments over the 4G boundary due to segment boundary restrictions
	 */
	0x00ffffff,
	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
	1,			/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};
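
/*
 * Note on dma_attr_seg above: cookies handed back by the DDI never
 * cross an (attr_seg + 1)-byte address boundary, so with 0x00ffffff
 * every scatter/gather element stays inside a 16MB-aligned region,
 * which in particular also keeps it from straddling the 4G line
 * mentioned in the comment.
 */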

static ddi_dma_attr_t arcmsr_ccb_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low DMA address range */
	0xffffffff,	/* high DMA address range */
	0x00ffffff,	/* DMA counter register upper bound */
	1,		/* default byte alignment */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
	1,		/* minimum effective DMA size */
	0xffffffff,	/* maximum DMA xfer size */
	0x00ffffff,	/* max segment size, segment boundary restrictions */
	1,		/* scatter/gather list count */
	1,		/* device granularity */
	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
};

static struct cb_ops arcmsr_cb_ops = {
	scsi_hba_open,		/* open(9E) */
	scsi_hba_close,		/* close(9E) */
	nodev,			/* strategy(9E), returns ENXIO */
	nodev,			/* print(9E) */
	nodev,			/* dump(9E) Cannot be used as a dump device */
	nodev,			/* read(9E) */
	nodev,			/* write(9E) */
	arcmsr_cb_ioctl,	/* ioctl(9E) */
	nodev,			/* devmap(9E) */
	nodev,			/* mmap(9E) */
	nodev,			/* segmap(9E) */
	NULL,			/* chpoll(9E) returns ENXIO */
	nodev,			/* prop_op(9E) */
	NULL,			/* streamtab(9S) */
#ifdef _LP64
	/*
	 * cb_ops cb_flag:
	 *	D_NEW | D_MP	compatibility flags, see conf.h
	 *	D_MP 		flag indicates that the driver is safe for
	 *			multi-threaded operation
	 *	D_64BIT		flag driver properly handles 64-bit offsets
	 */
	D_HOTPLUG | D_MP | D_64BIT,
#else
	D_HOTPLUG | D_MP,
#endif
	CB_REV,
	nodev,			/* aread(9E) */
	nodev			/* awrite(9E) */
};

static struct dev_ops arcmsr_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* reference count */
	nodev,			/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	arcmsr_attach,		/* attach */
	arcmsr_detach,		/* detach */
	arcmsr_reset,		/* reset, shutdown, reboot notify */
	&arcmsr_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nulldev			/* power */
};

char _depends_on[] = "misc/scsi";

static struct modldrv arcmsr_modldrv = {
	&mod_driverops, 	/* Type of module. This is a driver. */
	ARCMSR_DRIVER_VERSION,	/* module description string, from arcmsr.h */
	&arcmsr_ops,		/* driver ops */
};

static struct modlinkage arcmsr_modlinkage = {
	MODREV_1,
	&arcmsr_modldrv,
	NULL
};


int
_init(void) {
	int ret;


	mutex_init(&arcmsr_global_mutex, "arcmsr global mutex",
	    MUTEX_DRIVER, NULL);
	ret = ddi_soft_state_init(&arcmsr_soft_state,
	    sizeof (struct ACB), ARCMSR_MAX_ADAPTER);
	if (ret != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		return (ret);
	}
	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		ddi_soft_state_fini(&arcmsr_soft_state);
		return (ret);
	}

	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}


int
_fini(void) {
	int ret;

	ret = mod_remove(&arcmsr_modlinkage);
	if (ret == 0) {
		/* a return of 0 means the driver may be removed */
		mutex_destroy(&arcmsr_global_mutex);
		scsi_hba_fini(&arcmsr_modlinkage);
		if (arcmsr_soft_state != NULL) {
			ddi_soft_state_fini(&arcmsr_soft_state);
		}
	}
	return (ret);
}
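
/*
 * Note: mod_remove(9F) fails (typically with EBUSY) while any instance
 * of the driver is still attached, so the teardown in _fini() above
 * runs only once the framework agrees the module can be unloaded,
 * mirroring the setup order in _init().
 */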


int
_info(struct modinfo *modinfop) {
	return (mod_info(&arcmsr_modlinkage, modinfop));
}



#if defined(ARCMSR_DEBUG)
static void
arcmsr_dump_scsi_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) {

	static char hex[] = "0123456789abcdef";
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;
	struct CCB *ccb =
	    (struct CCB *)pkt->pkt_ha_private;
	uint8_t	*cdb = pkt->pkt_cdbp;
	char buf[256];
	char *p;
	int i;


	(void) sprintf(buf, "arcmsr%d: sgcount=%d <%d, %d> "
	    "cdb ",
	    ddi_get_instance(acb->dev_info), ccb->arcmsr_cdb.sgcount,
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);
	*p++ = '[';

	for (i = 0; i < ccb->arcmsr_cdb.CdbLength; i++, cdb++) {
		if (i != 0) {
			*p++ = ' ';
		}
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '.';
	*p = 0;
	/* use a "%s" format; the assembled buffer may contain '%' */
	cmn_err(CE_CONT, "%s", buf);
}
#endif  /* ARCMSR_DEBUG */

static void
arcmsr_ccbs_timeout(void *arg) {

	struct ACB *acb = (struct ACB *)arg;
	struct CCB *ccb;
	int i;
	time_t current_time = ddi_get_time();


	if (acb->ccboutstandingcount != 0) {
		/* check each ccb */
		i = ddi_dma_sync(acb->ccbs_pool_handle, 0,
		    acb->dma_sync_size, DDI_DMA_SYNC_FORKERNEL);
		if (i != DDI_SUCCESS) {
			if ((acb->timeout_id != 0) &&
			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
				/* do pkt timeout check every 60 seconds */
				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
				    (void *)acb,
				    (60 * drv_usectohz(1000000)));
			}
			return;
		}
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->acb != acb) {
				break;
			}
			if (ccb->startdone == ARCMSR_CCB_DONE) {
				continue;
			}
			if (ccb->pkt == NULL) {
				continue;
			}
			if (ccb->pkt->pkt_time == 0) {
				continue;
			}
			if (ccb->ccb_time >= current_time) {
				continue;
			}
			if (ccb->startdone == ARCMSR_CCB_START) {
				int id = ccb->pkt->pkt_address.a_target;
				int lun = ccb->pkt->pkt_address.a_lun;

				/*
				 * handle outstanding command of timeout ccb
				 */
				ccb->pkt->pkt_reason = CMD_TIMEOUT;
				ccb->pkt->pkt_statistics = STAT_TIMEOUT;

				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "outstanding command timeout",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				cmn_err(CE_CONT,
				    "arcmsr%d: scsi target %d lun %d "
				    "fatal error on target, device is gone",
				    ddi_get_instance(acb->dev_info),
				    id, lun);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				arcmsr_ccb_complete(ccb, 1);
				continue;
			}
			ccb->ccb_time = (time_t)(ccb->pkt->pkt_time +
			    current_time); /* adjust ccb_time of pending ccb */
		}
	}
	if ((acb->timeout_id != 0) &&
	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
		/* do pkt timeout check every 60 seconds */
		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		    (void *)acb, (60 * drv_usectohz(1000000)));
	}
}
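
/*
 * Callout interval note: drv_usectohz(1000000) converts one second to
 * clock ticks, so (60 * drv_usectohz(1000000)) arms the watchdog 60
 * seconds out; with the common hz=100 setting that is 6000 ticks.
 */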


static uint32_t
arcmsr_disable_allintr(struct ACB *acb) {

	uint32_t intmask_org;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/* disable all outbound interrupt */
		/* disable outbound message0 int */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask) |
		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask,
		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		/* disable all outbound interrupt */
		/* disable outbound message0 int */
		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask) &
		    (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		/* disable all interrupts */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
		}
		break;
	}
	return (intmask_org);
}


static void
arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org) {

	int mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_msgUnit *phbamu =
		    (struct HBA_msgUnit *)acb->pmu;

		/* enable outbound Post Queue, outbound doorbell Interrupt */
		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_intmask, intmask_org & mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_msgUnit *phbbmu =
		    (struct HBB_msgUnit *)acb->pmu;

		/* disable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
		/* 1=interrupt enable, 0=interrupt disable */
		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
		    intmask_org | mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	}
}
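
/*
 * Note on mask polarity: the type A (HBA) outbound_intmask register is
 * a disable mask, so arcmsr_disable_allintr() sets bits and
 * arcmsr_enable_allintr() clears them; the type B (HBB) iop2drv
 * doorbell mask is an enable mask (1=interrupt enable, 0=interrupt
 * disable), so disabling writes 0 and enabling sets bits.  The saved
 * intmask_org snapshot lets callers bracket critical sections with
 * disable/enable pairs.
 */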


static void
arcmsr_iop_parking(struct ACB *acb) {

	if (acb != NULL) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;

			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_allintr(acb);
			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
				arcmsr_stop_hba_bgrb(acb);
				arcmsr_flush_hba_cache(acb);
			} else {
				arcmsr_stop_hbb_bgrb(acb);
				arcmsr_flush_hbb_cache(acb);
			}
			/*
			 * enable outbound Post Queue
			 * enable outbound doorbell Interrupt
			 */
			arcmsr_enable_allintr(acb, intmask_org);
		}
	}
}



static int
arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd) {

	struct ACB *acb;
	scsi_hba_tran_t *scsi_hba_transport;

	scsi_hba_transport = (scsi_hba_tran_t *)
	    ddi_get_driver_private(resetdev);

	if (!scsi_hba_transport)
		return (DDI_FAILURE);

	acb = (struct ACB *)
	    scsi_hba_transport->tran_hba_private;

	if (!acb)
		return (DDI_FAILURE);

	if ((cmd == RESET_LUN) ||
	    (cmd == RESET_BUS) ||
	    (cmd == RESET_TARGET))
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: reset op (%d) not supported",
		    ddi_get_instance(resetdev), cmd);

	arcmsr_pcidev_disattach(acb);

	return (DDI_SUCCESS);
}
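
/*
 * devo_reset (see the dev_ops table above) is invoked on shutdown and
 * reboot notification.  Per-LUN, per-bus and per-target resets are not
 * supported, so after logging any such request the driver simply
 * quiesces the whole adapter via arcmsr_pcidev_disattach().
 */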

static int
arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance) {

	scsi_hba_tran_t *hba_trans;
	ddi_device_acc_attr_t dev_acc_attr;
	struct ACB *acb;
	static char buf[256];
	uint16_t wval;
	int raid6 = 1;
	char *type;

	/*
	 * Soft State Structure
	 * The driver should allocate the per-device-instance
	 * soft state structure, being careful to clean up properly if
	 * an error occurs. Allocate data structure.
	 */
	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_soft_state_zalloc failed",
		    instance);
		return (DDI_FAILURE);
	}

	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
	if (acb == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_get_soft_state failed",
		    instance);
		goto error_level_1;
	}

	/* acb is already zalloc()d so we don't need to bzero() it */
	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	acb->dev_info = dev_info;
	acb->dev_acc_attr = dev_acc_attr;

	/*
	 * The driver, if providing DMA, should also check that its hardware is
	 * installed in a DMA-capable slot
	 */
	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: hardware is not installed in a "
		    "DMA-capable slot",
		    instance);
		goto error_level_0;
	}
	/* We do not support high-level interrupts */
	if (ddi_intr_hilevel(dev_info, 0) != 0) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: high-level interrupt not supported",
		    instance);
		goto error_level_0;
	}

	if (pci_config_setup(dev_info, &acb->pci_acc_handle)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: pci_config_setup() failed, attach failed",
		    instance);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
	if (wval != PCI_VENDOR_ID_ARECA) {
		arcmsr_log(NULL, CE_NOTE,
		    "arcmsr%d: failing attach: vendorid (0x%04x) "
		    "does not match 0x%04x (PCI_VENDOR_ID_ARECA)\n",
		    instance, wval, PCI_VENDOR_ID_ARECA);
		return (DDI_PROBE_FAILURE);
	}

	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
	switch (wval) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1210:
	case PCI_DEVICE_ID_ARECA_1201:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}

	(void) sprintf(buf, "Areca %s Host Adapter RAID Controller%s",
	    type, raid6 ? " (RAID6 capable)" : "");
	cmn_err(CE_CONT, "arcmsr%d: %s", instance, buf);
	cmn_err(CE_CONT, "arcmsr%d: %s", instance, ARCMSR_DRIVER_VERSION);


	/* we disable iop interrupt here */
	if (arcmsr_initialize(acb) == DDI_FAILURE) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: arcmsr_initialize "
		    "failed", instance);
		goto error_level_1;
	}

	/*
	 * The driver must first obtain the iblock cookie to initialize
	 * mutexes used in the driver handler. Only after those mutexes
	 * have been initialized can the interrupt handler be added.
	 */
	if (ddi_get_iblock_cookie(dev_info, 0, &acb->iblock_cookie)
	    != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN, "arcmsr%d: "
		    "ddi_get_iblock_cookie failed", instance);
		goto error_level_2;
	}
	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);
	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER,
	    (void *)acb->iblock_cookie);

	/* Allocate a transport structure */
	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
	if (hba_trans == NULL) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_tran_alloc failed",
		    instance);
		goto error_level_3;
	}
	acb->scsi_hba_transport = hba_trans;
	acb->dev_info = dev_info;
	/* init scsi host adapter transport entry */
	hba_trans->tran_hba_private  = acb;
	hba_trans->tran_tgt_private  = NULL;
	/*
	 * If no per-target initialization is required, the HBA can leave
	 * tran_tgt_init set to NULL.
	 */
	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
	hba_trans->tran_tgt_probe = scsi_hba_probe;
	hba_trans->tran_tgt_free = NULL;
	hba_trans->tran_start = arcmsr_tran_start;
	hba_trans->tran_abort = arcmsr_tran_abort;
	hba_trans->tran_reset = arcmsr_tran_reset;
	hba_trans->tran_getcap = arcmsr_tran_getcap;
	hba_trans->tran_setcap = arcmsr_tran_setcap;
	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;

	hba_trans->tran_reset_notify = NULL;
	hba_trans->tran_get_bus_addr = NULL;
	hba_trans->tran_get_name = NULL;
	hba_trans->tran_quiesce = NULL;
	hba_trans->tran_unquiesce = NULL;
	hba_trans->tran_bus_reset = NULL;
	hba_trans->tran_add_eventcall = NULL;
	hba_trans->tran_get_eventcookie = NULL;
	hba_trans->tran_post_event = NULL;
	hba_trans->tran_remove_eventcall = NULL;


	/* Adding an Interrupt Handler */
	if (ddi_add_intr(dev_info, 0, &acb->iblock_cookie, 0,
	    arcmsr_interrupt, (caddr_t)acb) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: failed to add interrupt handler",
		    instance);
		goto error_level_4;
	}
	/*
	 * The driver should attach this instance of the device, and
	 * perform error cleanup if necessary
	 */
	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: scsi_hba_attach_setup failed",
		    instance);
		goto error_level_5;
	}

	/* iop init and enable interrupt here */
	mutex_enter(&arcmsr_global_mutex);
	arcmsr_iop_init(acb);
	mutex_exit(&arcmsr_global_mutex);

	/* Initialize power management bookkeeping. */
	if (pm_create_components(dev_info, 1) == DDI_SUCCESS) {
		if (pm_idle_component(dev_info, 0) == DDI_FAILURE) {
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: pm_idle_component fail",
			    instance);
			goto error_level_8;
		}
		pm_set_normal_power(dev_info, 0, 1);
		/* acb->power_level = 1; */
	} else {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: pm_create_components fail",
		    instance);
		goto error_level_7;
	}

	/*
	 * Since this driver manages devices with "remote" hardware,
	 * i.e. the devices themselves have no "reg" property, the SUSPEND/
	 * RESUME commands in detach/attach will not be called by the power
	 * management framework unless we request it by creating a
	 * "pm-hardware-state" property and setting it to value
	 * "needs-suspend-resume".
	 */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dev_info,
	    "pm-hardware-state", "needs-suspend-resume")
	    != DDI_PROP_SUCCESS) {
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi_prop_update(\"pm-hardware-state\") failed",
		    instance);
		goto error_level_8;
	}

	/* arm the "timeout" watchdog for active ccbs */
	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
	    (60 * drv_usectohz(1000000)));
	/* report device info */
	ddi_report_dev(dev_info);
	ArcMSRHBA[arcmsr_hba_count] = acb;
	arcmsr_hba_count++;

	return (DDI_SUCCESS);

error_level_8:
	pm_destroy_components(dev_info);

error_level_7:
	/* Remove any previously allocated minor nodes */
	ddi_remove_minor_node(dev_info, NULL);

error_level_5:
	ddi_remove_intr(dev_info, 0, (void *)acb->iblock_cookie);

error_level_4:
	scsi_hba_tran_free(hba_trans);

error_level_3:
	mutex_destroy(&acb->acb_mutex);
	mutex_destroy(&acb->postq_mutex);
	mutex_destroy(&acb->workingQ_mutex);
	mutex_destroy(&acb->ioctl_mutex);

error_level_2:
	ddi_dma_mem_free(&acb->ccbs_acc_handle);
	ddi_dma_free_handle(&acb->ccbs_pool_handle);

error_level_1:
	ddi_soft_state_free(arcmsr_soft_state, instance);

error_level_0:
	return (DDI_FAILURE);
}


/*
 *      Function: arcmsr_attach(9E)
 *   Description: Set up all device state and allocate data structures,
 *		  mutexes, condition variables, etc. for device operation.
 *		  Set mt_attr property for driver to indicate MT-safety.
 *		  Add interrupts needed.
 *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
 *        Output: Return DDI_SUCCESS if device is ready,
 *		          else return DDI_FAILURE
 */
static int
arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd) {

	scsi_hba_tran_t *hba_trans;
	struct ACB *acb;


#if defined(ARCMSR_DEBUG)
	arcmsr_log(NULL, CE_NOTE,
	    "arcmsr_attach called for device %p (instance %d)",
	    (void *)dev_info, ddi_get_instance(dev_info));
#endif
	switch (cmd) {
	case DDI_ATTACH:
		return (arcmsr_do_ddi_attach(dev_info,
		    ddi_get_instance(dev_info)));
	case DDI_RESUME:
	case DDI_PM_RESUME:
		/*
		 * There is no hardware state to restart and no timeouts to
		 * restart, since we didn't PM_SUSPEND with active cmds or
		 * active timeouts.  We just need to unblock waiting threads
		 * and restart I/O.  The code for DDI_RESUME is almost
		 * identical, except it uses the suspend flag rather than
		 * the pm_suspend flag.
		 */
		hba_trans = (scsi_hba_tran_t *)
		    ddi_get_driver_private(dev_info);
		if (!hba_trans) {
			return (DDI_FAILURE);
		}
		acb = (struct ACB *)
		    hba_trans->tran_hba_private;
		mutex_enter(&acb->acb_mutex);
		arcmsr_iop_init(acb);

		/* restart ccbs "timeout" watchdog */
		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
		    (void *)acb, (60 * drv_usectohz(1000000)));
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);

	default:
		arcmsr_log(NULL, CE_WARN,
		    "arcmsr%d: ddi attach cmd (%d) unsupported",
		    ddi_get_instance(dev_info), cmd);
		return (DDI_FAILURE);
	}
}

/*
 *    Function:	arcmsr_detach(9E)
 * Description: Remove all device allocation and system resources, disable
 *		        device interrupt.
 *       Input: dev_info_t *dev_info
 *		        ddi_detach_cmd_t cmd
 *      Output:	Return DDI_SUCCESS if done,
 *		        else return DDI_FAILURE
 */
static int
arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {

	int instance;
	struct ACB *acb;


	instance = ddi_get_instance(dev_info);
	acb = (struct ACB *)ddi_get_soft_state(arcmsr_soft_state,
	    instance);
	if (!acb) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		arcmsr_pcidev_disattach(acb);
		/* Remove interrupt set up by ddi_add_intr */
		ddi_remove_intr(dev_info, 0, acb->iblock_cookie);
		/* unbind mapping object to handle */
		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
		/* Free ccb pool memory */
		ddi_dma_mem_free(&acb->ccbs_acc_handle);
		/* Free DMA handle */
		ddi_dma_free_handle(&acb->ccbs_pool_handle);
		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
			arcmsr_log(NULL, CE_WARN,
			    "arcmsr%d: Unable to detach instance cleanly "
			    "(should not happen)",
			    ddi_get_instance(dev_info));
		/* free scsi_hba_transport from scsi_hba_tran_alloc */
		scsi_hba_tran_free(acb->scsi_hba_transport);
		ddi_remove_minor_node(dev_info, NULL);
		ddi_prop_remove_all(dev_info);
		mutex_exit(&acb->acb_mutex);
		mutex_destroy(&acb->acb_mutex);
		mutex_destroy(&acb->postq_mutex);
		mutex_destroy(&acb->workingQ_mutex);
		mutex_destroy(&acb->ioctl_mutex);
		pci_config_teardown(&acb->pci_acc_handle);
		ddi_set_driver_private(dev_info, NULL);
		ddi_soft_state_free(arcmsr_soft_state, instance);
		pm_destroy_components(dev_info);
		return (DDI_SUCCESS);
	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		mutex_enter(&acb->acb_mutex);
		if (acb->timeout_id != 0) {
			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
			mutex_exit(&acb->acb_mutex);
			(void) untimeout(acb->timeout_id);
			mutex_enter(&acb->acb_mutex);
			acb->timeout_id = 0;
		}
		/* disable all outbound interrupt */
		(void) arcmsr_disable_allintr(acb);
		/* stop adapter background rebuild */
		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
			arcmsr_stop_hba_bgrb(acb);
			arcmsr_flush_hba_cache(acb);
		} else {
			arcmsr_stop_hbb_bgrb(acb);
			arcmsr_flush_hbb_cache(acb);
		}
		mutex_exit(&acb->acb_mutex);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}



/*
 *    Function:	arcmsr_tran_tgt_init
 * Description: Called when initializing a target device instance. If
 *		        no per-target initialization is required, the HBA
 *		        may leave tran_tgt_init set to NULL
 *       Input:
 *		        dev_info_t *host_dev_info,
 *		        dev_info_t *target_dev_info,
 *		        scsi_hba_tran_t *tran,
 *		        struct scsi_device *sd
 *
 *      Return: DDI_SUCCESS if success, else return DDI_FAILURE
 *
 *  This entry point enables the HBA to allocate and/or initialize any
 *  per-target resources.
 *  It also enables the HBA to qualify the device's address as valid and
 *  supportable for that particular HBA.
 *  By returning DDI_FAILURE, the instance of the target driver for that
 *  device will not be probed or attached.
 *  This entry point is not required, and if none is supplied,
 *  the framework will attempt to probe and attach all possible instances
 *  of the appropriate target drivers.
 */
static int
arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
    scsi_hba_tran_t *hosttran, struct scsi_device *sd) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hosttran, target_dev_info))
#endif


	uint16_t  target;
	uint8_t  lun;

	target = sd->sd_address.a_target;
	lun = sd->sd_address.a_lun;
	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
		cmn_err(CE_WARN,
		    "arcmsr%d: (target %d, lun %d) exceeds "
		    "maximum supported values (%d, %d)",
		    ddi_get_instance(host_dev_info),
		    target, lun, ARCMSR_MAX_TARGETID, ARCMSR_MAX_TARGETLUN);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 *         Function: arcmsr_tran_getcap(9E)
 *      Description: Get the capability named, and return its value.
 *    Return Values: current value of capability, if defined
 *		             -1 if capability is not defined
 * ------------------------------------------------------
 *         Common Capability Strings Array
 * ------------------------------------------------------
 *	#define	SCSI_CAP_DMA_MAX		0
 *	#define	SCSI_CAP_MSG_OUT		1
 *	#define	SCSI_CAP_DISCONNECT		2
 *	#define	SCSI_CAP_SYNCHRONOUS		3
 *	#define	SCSI_CAP_WIDE_XFER		4
 *	#define	SCSI_CAP_PARITY			5
 *	#define	SCSI_CAP_INITIATOR_ID		6
 *	#define	SCSI_CAP_UNTAGGED_QING		7
 *	#define	SCSI_CAP_TAGGED_QING		8
 *	#define	SCSI_CAP_ARQ			9
 *	#define	SCSI_CAP_LINKED_CMDS		10 a
 *	#define	SCSI_CAP_SECTOR_SIZE		11 b
 *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
 *	#define	SCSI_CAP_GEOMETRY		13 d
 *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
 *	#define	SCSI_CAP_QFULL_RETRIES		15 f
 *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
 *	#define	SCSI_CAP_SCSI_VERSION		17 11
 *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
 *	#define	SCSI_CAP_LUN_RESET		19 13
 */
static int
arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom) {

	int capability = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;


	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		capability = acb->tgt_scsi_opts[ap->a_target];
		break;
	case SCSI_CAP_SECTOR_SIZE:
		capability = ARCMSR_DEV_SECTOR_SIZE;
		break;
	case SCSI_CAP_DMA_MAX:
		/* Limit to 16MB max transfer */
		capability = ARCMSR_MAX_XFER_LEN;
		break;
	case SCSI_CAP_INITIATOR_ID:
		capability = ARCMSR_SCSI_INITIATOR_ID;
		break;
	case SCSI_CAP_GEOMETRY:
		/* heads in the upper 16 bits, sectors per track below */
		capability = (255 << 16) | 63;
		break;
	default:
		capability = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (capability);
}
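
/*
 * A worked example of the SCSI_CAP_GEOMETRY encoding above (heads in
 * the upper 16 bits, sectors per track in the lower 16): the value
 * (255 << 16) | 63 advertises a 255-head, 63-sector translation.  A
 * target driver would typically recover it roughly like this (a
 * sketch; "total_sectors" stands for whatever capacity the driver has
 * discovered):
 *
 *	int geom = scsi_ifgetcap(ap, "geometry", 1);
 *	int heads = (geom >> 16) & 0xffff;
 *	int sectors = geom & 0xffff;
 *	diskaddr_t cylinders = total_sectors / (heads * sectors);
 */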

/*
 *      Function: arcmsr_tran_setcap(9E)
 *   Description: Set the specific capability.
 * Return Values: 1 - capability exists and can be set to new value
 *		          0 - capability could not be set to new value
 *		         -1 - no such capability
 */
static int
arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom) {
#ifndef __lock_lint
	_NOTE(ARGUNUSED(value))
#endif


	int supported = 0;
	struct ACB *acb =
	    (struct ACB *)ap->a_hba_tran->tran_hba_private;


	if (cap == NULL || whom == 0) {
		return (-1);
	}

	mutex_enter(&arcmsr_global_mutex);
	switch (supported = scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DISCONNECT:		/* 2 */
	case SCSI_CAP_SYNCHRONOUS:		/* 3 */
	case SCSI_CAP_TAGGED_QING:		/* 8 */
	case SCSI_CAP_WIDE_XFER:		/* 4 */
	case SCSI_CAP_ARQ:			/* 9 auto request sense */
	case SCSI_CAP_TOTAL_SECTORS:		/* c */
		acb->tgt_scsi_opts[ap->a_target] |= supported;
		supported = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
	case SCSI_CAP_INITIATOR_ID:		/* 6 */
	case SCSI_CAP_DMA_MAX:			/* 0 */
	case SCSI_CAP_MSG_OUT:			/* 1 */
	case SCSI_CAP_PARITY:			/* 5 */
	case SCSI_CAP_LINKED_CMDS:		/* a */
	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
	case SCSI_CAP_SECTOR_SIZE:		/* b */
		supported = 0;
		break;
	default:
		supported = -1;
		break;
	}
	mutex_exit(&arcmsr_global_mutex);
	return (supported);
}



static void
arcmsr_free_ccb(struct CCB *ccb) {

	struct ACB *acb = ccb->acb;

	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->pkt = NULL;
	ccb->ccb_flags = 0;
	mutex_enter(&acb->workingQ_mutex);
	acb->ccbworkingQ[acb->workingccb_doneindex] = ccb;
	acb->workingccb_doneindex++;
	acb->workingccb_doneindex %= ARCMSR_MAX_FREECCB_NUM;
	mutex_exit(&acb->workingQ_mutex);
}
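
/*
 * ccbworkingQ is a ring of ARCMSR_MAX_FREECCB_NUM CCB pointers:
 * arcmsr_free_ccb() produces completed CCBs at workingccb_doneindex
 * and, presumably, arcmsr_get_freeccb() consumes them at a matching
 * start index, with both indexes wrapping modulo
 * ARCMSR_MAX_FREECCB_NUM under workingQ_mutex.
 */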

/*
 *      Function: arcmsr_tran_init_pkt
 * Return Values: pointer to scsi_pkt, or NULL
 *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
 *                DMA resources for that pkt.
 *                Called by kernel on behalf of a target driver
 *		          calling scsi_init_pkt(9F).
 *		          Refer to tran_init_pkt(9E) man page
 *       Context: Can be called from different kernel process threads.
 *		          Can be called by interrupt thread.
 * Allocates SCSI packet and DMA resources
 */
static struct scsi_pkt *
arcmsr_tran_init_pkt(struct scsi_address *ap,
    register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg) {

	struct CCB *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct ACB *acb;
	int old_pkt_flag = 1;


	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;

	if (pkt == NULL) {
		/* get free CCB */
		ccb = arcmsr_get_freeccb(acb);
		if (ccb == (struct CCB *)NULL) {
			return (NULL);
		}

		if (ccb->pkt != NULL) {
			/*
			 * If kmem_flags are turned on, expect to
			 * see a message
			 */
			cmn_err(CE_WARN, "arcmsr%d: invalid pkt",
			    ddi_get_instance(acb->dev_info));
			return (NULL);
		}
		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct scsi_pkt),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN,
			    "arcmsr%d: scsi pkt allocation failed",
			    ddi_get_instance(acb->dev_info));
			arcmsr_free_ccb(ccb);
			return (NULL);
		}
		/* Initialize CCB */
		ccb->pkt = pkt;
		ccb->pkt_dma_handle = NULL;
		/* record how many sg are needed to xfer on this pkt */
		ccb->pkt_ncookies = 0;
		/* record how many sg we got from this window */
		ccb->pkt_cookie = 0;
		/* record how many windows have partial dma map set */
		ccb->pkt_nwin = 0;
		/* record current sg window position */
		ccb->pkt_curwin	= 0;
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		ccb->resid_dmacookie.dmac_size = 0;

		/*
		 * Keep the buf pointer around: arcmsr_tran_start still
		 * needs it to fake up some of the returned information.
		 */
		ccb->bp = bp;

		/* Initialize arcmsr_cdb */
		arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
		arcmsr_cdb->Bus = 0;
		arcmsr_cdb->Function = 1;
		arcmsr_cdb->LUN = ap->a_lun;
		arcmsr_cdb->TargetID = ap->a_target;
		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
		arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;

		/* Fill in the rest of the structure */
		pkt->pkt_ha_private = ccb;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		old_pkt_flag = 0;
	} else {
		ccb = (struct CCB *)pkt->pkt_ha_private;
		/*
		 * you cannot update CdbLength with cmdlen here, it would
		 * cause a data compare error
		 */
		ccb->startdone = ARCMSR_CCB_UNBUILD;
	}

	/* Second step : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		/*
		 * The system may ask us to transfer a large run of data,
		 * anywhere from tens of bytes up to hundreds of kilobytes
		 * (e.g. 20 bytes to 819200 bytes).  arcmsr_dma_alloc()
		 * obtains pkt_dma_handle (non-NULL), which stays bound
		 * until the whole run has been transferred by a series of
		 * READ or WRITE SCSI commands; arcmsr_dma_move() repeats
		 * the operation on the same ccb until the run completes.
		 * After arcmsr_tran_init_pkt() returns, the kernel uses
		 * pkt_resid and b_bcount to decide which type of SCSI
		 * command descriptor (data length) to hand to the
		 * following arcmsr_tran_start().
		 *
		 * Each transfer should be aligned on a 512 byte boundary.
		 */
		if (ccb->pkt_dma_handle == NULL) {
			if (arcmsr_dma_alloc(acb, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/*
				 * the HBA driver is unable to allocate DMA
				 * resources, it must free the allocated
				 * scsi_pkt(9S) before returning
				 */
				cmn_err(CE_WARN, "arcmsr%d: dma allocation "
				    "failure",
				    ddi_get_instance(acb->dev_info));
				if (old_pkt_flag == 0) {
					cmn_err(CE_WARN, "arcmsr%d: dma "
					    "allocation failed; freeing scsi "
					    "hba pkt",
					    ddi_get_instance(acb->dev_info));
					arcmsr_free_ccb(ccb);
					scsi_hba_pkt_free(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* DMA resources to next DMA window, for old pkt */
			if (arcmsr_dma_move(acb, pkt, bp) == -1) {
				cmn_err(CE_WARN, "arcmsr%d: dma move "
				    "failed",
				    ddi_get_instance(acb->dev_info));
				return ((struct scsi_pkt *)NULL);
			}
		}
	} else {
		pkt->pkt_resid = 0;
	}
	return (pkt);
}
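
/*
 * Sketch of how a target driver exercises the partial-DMA path above
 * (illustrative, not code from this driver): it calls
 * scsi_init_pkt(9F) with PKT_DMA_PARTIAL set; when the command
 * completes with a nonzero pkt_resid, it calls scsi_init_pkt() again
 * with the same pkt and bp, which takes the "old pkt" branch here so
 * that arcmsr_dma_move() advances to the next cookies/window and the
 * remaining bytes are transferred by a follow-up command.
 */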

/*
 * Function name: arcmsr_dma_alloc
 * Return Values: 0 if successful, -1 if failure
 *   Description: allocate DMA resources
 *       Context: Can only be called from arcmsr_tran_init_pkt()
 *     register struct scsi_address	*ap = &((pkt)->pkt_address);
 */
static int
arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)()) {

	struct CCB *ccb = pkt->pkt_ha_private;
	int alloc_result, map_method, dma_flags;
	int resid = 0;
	int total_ccb_xferlen = 0;
	int (*cb)(caddr_t);
	uint8_t i;

	/*
	 * at this point the PKT SCSI CDB is empty, and dma xfer length
	 * is bp->b_bcount
	 */

	if (bp->b_flags & B_READ) {
		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_READ;
	} else {
		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	if ((alloc_result = ddi_dma_alloc_handle(acb->dev_info,
	    &arcmsr_dma_attr, cb, 0, &ccb->pkt_dma_handle))
	    != DDI_SUCCESS) {
		switch (alloc_result) {
		case DDI_DMA_BADATTR:
			/*
			 * If the system does not support physical DMA,
			 * the return value from ddi_dma_alloc_handle
			 * will be DDI_DMA_BADATTR
			 */
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'bad attribute'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		case DDI_DMA_NORESOURCES:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'no resources'",
			    ddi_get_instance(acb->dev_info));
			bioerror(bp, 0);
			return (DDI_FAILURE);
		default:
			cmn_err(CE_WARN, "arcmsr%d: dma allocate returned "
			    "'unknown failure'",
			    ddi_get_instance(acb->dev_info));
			return (DDI_FAILURE);
		}
	}

	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle, bp,
	    dma_flags, cb, 0,
	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
	    &ccb->pkt_ncookies);	/* number of sgl cookies */

	switch (map_method) {
	case DDI_DMA_PARTIAL_MAP:
		/*
		 * DDI_DMA_PARTIAL_MAP is returned when the object cannot
		 * be mapped in a single window, e.g. on systems with more
		 * than 4GB of main memory.
		 *
		 * We've already set DDI_DMA_PARTIAL in dma_flags,
		 * so if it's now missing, there's something screwy
		 * happening. We plow on....
		 */

		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			cmn_err(CE_WARN, "arcmsr%d: dma partial mapping lost "
			    "...impossible case!",
			    ddi_get_instance(acb->dev_info));
		}
		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_numwin() failed",
			    ddi_get_instance(acb->dev_info));
		}

		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_getwin failed",
			    ddi_get_instance(acb->dev_info));
		}

		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;

		return (DDI_SUCCESS);

	case DDI_DMA_MAPPED:
		ccb->pkt_nwin = 1; /* all mapped, so only one window */
		ccb->pkt_dma_len = 0;
		ccb->pkt_dma_offset = 0;
		i = 0;
		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
		for (;;) {
			i++;
			if (i == ARCMSR_MAX_SG_ENTRIES ||
			    i == ccb->pkt_ncookies ||
			    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
				break;
			}
			/*
			 * next cookie will be retrieved from
			 * ccb->pkt_dmacookies[i]
			 */
			ddi_dma_nextcookie(ccb->pkt_dma_handle,
			    &ccb->pkt_dmacookies[i]);
			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
		}
		ccb->pkt_cookie = i;
		ccb->arcmsr_cdb.sgcount = i;
		if (total_ccb_xferlen > 512) {
			resid = total_ccb_xferlen % 512;
			if (resid != 0) {
				i--;
				total_ccb_xferlen -= resid;
				/* modify last sg length */
				ccb->pkt_dmacookies[i].dmac_size =
				    ccb->pkt_dmacookies[i].dmac_size - resid;
				ccb->resid_dmacookie.dmac_size = resid;
				ccb->resid_dmacookie.dmac_laddress =
				    ccb->pkt_dmacookies[i].dmac_laddress +
				    ccb->pkt_dmacookies[i].dmac_size;
			}
		}
		ccb->total_dmac_size = total_ccb_xferlen;
		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
		return (DDI_SUCCESS);

	case DDI_DMA_NORESOURCES:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no resources'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, ENOMEM);
		break;

	case DDI_DMA_NOMAPPING:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'no mapping'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EFAULT);
		break;

	case DDI_DMA_TOOBIG:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'too big'",
		    ddi_get_instance(acb->dev_info));
		bioerror(bp, EINVAL);
		break;

	case DDI_DMA_INUSE:
		cmn_err(CE_WARN, "arcmsr%d: dma map got 'in use' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info));
		break;
	default:
		cmn_err(CE_WARN,
		    "arcmsr%d: dma map got 'unknown failure 0x%x' "
		    "(should not happen)",
		    ddi_get_instance(acb->dev_info), map_method);
#ifdef ARCMSR_DEBUG
		arcmsr_dump_scsi_cdb(&pkt->pkt_address, pkt);
#endif
		break;
	}

	ddi_dma_free_handle(&ccb->pkt_dma_handle);
	ccb->pkt_dma_handle = NULL;
	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
	return (DDI_FAILURE);
}
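
/*
 * Worked example of the 512-byte trim above: if the cookies gathered
 * for this window add up to total_ccb_xferlen = 1030, then
 * resid = 1030 % 512 = 6; the last S/G entry is shortened by 6 bytes
 * so this CDB moves exactly 1024 bytes, and the 6-byte tail is parked
 * in resid_dmacookie for arcmsr_dma_move() to consume first on the
 * next pass.
 */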
1515 
1516 
1517 /*
1518  * Function name: arcmsr_dma_move
1519  * Return Values: 0 if successful, -1 if failure
1520  *   Description: move DMA resources to next DMA window
1521  *       Context: Can only be called from arcmsr_tran_init_pkt()
1522  */
1523 static int
1524 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt,
1525     struct buf *bp) {
1526 
1527 	struct CCB *ccb = pkt->pkt_ha_private;
1528 	uint8_t i = 0;
1529 	int resid = 0;
1530 	int total_ccb_xferlen = 0;
1531 
1532 	if (ccb->resid_dmacookie.dmac_size != 0) 	{
1533 		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1534 		ccb->pkt_dmacookies[i].dmac_size =
1535 		    ccb->resid_dmacookie.dmac_size;
1536 		ccb->pkt_dmacookies[i].dmac_laddress =
1537 		    ccb->resid_dmacookie.dmac_laddress;
1538 		i++;
1539 		ccb->resid_dmacookie.dmac_size = 0;
1540 	}
1541 	/*
1542 	 * If there are no more cookies remaining in this window,
1543 	 * move to the next window.
1544 	 */
1545 	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1546 		/*
1547 		 * only dma map "partial" arrive here
1548 		 */
1549 		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1550 		    (ccb->pkt_nwin == 1)) {
1551 			cmn_err(CE_CONT,
1552 			    "arcmsr%d: dma partial set, but only "
1553 			    "one window allocated",
1554 			    ddi_get_instance(acb->dev_info));
1555 			return (DDI_SUCCESS);
1556 		}
1557 
1558 		/* At last window, cannot move */
1559 		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1560 			cmn_err(CE_WARN,
1561 			    "arcmsr%d: dma partial set, numwin exceeded",
1562 			    ddi_get_instance(acb->dev_info));
1563 			return (DDI_FAILURE);
1564 		}
1565 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1566 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1567 		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1568 		    DDI_FAILURE) {
1569 			cmn_err(CE_WARN,
1570 			    "arcmsr%d: dma partial set, "
1571 			    "ddi_dma_getwin failure",
1572 			    ddi_get_instance(acb->dev_info));
1573 			return (DDI_FAILURE);
1574 		}
1575 		/* reset cookie pointer */
1576 		ccb->pkt_cookie = 0;
1577 	} else {
1578 		/*
1579 		 * only dma map "all" arrive here
1580 		 * We still have more cookies in this window,
1581 		 * get the next one
1582 		 * access the pkt_dma_handle remain cookie record at
1583 		 * ccb->pkt_dmacookies array
1584 		 */
1585 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1586 		    &ccb->pkt_dmacookies[i]);
1587 	}
1588 
1589 	/* Get remaining cookies in this window, up to our maximum */
1590 	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1591 
1592 	/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
1593 	for (;;) {
1594 		i++;
1595 		/* handled cookies count level indicator */
1596 		ccb->pkt_cookie++;
1597 		if (i == ARCMSR_MAX_SG_ENTRIES ||
1598 		    ccb->pkt_cookie == ccb->pkt_ncookies ||
1599 		    total_ccb_xferlen == ARCMSR_MAX_XFER_LEN) {
1600 			break;
1601 		}
1602 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1603 		    &ccb->pkt_dmacookies[i]);
1604 		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1605 	}
1606 
1607 	ccb->arcmsr_cdb.sgcount = i;
1608 	if (total_ccb_xferlen > 512) {
1609 		resid = total_ccb_xferlen % 512;
1610 		if (resid != 0) {
1611 			i--;
1612 			total_ccb_xferlen -= resid;
1613 			/* modify last sg length */
1614 			ccb->pkt_dmacookies[i].dmac_size =
1615 			    ccb->pkt_dmacookies[i].dmac_size - resid;
1616 			ccb->resid_dmacookie.dmac_size = resid;
1617 			ccb->resid_dmacookie.dmac_laddress =
1618 			    ccb->pkt_dmacookies[i].dmac_laddress +
1619 			    ccb->pkt_dmacookies[i].dmac_size;
1620 		}
1621 	}
1622 	ccb->total_dmac_size += total_ccb_xferlen;
1623 	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1624 
1625 	return (DDI_SUCCESS);
1626 }
1627 
1628 /*
1629  * Function name: arcmsr_tran_destroy_pkt
1630  * Return Values: none
1631  *   Description: Called by kernel on behalf of a target driver
1632  *	          calling scsi_destroy_pkt(9F).
1633  *	          Refer to tran_destroy_pkt(9E) man page
1634  *       Context: Can be called from different kernel process threads.
1635  *	          Can be called by interrupt thread.
1636  */
1637 static void
1638 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {
1639 
1640 	struct CCB *ccb = pkt->pkt_ha_private;
1641 
1642 	if ((ccb != NULL) && (ccb->pkt == pkt)) {
1643 		struct ACB *acb = ccb->acb;
1644 		if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1645 			if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
1646 			    != DDI_SUCCESS) {
1647 				cmn_err(CE_WARN,
1648 				    "arcmsr%d: ddi_dma_unbind_handle() failed",
1649 				    ddi_get_instance(acb->dev_info));
1650 			}
1651 			ddi_dma_free_handle(&ccb->pkt_dma_handle);
1652 			ccb->pkt_dma_handle = NULL;
1653 		}
1654 		arcmsr_free_ccb(ccb);
1655 	}
1656 
1657 	scsi_hba_pkt_free(ap, pkt);
1658 }
1659 
1660 /*
1661  * Function name: arcmsr_tran_dmafree()
1662  * Return Values: none
1663  *   Description: free dvma resources
1664  *       Context: Can be called from different kernel process threads.
1665  *	          Can be called by interrupt thread.
1666  */
1667 static void
1668 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) {
1669 
1670 	struct CCB *ccb = pkt->pkt_ha_private;
1671 
1672 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1673 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1674 		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle)
1675 		    != DDI_SUCCESS) {
1676 			cmn_err(CE_WARN,
1677 			    "arcmsr%d: ddi_dma_unbind_handle() failed "
1678 			    "(target %d lun %d)",
1679 			    ddi_get_instance(ccb->acb->dev_info),
1680 			    ap->a_target, ap->a_lun);
1681 		}
1682 		ddi_dma_free_handle(&ccb->pkt_dma_handle);
1683 		ccb->pkt_dma_handle = NULL;
1684 	}
1685 }
1686 
1687 /*
1688  * Function name: arcmsr_tran_sync_pkt()
1689  * Return Values: none
1690  *   Description: sync dma
1691  *       Context: Can be called from different kernel process threads.
1692  *		  Can be called by interrupt thread.
1693  */
1694 static void
1695 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) {
1696 
1697 	struct CCB *ccb;
1698 
1699 	ccb = pkt->pkt_ha_private;
1700 
1701 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1702 		if (ddi_dma_sync(ccb->pkt_dma_handle,
1703 		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
1704 		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1705 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU)
1706 			!= DDI_SUCCESS) {
1707 			cmn_err(CE_WARN, "arcmsr%d: sync pkt failed "
1708 			    "for target %d lun %d",
1709 			    ddi_get_instance(ccb->acb->dev_info),
1710 			    ap->a_target, ap->a_lun);
1711 		}
1712 	}
1713 }
1714 
1715 
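/*
 * Function name: arcmsr_hba_wait_msgint_ready
 * Return Values: TRUE if the message 0 interrupt arrived, FALSE on timeout
 *   Description: Poll outbound_intstatus for MESSAGE0_INT and clear the
 *	          interrupt once it is seen; polls every 10ms for at most
 *	          20 seconds. arcmsr_hbb_wait_msgint_ready below is the
 *	          type B analogue, driven through the doorbell registers.
 */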
1716 static uint8_t
1717 arcmsr_hba_wait_msgint_ready(struct ACB *acb) {
1718 
1719 	uint32_t i;
1720 	uint8_t retries = 0x00;
1721 	struct HBA_msgUnit *phbamu;
1722 
1723 
1724 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1725 
1726 	do {
1727 		for (i = 0; i < 100; i++) {
1728 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1729 			    &phbamu->outbound_intstatus) &
1730 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1731 				/* clear interrupt */
1732 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1733 				    &phbamu->outbound_intstatus,
1734 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1735 				return (TRUE);
1736 			}
1737 			drv_usecwait(10000);
1738 			if (ddi_in_panic()) {
1739 				/* clear interrupts */
1740 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1741 				    &phbamu->outbound_intstatus,
1742 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1743 				return (TRUE);
1744 			}
1745 		} /* max 1 second */
1746 	} while (retries++ < 20); /* max 20 seconds */
1747 	return (FALSE);
1748 }
1749 
1750 
1751 
1752 static uint8_t
1753 arcmsr_hbb_wait_msgint_ready(struct ACB *acb) {
1754 
1755 	struct HBB_msgUnit *phbbmu;
1756 	uint32_t i;
1757 	uint8_t retries = 0x00;
1758 
1759 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1760 
1761 	do {
1762 		for (i = 0; i < 100; i++) {
1763 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
1764 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
1765 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1766 				/* clear interrupt */
1767 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1768 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1769 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1770 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1771 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1772 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1773 				return (TRUE);
1774 			}
1775 			drv_usecwait(10000);
1776 			if (ddi_in_panic()) {
1777 				/* clear interrupts */
1778 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1779 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
1780 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1781 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1782 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1783 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1784 				return (TRUE);
1785 			}
1786 		} /* max 1 second */
1787 	} while (retries++ < 20); /* max 20 seconds */
1788 
1789 	return (FALSE);
1790 }
1791 
1792 
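/*
 * Function name: arcmsr_flush_hba_cache
 * Return Values: none
 *   Description: Post MESG0_FLUSH_CACHE to a type A adapter and wait for
 *	          the acknowledgement, retrying up to 30 times (roughly
 *	          10 minutes in total).
 */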
1793 static void
1794 arcmsr_flush_hba_cache(struct ACB *acb) {
1795 
1796 	struct HBA_msgUnit *phbamu;
1797 	int retry_count = 30;
1798 
1799 	/* wait up to 10 minutes for the adapter to flush its cache */
1800 
1801 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1802 
1803 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
1804 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
1805 
1806 	do {
1807 		if (arcmsr_hba_wait_msgint_ready(acb)) {
1808 			break;
1809 		} else {
1810 			retry_count--;
1811 		}
1812 	} while (retry_count != 0);
1813 }
1814 
1815 
1816 
1817 static void
1818 arcmsr_flush_hbb_cache(struct ACB *acb) {
1819 
1820 	struct HBB_msgUnit *phbbmu;
1821 	int retry_count = 30;
1822 
1823 	/* wait up to 10 minutes for the adapter to flush its cache */
1824 
1825 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
1826 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1827 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1828 	    ARCMSR_MESSAGE_FLUSH_CACHE);
1829 
1830 	do {
1831 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
1832 			break;
1833 		} else {
1834 			retry_count--;
1835 		}
1836 	} while (retry_count != 0);
1837 }
1838 
1839 
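/*
 * Function name: arcmsr_ccb_complete
 * Return Values: none
 *   Description: Finish a CCB: update pkt_state, sync consistent DMA
 *	          buffers back to the CPU if data was transferred, and
 *	          call the target driver's completion routine. A flag of 1
 *	          also decrements the outstanding command count.
 */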
1840 static void
1841 arcmsr_ccb_complete(struct CCB *ccb, int flag) {
1842 
1843 	struct ACB *acb = ccb->acb;
1844 	struct scsi_pkt *pkt = ccb->pkt;
1845 
1846 	if (flag == 1) {
1847 		acb->ccboutstandingcount--;
1848 	}
1849 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1850 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1851 
1852 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1853 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1854 		(void) ddi_dma_sync(ccb->pkt_dma_handle,
1855 		    ccb->pkt_dma_offset, ccb->pkt_dma_len,
1856 		    DDI_DMA_SYNC_FORCPU);
1857 	}
1858 
1859 	if (pkt->pkt_comp) {
1860 		(*pkt->pkt_comp)(pkt);
1861 	}
1862 }
1863 
1864 
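/*
 * Function name: arcmsr_report_sense_info
 * Return Values: none
 *   Description: Copy the auto request-sense data from the CCB's
 *	          ARCMSR_CDB into the scsi_arq_status embedded in the
 *	          pkt's scbp and mark the pkt CMD_CMPLT with
 *	          STATE_ARQ_DONE.
 */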
1865 static void
1866 arcmsr_report_sense_info(struct CCB *ccb) {
1867 
1868 	struct scsi_pkt *pkt = ccb->pkt;
1869 	struct scsi_arq_status *arq_status;
1870 
1871 
1872 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
1873 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
1874 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
1875 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
1876 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
1877 	arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
1878 	arq_status->sts_rqpkt_resid = 0;
1879 
1880 	pkt->pkt_reason = CMD_CMPLT;
1881 	/* auto rqsense took place */
1882 	pkt->pkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
1883 	    STATE_GOT_STATUS | STATE_ARQ_DONE);
1884 
1885 	if (&arq_status->sts_sensedata != NULL) {
1886 		struct SENSE_DATA *cdb_sensedata;
1887 		struct scsi_extended_sense *sts_sensedata;
1888 
1889 		cdb_sensedata =
1890 		    (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
1891 		sts_sensedata = &arq_status->sts_sensedata;
1892 
1893 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
1894 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
1895 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
1896 		sts_sensedata->es_valid = cdb_sensedata->Valid;
1897 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
1898 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
1899 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
1900 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
1901 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
1902 		sts_sensedata->es_info_1 = cdb_sensedata->Information[0];
1903 		sts_sensedata->es_info_2 = cdb_sensedata->Information[1];
1904 		sts_sensedata->es_info_3 = cdb_sensedata->Information[2];
1905 		sts_sensedata->es_info_4 = cdb_sensedata->Information[3];
1906 		sts_sensedata->es_add_len =
1907 		    cdb_sensedata->AdditionalSenseLength;
1908 		sts_sensedata->es_cmd_info[0] =
1909 		    cdb_sensedata->CommandSpecificInformation[0];
1910 		sts_sensedata->es_cmd_info[1] =
1911 		    cdb_sensedata->CommandSpecificInformation[1];
1912 		sts_sensedata->es_cmd_info[2] =
1913 		    cdb_sensedata->CommandSpecificInformation[2];
1914 		sts_sensedata->es_cmd_info[3] =
1915 		    cdb_sensedata->CommandSpecificInformation[3];
1916 		sts_sensedata->es_add_code =
1917 		    cdb_sensedata->AdditionalSenseCode;
1918 		sts_sensedata->es_qual_code =
1919 		    cdb_sensedata->AdditionalSenseCodeQualifier;
1920 		sts_sensedata->es_fru_code =
1921 		    cdb_sensedata->FieldReplaceableUnitCode;
1922 	}
1923 }
1924 
1925 
1926 
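/*
 * Function name: arcmsr_abort_hba_allcmd
 * Return Values: none
 *   Description: Ask a type A adapter to abort all of its outstanding
 *	          commands via MESG0_ABORT_CMD and wait for the
 *	          acknowledgement; arcmsr_abort_hbb_allcmd below is the
 *	          type B analogue.
 */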
1927 static void
1928 arcmsr_abort_hba_allcmd(struct ACB *acb) {
1929 
1930 	struct HBA_msgUnit *phbamu;
1931 
1932 	phbamu = (struct HBA_msgUnit *)acb->pmu;
1933 
1934 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1935 	    &phbamu->inbound_msgaddr0,
1936 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
1937 
1938 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
1939 		cmn_err(CE_WARN,
1940 		    "arcmsr%d: timeout while waiting for 'abort all "
1941 		    "outstanding commands'",
1942 		    ddi_get_instance(acb->dev_info));
1943 	}
1944 }
1945 
1946 
1947 
1948 static void
1949 arcmsr_abort_hbb_allcmd(struct ACB *acb) {
1950 
1951 	struct HBB_msgUnit *phbbmu =
1952 	    (struct HBB_msgUnit *)acb->pmu;
1953 
1954 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1955 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1956 	    ARCMSR_MESSAGE_ABORT_CMD);
1957 
1958 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
1959 		cmn_err(CE_WARN,
1960 		    "arcmsr%d: timeout while waiting for 'abort all "
1961 		    "outstanding commands'",
1962 		    ddi_get_instance(acb->dev_info));
1963 	}
1964 }
1965 
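/*
 * Function name: arcmsr_report_ccb_state
 * Return Values: none
 *   Description: Translate the adapter's completion status for a CCB
 *	          into pkt_reason/pkt_statistics, track the per-target,
 *	          per-lun RAID volume state in acb->devstate, and
 *	          complete the CCB.
 */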
1966 static void
1967 arcmsr_report_ccb_state(struct ACB *acb,
1968     struct CCB *ccb, uint32_t flag_ccb) {
1969 
1970 	int id, lun;
1971 
1972 	id = ccb->pkt->pkt_address.a_target;
1973 	lun = ccb->pkt->pkt_address.a_lun;
1974 
1975 	if ((flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR) == 0) {
1976 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1977 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
1978 		}
1979 		ccb->pkt->pkt_reason = CMD_CMPLT;
1980 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1981 		arcmsr_ccb_complete(ccb, 1);
1982 	} else {
1983 		switch (ccb->arcmsr_cdb.DeviceStatus) {
1984 		case ARCMSR_DEV_SELECT_TIMEOUT:
1985 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1986 				cmn_err(CE_CONT,
1987 				    "arcmsr%d: raid volume was kicked out ",
1988 				    ddi_get_instance(acb->dev_info));
1989 			}
1990 			acb->devstate[id][lun] = ARECA_RAID_GONE;
1991 			ccb->pkt->pkt_reason = CMD_TIMEOUT;
1992 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1993 			arcmsr_ccb_complete(ccb, 1);
1994 			break;
1995 		case ARCMSR_DEV_ABORTED:
1996 		case ARCMSR_DEV_INIT_FAIL:
1997 			cmn_err(CE_CONT,
1998 			    "arcmsr%d: isr got "
1999 			    "'ARCMSR_DEV_ABORTED' or 'ARCMSR_DEV_INIT_FAIL'",
2000 			    ddi_get_instance(acb->dev_info));
2001 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2002 			    "out", ddi_get_instance(acb->dev_info));
2003 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2004 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2005 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2006 			arcmsr_ccb_complete(ccb, 1);
2007 			break;
2008 		case SCSISTAT_CHECK_CONDITION:
2009 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2010 			arcmsr_report_sense_info(ccb);
2011 			arcmsr_ccb_complete(ccb, 1);
2012 			break;
2013 		default:
2014 			cmn_err(CE_WARN, "arcmsr%d: target %d lun %d "
2015 			    "isr received CMD_DONE with unknown "
2016 			    "DeviceStatus (0x%x)",
2017 			    ddi_get_instance(acb->dev_info), id, lun,
2018 			    ccb->arcmsr_cdb.DeviceStatus);
2019 			cmn_err(CE_CONT, "arcmsr%d: raid volume was kicked "
2020 			    "out ", ddi_get_instance(acb->dev_info));
2021 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2022 			/* unknown error or crc error just for retry */
2023 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2024 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2025 			arcmsr_ccb_complete(ccb, 1);
2026 			break;
2027 		}
2028 	}
2029 }
2030 
2031 
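/*
 * Function name: arcmsr_drain_donequeue
 * Return Values: none
 *   Description: Convert a flag_ccb value taken from the reply queue
 *	          back into a CCB pointer and complete it, handling
 *	          aborted and reset commands specially.
 */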
2032 static void
2033 arcmsr_drain_donequeue(struct ACB *acb, uint32_t flag_ccb) {
2034 
2035 	struct CCB *ccb;
2036 
2037 	/* check if command completed without error */
2038 	ccb = (struct CCB *)(acb->vir2phy_offset +
2039 	    (flag_ccb << 5)); /* frame must be aligned on 32 byte boundary */
2040 
2041 	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2042 		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
2043 			cmn_err(CE_CONT,
2044 			    "arcmsr%d: isr got aborted command "
2045 			    "while draining doneq",
2046 			    ddi_get_instance(acb->dev_info));
2047 			ccb->pkt->pkt_reason = CMD_ABORTED;
2048 			ccb->pkt->pkt_statistics |= STAT_ABORTED;
2049 			arcmsr_ccb_complete(ccb, 1);
2050 			return;
2051 		}
2052 
2053 		if (ccb->startdone == ARCMSR_CCB_RESET) {
2054 			cmn_err(CE_CONT,
2055 			    "arcmsr%d: isr got command reset "
2056 			    "while draining doneq",
2057 			    ddi_get_instance(acb->dev_info));
2058 			ccb->pkt->pkt_reason = CMD_RESET;
2059 			ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2060 			arcmsr_ccb_complete(ccb, 1);
2061 			return;
2062 		}
2063 
2064 		cmn_err(CE_WARN, "arcmsr%d: isr got an illegal ccb command "
2065 		    "done while draining doneq",
2066 		    ddi_get_instance(acb->dev_info));
2067 		return;
2068 	}
2069 	arcmsr_report_ccb_state(acb, ccb, flag_ccb);
2070 }
2071 
2072 
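/*
 * Function name: arcmsr_done4abort_postqueue
 * Return Values: none
 *   Description: On abort, clear any pending completion interrupt and
 *	          drain every entry still sitting in the adapter's reply
 *	          queue through arcmsr_drain_donequeue().
 */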
2073 static void
2074 arcmsr_done4abort_postqueue(struct ACB *acb) {
2075 
2076 	int i = 0;
2077 	uint32_t flag_ccb;
2078 
2079 	switch (acb->adapter_type) {
2080 	case ACB_ADAPTER_TYPE_A:
2081 	{
2082 		struct HBA_msgUnit *phbamu;
2083 		uint32_t outbound_intstatus;
2084 
2085 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2086 		/* clear and abort all outbound posted Q */
2087 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2088 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
2089 		/* clear interrupt */
2090 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2091 		    &phbamu->outbound_intstatus, outbound_intstatus);
2092 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
2093 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
2094 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
2095 			arcmsr_drain_donequeue(acb, flag_ccb);
2096 		}
2097 	}
2098 		break;
2099 
2100 	case ACB_ADAPTER_TYPE_B:
2101 	{
2102 		struct HBB_msgUnit *phbbmu;
2103 
2104 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2105 
2106 		/* clear all outbound posted Q */
2107 		/* clear doorbell interrupt */
2108 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2109 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
2110 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
2111 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
2112 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
2113 				phbbmu->done_qbuffer[i] = 0;
2114 				arcmsr_drain_donequeue(acb, flag_ccb);
2115 			}
2116 			phbbmu->post_qbuffer[i] = 0;
2117 		}	/* drain reply FIFO */
2118 		phbbmu->doneq_index = 0;
2119 		phbbmu->postq_index = 0;
2120 		break;
2121 	}
2122 	}
2123 }
2124 
2125 /*
2126  * Routine Description: Reset the 80331 iop.
2127  *           Arguments: acb - adapter control block
2128  *        Return Value: Nothing.
2129  */
2130 static void
2131 arcmsr_iop_reset(struct ACB *acb) {
2132 
2133 	struct CCB *ccb;
2134 	uint32_t intmask_org;
2135 	int i = 0;
2136 
2137 	if (acb->ccboutstandingcount > 0) {
2138 		/* disable all outbound interrupts */
2139 		intmask_org = arcmsr_disable_allintr(acb);
2140 		/* tell the iop 331 to abort all outstanding commands */
2141 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2142 			arcmsr_abort_hba_allcmd(acb);
2143 		} else {
2144 			arcmsr_abort_hbb_allcmd(acb);
2145 		}
2146 		/* clear and abort all outbound posted Q */
2147 		arcmsr_done4abort_postqueue(acb);
2148 
2149 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2150 			ccb = acb->pccb_pool[i];
2151 			if (ccb->startdone == ARCMSR_CCB_START) {
2152 				ccb->startdone = ARCMSR_CCB_RESET;
2153 				ccb->pkt->pkt_reason = CMD_RESET;
2154 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
2155 				arcmsr_ccb_complete(ccb, 1);
2156 			}
2157 		}
2158 		/* enable all outbound interrupt */
2159 		arcmsr_enable_allintr(acb, intmask_org);
2160 	}
2161 }
2162 
2163 /*
2164  * You can access the DMA address through the #defines:
2165  * dmac_address for 32-bit addresses and dmac_laddress for 64-bit addresses.
2166  *	These macros are defined as follows:
2167  *
2168  *	#define dmac_laddress   _dmu._dmac_ll
2169  *	#ifdef _LONG_LONG_HTOL
2170  *		#define dmac_notused    _dmu._dmac_la[0]
2171  *		#define dmac_address    _dmu._dmac_la[1]
2172  *	#else
2173  *		#define dmac_address    _dmu._dmac_la[0]
2174  *		#define dmac_notused    _dmu._dmac_la[1]
2175  *	#endif
2176  */
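/*
 * Function name: arcmsr_build_ccb
 * Return Values: none
 *   Description: Fill in the CCB's ARCMSR_CDB from the scsi_pkt: copy
 *	          the CDB bytes, then translate the DMA cookie list into
 *	          the iop's SG list, using SG32ENTRYs where the address
 *	          fits in 32 bits and SG64ENTRYs, split at 4G boundaries,
 *	          otherwise.
 */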
2177 /*ARGSUSED*/
2178 static void
2179 arcmsr_build_ccb(struct CCB *ccb) {
2180 
2181 	struct scsi_pkt *pkt = ccb->pkt;
2182 	struct ARCMSR_CDB *arcmsr_cdb;
2183 	char *psge;
2184 	uint32_t address_lo, address_hi;
2185 	int arccdbsize = 0x30;
2186 	uint8_t sgcount;
2187 
2188 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2189 	psge = (char *)&arcmsr_cdb->sgu;
2190 
2191 	/* record this command's timeout deadline in seconds */
2192 	ccb->ccb_time = (time_t)(pkt->pkt_time + ddi_get_time());
2193 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb,
2194 	    arcmsr_cdb->CdbLength);
2195 	sgcount = ccb->arcmsr_cdb.sgcount;
2196 
2197 	if (sgcount) {
2198 		int length, i;
2199 		int cdb_sgcount = 0;
2200 		int total_xfer_length = 0;
2201 
2202 		/* map stor port SG list to our iop SG List. */
2203 		for (i = 0; i < sgcount; i++) {
2204 			/* Get physaddr of the current data pointer */
2205 			length = ccb->pkt_dmacookies[i].dmac_size;
2206 			total_xfer_length += length;
2207 			address_lo = dma_addr_lo32(
2208 			    ccb->pkt_dmacookies[i].dmac_laddress);
2209 			address_hi = dma_addr_hi32(
2210 			    ccb->pkt_dmacookies[i].dmac_laddress);
2211 
2212 			if (address_hi == 0) {
2213 				struct SG32ENTRY *dma_sg;
2214 
2215 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
2216 
2217 				dma_sg->address = address_lo;
2218 				dma_sg->length = length;
2219 				psge += sizeof (struct SG32ENTRY);
2220 				arccdbsize += sizeof (struct SG32ENTRY);
2221 			} else {
2222 				int sg64s_size = 0;
2223 				int tmplength = length;
2224 				int64_t span4G, length0;
2225 				struct SG64ENTRY *dma_sg;
2226 
2227 				/*LINTED*/
2228 				while (1) {
2229 					dma_sg =
2230 					    (struct SG64ENTRY *)(intptr_t)psge;
2231 					span4G =
2232 					    (int64_t)address_lo + tmplength;
2233 
2234 					dma_sg->addresshigh = address_hi;
2235 					dma_sg->address = address_lo;
2236 					if (span4G > 0x100000000ULL) {
2237 						/* see if we cross 4G */
2238 						length0 = 0x100000000ULL -
2239 						    address_lo;
2240 						dma_sg->length =
2241 						    (uint32_t)length0 |
2242 						    IS_SG64_ADDR;
2243 						address_hi = address_hi + 1;
2244 						address_lo = 0;
2245 						tmplength = tmplength-
2246 						    (int32_t)length0;
2247 						sg64s_size +=
2248 						    sizeof (struct SG64ENTRY);
2249 						psge +=
2250 						    sizeof (struct SG64ENTRY);
2251 						cdb_sgcount++;
2252 					} else {
2253 						dma_sg->length = tmplength |
2254 						    IS_SG64_ADDR;
2255 						sg64s_size +=
2256 						    sizeof (struct SG64ENTRY);
2257 						psge +=
2258 						    sizeof (struct SG64ENTRY);
2259 						break;
2260 					}
2261 				}
2262 				arccdbsize += sg64s_size;
2263 			}
2264 			cdb_sgcount++;
2265 		}
2266 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
2267 		arcmsr_cdb->DataLength = total_xfer_length;
2268 		if (arccdbsize > 256) {
2269 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
2270 		}
2271 	} else {
2272 		arcmsr_cdb->DataLength = 0;
2273 	}
2274 
2275 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
2276 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
2277 }
2278 
2279 /*
2280  * arcmsr_post_ccb - Send a protocol specific ARC send postcard to an AIOC.
2281  *
2282  * acb:		Adapter control block of the target AIOC
2283  * ccb:		Command control block whose ARCMSR_CDB address is
2284  *		the ARC send postcard
2285  *
2286  * This routine posts an ARC send postcard to the request post FIFO of a
2287  * specific ARC adapter.
2288  */
2289 static int
2290 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb) {
2291 
2292 	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
2293 	struct scsi_pkt *pkt = ccb->pkt;
2294 	struct ARCMSR_CDB *arcmsr_cdb;
2295 
2296 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
2297 
2298 	/* Use correct offset and size for syncing */
2299 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
2300 	    DDI_DMA_SYNC_FORDEV) == DDI_FAILURE)
2301 		return (DDI_FAILURE);
2302 
2303 	acb->ccboutstandingcount++;
2304 	ccb->startdone = ARCMSR_CCB_START;
2305 
2306 	switch (acb->adapter_type) {
2307 	case ACB_ADAPTER_TYPE_A:
2308 	{
2309 		struct HBA_msgUnit *phbamu;
2310 
2311 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2312 
2313 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2314 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2315 			    &phbamu->inbound_queueport,
2316 			    cdb_shifted_phyaddr |
2317 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2318 		} else {
2319 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2320 			    &phbamu->inbound_queueport, cdb_shifted_phyaddr);
2321 		}
2322 		if (pkt->pkt_flags & FLAG_NOINTR)
2323 			arcmsr_polling_hba_ccbdone(acb, ccb);
2324 	}
2325 		break;
2326 	case ACB_ADAPTER_TYPE_B:
2327 	{
2328 		struct HBB_msgUnit *phbbmu;
2329 		int ending_index, index;
2330 
2331 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2332 		mutex_enter(&acb->postq_mutex);
2333 		index = phbbmu->postq_index;
2334 		ending_index = ((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
2335 		phbbmu->post_qbuffer[ending_index] = 0;
2336 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
2337 			phbbmu->post_qbuffer[index] =
2338 			    (cdb_shifted_phyaddr|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
2339 		} else {
2340 			phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
2341 		}
2342 		index++;
2343 		/* if last index number set it to 0 */
2344 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
2345 		phbbmu->postq_index = index;
2346 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2347 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2348 		    ARCMSR_DRV2IOP_CDB_POSTED);
2349 		mutex_exit(&acb->postq_mutex);
2350 		if (pkt->pkt_flags & FLAG_NOINTR)
2351 			arcmsr_polling_hbb_ccbdone(acb, ccb);
2352 	}
2353 	break;
2354 	}
2355 
2356 	return (DDI_SUCCESS);
2357 }
2358 
2359 
2360 
2361 
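/*
 * Function name: arcmsr_get_iop_rqbuffer
 * Return Values: pointer to the iop's message read QBUFFER
 *   Description: Return the adapter-type-specific register area the iop
 *	          uses to pass message data to the driver;
 *	          arcmsr_get_iop_wqbuffer below returns the write-side
 *	          buffer.
 */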
2362 static struct QBUFFER *
2363 arcmsr_get_iop_rqbuffer(struct ACB *acb) {
2364 
2365 	struct QBUFFER *qb;
2366 
2367 	switch (acb->adapter_type) {
2368 	case ACB_ADAPTER_TYPE_A:
2369 	{
2370 		struct HBA_msgUnit *phbamu;
2371 
2372 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2373 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
2374 	}
2375 		break;
2376 	case ACB_ADAPTER_TYPE_B:
2377 	{
2378 		struct HBB_msgUnit *phbbmu;
2379 
2380 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2381 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
2382 	}
2383 		break;
2384 	}
2385 
2386 	return (qb);
2387 }
2388 
2389 
2390 
2391 static struct QBUFFER *
2392 arcmsr_get_iop_wqbuffer(struct ACB *acb) {
2393 
2394 	struct QBUFFER *qbuffer = NULL;
2395 
2396 	switch (acb->adapter_type) {
2397 	case ACB_ADAPTER_TYPE_A:
2398 	{
2399 		struct HBA_msgUnit *phbamu;
2400 
2401 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2402 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
2403 	}
2404 	break;
2405 	case ACB_ADAPTER_TYPE_B:
2406 	{
2407 		struct HBB_msgUnit *phbbmu;
2408 
2409 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2410 		qbuffer =
2411 		    (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
2412 	}
2413 	break;
2414 	}
2415 	return (qbuffer);
2416 }
2417 
2418 
2419 
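/*
 * Function name: arcmsr_iop_message_read
 * Return Values: none
 *   Description: Ring the appropriate doorbell to tell the iop that the
 *	          driver has consumed the current message QBUFFER data.
 */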
2420 static void
2421 arcmsr_iop_message_read(struct ACB *acb) {
2422 
2423 	switch (acb->adapter_type) {
2424 	case ACB_ADAPTER_TYPE_A:
2425 	{
2426 		struct HBA_msgUnit *phbamu;
2427 
2428 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2429 		/* let IOP know the data has been read */
2430 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2431 		    &phbamu->inbound_doorbell,
2432 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
2433 	}
2434 	break;
2435 	case ACB_ADAPTER_TYPE_B:
2436 	{
2437 		struct HBB_msgUnit *phbbmu;
2438 
2439 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2440 		/* let IOP know the data has been read */
2441 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2442 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2443 		    ARCMSR_DRV2IOP_DATA_READ_OK);
2444 	}
2445 	break;
2446 	}
2447 }
2448 
2449 
2450 
2451 static void
2452 arcmsr_iop_message_wrote(struct ACB *acb) {
2453 
2454 	switch (acb->adapter_type) {
2455 	case ACB_ADAPTER_TYPE_A:
2456 	{
2457 		struct HBA_msgUnit *phbamu;
2458 
2459 		phbamu = (struct HBA_msgUnit *)acb->pmu;
2460 		/*
2461 		 * ring the inbound doorbell: driver data write is done; await
2462 		 * the reply on the next hwinterrupt for the next Qbuffer post
2463 		 */
2464 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2465 		    &phbamu->inbound_doorbell,
2466 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
2467 	}
2468 	break;
2469 	case ACB_ADAPTER_TYPE_B:
2470 	{
2471 		struct HBB_msgUnit *phbbmu;
2472 
2473 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2474 		/*
2475 		 * ring the inbound doorbell to tell the iop the driver data
2476 		 * was written successfully, then await the reply on the next
2477 		 * hwinterrupt for the next Qbuffer post
2478 		 */
2479 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2480 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2481 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
2482 	}
2483 	break;
2484 	}
2485 }
2486 
2487 
2488 
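/*
 * Function name: arcmsr_post_ioctldata2iop
 * Return Values: none
 *   Description: Copy up to 124 bytes of pending ioctl write data from
 *	          the driver's wqbuffer ring into the iop's write QBUFFER
 *	          and notify the iop that the data is ready.
 */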
2489 static void
2490 arcmsr_post_ioctldata2iop(struct ACB *acb) {
2491 
2492 	uint8_t *pQbuffer;
2493 	struct QBUFFER *pwbuffer;
2494 	uint8_t *iop_data;
2495 	int32_t allxfer_len = 0;
2496 
2497 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2498 	iop_data = (uint8_t *)pwbuffer->data;
2499 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
2500 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
2501 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
2502 		    (allxfer_len < 124)) {
2503 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
2504 			(void) memcpy(iop_data, pQbuffer, 1);
2505 			acb->wqbuf_firstidx++;
2506 			/* if last index number set it to 0 */
2507 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2508 			iop_data++;
2509 			allxfer_len++;
2510 		}
2511 		pwbuffer->data_len = allxfer_len;
2512 		/*
2513 		 * push inbound doorbell and wait reply at hwinterrupt
2514 		 * routine for next Qbuffer post
2515 		 */
2516 		arcmsr_iop_message_wrote(acb);
2517 	}
2518 }
2519 
2520 
2521 
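/*
 * Function name: arcmsr_stop_hba_bgrb
 * Return Values: none
 *   Description: Ask a type A adapter to stop its background rebuild
 *	          via MESG0_STOP_BGRB and wait for the acknowledgement;
 *	          arcmsr_stop_hbb_bgrb below is the type B analogue.
 */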
2522 static void
2523 arcmsr_stop_hba_bgrb(struct ACB *acb) {
2524 
2525 	struct HBA_msgUnit *phbamu;
2526 
2527 	phbamu = (struct HBA_msgUnit *)acb->pmu;
2528 
2529 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2530 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2531 	    &phbamu->inbound_msgaddr0,
2532 	    ARCMSR_INBOUND_MESG0_STOP_BGRB);
2533 	if (!arcmsr_hba_wait_msgint_ready(acb))
2534 		cmn_err(CE_WARN,
2535 		    "arcmsr%d: timeout while waiting for background "
2536 		    "rebuild completion",
2537 		    ddi_get_instance(acb->dev_info));
2538 }
2539 
2540 
2541 static void
2542 arcmsr_stop_hbb_bgrb(struct ACB *acb) {
2543 
2544 	struct HBB_msgUnit *phbbmu;
2545 
2546 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
2547 
2548 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2549 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
2550 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
2551 	    ARCMSR_MESSAGE_STOP_BGRB);
2552 
2553 	if (!arcmsr_hbb_wait_msgint_ready(acb))
2554 		cmn_err(CE_WARN,
2555 		    "arcmsr%d: timeout while waiting for background "
2556 		    "rebuild completion",
2557 		    ddi_get_instance(acb->dev_info));
2558 }
2559 
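/*
 * Function name: arcmsr_iop_message_xfer
 * Return Values: ARCMSR_MESSAGE_FAIL on error, otherwise 0
 *   Description: Handle an Areca pass-through message delivered in a
 *	          scsi_pkt: decode the 4-byte io control code from the
 *	          CDB and service the read/write/clear QBUFFER requests
 *	          against the driver's message ring buffers.
 */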
2560 static int
2561 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt) {
2562 
2563 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2564 	struct CCB *ccb = pkt->pkt_ha_private;
2565 	struct buf *bp = ccb->bp;
2566 	uint8_t *pQbuffer;
2567 	int retvalue = 0, transfer_len = 0;
2568 	char *buffer;
2569 	uint32_t controlcode;
2570 
2571 
2572 	/* 4 bytes: Areca io control code */
2573 	controlcode = (uint32_t)pkt->pkt_cdbp[5] << 24 |
2574 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
2575 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
2576 	    (uint32_t)pkt->pkt_cdbp[8];
2577 
2578 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2579 		bp_mapin(bp);
2580 
2581 
2582 	buffer = bp->b_un.b_addr;
2583 	transfer_len = bp->b_bcount;
2584 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
2585 		retvalue = ARCMSR_MESSAGE_FAIL;
2586 		goto message_out;
2587 	}
2588 
2589 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
2590 
2591 	switch (controlcode) {
2592 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2593 	{
2594 		unsigned long *ver_addr;
2595 		uint8_t *ptmpQbuffer;
2596 		int32_t allxfer_len = 0;
2597 
2598 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2599 		if (!ver_addr) {
2600 			retvalue = ARCMSR_MESSAGE_FAIL;
2601 			goto message_out;
2602 		}
2603 
2604 		ptmpQbuffer = (uint8_t *)ver_addr;
2605 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2606 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2607 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2608 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2609 			acb->rqbuf_firstidx++;
2610 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2611 			ptmpQbuffer++;
2612 			allxfer_len++;
2613 		}
2614 
2615 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2616 			struct QBUFFER *prbuffer;
2617 			uint8_t  *iop_data;
2618 			int32_t iop_len;
2619 
2620 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2621 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
2622 			iop_data = (uint8_t *)prbuffer->data;
2623 			iop_len = (int32_t)prbuffer->data_len;
2624 
2625 			while (iop_len > 0) {
2626 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2627 				(void) memcpy(pQbuffer, iop_data, 1);
2628 				acb->rqbuf_lastidx++;
2629 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2630 				iop_data++;
2631 				iop_len--;
2632 			}
2633 			arcmsr_iop_message_read(acb);
2634 		}
2635 
2636 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
2637 		    (uint8_t *)ver_addr, allxfer_len);
2638 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
2639 		pcmdmessagefld->cmdmessage.ReturnCode =
2640 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2641 		kmem_free(ver_addr, MSGDATABUFLEN);
2642 	}
2643 	break;
2644 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2645 	{
2646 		unsigned long *ver_addr;
2647 		int32_t my_empty_len, user_len, wqbuf_firstidx, wqbuf_lastidx;
2648 		uint8_t *ptmpuserbuffer;
2649 
2650 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2651 		if (!ver_addr) {
2652 			retvalue = ARCMSR_MESSAGE_FAIL;
2653 			goto message_out;
2654 		}
2655 		ptmpuserbuffer = (uint8_t *)ver_addr;
2656 		user_len = pcmdmessagefld->cmdmessage.Length;
2657 		(void) memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer,
2658 		    user_len);
2659 		wqbuf_lastidx = acb->wqbuf_lastidx;
2660 		wqbuf_firstidx = acb->wqbuf_firstidx;
2661 		if (wqbuf_lastidx != wqbuf_firstidx) {
2662 			struct scsi_arq_status *arq_status;
2663 
2664 			arcmsr_post_ioctldata2iop(acb);
2665 			arq_status =
2666 			    (struct scsi_arq_status *)(intptr_t)
2667 			    (pkt->pkt_scbp);
2668 			bzero((caddr_t)arq_status,
2669 			    sizeof (struct scsi_arq_status));
2670 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
2671 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2672 			    STATE_GOT_TARGET |STATE_SENT_CMD |
2673 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2674 
2675 			arq_status->sts_rqpkt_statistics = pkt->pkt_statistics;
2676 			arq_status->sts_rqpkt_resid = 0;
2677 			if (&arq_status->sts_sensedata != NULL) {
2678 				struct scsi_extended_sense *sts_sensedata;
2679 
2680 				sts_sensedata = &arq_status->sts_sensedata;
2681 
2682 				/* has error report sensedata */
2683 				sts_sensedata->es_code = 0x0;
2684 				sts_sensedata->es_valid = 0x01;
2685 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
2686 				/* AdditionalSenseLength */
2687 				sts_sensedata->es_add_len = 0x0A;
2688 				/* AdditionalSenseCode */
2689 				sts_sensedata->es_add_code = 0x20;
2690 			}
2691 			retvalue = ARCMSR_MESSAGE_FAIL;
2692 		} else {
2693 			my_empty_len = (wqbuf_firstidx-wqbuf_lastidx - 1) &
2694 			    (ARCMSR_MAX_QBUFFER - 1);
2695 			if (my_empty_len >= user_len) {
2696 				while (user_len > 0) {
2697 					pQbuffer =
2698 					    &acb->wqbuffer[acb->wqbuf_lastidx];
2699 					(void) memcpy(pQbuffer,
2700 					    ptmpuserbuffer, 1);
2701 					acb->wqbuf_lastidx++;
2702 					acb->wqbuf_lastidx %=
2703 					    ARCMSR_MAX_QBUFFER;
2704 					ptmpuserbuffer++;
2705 					user_len--;
2706 				}
2707 				if (acb->acb_flags &
2708 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2709 					acb->acb_flags &=
2710 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2711 					arcmsr_post_ioctldata2iop(acb);
2712 				}
2713 			} else {
2714 				struct scsi_arq_status *arq_status;
2715 
2716 				/* has error report sensedata */
2717 				arq_status =
2718 				    (struct scsi_arq_status *)
2719 				    (intptr_t)(pkt->pkt_scbp);
2720 				bzero((caddr_t)arq_status,
2721 				    sizeof (struct scsi_arq_status));
2722 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
2723 				arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
2724 				    STATE_GOT_TARGET |STATE_SENT_CMD |
2725 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
2726 				arq_status->sts_rqpkt_statistics =
2727 				    pkt->pkt_statistics;
2728 				arq_status->sts_rqpkt_resid = 0;
2729 				if (&arq_status->sts_sensedata != NULL) {
2730 					struct scsi_extended_sense
2731 					    *sts_sensedata;
2732 
2733 					sts_sensedata =
2734 					    &arq_status->sts_sensedata;
2735 
2736 					/* has error report sensedata */
2737 					sts_sensedata->es_code  = 0x0;
2738 					sts_sensedata->es_valid = 0x01;
2739 					sts_sensedata->es_key =
2740 					    KEY_ILLEGAL_REQUEST;
2741 					/* AdditionalSenseLength */
2742 					sts_sensedata->es_add_len = 0x0A;
2743 					/* AdditionalSenseCode */
2744 					sts_sensedata->es_add_code = 0x20;
2745 				}
2746 				retvalue = ARCMSR_MESSAGE_FAIL;
2747 			}
2748 		}
2749 		kmem_free(ver_addr, MSGDATABUFLEN);
2750 	}
2751 	break;
2752 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
2753 	{
2754 		pQbuffer = acb->rqbuffer;
2755 
2756 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2757 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2758 			arcmsr_iop_message_read(acb);
2759 		}
2760 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2761 		acb->rqbuf_firstidx = 0;
2762 		acb->rqbuf_lastidx = 0;
2763 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2764 		pcmdmessagefld->cmdmessage.ReturnCode =
2765 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2766 	}
2767 	break;
2768 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
2769 	{
2770 		pQbuffer = acb->wqbuffer;
2771 
2772 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2773 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2774 			arcmsr_iop_message_read(acb);
2775 		}
2776 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2777 		    ACB_F_MESSAGE_WQBUFFER_READ);
2778 		acb->wqbuf_firstidx = 0;
2779 		acb->wqbuf_lastidx = 0;
2780 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2781 		pcmdmessagefld->cmdmessage.ReturnCode =
2782 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2783 	}
2784 	break;
2785 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
2786 	{
2787 
2788 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2789 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2790 			arcmsr_iop_message_read(acb);
2791 		}
2792 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2793 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
2794 		    ACB_F_MESSAGE_WQBUFFER_READ);
2795 		acb->rqbuf_firstidx = 0;
2796 		acb->rqbuf_lastidx = 0;
2797 		acb->wqbuf_firstidx = 0;
2798 		acb->wqbuf_lastidx = 0;
2799 		pQbuffer = acb->rqbuffer;
2800 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2801 		pQbuffer = acb->wqbuffer;
2802 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
2803 		pcmdmessagefld->cmdmessage.ReturnCode =
2804 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2805 	}
2806 	break;
2807 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
2808 		pcmdmessagefld->cmdmessage.ReturnCode =
2809 		    ARCMSR_MESSAGE_RETURNCODE_3F;
2810 		break;
2811 	/*
2812 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
2813 	 */
2814 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2815 		arcmsr_iop_parking(acb);
2816 		break;
2817 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2818 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
2819 			arcmsr_flush_hba_cache(acb);
2820 		} else {
2821 			arcmsr_flush_hbb_cache(acb);
2822 		}
2823 		break;
2824 	default:
2825 		retvalue = ARCMSR_MESSAGE_FAIL;
2826 	}
2827 
2828 message_out:
2829 
2830 	return (retvalue);
2831 }
2832 
2833 
2834 
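/*
 * Function name: arcmsr_cb_ioctl
 * Return Values: 0 on success; ENXIO, EPERM or ENOTTY on failure
 *   Description: Character device ioctl entry point for the Areca
 *	          message interface. Validates the caller's privilege and
 *	          the "ARCMSR" signature, then services the same QBUFFER
 *	          read/write/clear control codes as
 *	          arcmsr_iop_message_xfer, copying the CMD_MESSAGE_FIELD
 *	          in and out of user space.
 */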
2835 static int
2836 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
2837     cred_t *credp, int *rvalp) {
2838 #ifndef __lock_lint
2839 	_NOTE(ARGUNUSED(rvalp))
2840 #endif
2841 
2842 	struct ACB *acb;
2843 	struct CMD_MESSAGE_FIELD *pktioctlfld;
2844 	int retvalue = 0;
2845 	int instance = MINOR2INST(getminor(dev));
2846 
2847 	if (instance < 0)
2848 		return (ENXIO);
2849 
2850 	if (secpolicy_sys_config(credp, B_FALSE) != 0)
2851 		return (EPERM);
2852 
2853 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2854 	if (acb == NULL)
2855 		return (ENXIO);
2856 
2857 	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD),
2858 	    KM_SLEEP);
2859 	if (pktioctlfld == NULL)
2860 		return (ENXIO);
2861 
2862 	/*
2863 	 * if we got here, we are either a 64-bit app in a 64-bit kernel
2864 	 * or a 32-bit app in a 32-bit kernel. Either way, we can just
2865 	 * copy in the args without any special conversions.
2866 	 */
2867 
2868 	mutex_enter(&acb->ioctl_mutex);
2869 	if (ddi_copyin((void *)arg, pktioctlfld,
2870 	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
2871 		retvalue = ENXIO;
2872 		goto ioctl_out;
2873 	}
2874 
2875 	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
2876 		/* validity check */
2877 		retvalue = ENXIO;
2878 		goto ioctl_out;
2879 	}
2880 
2881 	switch ((unsigned int)ioctl_cmd) {
2882 	case ARCMSR_MESSAGE_READ_RQBUFFER:
2883 	{
2884 		unsigned long *ver_addr;
2885 		uint8_t *pQbuffer, *ptmpQbuffer;
2886 		int32_t allxfer_len = 0;
2887 
2888 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2889 		if (ver_addr == NULL) {
2890 			retvalue = ENXIO;
2891 			goto ioctl_out;
2892 		}
2893 
2894 		ptmpQbuffer = (uint8_t *)ver_addr;
2895 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
2896 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
2897 			/* copy READ QBUFFER to srb */
2898 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
2899 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
2900 			acb->rqbuf_firstidx++;
2901 			/* if last index number set it to 0 */
2902 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
2903 			ptmpQbuffer++;
2904 			allxfer_len++;
2905 		}
2906 
2907 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2908 			struct QBUFFER *prbuffer;
2909 			uint8_t *pQbuffer;
2910 			uint8_t *iop_data;
2911 			int32_t iop_len;
2912 
2913 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2914 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
2915 			iop_data = (uint8_t *)prbuffer->data;
2916 			iop_len = (int32_t)prbuffer->data_len;
2917 			/*
2918 			 * this iop data cannot make the ring buffer overflow
2919 			 * again at this point, so just copy it in
2920 			 */
2921 			while (iop_len > 0) {
2922 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
2923 				(void) memcpy(pQbuffer, iop_data, 1);
2924 				acb->rqbuf_lastidx++;
2925 				/* if last index number set it to 0 */
2926 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
2927 				iop_data++;
2928 				iop_len--;
2929 			}
2930 			/* let IOP know data has been read */
2931 			arcmsr_iop_message_read(acb);
2932 		}
2933 		(void) memcpy(pktioctlfld->messagedatabuffer,
2934 		    (uint8_t *)ver_addr, allxfer_len);
2935 		pktioctlfld->cmdmessage.Length = allxfer_len;
2936 		pktioctlfld->cmdmessage.ReturnCode =
2937 		    ARCMSR_MESSAGE_RETURNCODE_OK;
2938 
2939 		if (ddi_copyout(pktioctlfld, (void *)arg,
2940 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
2941 			retvalue = ENXIO;
2942 
2943 		kmem_free(ver_addr, MSGDATABUFLEN);
2944 	}
2945 	break;
2946 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
2947 	{
2948 		unsigned long *ver_addr;
2949 		int32_t my_empty_len, user_len;
2950 		int32_t wqbuf_firstidx, wqbuf_lastidx;
2951 		uint8_t *pQbuffer, *ptmpuserbuffer;
2952 
2953 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
2954 
2955 		if (ver_addr == NULL) {
2956 			retvalue = ENXIO;
2957 			goto ioctl_out;
2958 		}
2959 
2960 		ptmpuserbuffer = (uint8_t *)ver_addr;
2961 		user_len = pktioctlfld->cmdmessage.Length;
2962 		(void) memcpy(ptmpuserbuffer,
2963 		    pktioctlfld->messagedatabuffer, user_len);
2964 		/*
2965 		 * check if the data transfer length of this request would
2966 		 * overflow the qbuffer array
2967 		 */
2968 		wqbuf_lastidx = acb->wqbuf_lastidx;
2969 		wqbuf_firstidx = acb->wqbuf_firstidx;
2970 		if (wqbuf_lastidx != wqbuf_firstidx) {
2971 			arcmsr_post_ioctldata2iop(acb);
2972 			pktioctlfld->cmdmessage.ReturnCode =
2973 			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
2974 		} else {
2975 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
2976 			    & (ARCMSR_MAX_QBUFFER - 1);
2977 			if (my_empty_len >= user_len) {
2978 				while (user_len > 0) {
2979 					/* copy srb data to wqbuffer */
2980 					pQbuffer =
2981 					    &acb->wqbuffer[acb->wqbuf_lastidx];
2982 					(void) memcpy(pQbuffer,
2983 					    ptmpuserbuffer, 1);
2984 					acb->wqbuf_lastidx++;
2985 					/* if at last index, wrap to 0 */
2986 					acb->wqbuf_lastidx %=
2987 					    ARCMSR_MAX_QBUFFER;
2988 					ptmpuserbuffer++;
2989 					user_len--;
2990 				}
2991 				/* post first Qbuffer */
2992 				if (acb->acb_flags &
2993 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2994 					acb->acb_flags &=
2995 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2996 					arcmsr_post_ioctldata2iop(acb);
2997 				}
2998 				pktioctlfld->cmdmessage.ReturnCode =
2999 				    ARCMSR_MESSAGE_RETURNCODE_OK;
3000 			} else {
3001 				pktioctlfld->cmdmessage.ReturnCode =
3002 				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
3003 			}
3004 		}
3005 		if (ddi_copyout(pktioctlfld, (void *)arg,
3006 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3007 			retvalue = ENXIO;
3008 
3009 		kmem_free(ver_addr, MSGDATABUFLEN);
3010 	}
3011 	break;
3012 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
3013 	{
3014 		uint8_t *pQbuffer = acb->rqbuffer;
3015 
3016 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3017 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3018 			arcmsr_iop_message_read(acb);
3019 		}
3020 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3021 		acb->rqbuf_firstidx = 0;
3022 		acb->rqbuf_lastidx = 0;
3023 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3024 		/* report success */
3025 		pktioctlfld->cmdmessage.ReturnCode =
3026 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3027 		if (ddi_copyout(pktioctlfld, (void *)arg,
3028 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3029 			retvalue = ENXIO;
3030 
3031 	}
3032 	break;
3033 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
3034 	{
3035 		uint8_t *pQbuffer = acb->wqbuffer;
3036 
3037 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3038 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3039 			arcmsr_iop_message_read(acb);
3040 		}
3041 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3042 		    ACB_F_MESSAGE_WQBUFFER_READ);
3043 		acb->wqbuf_firstidx = 0;
3044 		acb->wqbuf_lastidx = 0;
3045 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
3046 		/* report success */
3047 		pktioctlfld->cmdmessage.ReturnCode =
3048 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3049 		if (ddi_copyout(pktioctlfld, (void *)arg,
3050 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3051 			retvalue = ENXIO;
3052 
3053 	}
3054 	break;
3055 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
3056 	{
3057 		uint8_t *pQbuffer;
3058 
3059 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3060 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3061 			arcmsr_iop_message_read(acb);
3062 		}
3063 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3064 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
3065 		    ACB_F_MESSAGE_WQBUFFER_READ);
3066 		acb->rqbuf_firstidx = 0;
3067 		acb->rqbuf_lastidx = 0;
3068 		acb->wqbuf_firstidx = 0;
3069 		acb->wqbuf_lastidx = 0;
3070 		pQbuffer = acb->rqbuffer;
3071 		bzero(pQbuffer, sizeof (struct QBUFFER));
3072 		pQbuffer = acb->wqbuffer;
3073 		bzero(pQbuffer, sizeof (struct QBUFFER));
3074 		/* report success */
3075 		pktioctlfld->cmdmessage.ReturnCode =
3076 		    ARCMSR_MESSAGE_RETURNCODE_OK;
3077 		if (ddi_copyout(pktioctlfld, (void *)arg,
3078 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3079 			retvalue = ENXIO;
3080 
3081 	}
3082 	break;
3083 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
3084 	{
3085 		pktioctlfld->cmdmessage.ReturnCode =
3086 		    ARCMSR_MESSAGE_RETURNCODE_3F;
3087 		if (ddi_copyout(pktioctlfld, (void *)arg,
3088 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
3089 			retvalue = ENXIO;
3090 	}
3091 	break;
3092 	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
3093 	case ARCMSR_MESSAGE_SAY_GOODBYE:
3094 		arcmsr_iop_parking(acb);
3095 		break;
3096 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
3097 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3098 			arcmsr_flush_hba_cache(acb);
3099 		} else {
3100 			arcmsr_flush_hbb_cache(acb);
3101 		}
3102 		break;
3103 	default:
3104 		retvalue = ENOTTY;
3105 	}
3106 
3107 ioctl_out:
3108 	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
3109 	mutex_exit(&acb->ioctl_mutex);
3110 
3111 	return (retvalue);
3112 }
3113 
3114 
3115 
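/*
 * Function name: arcmsr_get_freeccb
 * Return Values: pointer to a free CCB, or NULL if the working queue
 *	          is empty
 *   Description: Take the next CCB off the circular ccbworkingQ under
 *	          workingQ_mutex.
 */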
3116 static struct CCB *
3117 arcmsr_get_freeccb(struct ACB *acb) {
3118 
3119 	struct CCB *ccb;
3120 	int workingccb_startindex, workingccb_doneindex;
3121 
3122 
3123 	mutex_enter(&acb->workingQ_mutex);
3124 	workingccb_doneindex = acb->workingccb_doneindex;
3125 	workingccb_startindex = acb->workingccb_startindex;
3126 	ccb = acb->ccbworkingQ[workingccb_startindex];
3127 	workingccb_startindex++;
3128 	workingccb_startindex %= ARCMSR_MAX_FREECCB_NUM;
3129 	if (workingccb_doneindex != workingccb_startindex) {
3130 		acb->workingccb_startindex = workingccb_startindex;
3131 	} else {
3132 		ccb = NULL;
3133 	}
3134 
3135 	mutex_exit(&acb->workingQ_mutex);
3136 	return (ccb);
3137 }
3138 
3139 
3140 
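/*
 * Function name: arcmsr_seek_cmd2abort
 * Return Values: DDI_SUCCESS if the abort was carried out,
 *	          DDI_FAILURE if the pkt was not found outstanding
 *   Description: Abort either all outstanding commands (abortpkt is
 *	          NULL) or the single outstanding command whose pkt
 *	          matches abortpkt, polling the adapter until the aborted
 *	          CCB completes.
 */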
3141 static int
3142 arcmsr_seek_cmd2abort(struct ACB *acb,
3143     struct scsi_pkt *abortpkt) {
3144 
3145 	struct CCB *ccb;
3146 	uint32_t intmask_org = 0;
3147 	int i = 0;
3148 
3149 	acb->num_aborts++;
3150 
3151 	if (abortpkt == NULL) {
3152 		/*
3153 		 * if abortpkt is NULL, the upper layer needs us
3154 		 * to abort all commands
3155 		 */
3156 		if (acb->ccboutstandingcount != 0) {
3157 			/* disable all outbound interrupts */
3158 			intmask_org = arcmsr_disable_allintr(acb);
3159 			/* clear and abort all outbound posted Q */
3160 			arcmsr_done4abort_postqueue(acb);
3161 			/* tell the iop 331 to abort all outstanding commands */
3162 			if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3163 				arcmsr_abort_hba_allcmd(acb);
3164 			} else {
3165 				arcmsr_abort_hbb_allcmd(acb);
3166 			}
3167 
3168 			for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3169 				ccb = acb->pccb_pool[i];
3170 				if (ccb->startdone == ARCMSR_CCB_START) {
3171 					/*
3172 					 * this ccb will complete at
3173 					 * hwinterrupt
3174 					 */
3175 					ccb->startdone = ARCMSR_CCB_ABORTED;
3176 					ccb->pkt->pkt_reason = CMD_ABORTED;
3177 					ccb->pkt->pkt_statistics |=
3178 					    STAT_ABORTED;
3179 					arcmsr_ccb_complete(ccb, 1);
3180 				}
3181 			}
3182 			/*
3183 			 * enable outbound Post Queue, outbound
3184 			 * doorbell Interrupt
3185 			 */
3186 			arcmsr_enable_allintr(acb, intmask_org);
3187 		}
3188 		return (DDI_SUCCESS);
3189 	}
3190 
3191 	/*
3192 	 * The upper layer does the abort-command locking just
3193 	 * prior to calling us.
3194 	 * First determine if we currently own this command.
3195 	 * Start by searching the device queue; if the command is
3196 	 * not found outstanding at all, return failure so the
3197 	 * caller knows we never owned it.
3198 	 */
3199 
3200 	if (acb->ccboutstandingcount != 0) {
3201 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3202 			ccb = acb->pccb_pool[i];
3203 			if (ccb->startdone == ARCMSR_CCB_START) {
3204 				if (ccb->pkt == abortpkt) {
3205 					ccb->startdone =
3206 					    ARCMSR_CCB_ABORTED;
3207 					goto abort_outstanding_cmd;
3208 				}
3209 			}
3210 		}
3211 	}
3212 
3213 	return (DDI_FAILURE);
3214 
3215 abort_outstanding_cmd:
3216 	/* disable all outbound interrupts */
3217 	intmask_org = arcmsr_disable_allintr(acb);
3218 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3219 		arcmsr_polling_hba_ccbdone(acb, ccb);
3220 	} else {
3221 		arcmsr_polling_hbb_ccbdone(acb, ccb);
3222 	}
3223 
3224 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3225 	arcmsr_enable_allintr(acb, intmask_org);
3226 	return (DDI_SUCCESS);
3227 }
3228 
3229 
3230 
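/*
 * Function name: arcmsr_pcidev_disattach
 * Return Values: none
 *   Description: Quiesce the adapter on detach: mask its interrupts,
 *	          stop the background rebuild, flush the adapter cache
 *	          and complete any still-outstanding CCBs as CMD_ABORTED.
 */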
3231 static void
3232 arcmsr_pcidev_disattach(struct ACB *acb) {
3233 
3234 	struct CCB *ccb;
3235 	int i = 0;
3236 
3237 	/* disable all outbound interrupts */
3238 	(void) arcmsr_disable_allintr(acb);
3239 	/* stop adapter background rebuild */
3240 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3241 		arcmsr_stop_hba_bgrb(acb);
3242 		arcmsr_flush_hba_cache(acb);
3243 	} else {
3244 		arcmsr_stop_hbb_bgrb(acb);
3245 		arcmsr_flush_hbb_cache(acb);
3246 	}
3247 	/* abort all outstanding commands */
3248 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3249 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3250 
3251 	if (acb->ccboutstandingcount != 0) {
3252 		/* clear and abort all outbound posted Q */
3253 		arcmsr_done4abort_postqueue(acb);
3254 		/* tell the iop 331 to abort all outstanding commands */
3255 		if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3256 			arcmsr_abort_hba_allcmd(acb);
3257 		} else {
3258 			arcmsr_abort_hbb_allcmd(acb);
3259 		}
3260 
3261 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3262 			ccb = acb->pccb_pool[i];
3263 			if (ccb->startdone == ARCMSR_CCB_START) {
3264 				ccb->startdone = ARCMSR_CCB_ABORTED;
3265 				ccb->pkt->pkt_reason = CMD_ABORTED;
3266 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3267 				arcmsr_ccb_complete(ccb, 1);
3268 			}
3269 		}
3270 	}
3271 }
3272 
3273 /* get firmware miscellaneous data */
3274 static void
3275 arcmsr_get_hba_config(struct ACB *acb) {
3276 
3277 	struct HBA_msgUnit *phbamu;
3278 
3279 	char *acb_firm_model;
3280 	char *acb_firm_version;
3281 	char *iop_firm_model;
3282 	char *iop_firm_version;
3283 	int count;
3284 
3285 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3286 	acb_firm_model = acb->firm_model;
3287 	acb_firm_version = acb->firm_version;
3288 	/* firm_model, 15 */
3289 	iop_firm_model = (char *)
3290 	    (&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3291 	/* firm_version, 17 */
3292 	iop_firm_version =
3293 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3294 
3295 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3296 	    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3297 
3298 	if (!arcmsr_hba_wait_msgint_ready(acb))
3299 		cmn_err(CE_CONT,
3300 		    "arcmsr%d: timeout while waiting for adapter firmware "
3301 		    "miscellaneous data",
3302 		    ddi_get_instance(acb->dev_info));
3303 
3304 	count = 8;
3305 	while (count) {
3306 		*acb_firm_model =
3307 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
3308 		acb_firm_model++;
3309 		iop_firm_model++;
3310 		count--;
3311 	}
3312 
3313 	count = 16;
3314 	while (count) {
3315 		*acb_firm_version =
3316 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
3317 		acb_firm_version++;
3318 		iop_firm_version++;
3319 		count--;
3320 	}
3321 
3322 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3323 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3324 
3325 	/* firm_request_len, 1 */
3326 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3327 	    &phbamu->msgcode_rwbuffer[1]);
3328 	/* firm_numbers_queue, 2 */
3329 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3330 	    &phbamu->msgcode_rwbuffer[2]);
3331 	/* firm_sdram_size, 3 */
3332 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3333 	    &phbamu->msgcode_rwbuffer[3]);
3334 	/* firm_ide_channels, 4 */
3335 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3336 	    &phbamu->msgcode_rwbuffer[4]);
3337 }
3338 
3339 /* get firmware miscellaneous data */
3340 static void
3341 arcmsr_get_hbb_config(struct ACB *acb) {
3342 
3343 	struct HBB_msgUnit *phbbmu;
3344 	char *acb_firm_model;
3345 	char *acb_firm_version;
3346 	char *iop_firm_model;
3347 	char *iop_firm_version;
3348 	int count;
3349 
3350 
3351 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3352 	acb_firm_model = acb->firm_model;
3353 	acb_firm_version = acb->firm_version;
3354 	/* firm_model, 15 */
3355 	iop_firm_model = (char *)
3356 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
3357 	/* firm_version, 17 */
3358 	iop_firm_version = (char *)
3359 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
3360 
3361 
3362 
3363 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3364 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3365 	    ARCMSR_MESSAGE_GET_CONFIG);
3366 
3367 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3368 		cmn_err(CE_CONT,
3369 		    "arcmsr%d: timeout while waiting for adapter firmware "
3370 		    "miscellaneous data",
3371 		    ddi_get_instance(acb->dev_info));
3372 
3373 	count = 8;
3374 	while (count) {
3375 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3376 		    iop_firm_model);
3377 		acb_firm_model++;
3378 		iop_firm_model++;
3379 		count--;
3380 	}
3381 
3382 	count = 16;
3383 	while (count) {
3384 		*acb_firm_version = CHIP_REG_READ8(acb->reg_mu_acc_handle1,
3385 		    iop_firm_version);
3386 		acb_firm_version++;
3387 		iop_firm_version++;
3388 		count--;
3389 	}
3390 
3391 	cmn_err(CE_CONT, "arcmsr%d: ARECA RAID FIRMWARE VERSION %s",
3392 	    ddi_get_instance(acb->dev_info), acb->firm_version);
3393 
3394 	/* firm_request_len, 1 */
3395 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3396 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
3397 	/* firm_numbers_queue, 2 */
3398 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3399 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
3400 	/* firm_sdram_size, 3 */
3401 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3402 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
3403 	/* firm_ide_channels, 4 */
3404 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
3405 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
3406 }
3407 
3408 
3409 
3410 /* start background rebuild */
3411 static void
3412 arcmsr_start_hba_bgrb(struct ACB *acb) {
3413 
3414 	struct HBA_msgUnit *phbamu;
3415 
3416 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3417 
3418 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3419 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3420 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
3421 
3422 	if (!arcmsr_hba_wait_msgint_ready(acb))
3423 		cmn_err(CE_WARN,
3424 		    "arcmsr%d: timeout while waiting for background "
3425 		    "rebuild to start",
3426 		    ddi_get_instance(acb->dev_info));
3427 }
3428 
3429 
3430 static void
3431 arcmsr_start_hbb_bgrb(struct ACB *acb) {
3432 
3433 	struct HBB_msgUnit *phbbmu;
3434 
3435 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3436 
3437 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
3438 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3439 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3440 	    ARCMSR_MESSAGE_START_BGRB);
3441 
3442 	if (!arcmsr_hbb_wait_msgint_ready(acb))
3443 		cmn_err(CE_WARN,
3444 		    "arcmsr%d: timeout while waiting for background "
3445 		    "rebuild to start",
3446 		    ddi_get_instance(acb->dev_info));
3447 }
3448 
3449 
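/*
 * Function name: arcmsr_polling_hba_ccbdone
 * Return Values: none
 *   Description: Poll a type A adapter's outbound queue for completions
 *	          (used for FLAG_NOINTR packets and for aborts) until
 *	          poll_ccb completes or roughly one hundred 25ms waits
 *	          have elapsed; arcmsr_polling_hbb_ccbdone below is the
 *	          type B analogue.
 */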
3450 static void
3451 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb) {
3452 
3453 	struct HBA_msgUnit *phbamu;
3454 	struct CCB *ccb;
3455 	uint32_t flag_ccb, outbound_intstatus;
3456 	uint32_t poll_ccb_done = 0;
3457 	uint32_t poll_count = 0;
3458 
3459 
3460 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3461 
3462 polling_ccb_retry:
3463 	poll_count++;
3464 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3465 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3466 
3467 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
3468 	    outbound_intstatus); /* clear interrupt */
3469 
3470 	/* Use correct offset and size for syncing */
3471 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3472 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3473 		return;
3474 
3475 	/*LINTED*/
3476 	while (1) {
3477 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3478 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
3479 			if (poll_ccb_done) {
3480 				/* no more completed ccbs in the chip FIFO */
3481 				break;
3482 			} else {
3483 				drv_usecwait(25000);
3484 				if (poll_count > 100) {
3485 					break;
3486 				}
3487 				goto polling_ccb_retry;
3488 			}
3489 		}
3490 
3491 		/* check if the command completed without error */
3492 		ccb = (struct CCB *)(acb->vir2phy_offset  +
3493 		    (flag_ccb << 5)); /* frame must be 32 bytes aligned */
3494 		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3495 
3496 		if ((ccb->acb != acb) ||
3497 		    (ccb->startdone != ARCMSR_CCB_START)) {
3498 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3499 				ccb->pkt->pkt_reason = CMD_ABORTED;
3500 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3501 				arcmsr_ccb_complete(ccb, 1);
3502 				continue;
3503 			}
3504 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3505 			    "unexpected ccb command done",
3506 			    ddi_get_instance(acb->dev_info));
3507 			continue;
3508 		}
3509 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3510 	}	/* drain reply FIFO */
3511 }
3512 
3513 
3514 static void
3515 arcmsr_polling_hbb_ccbdone(struct ACB *acb,
3516     struct CCB *poll_ccb) {
3517 
3518 	struct HBB_msgUnit *phbbmu;
3519 	struct CCB *ccb;
3520 	uint32_t flag_ccb;
3521 	uint32_t poll_ccb_done = 0;
3522 	uint32_t poll_count = 0;
3523 	int index;
3524 
3525 
3526 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3527 
3528 
3529 polling_ccb_retry:
3530 	poll_count++;
3531 	/* clear doorbell interrupt */
3532 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3533 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3534 	    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3535 
3536 	/* Use correct offset and size for syncing */
3537 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3538 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
3539 		return;
3540 
3541 
3542 	/*LINTED*/
3543 	while (1) {
3544 		index = phbbmu->doneq_index;
3545 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
3546 			if (poll_ccb_done) {
3547 				/* no more ccbs in the chip reply FIFO */
3548 				break;
3549 			} else {
3550 				drv_usecwait(25000);
3551 				if (poll_count > 100)
3552 					break;
3553 
3554 				goto polling_ccb_retry;
3555 			}
3556 		}
3557 
3558 		phbbmu->done_qbuffer[index] = 0;
3559 		index++;
3560 		/* wrap the index to 0 past the last slot */
3561 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
3562 		phbbmu->doneq_index = index;
3563 		/* check if command done with no error */
3564 		/* frame must be 32 bytes aligned */
3565 		ccb = (struct CCB *)(acb->vir2phy_offset +
3566 		    (flag_ccb << 5));
3567 		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
3568 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3569 			if (ccb->startdone == ARCMSR_CCB_ABORTED) {
3570 				ccb->pkt->pkt_reason = CMD_ABORTED;
3571 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3572 				arcmsr_ccb_complete(ccb, 1);
3573 				continue;
3574 			}
3575 			cmn_err(CE_WARN, "arcmsr%d: polling op got "
3576 			    "unexpected ccb command done",
3577 			    ddi_get_instance(acb->dev_info));
3578 			continue;
3579 		}
3580 		arcmsr_report_ccb_state(acb, ccb, flag_ccb);
3581 	}	/* drain reply FIFO */
3582 }
3583 
3584 
3585 /*
3586  *    Function: arcmsr_tran_start(9E)
3587  * Description: Transport the command in pktp to the target device.
3588  *		The command is not finished when this returns, only
3589  *		sent to the target; arcmsr_interrupt will call
3590  *		(*pktp->pkt_comp)(pktp) when the target device is done.
3591  *
3592  *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
3593  *      Output:	TRAN_ACCEPT if the pkt is OK and the driver is not busy
3594  *		TRAN_BUSY if the driver is busy
3595  *		TRAN_BADPKT if pkt is invalid
3596  */
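/*
 * Order of operations below: sync the data DMA for the device, build
 * the CCB if it has not been built yet, fail the command early during
 * a bus reset or when a block read/write targets a missing volume,
 * emulate the virtual target 16 (iop message transfer) inside the
 * driver, and otherwise post the CCB to the adapter unless too many
 * commands are already outstanding.
 */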
3597 static int
3598 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt) {
3599 
3600 	struct ACB *acb;
3601 	struct CCB *ccb;
3602 	int target = ap->a_target;
3603 	int lun = ap->a_lun;
3604 
3605 
3606 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3607 	ccb = pkt->pkt_ha_private;
3608 
3609 	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
3610 	    (ccb->ccb_flags & DDI_DMA_CONSISTENT))
3611 		(void) ddi_dma_sync(ccb->pkt_dma_handle, ccb->pkt_dma_offset,
3612 		    ccb->pkt_dma_len, DDI_DMA_SYNC_FORDEV);
3613 
3614 
3615 	if (ccb->startdone == ARCMSR_CCB_UNBUILD)
3616 		arcmsr_build_ccb(ccb);
3617 
3618 
3619 	if (acb->acb_flags & ACB_F_BUS_RESET) {
3620 		cmn_err(CE_CONT,
3621 		    "arcmsr%d: bus reset in progress, returning CMD_RESET",
3622 		    ddi_get_instance(acb->dev_info));
3623 		pkt->pkt_reason = CMD_RESET;
3624 		pkt->pkt_statistics |= STAT_BUS_RESET;
3625 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3626 		    STATE_SENT_CMD | STATE_GOT_STATUS);
3627 		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3628 		    (pkt->pkt_state & STATE_XFERRED_DATA))
3629 			(void) ddi_dma_sync(ccb->pkt_dma_handle,
3630 			    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3631 			    DDI_DMA_SYNC_FORCPU);
3632 
3633 		if (pkt->pkt_comp)
3634 			(*pkt->pkt_comp)(pkt);
3635 
3636 
3637 		return (TRAN_ACCEPT);
3638 	}
3639 
3640 	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
3641 		uint8_t block_cmd;
3642 
3643 		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
3644 
3645 		if (block_cmd == 0x08 || block_cmd == 0x0a) {
3646 			cmn_err(CE_CONT,
3647 			    "arcmsr%d: block read/write command while raid "
3648 			    "volume missing (cmd %02x for target %d lun %d)",
3649 			    ddi_get_instance(acb->dev_info),
3650 			    block_cmd, target, lun);
3651 			pkt->pkt_reason = CMD_TIMEOUT;
3652 			pkt->pkt_statistics |= STAT_TIMEOUT;
3653 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
3654 			    STATE_SENT_CMD | STATE_GOT_STATUS);
3655 
3656 			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
3657 			    (pkt->pkt_state & STATE_XFERRED_DATA))
3658 				(void) ddi_dma_sync(ccb->pkt_dma_handle,
3659 				    ccb->pkt_dma_offset, ccb->pkt_dma_len,
3660 				    DDI_DMA_SYNC_FORCPU);
3661 
3662 
3663 			if (pkt->pkt_comp)
3664 				(*pkt->pkt_comp)(pkt);
3665 
3666 
3667 			return (TRAN_ACCEPT);
3668 		}
3669 	}
3670 
3671 
3672 	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
3673 	if (target == 16) {
3674 
3675 		struct buf *bp = ccb->bp;
3676 		uint8_t scsicmd = pkt->pkt_cdbp[0];
3677 
3678 		switch (scsicmd) {
3679 		case SCMD_INQUIRY: {
3680 			if (lun != 0) {
3681 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3682 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
3683 				arcmsr_ccb_complete(ccb, 0);
3684 				return (TRAN_ACCEPT);
3685 			}
3686 
3687 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
3688 				uint8_t inqdata[36];
3689 
3690 				/* EVPD and page codes are not supported */
3691 				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
3692 					inqdata[1] = 0xFF;
3693 					inqdata[2] = 0x00;
3694 				} else {
3695 					/* Periph Qualifier & Periph Dev Type */
3696 					inqdata[0] = DTYPE_PROCESSOR;
3697 					/* rem media bit & Dev Type Modifier */
3698 					inqdata[1] = 0;
3699 					/* ISO, ECMA, & ANSI versions */
3700 					inqdata[2] = 0;
3701 					/* length of additional data */
3702 					inqdata[4] = 31;
3703 					/* Vendor Identification */
3704 					bcopy("Areca   ",
3705 					    &inqdata[8], VIDLEN);
3706 					/* Product Identification */
3707 					bcopy("RAID controller ",
3708 					    &inqdata[16], PIDLEN);
3709 					/* Product Revision */
3710 					bcopy("R001",
3711 					    &inqdata[32], REVLEN);
3712 					if (bp->b_flags & (B_PHYS | B_PAGEIO))
3713 						bp_mapin(bp);
3714 
3715 					(void) memcpy(bp->b_un.b_addr,
3716 					    inqdata, sizeof (inqdata));
3717 				}
3718 				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3719 			}
3720 			arcmsr_ccb_complete(ccb, 0);
3721 			return (TRAN_ACCEPT);
3722 		}
3723 		case SCMD_WRITE_BUFFER:
3724 		case SCMD_READ_BUFFER: {
3725 			if (arcmsr_iop_message_xfer(acb, pkt)) {
3726 				/* on error, flag the command for retry */
3727 				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
3728 				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
3729 			}
3730 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3731 			arcmsr_ccb_complete(ccb, 0);
3732 			return (TRAN_ACCEPT);
3733 		}
3734 		default:
3735 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
3736 			arcmsr_ccb_complete(ccb, 0);
3737 			return (TRAN_ACCEPT);
3738 		}
3739 	}
3740 
3741 	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
3742 		cmn_err(CE_CONT,
3743 		    "arcmsr%d: too many outstanding commands (%d >= %d)",
3744 		    ddi_get_instance(acb->dev_info),
3745 		    acb->ccboutstandingcount,
3746 		    ARCMSR_MAX_OUTSTANDING_CMD);
3747 		return (TRAN_BUSY);
3748 	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
3749 		cmn_err(CE_CONT,
3750 		    "arcmsr%d: post failure, ccboutstandingcount = %d",
3751 		    ddi_get_instance(acb->dev_info),
3752 		    acb->ccboutstandingcount);
3753 		return (TRAN_BUSY);
3754 	}
3755 
3756 	return (TRAN_ACCEPT);
3757 }
3758 
3759 /*
3760  * Function: arcmsr_tran_abort(9E)
3761  * 		SCSA interface routine to abort pkt(s) in progress.
3762  * 		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
3763  * Output:	Return 1 on success
3764  *		Return 0 on failure
3765  */
3766 static int
3767 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt) {
3768 
3769 	struct ACB *acb;
3770 	int return_code;
3771 
3772 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3773 
3774 
3775 	cmn_err(CE_WARN,
3776 	    "arcmsr%d: tran_abort called for target %d lun %d",
3777 	    ddi_get_instance(acb->dev_info), ap->a_target, ap->a_lun);
3778 
3779 	while (acb->ccboutstandingcount != 0) {
3780 		drv_usecwait(10000);
3781 	}
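	/*
	 * Note that the loop above waits indefinitely for outstanding
	 * commands to drain; there is no timeout on this wait.
	 */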
3782 
3783 	mutex_enter(&acb->acb_mutex);
3784 	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
3785 	mutex_exit(&acb->acb_mutex);
3786 
3787 	if (return_code != DDI_SUCCESS) {
3788 		cmn_err(CE_WARN,
3789 		    "arcmsr%d: abort command failed for target %d lun %d",
3790 		    ddi_get_instance(acb->dev_info),
3791 		    ap->a_target, ap->a_lun);
3792 		return (0);
3793 	}
3794 
3795 	return (1);
3796 }
3797 
3798 /*
3799  * Function: arcmsr_tran_reset(9E)
3800  *           SCSA interface routine to perform scsi resets on either
3801  *           a specified target or the bus (default).
3802  *   Output: Return 1 on success
3803  *	     Return 0 on failure
3804  */
3805 static int
3806 arcmsr_tran_reset(struct scsi_address *ap, int level) {
3807 
3808 	struct ACB *acb;
3809 	int return_code = 1;
3810 	int retry = 0;
3811 
3812 
3813 	/* Are we in the middle of dumping core? */
3814 	if (ddi_in_panic())
3815 		return (return_code);
3816 
3817 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
3818 
3819 	cmn_err(CE_WARN, "arcmsr%d: tran reset (level 0x%x) called "
3820 	    "for target %d lun %d",
3821 	    ddi_get_instance(acb->dev_info), level,
3822 	    ap->a_target, ap->a_lun);
3823 	mutex_enter(&acb->acb_mutex);
3824 
3825 	while ((acb->ccboutstandingcount > 0) && (retry < 400)) {
3826 		(void) arcmsr_interrupt((caddr_t)acb);
3827 		drv_usecwait(25000);
3828 		retry++;
3829 	}
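	/*
	 * The loop above services the interrupt handler by hand and
	 * waits 25000 us per pass, at most 400 times: up to roughly
	 * 10 seconds for outstanding commands to drain.
	 */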
3830 
3831 	switch (level) {
3832 	case RESET_ALL:		/* level 1 */
3833 		acb->num_resets++;
3834 		acb->acb_flags |= ACB_F_BUS_RESET;
3835 		arcmsr_iop_reset(acb);
3836 		acb->acb_flags &= ~ACB_F_BUS_RESET;
3837 		return_code = 0;
3838 		break;
3839 	case RESET_TARGET:	/* level 0 */
3840 		cmn_err(CE_WARN, "arcmsr%d: target reset not supported",
3841 		    ddi_get_instance(acb->dev_info));
3842 		return_code = 0;
3843 		break;
3844 	default:
3845 		return_code = 0;
3846 	}
3847 
3848 	mutex_exit(&acb->acb_mutex);
3849 	return (return_code);
3850 }
3851 
3852 
3853 static void
3854 arcmsr_log(struct ACB *acb, int level, char *fmt, ...) {
3855 
3856 	char	buf[256];
3857 	va_list ap;
3858 
3859 	va_start(ap, fmt);
3860 	(void) vsprintf(buf, fmt, ap);
3861 	va_end(ap);
3862 	scsi_log(acb ? acb->dev_info : NULL, "arcmsr", level, "%s", buf);
3863 }
3864 
3865 
3866 static void
3867 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
3868 
3869 	struct QBUFFER *prbuffer;
3870 	uint8_t *pQbuffer;
3871 	uint8_t *iop_data;
3872 	int my_empty_len, iop_len;
3873 	int rqbuf_firstidx, rqbuf_lastidx;
3874 
3875 	/* check whether this iop data would overflow our rqbuffer */
3876 	rqbuf_lastidx = acb->rqbuf_lastidx;
3877 	rqbuf_firstidx = acb->rqbuf_firstidx;
3878 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
3879 	iop_data = (uint8_t *)prbuffer->data;
3880 	iop_len = prbuffer->data_len;
3881 	my_empty_len = (rqbuf_firstidx - rqbuf_lastidx - 1) &
3882 	    (ARCMSR_MAX_QBUFFER - 1);
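	/*
	 * Worked example of the free-space computation above, assuming
	 * ARCMSR_MAX_QBUFFER is a power of two: with firstidx == 5 and
	 * lastidx == 5 (empty ring) the mask yields
	 * ARCMSR_MAX_QBUFFER - 1 free bytes; with firstidx == 6 and
	 * lastidx == 5 it yields 0.  One slot is deliberately kept
	 * unused so a full ring can be told apart from an empty one.
	 */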
3883 
3884 	if (my_empty_len >= iop_len) {
3885 		while (iop_len > 0) {
3886 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
3887 			(void) memcpy(pQbuffer, iop_data, 1);
3888 			rqbuf_lastidx++;
3889 			/* wrap the index to 0 past the last slot */
3890 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
3891 			iop_data++;
3892 			iop_len--;
3893 		}
3894 		acb->rqbuf_lastidx = rqbuf_lastidx;
3895 		arcmsr_iop_message_read(acb);
3896 		/* signature, let IOP know data has been read */
3897 	} else {
3898 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
3899 	}
3900 }
3901 
3902 
3903 
3904 static void
3905 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
3906 
3907 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
3908 	/*
3909 	 * check for mail packages from the user space program in our post
3910 	 * bag; now is the time to send them to Areca's firmware
3911 	 */
3912 
3913 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
3914 
3915 		uint8_t *pQbuffer;
3916 		struct QBUFFER *pwbuffer;
3917 		uint8_t *iop_data;
3918 		int allxfer_len = 0;
3919 
3920 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
3921 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
3922 		iop_data = (uint8_t *)pwbuffer->data;
3923 
3924 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
3925 		    (allxfer_len < 124)) {
3926 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
3927 			(void) memcpy(iop_data, pQbuffer, 1);
3928 			acb->wqbuf_firstidx++;
3929 			/* wrap the index to 0 past the last slot */
3930 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
3931 			iop_data++;
3932 			allxfer_len++;
3933 		}
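		/*
		 * The 124-byte cap above presumably matches the data
		 * area of the firmware QBUFFER; at most one QBUFFER of
		 * outbound data is posted per doorbell exchange.
		 */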
3934 		pwbuffer->data_len = allxfer_len;
3935 		/*
3936 		 * push the inbound doorbell to tell the iop the data write
3937 		 * is ok; its reply (the next hw interrupt) gates the next
3938 		 */
3939 		arcmsr_iop_message_wrote(acb);
3940 	}
3941 
3942 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
3943 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
3944 }
3945 
3946 
3947 static void
3948 arcmsr_hba_doorbell_isr(struct ACB *acb) {
3949 
3950 	uint32_t outbound_doorbell;
3951 	struct HBA_msgUnit *phbamu;
3952 
3953 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3954 
3955 	/*
3956 	 *  Maybe we should check here whether wrqbuffer_lock is held
3957 	 *  DOORBELL: ding! dong!
3958 	 *  check whether any mail needs to be collected from the firmware
3959 	 */
3960 
3961 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3962 	    &phbamu->outbound_doorbell);
3963 	/* clear doorbell interrupt */
3964 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3965 	    &phbamu->outbound_doorbell, outbound_doorbell);
3966 
3967 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
3968 		arcmsr_iop2drv_data_wrote_handle(acb);
3969 
3970 
3971 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
3972 		arcmsr_iop2drv_data_read_handle(acb);
3973 }
3974 
3975 
3976 
3977 static void
3978 arcmsr_hba_postqueue_isr(struct ACB *acb) {
3979 
3980 	uint32_t flag_ccb;
3981 	struct HBA_msgUnit *phbamu;
3982 
3983 
3984 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3985 
3986 	/* areca cdb command done */
3987 	/* Use correct offset and size for syncing */
3988 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, acb->dma_sync_size,
3989 	    DDI_DMA_SYNC_FORKERNEL);
3990 
3991 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3992 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
3993 		/* check if command done with no error */
3994 		arcmsr_drain_donequeue(acb, flag_ccb);
3995 	}	/* drain reply FIFO */
3996 }
3997 
3998 
3999 
4000 static void
4001 arcmsr_hbb_postqueue_isr(struct ACB *acb) {
4002 
4003 	int index;
4004 	uint32_t flag_ccb;
4005 	struct HBB_msgUnit *phbbmu;
4006 
4007 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4008 
4009 
4010 	/* areca cdb command done */
4011 	index = phbbmu->doneq_index;
4012 
4013 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
4014 		phbbmu->done_qbuffer[index] = 0;
4015 		index++;
4016 		/* wrap the index to 0 past the last slot */
4017 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4018 		phbbmu->doneq_index = index;
4019 		/* check if command done with no error */
4020 		arcmsr_drain_donequeue(acb, flag_ccb);
4021 	}	/* drain reply FIFO */
4022 }
4023 
4024 
4025 static uint_t
4026 arcmsr_handle_hba_isr(struct ACB *acb) {
4027 
4028 	uint32_t outbound_intstatus;
4029 	struct HBA_msgUnit *phbamu;
4030 
4031 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4032 
4033 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4034 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
4035 
4036 	if (!outbound_intstatus)
4037 		/* it must be a shared irq */
4038 		return (DDI_INTR_UNCLAIMED);
4039 
4040 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
4041 	    outbound_intstatus); /* clear interrupt */
4042 
4043 
4044 	/* MU doorbell interrupts */
4045 
4046 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
4047 		arcmsr_hba_doorbell_isr(acb);
4048 
4049 	/* MU post queue interrupts */
4050 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
4051 		arcmsr_hba_postqueue_isr(acb);
4052 
4053 	/*
4054 	 * The following block is commented out pending confirmation from
4055 	 * Areca whether it is or is not truly required
4056 	 */
4057 	/* MU message interrupt */
4058 	/*
4059 	 * if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
4060 	 *	arcmsr_hba_message_isr(acb);
4061 	 * }
4062 	 */
4063 	return (DDI_INTR_CLAIMED);
4064 }
4065 
4066 
4067 static uint_t
4068 arcmsr_handle_hbb_isr(struct ACB *acb) {
4069 
4070 	uint32_t outbound_doorbell;
4071 	struct HBB_msgUnit *phbbmu;
4072 
4073 
4074 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4075 
4076 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4077 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
4078 
4079 	if (!outbound_doorbell)
4080 		/* it must be a shared irq */
4081 		return (DDI_INTR_UNCLAIMED);
4082 
4083 	/* clear doorbell interrupt */
4084 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4085 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
4086 	/* wait a cycle */
4087 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4088 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
4089 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4090 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4091 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4092 
4093 	/* MU ioctl transfer doorbell interrupts */
4094 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
4095 		arcmsr_iop2drv_data_wrote_handle(acb);
4096 
4097 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
4098 		arcmsr_iop2drv_data_read_handle(acb);
4099 
4100 	/* MU post queue interrupts */
4101 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
4102 		arcmsr_hbb_postqueue_isr(acb);
4103 
4104 	/*
4105 	 * The following block is commented out pending confirmation from
4106 	 * Areca whether it is or is not truly required
4107 	 */
4108 	/* MU message interrupt */
4109 	/*
4110 	 * if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
4111 	 *		arcmsr_hbb_message_isr(acb);
4112 	 *	}
4113 	 */
4114 	return (DDI_INTR_CLAIMED);
4115 }
4116 
4117 
4118 static uint_t
4119 arcmsr_interrupt(caddr_t arg) {
4120 
4121 
4122 	struct ACB *acb = (struct ACB *)(intptr_t)arg;
4123 
4124 	switch (acb->adapter_type) {
4125 	case ACB_ADAPTER_TYPE_A:
4126 		return (arcmsr_handle_hba_isr(acb));
4127 	case ACB_ADAPTER_TYPE_B:
4128 		return (arcmsr_handle_hbb_isr(acb));
4129 	default:
4130 		cmn_err(CE_WARN, "arcmsr%d: unknown adapter type (%d)",
4131 		    ddi_get_instance(acb->dev_info), acb->adapter_type);
4132 		return (DDI_INTR_UNCLAIMED);
4133 	}
4134 }
4135 
4136 
4137 static void
4138 arcmsr_wait_firmware_ready(struct ACB *acb) {
4139 
4140 	uint32_t firmware_state;
4141 
4142 	firmware_state = 0;
4143 
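	/*
	 * Both branches below poll the firmware-ready bit without a
	 * timeout, so a wedged IOP would hang initialization here.
	 */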
4144 	switch (acb->adapter_type) {
4145 	case ACB_ADAPTER_TYPE_A:
4146 	{
4147 		struct HBA_msgUnit *phbamu;
4148 
4149 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4150 		do {
4151 			firmware_state =
4152 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4153 			    &phbamu->outbound_msgaddr1);
4154 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
4155 		    == 0);
4156 	}
4157 	break;
4158 	case ACB_ADAPTER_TYPE_B:
4159 	{
4160 		struct HBB_msgUnit *phbbmu;
4161 
4162 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4163 		do {
4164 			firmware_state =
4165 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4166 				    &phbbmu->hbb_doorbell->iop2drv_doorbell);
4167 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
4168 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4169 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4170 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
4171 	}
4172 	break;
4173 	}
4174 }
4175 
4176 static void
4177 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb) {
4178 
4179 	switch (acb->adapter_type) {
4180 	case ACB_ADAPTER_TYPE_A:
4181 	{
4182 		struct HBA_msgUnit *phbamu;
4183 		uint32_t outbound_doorbell;
4184 
4185 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4186 		/* empty the doorbell Qbuffer if the doorbell rang */
4187 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4188 		    &phbamu->outbound_doorbell);
4189 		/* clear doorbell interrupt */
4190 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4191 		    &phbamu->outbound_doorbell, outbound_doorbell);
4192 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4193 		    &phbamu->inbound_doorbell,
4194 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4195 	}
4196 	break;
4197 	case ACB_ADAPTER_TYPE_B:
4198 	{
4199 		struct HBB_msgUnit *phbbmu;
4200 
4201 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4202 
4203 		/* clear interrupt and message state */
4204 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4205 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4206 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
4207 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4208 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4209 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4210 		/* let IOP know data has been read */
4211 	}
4212 	break;
4213 	}
4214 }
4215 
4216 
4217 static uint32_t
4218 arcmsr_iop_confirm(struct ACB *acb) {
4219 
4220 	unsigned long ccb_phyaddr;
4221 	uint32_t ccb_phyaddr_hi32;
4222 
4223 	/*
4224 	 * here we need to tell iop 331 about our freeccb.HighPart
4225 	 * if freeccb.HighPart is non-zero
4226 	 */
4227 	ccb_phyaddr = (unsigned long)acb->ccb_cookie.dmac_address;
4228 	ccb_phyaddr_hi32 = (uint32_t)((ccb_phyaddr >> 16) >> 16);
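	/*
	 * The double 16-bit shift avoids shifting by 32 in one step,
	 * which is undefined behaviour when unsigned long is 32 bits
	 * wide; on such (ILP32) kernels the result is simply 0.
	 */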
4229 
4230 	switch (acb->adapter_type) {
4231 	case ACB_ADAPTER_TYPE_A:
4232 	{
4233 		if (ccb_phyaddr_hi32 != 0) {
4234 			struct HBA_msgUnit *phbamu;
4235 
4236 			phbamu = (struct HBA_msgUnit *)acb->pmu;
4237 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4238 			    &phbamu->msgcode_rwbuffer[0],
4239 			    ARCMSR_SIGNATURE_SET_CONFIG);
4240 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4241 			    &phbamu->msgcode_rwbuffer[1], ccb_phyaddr_hi32);
4242 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4243 			    &phbamu->inbound_msgaddr0,
4244 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
4245 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
4246 				cmn_err(CE_WARN,
4247 				    "arcmsr%d: timeout setting ccb high "
4248 				    "physical address",
4249 				    ddi_get_instance(acb->dev_info));
4250 				return (FALSE);
4251 			}
4252 		}
4253 	}
4254 	break;
4255 
4256 	/* if adapter is type B, set window of "post command queue" */
4257 
4258 	case ACB_ADAPTER_TYPE_B:
4259 	{
4260 		uint32_t post_queue_phyaddr;
4261 		struct HBB_msgUnit *phbbmu;
4262 
4263 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4264 		phbbmu->postq_index = 0;
4265 		phbbmu->doneq_index = 0;
4266 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4267 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4268 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
4269 
4270 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4271 			cmn_err(CE_WARN,
4272 			    "arcmsr%d: timeout setting post command "
4273 			    "queue window",
4274 			    ddi_get_instance(acb->dev_info));
4275 			return (FALSE);
4276 		}
4277 
4278 		post_queue_phyaddr = ccb_phyaddr +
4279 		    ARCMSR_MAX_FREECCB_NUM *
4280 		    sizeof (struct CCB)
4281 		    + ARCOFFSET(struct HBB_msgUnit, post_qbuffer);
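		/*
		 * Queue sizing, per the comments below: each queue is
		 * (256 + 8) entries of 4 bytes, i.e. 1056 bytes, so the
		 * doneQ begins 1056 bytes past the postQ.
		 */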
4282 		/* driver "set config" signature */
4283 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4284 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
4285 		    ARCMSR_SIGNATURE_SET_CONFIG);
4286 		/* normal should be zero */
4287 		/* high 32 bits of the ccb address; normally zero */
4288 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
4289 		    ccb_phyaddr_hi32);
4290 		/* postQ size (256+8)*4 */
4291 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4292 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
4293 		    post_queue_phyaddr);
4294 		/* doneQ size (256+8)*4 */
4295 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4296 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
4297 		    post_queue_phyaddr+1056);
4298 		/* ccb maxQ size must be --> [(256+8)*4] */
4299 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
4300 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
4301 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4302 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4303 		    ARCMSR_MESSAGE_SET_CONFIG);
4304 
4305 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4306 			cmn_err(CE_WARN,
4307 			    "arcmsr%d: timeout setting command queue window",
4308 			    ddi_get_instance(acb->dev_info));
4309 			return (FALSE);
4310 		}
4311 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4312 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4313 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
4314 
4315 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
4316 			cmn_err(CE_WARN,
4317 			    "arcmsr%d: timeout in 'start driver mode'",
4318 			    ddi_get_instance(acb->dev_info));
4319 			return (FALSE);
4320 		}
4321 	}
4322 	break;
4323 	}
4324 	return (TRUE);
4325 }
4326 
4327 
4328 /*
4329  * ONLY used for Adapter type B
4330  */
4331 static void
4332 arcmsr_enable_eoi_mode(struct ACB *acb) {
4333 
4334 	struct HBB_msgUnit *phbbmu;
4335 
4336 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4337 
4338 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4339 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4340 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
4341 
4342 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4343 		cmn_err(CE_WARN,
4344 		    "arcmsr%d (Adapter type B): "
4345 		    "'iop enable eoi mode' timeout",
4346 		    ddi_get_instance(acb->dev_info));
4347 
4348 }
4349 
4350 /* initialize the iop and start background rebuild */
4351 static void
4352 arcmsr_iop_init(struct ACB *acb) {
4353 
4354 	uint32_t intmask_org;
4355 
4356 	/* disable all outbound interrupt */
4357 	intmask_org = arcmsr_disable_allintr(acb);
4358 	arcmsr_wait_firmware_ready(acb);
4359 	(void) arcmsr_iop_confirm(acb);
4360 
4361 	/* start background rebuild */
4362 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
4363 		arcmsr_get_hba_config(acb);
4364 		arcmsr_start_hba_bgrb(acb);
4365 	} else {
4366 		arcmsr_get_hbb_config(acb);
4367 		arcmsr_start_hbb_bgrb(acb);
4368 	}
4369 
4370 	/* empty the doorbell Qbuffer if the doorbell rang */
4371 	arcmsr_clear_doorbell_queue_buffer(acb);
4372 
4373 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
4374 		arcmsr_enable_eoi_mode(acb);
4375 
4376 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4377 	arcmsr_enable_allintr(acb, intmask_org);
4378 	acb->acb_flags |= ACB_F_IOP_INITED;
4379 }
4380 
4381 
4382 static int
4383 arcmsr_initialize(struct ACB *acb) {
4384 
4385 	struct CCB *pccb_tmp;
4386 	size_t allocated_length;
4387 	uint16_t wval;
4388 	uint32_t wlval;
4389 	uint_t intmask_org, count;
4390 	caddr_t	arcmsr_ccbs_area;
4391 	unsigned long ccb_phyaddr;
4392 	int32_t dma_sync_size;
4393 	int i, id, lun;
4394 
4395 	acb->irq = pci_config_get8(acb->pci_acc_handle,
4396 	    ARCMSR_PCI2PCI_PRIMARY_INTERRUPT_LINE_REG);
4397 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
4398 	wval = (uint16_t)((wlval >> 16) & 0xffff);
4399 
4400 	if (wval == PCI_DEVICE_ID_ARECA_1201) {
4401 		uint32_t *iop_mu_regs_map0;
4402 		uint32_t *iop_mu_regs_map1;
4403 		struct CCB *freeccb;
4404 		struct HBB_msgUnit *phbbmu;
4405 
4406 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
4407 		dma_sync_size = (ARCMSR_MAX_FREECCB_NUM*
4408 		    sizeof (struct CCB) + 0x20) +
4409 		    sizeof (struct HBB_msgUnit);
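		/*
		 * Type B DMA area layout, as sized above: the CCB pool,
		 * 0x20 bytes of slack for aligning the pool to a 32-byte
		 * boundary, then the HBB message unit shared with the
		 * IOP.
		 */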
4410 
4411 
4412 		/* Allocate memory for the ccb */
4413 		if ((i = ddi_dma_alloc_handle(acb->dev_info,
4414 		    &arcmsr_ccb_attr, DDI_DMA_SLEEP, NULL,
4415 		    &acb->ccbs_pool_handle)) != DDI_SUCCESS) {
4416 			switch (i) {
4417 			case DDI_DMA_BADATTR:
4418 				cmn_err(CE_WARN,
4419 				    "arcmsr%d: ddi_dma_alloc_handle got "
4420 				    "DDI_DMA_BADATTR",
4421 				    ddi_get_instance(acb->dev_info));
4422 				return (DDI_FAILURE);
4423 
4424 			case DDI_DMA_NORESOURCES:
4425 				cmn_err(CE_WARN, "arcmsr%d: "
4426 				    "ddi_dma_alloc_handle got "
4427 				    "DDI_DMA_NORESOURCES ",
4428 				    ddi_get_instance(acb->dev_info));
4429 				return (DDI_FAILURE);
4430 			}
4431 			cmn_err(CE_WARN,
4432 			    "arcmsr%d: ddi_dma_alloc_handle got DDI_FAILURE",
4433 			    ddi_get_instance(acb->dev_info));
4434 			return (DDI_FAILURE);
4435 		}
4436 
4437 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4438 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4439 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4440 		    &allocated_length, &acb->ccbs_acc_handle)
4441 		    != DDI_SUCCESS) {
4442 			cmn_err(CE_CONT,
4443 			    "arcmsr%d: ddi_dma_mem_alloc failed ",
4444 			    ddi_get_instance(acb->dev_info));
4445 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4446 			return (DDI_FAILURE);
4447 		}
4448 
4449 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4450 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
4451 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
4452 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
4453 			cmn_err(CE_WARN,
4454 			    "arcmsr%d: ddi_dma_addr_bind_handle failed",
4455 			    ddi_get_instance(acb->dev_info));
4456 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4457 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4458 			return (DDI_FAILURE);
4459 		}
4460 		bzero(arcmsr_ccbs_area, dma_sync_size);
4461 		freeccb = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4462 		acb->pmu = (struct msgUnit *)
4463 		    &freeccb[ARCMSR_MAX_FREECCB_NUM];
4464 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4465 
4466 		/* setup device register */
4467 		if (ddi_regs_map_setup(acb->dev_info, 1,
4468 		    (caddr_t *)&iop_mu_regs_map0, 0,
4469 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
4470 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4471 			arcmsr_log(NULL, CE_WARN,
4472 			    "arcmsr%d: unable to map PCI device "
4473 			    "base0 address registers",
4474 			    ddi_get_instance(acb->dev_info));
4475 			return (DDI_FAILURE);
4476 		}
4477 
4478 		/* ARCMSR_DRV2IOP_DOORBELL */
4479 		phbbmu->hbb_doorbell =
4480 		    (struct HBB_DOORBELL *)iop_mu_regs_map0;
4481 		if (ddi_regs_map_setup(acb->dev_info, 2,
4482 		    (caddr_t *)&iop_mu_regs_map1, 0,
4483 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
4484 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
4485 			arcmsr_log(NULL, CE_WARN,
4486 			    "arcmsr%d: unable to map PCI device "
4487 			    "base1 address registers",
4488 			    ddi_get_instance(acb->dev_info));
4489 			return (DDI_FAILURE);
4490 		}
4491 
4492 		/* ARCMSR_MSGCODE_RWBUFFER */
4493 		phbbmu->hbb_rwbuffer =
4494 		    (struct HBB_RWBUFFER *)iop_mu_regs_map1;
4495 	} else {
4496 		uint32_t *iop_mu_regs_map0;
4497 
4498 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
4499 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM*
4500 		    sizeof (struct CCB) + 0x20;
4501 		if (ddi_regs_map_setup(acb->dev_info, 1,
4502 		    (caddr_t *)&iop_mu_regs_map0, 0,
4503 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
4504 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
4505 			arcmsr_log(NULL, CE_WARN,
4506 			    "arcmsr%d: unable to map registers",
4507 			    ddi_get_instance(acb->dev_info));
4508 			return (DDI_FAILURE);
4509 		}
4510 
4511 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
4512 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
4513 		    DDI_SUCCESS) {
4514 			switch (i) {
4515 			case DDI_DMA_BADATTR:
4516 				cmn_err(CE_WARN,
4517 				    "arcmsr%d: ddi_dma_alloc_handle "
4518 				    "got DDI_DMA_BADATTR",
4519 				    ddi_get_instance(acb->dev_info));
4520 				return (DDI_FAILURE);
4521 			case DDI_DMA_NORESOURCES:
4522 				cmn_err(CE_WARN, "arcmsr%d: "
4523 				    "ddi_dma_alloc_handle got "
4524 				    "DDI_DMA_NORESOURCES",
4525 				    ddi_get_instance(acb->dev_info));
4526 				return (DDI_FAILURE);
4527 			}
4528 			cmn_err(CE_WARN,
4529 			    "arcmsr%d: ddi_dma_alloc_handle failed",
4530 			    ddi_get_instance(acb->dev_info));
4531 			return (DDI_FAILURE);
4532 		}
4533 
4534 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
4535 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4536 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
4537 		    &allocated_length, &acb->ccbs_acc_handle)
4538 		    != DDI_SUCCESS) {
4539 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_mem_alloc failed",
4540 			    ddi_get_instance(acb->dev_info));
4541 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4542 			return (DDI_FAILURE);
4543 		}
4544 
4545 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
4546 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
4547 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
4548 		    &count) != DDI_DMA_MAPPED) {
4549 			cmn_err(CE_WARN, "arcmsr%d: ddi_dma_addr_bind_handle "
4550 			    "failed",
4551 			    ddi_get_instance(acb->dev_info));
4552 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
4553 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
4554 			return (DDI_FAILURE);
4555 		}
4556 		bzero(arcmsr_ccbs_area, dma_sync_size);
4557 		/* ioport base */
4558 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
4559 	}
4560 
4561 	/* from here on we cannot access pci configuration space again */
4562 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4563 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
4564 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
4565 	/* physical address of acb->pccb_pool */
4566 	ccb_phyaddr = acb->ccb_cookie.dmac_address;
4567 
4568 	if (((unsigned long)arcmsr_ccbs_area & 0x1F) != 0) {
4569 		/* ccb address must be on a 32-byte (0x20) boundary */
4570 		arcmsr_ccbs_area = (caddr_t)((unsigned long)arcmsr_ccbs_area +
4571 		    (0x20 - ((unsigned long)arcmsr_ccbs_area & 0x1F)));
4572 		ccb_phyaddr = (unsigned long)ccb_phyaddr +
4573 		    (0x20 - ((unsigned long)ccb_phyaddr & 0x1F));
4574 	}
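	/*
	 * Alignment example: had the pool started at physical address
	 * 0x10008, both addresses would be advanced by 0x20 - 0x08 =
	 * 0x18 to 0x10020, so that cdb_shifted_phyaddr (the physical
	 * address >> 5) loses no information.
	 */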
4575 
4576 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
4577 
4578 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4579 		pccb_tmp->cdb_shifted_phyaddr = ccb_phyaddr >> 5;
4580 		pccb_tmp->acb = acb;
4581 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
4582 		ccb_phyaddr = ccb_phyaddr + sizeof (struct CCB);
4583 		pccb_tmp++;
4584 	}
4585 
4586 	acb->vir2phy_offset = (unsigned long)pccb_tmp -
4587 	    (unsigned long)ccb_phyaddr;
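	/*
	 * pccb_tmp and ccb_phyaddr advanced in lockstep through the
	 * loop above, so their difference equals the pool's virtual
	 * base minus its physical base.  The completion paths add this
	 * offset to (flag_ccb << 5) to recover a virtual CCB pointer.
	 */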
4588 
4589 	/* disable all outbound interrupt */
4590 	intmask_org = arcmsr_disable_allintr(acb);
4591 
4592 	if (!arcmsr_iop_confirm(acb)) {
4593 		cmn_err(CE_WARN, "arcmsr%d: arcmsr_iop_confirm error",
4594 		    ddi_get_instance(acb->dev_info));
4595 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
4596 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
4597 		return (DDI_FAILURE);
4598 	}
4599 
4600 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
4601 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
4602 			acb->devstate[id][lun] = ARECA_RAID_GONE;
4603 		}
4604 	}
4605 
4606 	/* enable outbound Post Queue, outbound doorbell Interrupt */
4607 	arcmsr_enable_allintr(acb, intmask_org);
4608 
4609 	return (0);
4610 }
4611