xref: /illumos-gate/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c (revision 2cbbfaaaa15aa66ca065e12f2f8f6f535dd800ae)
1 /*
2  *       O.S   : Solaris
3  *  FILE NAME  : arcmsr.c
4  *       BY    : Erich Chen, C.L. Huang
5  *  Description: SCSI RAID Device Driver for
6  *               ARECA RAID Host adapter
7  *
8  *  Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
9  *  Copyright (C) 2002,2010 Erich Chen
10  *	    Web site: www.areca.com.tw
11  *	      E-mail: erich@areca.com.tw; ching2048@areca.com.tw
12  *
13  *	Redistribution and use in source and binary forms, with or without
14  *	modification, are permitted provided that the following conditions
15  *	are met:
16  *	1. Redistributions of source code must retain the above copyright
17  *	   notice, this list of conditions and the following disclaimer.
18  *	2. Redistributions in binary form must reproduce the above copyright
19  *	   notice, this list of conditions and the following disclaimer in the
20  *	   documentation and/or other materials provided with the distribution.
21  *  3. The party using or redistributing the source code and binary forms
22  *     agrees to the disclaimer below and the terms and conditions set forth
23  *     herein.
24  *
25  *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  *  SUCH DAMAGE.
36  *
37  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
38  * Use is subject to license terms.
39  *
40  */
41 /*
42  * This file and its contents are supplied under the terms of the
43  * Common Development and Distribution License ("CDDL"), version 1.0.
44  * You may only use this file in accordance with the terms of version
45  * 1.0 of the CDDL.
46  *
47  * A full copy of the text of the CDDL should have accompanied this
48  * source.  A copy of the CDDL is also available via the Internet at
49  * http://www.illumos.org/license/CDDL.
50  */
51 /*
52  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
53  * Copyright 2023 Oxide Computer Company
54  */
55 #include <sys/types.h>
56 #include <sys/ddidmareq.h>
57 #include <sys/scsi/scsi.h>
58 #include <sys/ddi.h>
59 #include <sys/sunddi.h>
60 #include <sys/file.h>
61 #include <sys/disp.h>
62 #include <sys/signal.h>
63 #include <sys/debug.h>
64 #include <sys/pci.h>
65 #include <sys/policy.h>
66 #include <sys/atomic.h>
67 #include "arcmsr.h"
68 
69 static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
70 static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
71     int mode, cred_t *credp, int *rvalp);
72 static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
73 static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
74 static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
75 static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
76 static int arcmsr_tran_reset(struct scsi_address *ap, int level);
77 static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
78 static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
79     int whom);
80 static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
81     dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
82     struct scsi_device *sd);
83 static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
84 static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
85     struct scsi_pkt *pkt);
86 static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
87     struct scsi_pkt *pkt);
88 static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
89     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
90     int tgtlen, int flags, int (*callback)(), caddr_t arg);
91 static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
92     dev_info_t **dipp);
93 
94 static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
95     dev_info_t **ldip);
96 static uint8_t arcmsr_abort_host_command(struct ACB *acb);
97 static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
98 static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
99 static int arcmsr_initialize(struct ACB *acb);
100 static int arcmsr_dma_alloc(struct ACB *acb,
101     struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
102 static int arcmsr_dma_move(struct ACB *acb,
103     struct scsi_pkt *pkt, struct buf *bp);
104 static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
105 static void arcmsr_hbc_message_isr(struct ACB *acb);
106 static void arcmsr_pcidev_disattach(struct ACB *acb);
107 static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
108 static void arcmsr_iop_init(struct ACB *acb);
109 static void arcmsr_iop_parking(struct ACB *acb);
110 /*PRINTFLIKE3*/
111 static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
112 /*PRINTFLIKE2*/
113 static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
114 static void arcmsr_mutex_init(struct ACB *acb);
115 static void arcmsr_remove_intr(struct ACB *acb);
116 static void arcmsr_ccbs_timeout(void* arg);
117 static void arcmsr_devMap_monitor(void* arg);
118 static void arcmsr_pcidev_disattach(struct ACB *acb);
119 static void arcmsr_iop_message_read(struct ACB *acb);
120 static void arcmsr_free_ccb(struct CCB *ccb);
121 static void arcmsr_post_ioctldata2iop(struct ACB *acb);
122 static void arcmsr_report_sense_info(struct CCB *ccb);
123 static void arcmsr_init_list_head(struct list_head *list);
124 static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
125 static void arcmsr_done4abort_postqueue(struct ACB *acb);
126 static void arcmsr_list_add_tail(kmutex_t *list_lock,
127     struct list_head *new_one, struct list_head *head);
128 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
129 static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
130 static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
131 static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
132 static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
133 static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
134 static uint8_t arcmsr_iop_reset(struct ACB *acb);
135 static uint32_t arcmsr_disable_allintr(struct ACB *acb);
136 static uint32_t arcmsr_iop_confirm(struct ACB *acb);
137 static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
138 static void arcmsr_flush_hba_cache(struct ACB *acb);
139 static void arcmsr_flush_hbb_cache(struct ACB *acb);
140 static void arcmsr_flush_hbc_cache(struct ACB *acb);
141 static void arcmsr_stop_hba_bgrb(struct ACB *acb);
142 static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
143 static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
144 static void arcmsr_start_hba_bgrb(struct ACB *acb);
145 static void arcmsr_start_hbb_bgrb(struct ACB *acb);
146 static void arcmsr_start_hbc_bgrb(struct ACB *acb);
147 static void arcmsr_mutex_destroy(struct ACB *acb);
148 static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
149 static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
150 static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
151 static void arcmsr_build_ccb(struct CCB *ccb);
152 static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
153     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
154 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
155 static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
156     uint8_t lun);
157 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);
158 
159 static int arcmsr_add_intr(struct ACB *, int);
160 
161 static void *arcmsr_soft_state = NULL;
162 
163 static ddi_dma_attr_t arcmsr_dma_attr = {
164 	DMA_ATTR_V0,		/* ddi_dma_attr version */
165 	0,			/* low DMA address range */
166 	0xffffffffffffffffull,	/* high DMA address range */
167 	0x00ffffff,		/* DMA counter upper bound */
168 	1,			/* DMA address alignment requirements */
169 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
170 	1,			/* minimum effective DMA size */
171 	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
172 	/*
173 	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
174 	 * list element's "address+length". The Intel IOP331 cannot use
175 	 * segments that cross the 4G boundary (a segment boundary restriction)
176 	 */
177 	0xffffffff,
178 	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
179 	1,			/* device granularity */
180 	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
181 };
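
/*
 * As a reading aid (no change in behavior): the positional initializers
 * above correspond to the ddi_dma_attr(9S) fields in declaration order:
 *
 *	dma_attr_version, dma_attr_addr_lo, dma_attr_addr_hi,
 *	dma_attr_count_max, dma_attr_align, dma_attr_burstsizes,
 *	dma_attr_minxfer, dma_attr_maxxfer, dma_attr_seg,
 *	dma_attr_sgllen, dma_attr_granular, dma_attr_flags
 */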
182 
183 
184 static ddi_dma_attr_t arcmsr_ccb_attr = {
185 	DMA_ATTR_V0,	/* ddi_dma_attr version */
186 	0,		/* low DMA address range */
187 	0xffffffff,	/* high DMA address range */
188 	0x00ffffff,	/* DMA counter upper bound */
189 	1,		/* default byte alignment */
190 	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
191 	1,		/* minimum effective DMA size */
192 	0xffffffff,	/* maximum DMA xfer size */
193 	0x00ffffff,	/* max segment size, segment boundary restrictions */
194 	1,		/* scatter/gather list count */
195 	1,		/* device granularity */
196 	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
197 };
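
/*
 * Note the contrast with arcmsr_dma_attr above: the CCB pool described by
 * arcmsr_ccb_attr must be physically contiguous (scatter/gather list count
 * of 1) and reside below 4GB (high address 0xffffffff), presumably because
 * the IOP is handed a 32-bit physical address for each CCB.
 */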
198 
199 
200 static struct cb_ops arcmsr_cb_ops = {
201 	scsi_hba_open,		/* open(9E) */
202 	scsi_hba_close,		/* close(9E) */
203 	nodev,			/* strategy(9E), returns ENXIO */
204 	nodev,			/* print(9E) */
205 	nodev,			/* dump(9E) Cannot be used as a dump device */
206 	nodev,			/* read(9E) */
207 	nodev,			/* write(9E) */
208 	arcmsr_cb_ioctl,	/* ioctl(9E) */
209 	nodev,			/* devmap(9E) */
210 	nodev,			/* mmap(9E) */
211 	nodev,			/* segmap(9E) */
212 	NULL,			/* chpoll(9E) returns ENXIO */
213 	nodev,			/* prop_op(9E) */
214 	NULL,			/* streamtab(9S) */
215 	D_MP,
216 	CB_REV,
217 	nodev,			/* aread(9E) */
218 	nodev			/* awrite(9E) */
219 };
220 
221 static struct dev_ops arcmsr_ops = {
222 	DEVO_REV,		/* devo_rev */
223 	0,			/* reference count */
224 	nodev,			/* getinfo */
225 	nulldev,		/* identify */
226 	nulldev,		/* probe */
227 	arcmsr_attach,		/* attach */
228 	arcmsr_detach,		/* detach */
229 	arcmsr_reset,		/* reset, shutdown, reboot notify */
230 	&arcmsr_cb_ops,		/* driver operations */
231 	NULL,			/* bus operations */
232 	NULL			/* power */
233 };
234 
235 static struct modldrv arcmsr_modldrv = {
236 	&mod_driverops,			/* Type of module. This is a driver. */
237 	"ARECA RAID Controller",	/* module name, from arcmsr.h */
238 	&arcmsr_ops,			/* driver ops */
239 };
240 
241 static struct modlinkage arcmsr_modlinkage = {
242 	MODREV_1,
243 	&arcmsr_modldrv,
244 	NULL
245 };
246 
247 
248 int
249 _init(void)
250 {
251 	int ret;
252 
253 	ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
254 	if (ret != 0) {
255 		return (ret);
256 	}
257 	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
258 		ddi_soft_state_fini(&arcmsr_soft_state);
259 		return (ret);
260 	}
261 
262 	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
263 		scsi_hba_fini(&arcmsr_modlinkage);
264 		if (arcmsr_soft_state != NULL) {
265 			ddi_soft_state_fini(&arcmsr_soft_state);
266 		}
267 	}
268 	return (ret);
269 }
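
/*
 * Note the unwind discipline above: the soft-state list and scsi_hba_init()
 * must both succeed before mod_install() is attempted, and a mod_install()
 * failure tears them down in reverse order.  _fini() below mirrors the same
 * teardown once mod_remove() reports that the driver may be unloaded.
 */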
270 
271 
272 int
273 _fini(void)
274 {
275 	int ret;
276 
277 	ret = mod_remove(&arcmsr_modlinkage);
278 	if (ret == 0) {
279 		/* if ret == 0, the driver can be removed */
280 		scsi_hba_fini(&arcmsr_modlinkage);
281 		if (arcmsr_soft_state != NULL) {
282 			ddi_soft_state_fini(&arcmsr_soft_state);
283 		}
284 	}
285 	return (ret);
286 }
287 
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&arcmsr_modlinkage, modinfop));
293 }
294 
295 
296 /*
297  *      Function: arcmsr_attach(9E)
298  *   Description: Set up all device state and allocate data structures,
299  *		  mutexes, condition variables, etc. for device operation.
300  *		  Set mt_attr property for driver to indicate MT-safety.
301  *		  Add interrupts needed.
302  *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
303  *        Output: Return DDI_SUCCESS if device is ready,
304  *		          else return DDI_FAILURE
305  */
306 static int
307 arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
308 {
309 	scsi_hba_tran_t *hba_trans;
310 	struct ACB *acb;
311 
312 	switch (cmd) {
313 	case DDI_ATTACH:
314 		return (arcmsr_do_ddi_attach(dev_info,
315 		    ddi_get_instance(dev_info)));
316 	case DDI_RESUME:
317 		/*
318 		 * There is no hardware state to restore and no
319 		 * timeouts to restart, since we didn't DDI_SUSPEND with
320 		 * active cmds or active timeouts. We just need to
321 		 * unblock waiting threads and restart I/O.
322 		 */
323 		hba_trans = ddi_get_driver_private(dev_info);
324 		if (hba_trans == NULL) {
325 			return (DDI_FAILURE);
326 		}
327 		acb = hba_trans->tran_hba_private;
328 		mutex_enter(&acb->acb_mutex);
329 		arcmsr_iop_init(acb);
330 
331 		/* restart ccbs "timeout" watchdog */
332 		acb->timeout_count = 0;
333 		acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
334 		    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
335 		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
336 		    (caddr_t)acb,
337 		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
338 		mutex_exit(&acb->acb_mutex);
339 		return (DDI_SUCCESS);
340 
341 	default:
342 		return (DDI_FAILURE);
343 	}
344 }
345 
346 /*
347  *    Function:	arcmsr_detach(9E)
348  * Description: Remove all device allocation and system resources, disable
349  *		        device interrupt.
350  *       Input: dev_info_t *dev_info
351  *		        ddi_detach_cmd_t cmd
352  *      Output:	Return DDI_SUCCESS if done,
353  *		        else return DDI_FAILURE
354  */
355 static int
356 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd)
357 {
358 	int instance;
359 	struct ACB *acb;
360 
361 	instance = ddi_get_instance(dev_info);
362 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
363 	if (acb == NULL)
364 		return (DDI_FAILURE);
365 
366 	switch (cmd) {
367 	case DDI_DETACH:
368 		mutex_enter(&acb->acb_mutex);
369 		if (acb->timeout_id != 0) {
370 			mutex_exit(&acb->acb_mutex);
371 			(void) untimeout(acb->timeout_id);
372 			mutex_enter(&acb->acb_mutex);
373 			acb->timeout_id = 0;
374 		}
375 		if (acb->timeout_sc_id != 0) {
376 			mutex_exit(&acb->acb_mutex);
377 			(void) untimeout(acb->timeout_sc_id);
378 			mutex_enter(&acb->acb_mutex);
379 			acb->timeout_sc_id = 0;
380 		}
381 		arcmsr_pcidev_disattach(acb);
382 		/* Remove interrupt set up by ddi_add_intr */
383 		arcmsr_remove_intr(acb);
384 		/* unbind mapping object to handle */
385 		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
386 		/* Free ccb pool memory */
387 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
388 		/* Free DMA handle */
389 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
390 		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
391 		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
392 			arcmsr_warn(acb, "Unable to detach instance cleanly "
393 			    "(should not happen)");
394 		/* free scsi_hba_transport from scsi_hba_tran_alloc */
395 		scsi_hba_tran_free(acb->scsi_hba_transport);
396 		ddi_taskq_destroy(acb->taskq);
397 		ddi_prop_remove_all(dev_info);
398 		mutex_exit(&acb->acb_mutex);
399 		arcmsr_mutex_destroy(acb);
400 		pci_config_teardown(&acb->pci_acc_handle);
401 		ddi_set_driver_private(dev_info, NULL);
402 		ddi_soft_state_free(arcmsr_soft_state, instance);
403 		return (DDI_SUCCESS);
404 	case DDI_SUSPEND:
405 		mutex_enter(&acb->acb_mutex);
406 		if (acb->timeout_id != 0) {
407 			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
408 			mutex_exit(&acb->acb_mutex);
409 			(void) untimeout(acb->timeout_id);
410 			(void) untimeout(acb->timeout_sc_id);
411 			mutex_enter(&acb->acb_mutex);
412 			acb->timeout_id = 0;
413 		}
414 
415 		if (acb->timeout_sc_id != 0) {
416 			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
417 			mutex_exit(&acb->acb_mutex);
418 			(void) untimeout(acb->timeout_sc_id);
419 			mutex_enter(&acb->acb_mutex);
420 			acb->timeout_sc_id = 0;
421 		}
422 
423 		/* disable all outbound interrupt */
424 		(void) arcmsr_disable_allintr(acb);
425 		/* stop adapter background rebuild */
426 		switch (acb->adapter_type) {
427 		case ACB_ADAPTER_TYPE_A:
428 			arcmsr_stop_hba_bgrb(acb);
429 			arcmsr_flush_hba_cache(acb);
430 			break;
431 
432 		case ACB_ADAPTER_TYPE_B:
433 			arcmsr_stop_hbb_bgrb(acb);
434 			arcmsr_flush_hbb_cache(acb);
435 			break;
436 
437 		case ACB_ADAPTER_TYPE_C:
438 			arcmsr_stop_hbc_bgrb(acb);
439 			arcmsr_flush_hbc_cache(acb);
440 			break;
441 		}
442 		mutex_exit(&acb->acb_mutex);
443 		return (DDI_SUCCESS);
444 	default:
445 		return (DDI_FAILURE);
446 	}
447 }
448 
449 static int
450 arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
451 {
452 	struct ACB *acb;
453 	scsi_hba_tran_t *scsi_hba_transport;
454 	_NOTE(ARGUNUSED(cmd));
455 
456 	scsi_hba_transport = ddi_get_driver_private(resetdev);
457 	if (scsi_hba_transport == NULL)
458 		return (DDI_FAILURE);
459 
460 	acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
461 	if (!acb)
462 		return (DDI_FAILURE);
463 
464 	arcmsr_pcidev_disattach(acb);
465 
466 	return (DDI_SUCCESS);
467 }
468 
469 static int
470 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
471     cred_t *credp, int *rvalp)
472 {
473 	struct ACB *acb;
474 	struct CMD_MESSAGE_FIELD *pktioctlfld;
475 	int retvalue = 0;
476 	int instance = MINOR2INST(getminor(dev));
477 
478 	if (instance < 0)
479 		return (ENXIO);
480 
481 	if (secpolicy_sys_config(credp, B_FALSE) != 0)
482 		return (EPERM);
483 
484 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
485 	if (acb == NULL)
486 		return (ENXIO);
487 
488 	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);
489 
490 	mutex_enter(&acb->ioctl_mutex);
491 	if (ddi_copyin((void *)arg, pktioctlfld,
492 	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
493 		retvalue = ENXIO;
494 		goto ioctl_out;
495 	}
496 
497 	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
498 		/* validity check */
499 		retvalue = ENXIO;
500 		goto ioctl_out;
501 	}
502 
503 	switch ((unsigned int)ioctl_cmd) {
504 	case ARCMSR_MESSAGE_READ_RQBUFFER:
505 	{
506 		uint8_t *ver_addr;
507 		uint8_t *pQbuffer, *ptmpQbuffer;
508 		int32_t allxfer_len = 0;
509 
510 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
511 		ptmpQbuffer = ver_addr;
512 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
513 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
514 			/* copy READ QBUFFER to srb */
515 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
516 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
517 			acb->rqbuf_firstidx++;
518 			/* if last index number set it to 0 */
519 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
520 			ptmpQbuffer++;
521 			allxfer_len++;
522 		}
523 
524 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
525 			struct QBUFFER *prbuffer;
526 			uint8_t *pQbuffer;
527 			uint8_t *iop_data;
528 			int32_t iop_len;
529 
530 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
531 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
532 			iop_data = (uint8_t *)prbuffer->data;
533 			iop_len = (int32_t)prbuffer->data_len;
534 			/*
535 			 * this IOP data cannot make us overflow
536 			 * again here, so just copy it all
537 			 */
538 			while (iop_len > 0) {
539 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
540 				(void) memcpy(pQbuffer, iop_data, 1);
541 				acb->rqbuf_lastidx++;
542 				/* if last index number set it to 0 */
543 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
544 				iop_data++;
545 				iop_len--;
546 			}
547 			/* let IOP know data has been read */
548 			arcmsr_iop_message_read(acb);
549 		}
550 		(void) memcpy(pktioctlfld->messagedatabuffer,
551 		    ver_addr, allxfer_len);
552 		pktioctlfld->cmdmessage.Length = allxfer_len;
553 		pktioctlfld->cmdmessage.ReturnCode =
554 		    ARCMSR_MESSAGE_RETURNCODE_OK;
555 
556 		if (ddi_copyout(pktioctlfld, (void *)arg,
557 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
558 			retvalue = ENXIO;
559 
560 		kmem_free(ver_addr, MSGDATABUFLEN);
561 		break;
562 	}
563 
564 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
565 	{
566 		uint8_t *ver_addr;
567 		int32_t my_empty_len, user_len;
568 		int32_t wqbuf_firstidx, wqbuf_lastidx;
569 		uint8_t *pQbuffer, *ptmpuserbuffer;
570 
571 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
572 
573 		ptmpuserbuffer = ver_addr;
574 		user_len = min(pktioctlfld->cmdmessage.Length,
575 		    MSGDATABUFLEN);
576 		(void) memcpy(ptmpuserbuffer,
577 		    pktioctlfld->messagedatabuffer, user_len);
578 		/*
579 		 * check if the data xfer length of this request would
580 		 * overflow our queue buffer
581 		 */
582 		wqbuf_lastidx = acb->wqbuf_lastidx;
583 		wqbuf_firstidx = acb->wqbuf_firstidx;
584 		if (wqbuf_lastidx != wqbuf_firstidx) {
585 			arcmsr_post_ioctldata2iop(acb);
586 			pktioctlfld->cmdmessage.ReturnCode =
587 			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
588 		} else {
589 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
590 			    & (ARCMSR_MAX_QBUFFER - 1);
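			/*
			 * Ring-buffer free-space math: this branch runs only
			 * when wqbuf_firstidx == wqbuf_lastidx, so the
			 * expression above yields
			 * (-1) & (ARCMSR_MAX_QBUFFER - 1) =
			 * ARCMSR_MAX_QBUFFER - 1 free bytes; one byte is kept
			 * in reserve so a full ring is distinguishable from
			 * an empty one.  This assumes ARCMSR_MAX_QBUFFER is a
			 * power of two.
			 */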
591 			if (my_empty_len >= user_len) {
592 				while (user_len > 0) {
593 					/* copy srb data to wqbuffer */
594 					pQbuffer =
595 					    &acb->wqbuffer[acb->wqbuf_lastidx];
596 					(void) memcpy(pQbuffer,
597 					    ptmpuserbuffer, 1);
598 					acb->wqbuf_lastidx++;
599 					/* if last index number set it to 0 */
600 					acb->wqbuf_lastidx %=
601 					    ARCMSR_MAX_QBUFFER;
602 					ptmpuserbuffer++;
603 					user_len--;
604 				}
605 				/* post first Qbuffer */
606 				if (acb->acb_flags &
607 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
608 					acb->acb_flags &=
609 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
610 					arcmsr_post_ioctldata2iop(acb);
611 				}
612 				pktioctlfld->cmdmessage.ReturnCode =
613 				    ARCMSR_MESSAGE_RETURNCODE_OK;
614 			} else {
615 				pktioctlfld->cmdmessage.ReturnCode =
616 				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
617 			}
618 		}
619 		if (ddi_copyout(pktioctlfld, (void *)arg,
620 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
621 			retvalue = ENXIO;
622 
623 		kmem_free(ver_addr, MSGDATABUFLEN);
624 		break;
625 	}
626 
627 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
628 	{
629 		uint8_t *pQbuffer = acb->rqbuffer;
630 
631 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
632 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
633 			arcmsr_iop_message_read(acb);
634 		}
635 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
636 		acb->rqbuf_firstidx = 0;
637 		acb->rqbuf_lastidx = 0;
638 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
639 		/* report success */
640 		pktioctlfld->cmdmessage.ReturnCode =
641 		    ARCMSR_MESSAGE_RETURNCODE_OK;
642 
643 		if (ddi_copyout(pktioctlfld, (void *)arg,
644 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
645 			retvalue = ENXIO;
646 		break;
647 	}
648 
649 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
650 	{
651 		uint8_t *pQbuffer = acb->wqbuffer;
652 
653 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
654 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
655 			arcmsr_iop_message_read(acb);
656 		}
657 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
658 		    ACB_F_MESSAGE_WQBUFFER_READ);
659 		acb->wqbuf_firstidx = 0;
660 		acb->wqbuf_lastidx = 0;
661 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
662 		/* report success */
663 		pktioctlfld->cmdmessage.ReturnCode =
664 		    ARCMSR_MESSAGE_RETURNCODE_OK;
665 
666 		if (ddi_copyout(pktioctlfld, (void *)arg,
667 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
668 			retvalue = ENXIO;
669 		break;
670 	}
671 
672 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
673 	{
674 		uint8_t *pQbuffer;
675 
676 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
677 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
678 			arcmsr_iop_message_read(acb);
679 		}
680 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
681 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
682 		    ACB_F_MESSAGE_WQBUFFER_READ);
683 		acb->rqbuf_firstidx = 0;
684 		acb->rqbuf_lastidx = 0;
685 		acb->wqbuf_firstidx = 0;
686 		acb->wqbuf_lastidx = 0;
687 		pQbuffer = acb->rqbuffer;
688 		bzero(pQbuffer, sizeof (struct QBUFFER));
689 		pQbuffer = acb->wqbuffer;
690 		bzero(pQbuffer, sizeof (struct QBUFFER));
691 		/* report success */
692 		pktioctlfld->cmdmessage.ReturnCode =
693 		    ARCMSR_MESSAGE_RETURNCODE_OK;
694 		if (ddi_copyout(pktioctlfld, (void *)arg,
695 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
696 			retvalue = ENXIO;
697 		break;
698 	}
699 
700 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
701 		pktioctlfld->cmdmessage.ReturnCode =
702 		    ARCMSR_MESSAGE_RETURNCODE_3F;
703 		if (ddi_copyout(pktioctlfld, (void *)arg,
704 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
705 			retvalue = ENXIO;
706 		break;
707 
708 	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
709 	case ARCMSR_MESSAGE_SAY_GOODBYE:
710 		arcmsr_iop_parking(acb);
711 		break;
712 
713 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
714 		switch (acb->adapter_type) {
715 		case ACB_ADAPTER_TYPE_A:
716 			arcmsr_flush_hba_cache(acb);
717 			break;
718 		case ACB_ADAPTER_TYPE_B:
719 			arcmsr_flush_hbb_cache(acb);
720 			break;
721 		case ACB_ADAPTER_TYPE_C:
722 			arcmsr_flush_hbc_cache(acb);
723 			break;
724 		}
725 		break;
726 
727 	default:
728 		mutex_exit(&acb->ioctl_mutex);
729 		kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
730 		return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
731 		    rvalp));
732 	}
733 
734 ioctl_out:
735 	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
736 	mutex_exit(&acb->ioctl_mutex);
737 
738 	return (retvalue);
739 }
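
/*
 * For illustration only: a minimal sketch of how a userland management tool
 * might drive the message interface above.  The device path is hypothetical,
 * and the CMD_MESSAGE_FIELD usage is inferred from this file rather than
 * from any documented interface.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	struct CMD_MESSAGE_FIELD msg;
 *	int fd = open("/devices/...:devctl", O_RDWR);
 *
 *	(void) memset(&msg, 0, sizeof (msg));
 *	(void) memcpy(msg.cmdmessage.Signature, "ARCMSR", 6);
 *	if (ioctl(fd, ARCMSR_MESSAGE_READ_RQBUFFER, &msg) == 0 &&
 *	    msg.cmdmessage.ReturnCode == ARCMSR_MESSAGE_RETURNCODE_OK) {
 *		// msg.messagedatabuffer now holds up to
 *		// msg.cmdmessage.Length bytes of IOP message data
 *	}
 */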
740 
741 
742 /*
743  *    Function:	arcmsr_tran_tgt_init
744  * Description: Called when initializing a target device instance. If
745  *		        no per-target initialization is required, the HBA
746  *		        may leave tran_tgt_init as NULL
747  *       Input:
748  *		        dev_info_t *host_dev_info,
749  *		        dev_info_t *target_dev_info,
750  *		        scsi_hba_tran_t *tran,
751  *		        struct scsi_device *sd
752  *
753  *      Return: DDI_SUCCESS if success, else return DDI_FAILURE
754  *
755  *  This entry point enables the HBA to allocate and/or initialize any
756  *  per-target resources.
757  *  It also enables the HBA to qualify the device's address as valid and
758  *  supportable for that particular HBA.
759  *  By returning DDI_FAILURE, the instance of the target driver for that
760  *  device will not be probed or attached.
761  *	This entry point is not required, and if none is supplied,
762  *  the framework will attempt to probe and attach all possible instances
763  *  of the appropriate target drivers.
764  */
765 static int
766 arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
767     scsi_hba_tran_t *tran, struct scsi_device *sd)
768 {
769 	uint16_t  target;
770 	uint8_t  lun;
771 	struct ACB *acb = tran->tran_hba_private;
772 
773 	_NOTE(ARGUNUSED(tran, target_dev_info, host_dev_info))
774 
775 	target = sd->sd_address.a_target;
776 	lun = sd->sd_address.a_lun;
777 	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
778 		return (DDI_FAILURE);
779 	}
780 
781 
782 	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
783 		/*
784 		 * If no persistent node exists, we don't allow a .conf node
785 		 * to be created.
786 		 */
787 		if (arcmsr_find_child(acb, target, lun) != NULL) {
788 			if ((ndi_merge_node(target_dev_info,
789 			    arcmsr_name_node) != DDI_SUCCESS)) {
790 				return (DDI_SUCCESS);
791 			}
792 		}
793 		return (DDI_FAILURE);
794 	}
795 
796 	return (DDI_SUCCESS);
797 }
798 
799 /*
800  *         Function: arcmsr_tran_getcap(9E)
801  *      Description: Get the capability named, and return its value.
802  *    Return Values: current value of capability, if defined
803  *		             -1 if capability is not defined
804  * ------------------------------------------------------
805  *         Common Capability Strings Array
806  * ------------------------------------------------------
807  *	#define	SCSI_CAP_DMA_MAX		0
808  *	#define	SCSI_CAP_MSG_OUT		1
809  *	#define	SCSI_CAP_DISCONNECT		2
810  *	#define	SCSI_CAP_SYNCHRONOUS		3
811  *	#define	SCSI_CAP_WIDE_XFER		4
812  *	#define	SCSI_CAP_PARITY			5
813  *	#define	SCSI_CAP_INITIATOR_ID		6
814  *	#define	SCSI_CAP_UNTAGGED_QING		7
815  *	#define	SCSI_CAP_TAGGED_QING		8
816  *	#define	SCSI_CAP_ARQ			9
817  *	#define	SCSI_CAP_LINKED_CMDS		10 a
818  *	#define	SCSI_CAP_SECTOR_SIZE		11 b
819  *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
820  *	#define	SCSI_CAP_GEOMETRY		13 d
821  *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
822  *	#define	SCSI_CAP_QFULL_RETRIES		15 f
823  *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
824  *	#define	SCSI_CAP_SCSI_VERSION		17 11
825  *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
826  *	#define	SCSI_CAP_LUN_RESET		19 13
827  */
828 static int
829 arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
830 {
831 	int capability = 0;
832 	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
833 
834 	if (cap == NULL || whom == 0) {
835 		return (DDI_FAILURE);
836 	}
837 
838 	mutex_enter(&acb->acb_mutex);
839 	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
840 		mutex_exit(&acb->acb_mutex);
841 		return (-1);
842 	}
843 	switch (scsi_hba_lookup_capstr(cap)) {
844 	case SCSI_CAP_MSG_OUT:
845 	case SCSI_CAP_DISCONNECT:
846 	case SCSI_CAP_WIDE_XFER:
847 	case SCSI_CAP_TAGGED_QING:
848 	case SCSI_CAP_UNTAGGED_QING:
849 	case SCSI_CAP_PARITY:
850 	case SCSI_CAP_ARQ:
851 		capability = 1;
852 		break;
853 	case SCSI_CAP_SECTOR_SIZE:
854 		capability = ARCMSR_DEV_SECTOR_SIZE;
855 		break;
856 	case SCSI_CAP_DMA_MAX:
857 		/* Limit to 16MB max transfer */
858 		capability = ARCMSR_MAX_XFER_LEN;
859 		break;
860 	case SCSI_CAP_INITIATOR_ID:
861 		capability = ARCMSR_SCSI_INITIATOR_ID;
862 		break;
863 	case SCSI_CAP_GEOMETRY:
864 		/* heads and sectors per track */
865 		capability = (255 << 16) | 63;
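		/*
		 * SCSA convention: heads in the upper 16 bits, sectors per
		 * track in the lower 16.  A target driver derives the
		 * cylinder count from the capacity, roughly
		 * total_sectors / (255 * 63) with the values above.
		 */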
866 		break;
867 	default:
868 		capability = -1;
869 		break;
870 	}
871 	mutex_exit(&acb->acb_mutex);
872 	return (capability);
873 }
874 
875 /*
876  *      Function: arcmsr_tran_setcap(9E)
877  *   Description: Set the specific capability.
878  * Return Values: 1 - capability exists and can be set to new value
879  *		          0 - capability could not be set to new value
880  *		         -1 - no such capability
881  */
882 static int
883 arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
884 {
885 	_NOTE(ARGUNUSED(value))
886 
887 	int supported = 0;
888 	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
889 
890 	if (cap == NULL || whom == 0) {
891 		return (-1);
892 	}
893 
894 	mutex_enter(&acb->acb_mutex);
895 	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
896 		mutex_exit(&acb->acb_mutex);
897 		return (-1);
898 	}
899 	switch (supported = scsi_hba_lookup_capstr(cap)) {
900 	case SCSI_CAP_ARQ:			/* 9 auto request sense */
901 	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
902 	case SCSI_CAP_TAGGED_QING:		/* 8 */
903 		/* these are always on, and cannot be turned off */
904 		supported = (value == 1) ? 1 : 0;
905 		break;
906 	case SCSI_CAP_TOTAL_SECTORS:		/* c */
907 		supported = 1;
908 		break;
909 	case SCSI_CAP_DISCONNECT:		/* 2 */
910 	case SCSI_CAP_WIDE_XFER:		/* 4 */
911 	case SCSI_CAP_INITIATOR_ID:		/* 6 */
912 	case SCSI_CAP_DMA_MAX:			/* 0 */
913 	case SCSI_CAP_MSG_OUT:			/* 1 */
914 	case SCSI_CAP_PARITY:			/* 5 */
915 	case SCSI_CAP_LINKED_CMDS:		/* a */
916 	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
917 	case SCSI_CAP_SECTOR_SIZE:		/* b */
918 		/* these are not settable */
919 		supported = 0;
920 		break;
921 	default:
922 		supported = -1;
923 		break;
924 	}
925 	mutex_exit(&acb->acb_mutex);
926 	return (supported);
927 }
928 
929 
930 /*
931  *      Function: arcmsr_tran_init_pkt
932  * Return Values: pointer to scsi_pkt, or NULL
933  *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
934  *                DMA resources for that pkt.
935  *                Called by kernel on behalf of a target driver
936  *		          calling scsi_init_pkt(9F).
937  *		          Refer to tran_init_pkt(9E) man page
938  *       Context: Can be called from different kernel process threads.
939  *		          Can be called by interrupt thread.
940  * Allocates SCSI packet and DMA resources
941  */
942 static struct
943 scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
944     register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
945     int tgtlen, int flags, int (*callback)(), caddr_t arg)
946 {
947 	struct CCB *ccb;
948 	struct ARCMSR_CDB *arcmsr_cdb;
949 	struct ACB *acb;
950 	int old_pkt_flag;
951 
952 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
953 
954 	if (acb->acb_flags & ACB_F_BUS_RESET) {
955 		return (NULL);
956 	}
957 	if (pkt == NULL) {
958 		/* get free CCB */
959 		(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
960 		    DDI_DMA_SYNC_FORKERNEL);
961 		ccb = arcmsr_get_freeccb(acb);
962 		if (ccb == (struct CCB *)NULL) {
963 			return (NULL);
964 		}
965 
966 		if (statuslen < sizeof (struct scsi_arq_status)) {
967 			statuslen = sizeof (struct scsi_arq_status);
968 		}
969 		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
970 		    statuslen, tgtlen, sizeof (void *), callback, arg);
971 		if (pkt == NULL) {
972 			arcmsr_warn(acb, "scsi pkt allocation failed");
973 			arcmsr_free_ccb(ccb);
974 			return (NULL);
975 		}
976 		/* Initialize CCB */
977 		ccb->pkt = pkt;
978 		ccb->pkt_dma_handle = NULL;
979 		/* record how many sg are needed to xfer on this pkt */
980 		ccb->pkt_ncookies = 0;
981 		/* record how many sg we got from this window */
982 		ccb->pkt_cookie = 0;
983 		/* record how many windows have partial dma map set */
984 		ccb->pkt_nwin = 0;
985 		/* record current sg window position */
986 		ccb->pkt_curwin	= 0;
987 		ccb->pkt_dma_len = 0;
988 		ccb->pkt_dma_offset = 0;
989 		ccb->resid_dmacookie.dmac_size = 0;
990 
991 		/*
992 		 * we keep this pointer because tran_start uses it to fake
993 		 * up data for the virtual message device (target 16)
994 		 */
995 		ccb->bp = bp;
996 
997 		/* Initialize arcmsr_cdb */
998 		arcmsr_cdb = &ccb->arcmsr_cdb;
999 		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
1000 		arcmsr_cdb->Bus = 0;
1001 		arcmsr_cdb->Function = 1;
1002 		arcmsr_cdb->LUN = ap->a_lun;
1003 		arcmsr_cdb->TargetID = ap->a_target;
1004 		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
1005 		arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;
1006 
1007 		/* Fill in the rest of the structure */
1008 		pkt->pkt_ha_private = ccb;
1009 		pkt->pkt_address = *ap;
1010 		pkt->pkt_comp = NULL;
1011 		pkt->pkt_flags = 0;
1012 		pkt->pkt_time = 0;
1013 		pkt->pkt_resid = 0;
1014 		pkt->pkt_statistics = 0;
1015 		pkt->pkt_reason = 0;
1016 		old_pkt_flag = 0;
1017 	} else {
1018 		ccb = pkt->pkt_ha_private;
1019 		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1020 			if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
1021 				return (NULL);
1022 			}
1023 		}
1024 
1025 		/*
1026 		 * you cannot update CdbLength with cmdlen here, it would
1027 		 * cause a data compare error
1028 		 */
1029 		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
1030 		old_pkt_flag = 1;
1031 	}
1032 
1033 	/* Second step : dma allocation/move */
1034 	if (bp && bp->b_bcount != 0) {
1035 		/*
1036 		 * The system may need to transfer a large chunk of data,
1037 		 * anywhere from 20 bytes to 819200 bytes.
1038 		 * arcmsr_dma_alloc sets up pkt_dma_handle (non-NULL) on
1039 		 * the first call; the transfer is then carried out by a
1040 		 * series of subsequent READ or WRITE SCSI commands until
1041 		 * the whole chunk has been transferred.
1042 		 * arcmsr_dma_move performs the move repeatedly, reusing
1043 		 * the same ccb, until the entire transfer is complete.
1044 		 * After arcmsr_tran_init_pkt returns, the kernel uses
1045 		 * pkt_resid and b_bcount to decide which type of SCSI
1046 		 * command descriptor to use and the data length of the
1047 		 * following arcmsr_tran_start scsi cdb.
1048 		 *
1049 		 * Each transfer should be aligned on a 512 byte boundary
1050 		 */
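		/*
		 * A sketch (an assumption about the caller, not code from
		 * this file) of how a target driver would consume the
		 * partial-DMA contract described above, via
		 * scsi_init_pkt(9F):
		 *
		 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
		 *	    sizeof (struct scsi_arq_status), 0,
		 *	    PKT_DMA_PARTIAL, SLEEP_FUNC, NULL);
		 *	for (;;) {
		 *		// issue the pkt and wait for completion
		 *		if (pkt->pkt_resid == 0)
		 *			break;
		 *		// reuse the pkt to move to the next window
		 *		pkt = scsi_init_pkt(ap, pkt, bp, 0, 0, 0,
		 *		    0, SLEEP_FUNC, NULL);
		 *	}
		 */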
1051 		if (ccb->pkt_dma_handle == NULL) {
1052 			if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
1053 			    DDI_FAILURE) {
1054 				/*
1055 				 * the HBA driver is unable to allocate DMA
1056 				 * resources, it must free the allocated
1057 				 * scsi_pkt(9S) before returning
1058 				 */
1059 				arcmsr_warn(acb, "dma allocation failure");
1060 				if (old_pkt_flag == 0) {
1061 					arcmsr_warn(acb, "dma "
1062 					    "allocation failed to free "
1063 					    "scsi hba pkt");
1064 					arcmsr_free_ccb(ccb);
1065 					scsi_hba_pkt_free(ap, pkt);
1066 				}
1067 				return (NULL);
1068 			}
1069 		} else {
1070 			/* DMA resources to next DMA window, for old pkt */
1071 			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
1072 				arcmsr_warn(acb, "dma move failed");
1073 				return (NULL);
1074 			}
1075 		}
1076 	} else {
1077 		pkt->pkt_resid = 0;
1078 	}
1079 	return (pkt);
1080 }
1081 
1082 /*
1083  *    Function: arcmsr_tran_start(9E)
1084  * Description: Transport the command in pktp to the target device.
1085  *		The command is not finished when this returns, only
1086  *		sent to the target; arcmsr_intr_handler will call
1087  *		scsi_hba_pkt_comp(pktp) when the target device has done.
1088  *
1089  *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
1090  *      Output:	TRAN_ACCEPT if pkt is OK and the driver is not busy
1091  *		TRAN_BUSY if the driver is busy
1092  *		TRAN_BADPKT if pkt is invalid
1093  */
1094 static int
1095 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1096 {
1097 	struct ACB *acb;
1098 	struct CCB *ccb;
1099 	int target = ap->a_target;
1100 	int lun = ap->a_lun;
1101 
1102 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1103 	ccb = pkt->pkt_ha_private;
1104 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1105 
1106 	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
1107 	    (ccb->ccb_flags & CCB_FLAG_DMACONSISTENT))
1108 		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1109 		    DDI_DMA_SYNC_FORDEV);
1110 
1111 	if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
1112 		arcmsr_build_ccb(ccb);
1113 
1114 	if (acb->acb_flags & ACB_F_BUS_RESET) {
1115 		pkt->pkt_reason = CMD_RESET;
1116 		pkt->pkt_statistics |= STAT_BUS_RESET;
1117 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1118 		    STATE_SENT_CMD | STATE_GOT_STATUS);
1119 		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1120 		    (pkt->pkt_state & STATE_XFERRED_DATA))
1121 			(void) ddi_dma_sync(ccb->pkt_dma_handle,
1122 			    0, 0, DDI_DMA_SYNC_FORCPU);
1123 
1124 		scsi_hba_pkt_comp(pkt);
1125 		return (TRAN_ACCEPT);
1126 	}
1127 
1128 	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
1129 	if (target == 16) {
1130 
1131 		struct buf *bp = ccb->bp;
1132 		uint8_t scsicmd = pkt->pkt_cdbp[0];
1133 
1134 		switch (scsicmd) {
1135 		case SCMD_INQUIRY: {
1136 			if (lun != 0) {
1137 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
1138 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1139 				arcmsr_ccb_complete(ccb, 0);
1140 				return (TRAN_ACCEPT);
1141 			}
1142 
1143 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
1144 				uint8_t inqdata[36];
1145 
1146 				/* EVPD and page codes are not supported */
1147 				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
1148 					inqdata[1] = 0xFF;
1149 					inqdata[2] = 0x00;
1150 				} else {
1151 					/* Periph Qualifier & Periph Dev Type */
1152 					inqdata[0] = DTYPE_PROCESSOR;
1153 					/* rem media bit & Dev Type Modifier */
1154 					inqdata[1] = 0;
1155 					/* ISO, ECMA, & ANSI versions */
1156 					inqdata[2] = 0;
1157 					inqdata[3] = 0;
1158 					/* length of additional data */
1159 					inqdata[4] = 31;
1160 					/* Vendor Identification */
1161 					bcopy("Areca   ", &inqdata[8], VIDLEN);
1162 					/* Product Identification */
1163 					bcopy("RAID controller ", &inqdata[16],
1164 					    PIDLEN);
1165 					/* Product Revision */
1166 					bcopy("R001", &inqdata[32], REVLEN);
1167 					if (bp->b_flags & (B_PHYS | B_PAGEIO))
1168 						bp_mapin(bp);
1169 
1170 					(void) memcpy(bp->b_un.b_addr,
1171 					    inqdata, sizeof (inqdata));
1172 				}
1173 				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1174 			}
1175 			arcmsr_ccb_complete(ccb, 0);
1176 			return (TRAN_ACCEPT);
1177 		}
1178 		case SCMD_WRITE_BUFFER:
1179 		case SCMD_READ_BUFFER: {
1180 			if (arcmsr_iop_message_xfer(acb, pkt)) {
1181 				/* error just for retry */
1182 				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
1183 				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
1184 			}
1185 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1186 			arcmsr_ccb_complete(ccb, 0);
1187 			return (TRAN_ACCEPT);
1188 		}
1189 		default:
1190 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1191 			arcmsr_ccb_complete(ccb, 0);
1192 			return (TRAN_ACCEPT);
1193 		}
1194 	}
1195 
1196 	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1197 		uint8_t block_cmd;
1198 
1199 		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
1200 		if (block_cmd == 0x08 || block_cmd == 0x0a) {
1201 			pkt->pkt_reason = CMD_TIMEOUT;
1202 			pkt->pkt_statistics |= STAT_TIMEOUT;
1203 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1204 			    STATE_SENT_CMD | STATE_GOT_STATUS);
1205 			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1206 			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1207 				(void) ddi_dma_sync(ccb->pkt_dma_handle,
1208 				    ccb->pkt_dma_offset,
1209 				    ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
1210 			}
1211 			scsi_hba_pkt_comp(pkt);
1212 			return (TRAN_ACCEPT);
1213 		}
1214 	}
1215 	mutex_enter(&acb->postq_mutex);
1216 	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
1217 		ccb->ccb_state = ARCMSR_CCB_RETRY;
1218 		mutex_exit(&acb->postq_mutex);
1219 		return (TRAN_BUSY);
1220 	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
1221 		arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
1222 		    acb->ccboutstandingcount);
1223 		mutex_exit(&acb->postq_mutex);
1224 		return (TRAN_FATAL_ERROR);
1225 	}
1226 	mutex_exit(&acb->postq_mutex);
1227 	return (TRAN_ACCEPT);
1228 }
1229 
1230 /*
1231  * Function name: arcmsr_tran_destroy_pkt
1232  * Return Values: none
1233  *   Description: Called by kernel on behalf of a target driver
1234  *	          calling scsi_destroy_pkt(9F).
1235  *	          Refer to tran_destroy_pkt(9E) man page
1236  *       Context: Can be called from different kernel process threads.
1237  *	          Can be called by interrupt thread.
1238  */
1239 static void
1240 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1241 {
1242 	struct CCB *ccb = pkt->pkt_ha_private;
1243 	ddi_dma_handle_t pkt_dma_handle;
1244 
1245 	if (ccb == NULL || ccb->pkt != pkt) {
1246 		return;
1247 	}
1248 	/* only dereference ccb after the NULL check above */
1249 	pkt_dma_handle = ccb->pkt_dma_handle;
1250 
1251 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1252 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1253 		if (pkt_dma_handle) {
1254 			(void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
1255 		}
1256 	}
1257 	if (pkt_dma_handle) {
1258 		(void) ddi_dma_free_handle(&pkt_dma_handle);
1259 	}
1260 	pkt->pkt_ha_private = NULL;
1261 	if (ccb)	{
1262 		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1263 			if (ccb->ccb_state & ARCMSR_CCB_BACK) {
1264 				arcmsr_free_ccb(ccb);
1265 			} else {
1266 				ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
1267 			}
1268 		} else {
1269 			arcmsr_free_ccb(ccb);
1270 		}
1271 	}
1272 	scsi_hba_pkt_free(ap, pkt);
1273 }
1274 
1275 /*
1276  * Function name: arcmsr_tran_dmafree()
1277  * Return Values: none
1278  *   Description: free dvma resources
1279  *       Context: Can be called from different kernel process threads.
1280  *	          Can be called by interrupt thread.
1281  */
1282 static void
1283 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1284 {
1285 	struct CCB *ccb = pkt->pkt_ha_private;
1286 
1287 	if ((ccb == NULL) || (ccb->pkt != pkt)) {
1288 		return;
1289 	}
1290 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1291 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1292 		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
1293 			arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
1294 			    "(target %d lun %d)", ap->a_target, ap->a_lun);
1295 		}
1296 		ddi_dma_free_handle(&ccb->pkt_dma_handle);
1297 		ccb->pkt_dma_handle = NULL;
1298 	}
1299 }
1300 
1301 /*
1302  * Function name: arcmsr_tran_sync_pkt()
1303  * Return Values: none
1304  *   Description: sync dma
1305  *       Context: Can be called from different kernel process threads.
1306  *		  Can be called by interrupt thread.
1307  */
1308 static void
1309 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1310 {
1311 	struct CCB *ccb;
1312 
1313 	ccb = pkt->pkt_ha_private;
1314 	if ((ccb == NULL) || (ccb->pkt != pkt)) {
1315 		return;
1316 	}
1317 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1318 		if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1319 		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1320 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1321 		    DDI_SUCCESS) {
1322 			arcmsr_warn(ccb->acb,
1323 			    "sync pkt failed for target %d lun %d",
1324 			    ap->a_target, ap->a_lun);
1325 		}
1326 	}
1327 }
1328 
1329 
1330 /*
1331  * Function: arcmsr_tran_abort(9E)
1332  *		SCSA interface routine to abort pkt(s) in progress.
1333  *		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
1334  * Output:	Return 1 if success
1335  *		Return 0 if failure
1336  */
1337 static int
1338 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
1339 {
1340 	struct ACB *acb;
1341 	int return_code;
1342 
1343 	acb = ap->a_hba_tran->tran_hba_private;
1344 
1345 	while (acb->ccboutstandingcount != 0) {
1346 		drv_usecwait(10000);
1347 	}
1348 
1349 	mutex_enter(&acb->isr_mutex);
1350 	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
1351 	mutex_exit(&acb->isr_mutex);
1352 
1353 	if (return_code != DDI_SUCCESS) {
1354 		arcmsr_warn(acb, "abort command failed for target %d lun %d",
1355 		    ap->a_target, ap->a_lun);
1356 		return (0);
1357 	}
1358 	return (1);
1359 }
1360 
1361 /*
1362  * Function: arcmsr_tran_reset(9E)
1363  *           SCSA interface routine to perform scsi resets on either
1364  *           a specified target or the bus (default).
1365  *   Output: Return 1 if success
1366  *	     Return 0 if failure
1367  */
1368 static int
1369 arcmsr_tran_reset(struct scsi_address *ap, int level)
1370 {
1371 	struct ACB *acb;
1372 	int return_code = 1;
1373 	int target = ap->a_target;
1374 	int lun = ap->a_lun;
1375 
1376 	/* Are we in the middle of dumping core? */
1377 	if (ddi_in_panic())
1378 		return (return_code);
1379 
1380 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1381 	mutex_enter(&acb->isr_mutex);
1382 	switch (level) {
1383 	case RESET_ALL:		/* 0 */
1384 		acb->num_resets++;
1385 		acb->acb_flags |= ACB_F_BUS_RESET;
1386 		if (acb->timeout_count) {
1387 			if (arcmsr_iop_reset(acb) != 0) {
1388 				arcmsr_handle_iop_bus_hold(acb);
1389 				acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1390 			}
1391 		}
1392 		acb->acb_flags &= ~ACB_F_BUS_RESET;
1393 		break;
1394 	case RESET_TARGET:	/* 1 */
1395 		if (acb->devstate[target][lun] == ARECA_RAID_GONE)
1396 			return_code = 0;
1397 		break;
1398 	case RESET_BUS:		/* 2 */
1399 		return_code = 0;
1400 		break;
1401 	case RESET_LUN:		/* 3 */
1402 		return_code = 0;
1403 		break;
1404 	default:
1405 		return_code = 0;
1406 	}
1407 	mutex_exit(&acb->isr_mutex);
1408 	return (return_code);
1409 }
1410 
1411 static int
1412 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
1413     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1414 {
1415 	struct ACB *acb;
1416 	int rval = NDI_FAILURE;
1417 	int tgt, lun;
1418 
1419 	if ((acb = ddi_get_soft_state(arcmsr_soft_state,
1420 	    ddi_get_instance(parent))) == NULL)
1421 		return (NDI_FAILURE);
1422 
1423 	ndi_devi_enter(parent);
1424 	switch (op) {
1425 	case BUS_CONFIG_ONE:
1426 		if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
1427 			rval = NDI_FAILURE;
1428 			break;
1429 		}
1430 		if (acb->device_map[tgt] & 1 << lun) {
1431 			acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
1432 			rval = arcmsr_config_lun(acb, tgt, lun, childp);
1433 		}
1434 		break;
1435 
1436 	case BUS_CONFIG_DRIVER:
1437 	case BUS_CONFIG_ALL:
1438 		for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
1439 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1440 				if (acb->device_map[tgt] & 1 << lun) {
1441 					acb->devstate[tgt][lun] =
1442 					    ARECA_RAID_GOOD;
1443 					(void) arcmsr_config_lun(acb, tgt,
1444 					    lun, NULL);
1445 				}
1446 
1447 		rval = NDI_SUCCESS;
1448 		break;
1449 	}
1450 	if (rval == NDI_SUCCESS)
1451 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
1452 	ndi_devi_exit(parent);
1453 	return (rval);
1454 }
1455 
1456 /*
1457  * Function name: arcmsr_dma_alloc
1458  * Return Values: 0 if successful, -1 if failure
1459  *   Description: allocate DMA resources
1460  *       Context: Can only be called from arcmsr_tran_init_pkt()
1461  *     register struct scsi_address	*ap = &((pkt)->pkt_address);
1462  */
1463 static int
1464 arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
1465     struct buf *bp, int flags, int (*callback)())
1466 {
1467 	struct CCB *ccb = pkt->pkt_ha_private;
1468 	int alloc_result, map_method, dma_flags;
1469 	int resid = 0;
1470 	int total_ccb_xferlen = 0;
1471 	int (*cb)(caddr_t);
1472 	uint8_t i;
1473 
1474 	/*
1475 	 * at this point the PKT SCSI CDB is empty, and dma xfer length
1476 	 * is bp->b_bcount
1477 	 */
1478 
1479 	if (bp->b_flags & B_READ) {
1480 		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
1481 		dma_flags = DDI_DMA_READ;
1482 	} else {
1483 		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
1484 		dma_flags = DDI_DMA_WRITE;
1485 	}
1486 
1487 	if (flags & PKT_CONSISTENT) {
1488 		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
1489 		dma_flags |= DDI_DMA_CONSISTENT;
1490 	}
1491 	if (flags & PKT_DMA_PARTIAL) {
1492 		dma_flags |= DDI_DMA_PARTIAL;
1493 	}
1494 
1495 	dma_flags |= DDI_DMA_REDZONE;
1496 	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1497 
1498 	alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
1499 	    cb, 0, &ccb->pkt_dma_handle);
1500 	if (alloc_result != DDI_SUCCESS) {
1501 		arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
1502 		return (DDI_FAILURE);
1503 	}
1504 
1505 	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
1506 	    bp, dma_flags, cb, 0,
1507 	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
1508 	    &ccb->pkt_ncookies);	/* number of sgl cookies */
1509 
1510 	switch (map_method) {
1511 	case DDI_DMA_PARTIAL_MAP:
1512 		/*
1513 		 * When main memory is larger than 4G,
1514 		 * DDI_DMA_PARTIAL_MAP can be returned.
1515 		 *
1516 		 * We've already set DDI_DMA_PARTIAL in dma_flags,
1517 		 * so if it's now missing, there's something screwy
1518 		 * happening. We plow on....
1519 		 */
1520 
1521 		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
1522 			arcmsr_warn(acb,
1523 			    "dma partial mapping lost ...impossible case!");
1524 		}
1525 		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
1526 		    DDI_FAILURE) {
1527 			arcmsr_warn(acb, "ddi_dma_numwin() failed");
1528 		}
1529 
1530 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1531 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1532 		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
1533 		    DDI_FAILURE) {
1534 			arcmsr_warn(acb, "ddi_dma_getwin failed");
1535 		}
1536 
1537 		i = 0;
1538 		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
1539 		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1540 		for (;;) {
1541 			i++;
1542 			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1543 			    (i == ccb->pkt_ncookies) ||
1544 			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1545 				break;
1546 			}
1547 			/*
1548 			 * next cookie will be retrieved from
1549 			 * ccb->pkt_dmacookies[i]
1550 			 */
1551 			ddi_dma_nextcookie(ccb->pkt_dma_handle,
1552 			    &ccb->pkt_dmacookies[i]);
1553 			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1554 		}
1555 		ccb->pkt_cookie = i;
1556 		ccb->arcmsr_cdb.sgcount = i;
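		/*
		 * The block below trims the transfer to a multiple of 512
		 * bytes; e.g. (illustrative numbers) 4708 mapped bytes
		 * become a 4608-byte device transfer, with the 100-byte
		 * remainder parked in resid_dmacookie to be replayed as the
		 * first cookie by arcmsr_dma_move().
		 */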
1557 		if (total_ccb_xferlen > 512) {
1558 			resid = total_ccb_xferlen % 512;
1559 			if (resid != 0) {
1560 				i--;
1561 				total_ccb_xferlen -= resid;
1562 				/* modify last sg length */
1563 				ccb->pkt_dmacookies[i].dmac_size =
1564 				    ccb->pkt_dmacookies[i].dmac_size - resid;
1565 				ccb->resid_dmacookie.dmac_size = resid;
1566 				ccb->resid_dmacookie.dmac_laddress =
1567 				    ccb->pkt_dmacookies[i].dmac_laddress +
1568 				    ccb->pkt_dmacookies[i].dmac_size;
1569 			}
1570 		}
1571 		ccb->total_dmac_size = total_ccb_xferlen;
1572 		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1573 		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1574 
1575 		return (DDI_SUCCESS);
1576 
1577 	case DDI_DMA_MAPPED:
1578 		ccb->pkt_nwin = 1; /* all mapped, so only one window */
1579 		ccb->pkt_dma_len = 0;
1580 		ccb->pkt_dma_offset = 0;
1581 		i = 0;
1582 		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
1583 		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1584 		for (;;) {
1585 			i++;
1586 			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1587 			    (i == ccb->pkt_ncookies) ||
1588 			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1589 				break;
1590 			}
1591 			/*
1592 			 * next cookie will be retrieved from
1593 			 * ccb->pkt_dmacookies[i]
1594 			 */
1595 			ddi_dma_nextcookie(ccb->pkt_dma_handle,
1596 			    &ccb->pkt_dmacookies[i]);
1597 			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1598 		}
1599 		ccb->pkt_cookie = i;
1600 		ccb->arcmsr_cdb.sgcount = i;
1601 		if (total_ccb_xferlen > 512) {
1602 			resid = total_ccb_xferlen % 512;
1603 			if (resid != 0) {
1604 				i--;
1605 				total_ccb_xferlen -= resid;
1606 				/* modify last sg length */
1607 				ccb->pkt_dmacookies[i].dmac_size =
1608 				    ccb->pkt_dmacookies[i].dmac_size - resid;
1609 				ccb->resid_dmacookie.dmac_size = resid;
1610 				ccb->resid_dmacookie.dmac_laddress =
1611 				    ccb->pkt_dmacookies[i].dmac_laddress +
1612 				    ccb->pkt_dmacookies[i].dmac_size;
1613 			}
1614 		}
1615 		ccb->total_dmac_size = total_ccb_xferlen;
1616 		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1617 		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1618 		return (DDI_SUCCESS);
1619 
1620 	case DDI_DMA_NORESOURCES:
1621 		arcmsr_warn(acb, "dma map got 'no resources'");
1622 		bioerror(bp, ENOMEM);
1623 		break;
1624 
1625 	case DDI_DMA_NOMAPPING:
1626 		arcmsr_warn(acb, "dma map got 'no mapping'");
1627 		bioerror(bp, EFAULT);
1628 		break;
1629 
1630 	case DDI_DMA_TOOBIG:
1631 		arcmsr_warn(acb, "dma map got 'too big'");
1632 		bioerror(bp, EINVAL);
1633 		break;
1634 
1635 	case DDI_DMA_INUSE:
1636 		arcmsr_warn(acb, "dma map got 'in use' "
1637 		    "(should not happen)");
1638 		break;
1639 	default:
1640 		arcmsr_warn(acb, "dma map failed (0x%x)", i);
1641 		break;
1642 	}
1643 
1644 	ddi_dma_free_handle(&ccb->pkt_dma_handle);
1645 	ccb->pkt_dma_handle = NULL;
1646 	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1647 	return (DDI_FAILURE);
1648 }
1649 
1650 
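/*
 * A note on the 512-byte trimming in both bind paths above: the
 * controller expects every transfer to be a multiple of the 512-byte
 * sector size.  As a hypothetical example, if the cookies gathered for
 * one request total 4700 bytes, the code trims 4700 % 512 = 92 bytes
 * off the last cookie and parks that tail in resid_dmacookie;
 * arcmsr_dma_move() below then replays it as the first cookie of the
 * next transfer, so no data is lost at the seam.
 */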
1651 /*
1652  * Function name: arcmsr_dma_move
1653  * Return Values: 0 if successful, -1 if failure
1654  *   Description: move DMA resources to next DMA window
1655  *       Context: Can only be called from arcmsr_tran_init_pkt()
1656  */
1657 static int
1658 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
1659 {
1660 	struct CCB *ccb = pkt->pkt_ha_private;
1661 	uint8_t i = 0;
1662 	int resid = 0;
1663 	int total_ccb_xferlen = 0;
1664 
1665 	if (ccb->resid_dmacookie.dmac_size != 0) {
1666 		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1667 		ccb->pkt_dmacookies[i].dmac_size =
1668 		    ccb->resid_dmacookie.dmac_size;
1669 		ccb->pkt_dmacookies[i].dmac_laddress =
1670 		    ccb->resid_dmacookie.dmac_laddress;
1671 		i++;
1672 		ccb->resid_dmacookie.dmac_size = 0;
1673 	}
1674 	/*
1675 	 * If there are no more cookies remaining in this window,
1676 	 * move to the next window.
1677 	 */
1678 	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1679 		/*
1680 		 * Only DDI_DMA_PARTIAL_MAP mappings arrive here.
1681 		 */
1682 		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1683 		    (ccb->pkt_nwin == 1)) {
1684 			return (DDI_SUCCESS);
1685 		}
1686 
1687 		/* At last window, cannot move */
1688 		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1689 			arcmsr_warn(acb, "dma partial set, numwin exceeded");
1690 			return (DDI_FAILURE);
1691 		}
1692 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1693 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1694 		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1695 		    DDI_FAILURE) {
1696 			arcmsr_warn(acb, "ddi_dma_getwin failed");
1697 			return (DDI_FAILURE);
1698 		}
1699 		/* reset cookie pointer */
1700 		ccb->pkt_cookie = 0;
1701 	} else {
1702 		/*
1703 		 * Only DDI_DMA_MAPPED mappings arrive here.
1704 		 * There are still more cookies remaining in this
1705 		 * window, so fetch the next one from the
1706 		 * pkt_dma_handle and record it in the
1707 		 * ccb->pkt_dmacookies array.
1708 		 */
1709 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1710 		    &ccb->pkt_dmacookies[i]);
1711 	}
1712 
1713 	/* Get remaining cookies in this window, up to our maximum */
1714 	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1715 
1716 	/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
1717 	for (;;) {
1718 		i++;
1719 		/* count of cookies handled so far in this window */
1720 		ccb->pkt_cookie++;
1721 		if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1722 		    (ccb->pkt_cookie == ccb->pkt_ncookies) ||
1723 		    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1724 			break;
1725 		}
1726 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1727 		    &ccb->pkt_dmacookies[i]);
1728 		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1729 	}
1730 
1731 	ccb->arcmsr_cdb.sgcount = i;
1732 	if (total_ccb_xferlen > 512) {
1733 		resid = total_ccb_xferlen % 512;
1734 		if (resid != 0) {
1735 			i--;
1736 			total_ccb_xferlen -= resid;
1737 			/* modify last sg length */
1738 			ccb->pkt_dmacookies[i].dmac_size =
1739 			    ccb->pkt_dmacookies[i].dmac_size - resid;
1740 			ccb->resid_dmacookie.dmac_size = resid;
1741 			ccb->resid_dmacookie.dmac_laddress =
1742 			    ccb->pkt_dmacookies[i].dmac_laddress +
1743 			    ccb->pkt_dmacookies[i].dmac_size;
1744 		}
1745 	}
1746 	ccb->total_dmac_size += total_ccb_xferlen;
1747 	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1748 
1749 	return (DDI_SUCCESS);
1750 }
1751 
1752 
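/*
 * arcmsr_build_ccb() translates the cookie list gathered above into the
 * IOP's scatter/gather format.  A cookie whose physical address fits in
 * 32 bits becomes a compact SG32ENTRY; one above 4GB needs an SG64ENTRY
 * with IS_SG64_ADDR or'd into its length.  A rough sketch of the
 * per-cookie choice:
 *
 *	if (dma_addr_hi32(cookie.dmac_laddress) == 0)
 *		emit SG32ENTRY { address_lo, length };
 *	else
 *		emit SG64ENTRY { address_hi, address_lo,
 *		    length | IS_SG64_ADDR };
 */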
1753 /*ARGSUSED*/
1754 static void
1755 arcmsr_build_ccb(struct CCB *ccb)
1756 {
1757 	struct scsi_pkt *pkt = ccb->pkt;
1758 	struct ARCMSR_CDB *arcmsr_cdb;
1759 	char *psge;
1760 	uint32_t address_lo, address_hi;
1761 	int arccdbsize = 0x30;
1762 	uint8_t sgcount;
1763 
1764 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1765 	psge = (char *)&arcmsr_cdb->sgu;
1766 
1767 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1768 	sgcount = ccb->arcmsr_cdb.sgcount;
1769 
1770 	if (sgcount != 0) {
1771 		int length, i;
1772 		int cdb_sgcount = 0;
1773 		int total_xfer_length = 0;
1774 
1775 		/* map stor port SG list to our iop SG List. */
1776 		for (i = 0; i < sgcount; i++) {
1777 			/* Get physaddr of the current data pointer */
1778 			length = ccb->pkt_dmacookies[i].dmac_size;
1779 			total_xfer_length += length;
1780 			address_lo =
1781 			    dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1782 			address_hi =
1783 			    dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1784 
1785 			if (address_hi == 0) {
1786 				struct SG32ENTRY *dma_sg;
1787 
1788 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1789 				dma_sg->address = address_lo;
1790 				dma_sg->length = length;
1791 				psge += sizeof (struct SG32ENTRY);
1792 				arccdbsize += sizeof (struct SG32ENTRY);
1793 			} else {
1794 				struct SG64ENTRY *dma_sg;
1795 
1796 				dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1797 				dma_sg->addresshigh = address_hi;
1798 				dma_sg->address = address_lo;
1799 				dma_sg->length = length | IS_SG64_ADDR;
1800 				psge += sizeof (struct SG64ENTRY);
1801 				arccdbsize += sizeof (struct SG64ENTRY);
1802 			}
1803 			cdb_sgcount++;
1804 		}
1805 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1806 		arcmsr_cdb->DataLength = total_xfer_length;
1807 		if (arccdbsize > 256) {
1808 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1809 		}
1810 	} else {
1811 		arcmsr_cdb->DataLength = 0;
1812 	}
1813 
1814 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1815 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1816 	ccb->arc_cdb_size = arccdbsize;
1817 }
1818 
1819 /*
1820  * arcmsr_post_ccb - Send a protocol-specific ARC send postcard to an AIOC.
1821  *
1822  * handle:		Handle of registered ARC protocol driver
1823  * adapter_id:		AIOC unique identifier (integer)
1824  * pPOSTCARD_SEND:	Pointer to ARC send postcard
1825  *
1826  * This routine posts an ARC send postcard to the request post FIFO of a
1827  * specific ARC adapter.
1828  */
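/*
 * For type C (SAS 6G) adapters the CDB size is folded into the low bits
 * of the value posted below: ((arc_cdb_size - 1) >> 6) | 1.  As a worked
 * example, a 0x100-byte CDB gives (0xff >> 6) = 3, which presumably
 * tells the IOP to fetch (3 + 1) * 64 = 256 bytes of CDB; sizes are
 * capped at 0x300 beforehand.
 */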
1829 static int
1830 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1831 {
1832 	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1833 	struct scsi_pkt *pkt = ccb->pkt;
1834 	struct ARCMSR_CDB *arcmsr_cdb;
1835 	uint_t pkt_flags = pkt->pkt_flags;
1836 
1837 	arcmsr_cdb = &ccb->arcmsr_cdb;
1838 
1839 	/* TODO: Use correct offset and size for syncing? */
1840 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1841 	    DDI_FAILURE)
1842 		return (DDI_FAILURE);
1843 
1844 	atomic_inc_32(&acb->ccboutstandingcount);
1845 	ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1846 
1847 	ccb->ccb_state = ARCMSR_CCB_START;
1848 	switch (acb->adapter_type) {
1849 	case ACB_ADAPTER_TYPE_A:
1850 	{
1851 		struct HBA_msgUnit *phbamu;
1852 
1853 		phbamu = (struct HBA_msgUnit *)acb->pmu;
1854 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1855 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1856 			    &phbamu->inbound_queueport,
1857 			    cdb_phyaddr_pattern |
1858 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1859 		} else {
1860 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1861 			    &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1862 		}
1863 		if (pkt_flags & FLAG_NOINTR)
1864 			arcmsr_polling_hba_ccbdone(acb, ccb);
1865 		break;
1866 	}
1867 
1868 	case ACB_ADAPTER_TYPE_B:
1869 	{
1870 		struct HBB_msgUnit *phbbmu;
1871 		int ending_index, index;
1872 
1873 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
1874 		index = phbbmu->postq_index;
1875 		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1876 		phbbmu->post_qbuffer[ending_index] = 0;
1877 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1878 			phbbmu->post_qbuffer[index] =
1879 			    (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1880 		} else {
1881 			phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1882 		}
1883 		index++;
1884 		/* if last index number set it to 0 */
1885 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
1886 		phbbmu->postq_index = index;
1887 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1888 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1889 		    ARCMSR_DRV2IOP_CDB_POSTED);
1890 
1891 		if (pkt_flags & FLAG_NOINTR)
1892 			arcmsr_polling_hbb_ccbdone(acb, ccb);
1893 		break;
1894 	}
1895 
1896 	case ACB_ADAPTER_TYPE_C:
1897 	{
1898 		struct HBC_msgUnit *phbcmu;
1899 		uint32_t ccb_post_stamp, arc_cdb_size;
1900 
1901 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
1902 		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1903 		    ccb->arc_cdb_size;
1904 		ccb_post_stamp = (cdb_phyaddr_pattern |
1905 		    ((arc_cdb_size - 1) >> 6) | 1);
1906 		if (acb->cdb_phyaddr_hi32) {
1907 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1908 			    &phbcmu->inbound_queueport_high,
1909 			    acb->cdb_phyaddr_hi32);
1910 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1911 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1912 		} else {
1913 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1914 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1915 		}
1916 		if (pkt_flags & FLAG_NOINTR)
1917 			arcmsr_polling_hbc_ccbdone(acb, ccb);
1918 		break;
1919 	}
1920 
1921 	}
1922 	return (DDI_SUCCESS);
1923 }
1924 
1925 
1926 static void
1927 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1928 {
1929 	struct ACB *acb = ccb->acb;
1930 	struct scsi_pkt *pkt = ccb->pkt;
1931 
1932 	if (pkt == NULL) {
1933 		return;
1934 	}
1935 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1936 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1937 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1938 
1939 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1940 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1941 		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1942 		    DDI_DMA_SYNC_FORCPU);
1943 	}
1944 	/*
1945 	 * TODO: This represents a potential race condition, and is
1946 	 * ultimately a poor design decision.  Revisit this code
1947 	 * and solve the mutex ownership issue correctly.
1948 	 */
1949 	if (mutex_owned(&acb->isr_mutex)) {
1950 		mutex_exit(&acb->isr_mutex);
1951 		scsi_hba_pkt_comp(pkt);
1952 		mutex_enter(&acb->isr_mutex);
1953 	} else {
1954 		scsi_hba_pkt_comp(pkt);
1955 	}
1956 	if (flag == 1) {
1957 		atomic_dec_32(&acb->ccboutstandingcount);
1958 	}
1959 }
1960 
1961 static void
1962 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1963 {
1964 	int id, lun;
1965 
1966 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1967 	id = ccb->pkt->pkt_address.a_target;
1968 	lun = ccb->pkt->pkt_address.a_lun;
1969 
1970 	if (!error) {
1971 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1972 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
1973 		}
1974 		ccb->pkt->pkt_reason = CMD_CMPLT;
1975 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1976 		arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1977 		    &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1978 
1979 	} else {
1980 		switch (ccb->arcmsr_cdb.DeviceStatus) {
1981 		case ARCMSR_DEV_SELECT_TIMEOUT:
1982 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1983 				arcmsr_warn(acb,
1984 				    "target %d lun %d selection "
1985 				    "timeout", id, lun);
1986 			}
1987 			acb->devstate[id][lun] = ARECA_RAID_GONE;
1988 			ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1989 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1990 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1991 			    &ccb->complete_queue_pointer,
1992 			    &acb->ccb_complete_list);
1993 			break;
1994 		case ARCMSR_DEV_ABORTED:
1995 		case ARCMSR_DEV_INIT_FAIL:
1996 			arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED'"
1997 			    " 'ARCMSR_DEV_INIT_FAIL'");
1998 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
1999 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2000 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2001 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2002 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2003 			    &ccb->complete_queue_pointer,
2004 			    &acb->ccb_complete_list);
2005 			break;
2006 		case SCSISTAT_CHECK_CONDITION:
2007 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2008 			arcmsr_report_sense_info(ccb);
2009 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2010 			    &ccb->complete_queue_pointer,
2011 			    &acb->ccb_complete_list);
2012 			break;
2013 		default:
2014 			arcmsr_warn(acb,
2015 			    "target %d lun %d isr received CMD_DONE"
2016 			    " with unknown DeviceStatus (0x%x)",
2017 			    id, lun, ccb->arcmsr_cdb.DeviceStatus);
2018 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2019 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2020 			/* unknown error or crc error just for retry */
2021 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2022 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2023 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2024 			    &ccb->complete_queue_pointer,
2025 			    &acb->ccb_complete_list);
2026 			break;
2027 		}
2028 	}
2029 }
2030 
2031 
2032 static void
2033 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2034 {
2035 	uint16_t	ccb_state;
2036 
2037 	if (ccb->acb != acb) {
2038 		return;
2039 	}
2040 	if (ccb->ccb_state != ARCMSR_CCB_START) {
2041 		switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2042 		case ARCMSR_CCB_TIMEOUT:
2043 			ccb_state = ccb->ccb_state;
2044 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2045 				arcmsr_free_ccb(ccb);
2046 			else
2047 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2048 			return;
2049 
2050 		case ARCMSR_CCB_ABORTED:
2051 			ccb_state = ccb->ccb_state;
2052 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2053 				arcmsr_free_ccb(ccb);
2054 			else
2055 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2056 			return;
2057 		case ARCMSR_CCB_RESET:
2058 			ccb_state = ccb->ccb_state;
2059 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2060 				arcmsr_free_ccb(ccb);
2061 			else
2062 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2063 			return;
2064 		default:
2065 			return;
2066 		}
2067 	}
2068 	arcmsr_report_ccb_state(acb, ccb, error);
2069 }
2070 
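/*
 * Fabricate an auto-request-sense payload from the firmware's sense
 * bytes.  Errors at LBAs that fit in 32 bits use the fixed sense
 * format, packing the block number into es_info_1..es_info_4; larger
 * LBAs switch to descriptor-format sense, where the 64-bit block
 * number is copied byte by byte into an information descriptor.
 */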
2071 static void
2072 arcmsr_report_sense_info(struct CCB *ccb)
2073 {
2074 	struct SENSE_DATA *cdb_sensedata;
2075 	struct scsi_pkt *pkt = ccb->pkt;
2076 	struct scsi_arq_status *arq_status;
2077 	union scsi_cdb *cdbp;
2078 	uint64_t err_blkno;
2079 
2080 	cdbp = (void *)pkt->pkt_cdbp;
2081 	err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2082 
2083 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2084 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2085 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2086 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
2087 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2088 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2089 	arq_status->sts_rqpkt_statistics = 0;
2090 	arq_status->sts_rqpkt_resid = 0;
2091 
2092 	pkt->pkt_reason = CMD_CMPLT;
2093 	/* auto rqsense took place */
2094 	pkt->pkt_state |= STATE_ARQ_DONE;
2095 
2096 	cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2097 	if (err_blkno <= 0xfffffffful) {
2098 		struct scsi_extended_sense *sts_sensedata;
2099 
2100 		sts_sensedata = &arq_status->sts_sensedata;
2101 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2102 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
2103 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2104 		sts_sensedata->es_valid = cdb_sensedata->Valid;
2105 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2106 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
2107 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2108 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2109 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2110 		sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2111 		sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2112 		sts_sensedata->es_info_3 = (err_blkno >>  8) & 0xFF;
2113 		sts_sensedata->es_info_4 = err_blkno & 0xFF;
2114 		sts_sensedata->es_add_len =
2115 		    cdb_sensedata->AdditionalSenseLength;
2116 		sts_sensedata->es_cmd_info[0] =
2117 		    cdb_sensedata->CommandSpecificInformation[0];
2118 		sts_sensedata->es_cmd_info[1] =
2119 		    cdb_sensedata->CommandSpecificInformation[1];
2120 		sts_sensedata->es_cmd_info[2] =
2121 		    cdb_sensedata->CommandSpecificInformation[2];
2122 		sts_sensedata->es_cmd_info[3] =
2123 		    cdb_sensedata->CommandSpecificInformation[3];
2124 		sts_sensedata->es_add_code =
2125 		    cdb_sensedata->AdditionalSenseCode;
2126 		sts_sensedata->es_qual_code =
2127 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2128 		sts_sensedata->es_fru_code =
2129 		    cdb_sensedata->FieldReplaceableUnitCode;
2130 	} else { /* 64-bit LBA */
2131 		struct scsi_descr_sense_hdr *dsp;
2132 		struct scsi_information_sense_descr *isd;
2133 
2134 		dsp = (struct scsi_descr_sense_hdr *)
2135 		    &arq_status->sts_sensedata;
2136 		dsp->ds_class = CLASS_EXTENDED_SENSE;
2137 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2138 		dsp->ds_key = cdb_sensedata->SenseKey;
2139 		dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2140 		dsp->ds_qual_code =
2141 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2142 		dsp->ds_addl_sense_length =
2143 		    sizeof (struct scsi_information_sense_descr);
2144 
2145 		isd = (struct scsi_information_sense_descr *)(dsp+1);
2146 		isd->isd_descr_type = DESCR_INFORMATION;
2147 		isd->isd_valid = 1;
2148 		isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2149 		isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2150 		isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2151 		isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2152 		isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2153 		isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2154 		isd->isd_information[6] = (err_blkno >>  8) & 0xFF;
2155 		isd->isd_information[7] = (err_blkno) & 0xFF;
2156 	}
2157 }
2158 
2159 
2160 static int
2161 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2162 {
2163 	struct CCB *ccb;
2164 	uint32_t intmask_org = 0;
2165 	int i = 0;
2166 
2167 	acb->num_aborts++;
2168 
2169 	if (abortpkt != NULL) {
2170 		/*
2171 		 * We don't support abort of a single packet.  All
2172 		 * callers in our kernel always do a global abort, so
2173 		 * there is no point in having code to support it
2174 		 * here.
2175 		 */
2176 		return (DDI_FAILURE);
2177 	}
2178 
2179 	/*
2180 	 * if abortpkt is NULL, the upper layer needs us
2181 	 * to abort all commands
2182 	 */
2183 	if (acb->ccboutstandingcount != 0) {
2184 		/* disable all outbound interrupt */
2185 		intmask_org = arcmsr_disable_allintr(acb);
2186 		/* clear and abort all outbound posted Q */
2187 		arcmsr_done4abort_postqueue(acb);
2188 		/* talk to iop 331 outstanding command aborted */
2189 		(void) arcmsr_abort_host_command(acb);
2190 
2191 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2192 			ccb = acb->pccb_pool[i];
2193 			if (ccb->ccb_state == ARCMSR_CCB_START) {
2194 				/*
2195 				 * this ccb will complete at
2196 				 * hwinterrupt
2197 				 */
2198 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2199 				ccb->pkt->pkt_reason = CMD_ABORTED;
2200 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
2201 				arcmsr_ccb_complete(ccb, 1);
2202 			}
2203 		}
2204 		/*
2205 		 * enable outbound Post Queue, outbound
2206 		 * doorbell Interrupt
2207 		 */
2208 		arcmsr_enable_allintr(acb, intmask_org);
2209 	}
2210 	return (DDI_SUCCESS);
2211 }
2212 
2213 
2214 /*
2215  * Autoconfiguration support
2216  */
2217 static int
2218 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2219 {
2220 	char devbuf[SCSI_MAXNAMELEN];
2221 	char *addr;
2222 	char *p,  *tp, *lp;
2223 	long num;
2224 
2225 	/* Parse dev name and address */
2226 	(void) strlcpy(devbuf, devnm, sizeof (devbuf));
2227 	addr = "";
2228 	for (p = devbuf; *p != '\0'; p++) {
2229 		if (*p == '@') {
2230 			addr = p + 1;
2231 			*p = '\0';
2232 		} else if (*p == ':') {
2233 			*p = '\0';
2234 			break;
2235 		}
2236 	}
2237 
2238 	/* Parse target and lun */
2239 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2240 		if (*p == ',') {
2241 			lp = p + 1;
2242 			*p = '\0';
2243 			break;
2244 		}
2245 	}
2246 	if ((tgt != NULL) && (tp != NULL)) {
2247 		if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2248 			return (-1);
2249 		*tgt = (int)num;
2250 	}
2251 	if ((lun != NULL) && (lp != NULL)) {
2252 		if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2253 			return (-1);
2254 		*lun = (int)num;
2255 	}
2256 	return (0);
2257 }
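/*
 * For example, a devnm of "sd@1,0:a" is split at the '@' into driver
 * name and address, the ":a" minor suffix is dropped, and the hex
 * target and lun are parsed out, yielding *tgt = 1 and *lun = 0.
 */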
2258 
2259 static int
2260 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2261 {
2262 	int tgt, lun;
2263 
2264 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2265 	    -1);
2266 	if (tgt == -1)
2267 		return (DDI_FAILURE);
2268 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2269 	    -1);
2270 	if (lun == -1)
2271 		return (DDI_FAILURE);
2272 	(void) snprintf(name, len, "%x,%x", tgt, lun);
2273 	return (DDI_SUCCESS);
2274 }
2275 
2276 static dev_info_t *
2277 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2278 {
2279 	dev_info_t *child = NULL;
2280 	char addr[SCSI_MAXNAMELEN];
2281 	char tmp[SCSI_MAXNAMELEN];
2282 
2283 	(void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
2284 
2285 	for (child = ddi_get_child(acb->dev_info);
2286 	    child;
2287 	    child = ddi_get_next_sibling(child)) {
2288 		/* We don't care about non-persistent nodes */
2289 		if (ndi_dev_is_persistent_node(child) == 0)
2290 			continue;
2291 		if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2292 		    DDI_SUCCESS)
2293 			continue;
2294 		if (strcmp(addr, tmp) == 0)
2295 			break;
2296 	}
2297 	return (child);
2298 }
2299 
2300 static int
2301 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2302 {
2303 	char *nodename = NULL;
2304 	char **compatible = NULL;
2305 	int ncompatible = 0;
2306 	dev_info_t *ldip = NULL;
2307 	int tgt = sd->sd_address.a_target;
2308 	int lun = sd->sd_address.a_lun;
2309 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2310 	int rval;
2311 
2312 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2313 	    NULL, &nodename, &compatible, &ncompatible);
2314 	if (nodename == NULL) {
2315 		arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2316 		    tgt, lun);
2317 		rval = NDI_FAILURE;
2318 		goto finish;
2319 	}
2320 	/* Create dev node */
2321 	rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2322 	if (rval == NDI_SUCCESS) {
2323 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2324 		    DDI_PROP_SUCCESS) {
2325 			arcmsr_warn(acb,
2326 			    "unable to create target property for T%dL%d",
2327 			    tgt, lun);
2328 			rval = NDI_FAILURE;
2329 			goto finish;
2330 		}
2331 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2332 		    DDI_PROP_SUCCESS) {
2333 			arcmsr_warn(acb,
2334 			    "unable to create lun property for T%dL%d",
2335 			    tgt, lun);
2336 			rval = NDI_FAILURE;
2337 			goto finish;
2338 		}
2339 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2340 		    "compatible", compatible, ncompatible) !=
2341 		    DDI_PROP_SUCCESS) {
2342 			arcmsr_warn(acb,
2343 			    "unable to create compatible property for T%dL%d",
2344 			    tgt, lun);
2345 			rval = NDI_FAILURE;
2346 			goto finish;
2347 		}
2348 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2349 		if (rval != NDI_SUCCESS) {
2350 			arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2351 			ndi_prop_remove_all(ldip);
2352 			(void) ndi_devi_free(ldip);
2353 		} else {
2354 			arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2355 		}
2356 	}
2357 finish:
2358 	if (dipp)
2359 		*dipp = ldip;
2360 
2361 	scsi_hba_nodename_compatible_free(nodename, compatible);
2362 	return (rval);
2363 }
2364 
2365 static int
2366 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2367 {
2368 	struct scsi_device sd;
2369 	dev_info_t *child;
2370 	int rval;
2371 
2372 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2373 		if (ldip) {
2374 			*ldip = child;
2375 		}
2376 		return (NDI_SUCCESS);
2377 	}
2378 	bzero(&sd, sizeof (struct scsi_device));
2379 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2380 	sd.sd_address.a_target = tgt;
2381 	sd.sd_address.a_lun = lun;
2382 
2383 	rval = scsi_hba_probe(&sd, NULL);
2384 	if (rval == SCSIPROBE_EXISTS)
2385 		rval = arcmsr_config_child(acb, &sd, ldip);
2386 	scsi_unprobe(&sd);
2387 	return (rval);
2388 }
2389 
2390 
2391 static int
2392 arcmsr_add_intr(struct ACB *acb, int intr_type)
2393 {
2394 	int	rc, count;
2395 	dev_info_t *dev_info;
2396 	const char *type_str;
2397 
2398 	switch (intr_type) {
2399 	case DDI_INTR_TYPE_MSI:
2400 		type_str = "MSI";
2401 		break;
2402 	case DDI_INTR_TYPE_MSIX:
2403 		type_str = "MSIX";
2404 		break;
2405 	case DDI_INTR_TYPE_FIXED:
2406 		type_str = "FIXED";
2407 		break;
2408 	default:
2409 		type_str = "unknown";
2410 		break;
2411 	}
2412 
2413 	dev_info = acb->dev_info;
2414 	/* Determine number of supported interrupts */
2415 	rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2416 	if ((rc != DDI_SUCCESS) || (count == 0)) {
2417 		arcmsr_warn(acb,
2418 		    "no interrupts of type %s, rc=0x%x, count=%d",
2419 		    type_str, rc, count);
2420 		return (DDI_FAILURE);
2421 	}
2422 	acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2423 	acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2424 	rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2425 	    count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2426 	if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2427 		arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2428 		    type_str, rc);
2429 		return (DDI_FAILURE);
2430 	}
2431 	if (acb->intr_count < count) {
2432 		arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2433 		    acb->intr_count, count);
2434 	}
2435 	/*
2436 	 * Get priority for first msi, assume remaining are all the same
2437 	 */
2438 	if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2439 		arcmsr_warn(acb, "ddi_intr_get_pri failed");
2440 		return (DDI_FAILURE);
2441 	}
2442 	if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2443 		arcmsr_warn(acb, "high level interrupt not supported");
2444 		return (DDI_FAILURE);
2445 	}
2446 
2447 	for (int x = 0; x < acb->intr_count; x++) {
2448 		if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2449 		    (caddr_t)acb, NULL) != DDI_SUCCESS) {
2450 			arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2451 			    type_str);
2452 			return (DDI_FAILURE);
2453 		}
2454 	}
2455 	(void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2456 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2457 		/* Call ddi_intr_block_enable() for MSI */
2458 		(void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2459 	} else {
2460 		/* Call ddi_intr_enable() for MSI non block enable */
2461 		for (int x = 0; x < acb->intr_count; x++) {
2462 			(void) ddi_intr_enable(acb->phandle[x]);
2463 		}
2464 	}
2465 	return (DDI_SUCCESS);
2466 }
2467 
2468 static void
2469 arcmsr_remove_intr(struct ACB *acb)
2470 {
2471 	int x;
2472 
2473 	if (acb->phandle == NULL)
2474 		return;
2475 
2476 	/* Disable all interrupts */
2477 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2478 		/* Call ddi_intr_block_disable() */
2479 		(void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2480 	} else {
2481 		for (x = 0; x < acb->intr_count; x++) {
2482 			(void) ddi_intr_disable(acb->phandle[x]);
2483 		}
2484 	}
2485 	/* Call ddi_intr_remove_handler() */
2486 	for (x = 0; x < acb->intr_count; x++) {
2487 		(void) ddi_intr_remove_handler(acb->phandle[x]);
2488 		(void) ddi_intr_free(acb->phandle[x]);
2489 	}
2490 	kmem_free(acb->phandle, acb->intr_size);
2491 	acb->phandle = NULL;
2492 }
2493 
2494 static void
2495 arcmsr_mutex_init(struct ACB *acb)
2496 {
2497 	mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2498 	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2499 	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2500 	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2501 	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2502 }
2503 
2504 static void
2505 arcmsr_mutex_destroy(struct ACB *acb)
2506 {
2507 	mutex_destroy(&acb->isr_mutex);
2508 	mutex_destroy(&acb->acb_mutex);
2509 	mutex_destroy(&acb->postq_mutex);
2510 	mutex_destroy(&acb->workingQ_mutex);
2511 	mutex_destroy(&acb->ioctl_mutex);
2512 }
2513 
2514 static int
2515 arcmsr_initialize(struct ACB *acb)
2516 {
2517 	struct CCB *pccb_tmp;
2518 	size_t allocated_length;
2519 	uint16_t wval;
2520 	uint_t intmask_org, count;
2521 	caddr_t	arcmsr_ccbs_area;
2522 	uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2523 	int32_t dma_sync_size;
2524 	int i, id, lun, instance;
2525 
2526 	instance = ddi_get_instance(acb->dev_info);
2527 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
2528 	wval = (uint16_t)((wlval >> 16) & 0xffff);
2529 	realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2530 	switch (wval) {
2531 	case PCI_DEVICE_ID_ARECA_1880:
2532 	case PCI_DEVICE_ID_ARECA_1882:
2533 	{
2534 		uint32_t *iop_mu_regs_map0;
2535 
2536 		acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2537 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2538 		if (ddi_regs_map_setup(acb->dev_info, 2,
2539 		    (caddr_t *)&iop_mu_regs_map0, 0,
2540 		    sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2541 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2542 			arcmsr_warn(acb, "unable to map registers");
2543 			return (DDI_FAILURE);
2544 		}
2545 
2546 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2547 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2548 		    DDI_SUCCESS) {
2549 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2550 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2551 			return (DDI_FAILURE);
2552 		}
2553 
2554 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2555 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2556 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2557 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2558 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2559 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2560 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2561 			return (DDI_FAILURE);
2562 		}
2563 
2564 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2565 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2566 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2567 		    &count) != DDI_DMA_MAPPED) {
2568 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2569 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2570 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2571 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2572 			return (DDI_FAILURE);
2573 		}
2574 		bzero(arcmsr_ccbs_area, dma_sync_size);
2575 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2576 		    - PtrToNum(arcmsr_ccbs_area));
2577 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2578 		/* ioport base */
2579 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2580 		break;
2581 	}
2582 
2583 	case PCI_DEVICE_ID_ARECA_1201:
2584 	{
2585 		uint32_t *iop_mu_regs_map0;
2586 		uint32_t *iop_mu_regs_map1;
2587 		struct HBB_msgUnit *phbbmu;
2588 
2589 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2590 		dma_sync_size =
2591 		    (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2592 		    sizeof (struct HBB_msgUnit);
2593 		/* Allocate memory for the ccb */
2594 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2595 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2596 		    DDI_SUCCESS) {
2597 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2598 			return (DDI_FAILURE);
2599 		}
2600 
2601 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2602 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2603 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2604 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2605 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2606 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2607 			return (DDI_FAILURE);
2608 		}
2609 
2610 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2611 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2612 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2613 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2614 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2615 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2616 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2617 			return (DDI_FAILURE);
2618 		}
2619 		bzero(arcmsr_ccbs_area, dma_sync_size);
2620 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2621 		    - PtrToNum(arcmsr_ccbs_area));
2622 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2623 		acb->pmu = (struct msgUnit *)
2624 		    NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2625 		    (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2626 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2627 
2628 		/* setup device register */
2629 		if (ddi_regs_map_setup(acb->dev_info, 1,
2630 		    (caddr_t *)&iop_mu_regs_map0, 0,
2631 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2632 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2633 			arcmsr_warn(acb, "unable to map base0 registers");
2634 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2635 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2636 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2637 			return (DDI_FAILURE);
2638 		}
2639 
2640 		/* ARCMSR_DRV2IOP_DOORBELL */
2641 		phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2642 		if (ddi_regs_map_setup(acb->dev_info, 2,
2643 		    (caddr_t *)&iop_mu_regs_map1, 0,
2644 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2645 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2646 			arcmsr_warn(acb, "unable to map base1 registers");
2647 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2648 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2649 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2650 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2651 			return (DDI_FAILURE);
2652 		}
2653 
2654 		/* ARCMSR_MSGCODE_RWBUFFER */
2655 		phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2656 		break;
2657 	}
2658 
2659 	case	PCI_DEVICE_ID_ARECA_1110:
2660 	case	PCI_DEVICE_ID_ARECA_1120:
2661 	case	PCI_DEVICE_ID_ARECA_1130:
2662 	case	PCI_DEVICE_ID_ARECA_1160:
2663 	case	PCI_DEVICE_ID_ARECA_1170:
2664 	case	PCI_DEVICE_ID_ARECA_1210:
2665 	case	PCI_DEVICE_ID_ARECA_1220:
2666 	case	PCI_DEVICE_ID_ARECA_1230:
2667 	case	PCI_DEVICE_ID_ARECA_1231:
2668 	case	PCI_DEVICE_ID_ARECA_1260:
2669 	case	PCI_DEVICE_ID_ARECA_1261:
2670 	case	PCI_DEVICE_ID_ARECA_1270:
2671 	case	PCI_DEVICE_ID_ARECA_1280:
2672 	case	PCI_DEVICE_ID_ARECA_1212:
2673 	case	PCI_DEVICE_ID_ARECA_1222:
2674 	case	PCI_DEVICE_ID_ARECA_1380:
2675 	case	PCI_DEVICE_ID_ARECA_1381:
2676 	case	PCI_DEVICE_ID_ARECA_1680:
2677 	case	PCI_DEVICE_ID_ARECA_1681:
2678 	{
2679 		uint32_t *iop_mu_regs_map0;
2680 
2681 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2682 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2683 		if (ddi_regs_map_setup(acb->dev_info, 1,
2684 		    (caddr_t *)&iop_mu_regs_map0, 0,
2685 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2686 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2687 			arcmsr_warn(acb, "unable to map registers");
2688 			return (DDI_FAILURE);
2689 		}
2690 
2691 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2692 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2693 		    DDI_SUCCESS) {
2694 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2695 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2696 			return (DDI_FAILURE);
2697 		}
2698 
2699 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2700 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2701 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2702 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2703 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2704 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2705 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2706 			return (DDI_FAILURE);
2707 		}
2708 
2709 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2710 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2711 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2712 		    &count) != DDI_DMA_MAPPED) {
2713 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2714 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2715 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2716 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2717 			return (DDI_FAILURE);
2718 		}
2719 		bzero(arcmsr_ccbs_area, dma_sync_size);
2720 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2721 		    - PtrToNum(arcmsr_ccbs_area));
2722 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2723 		/* ioport base */
2724 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2725 		break;
2726 	}
2727 
2728 	default:
2729 		arcmsr_warn(acb, "Unknown RAID adapter type!");
2730 		return (DDI_FAILURE);
2731 	}
2732 	arcmsr_init_list_head(&acb->ccb_complete_list);
2733 	/* from this point we cannot access PCI configuration again */
2734 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2735 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2736 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2737 	/* physical address of acb->pccb_pool */
2738 	cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2739 
2740 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2741 
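	/*
	 * Carve the pool into 32-byte-aligned CCBs.  Type A and B
	 * adapters post CCB addresses through a 32-bit queue register,
	 * so the physical address is pre-shifted right by 5 (divided
	 * by the 32-byte alignment): a CCB at physical 0x1000, for
	 * example, is stored as the pattern 0x80.  Type C keeps the
	 * full address and or's in size bits at post time.
	 */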
2742 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2743 		pccb_tmp->cdb_phyaddr_pattern =
2744 		    (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2745 		    cdb_phyaddr : (cdb_phyaddr >> 5);
2746 		pccb_tmp->acb = acb;
2747 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2748 		cdb_phyaddr = cdb_phyaddr + realccb_size;
2749 		pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2750 		    realccb_size);
2751 	}
2752 	acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2753 
2754 	/* disable all outbound interrupt */
2755 	intmask_org = arcmsr_disable_allintr(acb);
2756 
2757 	if (!arcmsr_iop_confirm(acb)) {
2758 		arcmsr_warn(acb, "arcmsr_iop_confirm error");
2759 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2760 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2761 		return (DDI_FAILURE);
2762 	}
2763 
2764 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2765 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2766 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2767 		}
2768 	}
2769 
2770 	/* enable outbound Post Queue, outbound doorbell Interrupt */
2771 	arcmsr_enable_allintr(acb, intmask_org);
2772 
2773 	return (0);
2774 }
2775 
2776 static int
2777 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2778 {
2779 	scsi_hba_tran_t *hba_trans;
2780 	ddi_device_acc_attr_t dev_acc_attr;
2781 	struct ACB *acb;
2782 	uint16_t wval;
2783 	int raid6 = 1;
2784 	char *type;
2785 	int intr_types;
2786 
2787 
2788 	/*
2789 	 * Soft State Structure
2790 	 * The driver should allocate the per-device-instance
2791 	 * soft state structure, being careful to clean up properly if
2792 	 * an error occurs.
2793 	 */
2794 	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2795 		arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2796 		return (DDI_FAILURE);
2797 	}
2798 
2799 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2800 	ASSERT(acb);
2801 
2802 	arcmsr_mutex_init(acb);
2803 
2804 	/* acb is already zalloc()d so we don't need to bzero() it */
2805 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2806 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2807 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2808 
2809 	acb->dev_info = dev_info;
2810 	acb->dev_acc_attr = dev_acc_attr;
2811 
2812 	/*
2813 	 * The driver, if providing DMA, should also check that its hardware is
2814 	 * installed in a DMA-capable slot
2815 	 */
2816 	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2817 		arcmsr_warn(acb, "hardware is not installed in"
2818 		    " a DMA-capable slot");
2819 		goto error_level_0;
2820 	}
2821 	if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2822 		arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2823 		goto error_level_0;
2824 	}
2825 
2826 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2827 	if (wval != PCI_VENDOR_ID_ARECA) {
2828 		arcmsr_warn(acb,
2829 		    "vendorid (0x%04x) does not match 0x%04x "
2830 		    "(PCI_VENDOR_ID_ARECA)",
2831 		    wval, PCI_VENDOR_ID_ARECA);
2832 		goto error_level_0;
2833 	}
2834 
2835 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2836 	switch (wval) {
2837 	case PCI_DEVICE_ID_ARECA_1110:
2838 	case PCI_DEVICE_ID_ARECA_1210:
2839 	case PCI_DEVICE_ID_ARECA_1201:
2840 		raid6 = 0;
2841 		/*FALLTHRU*/
2842 	case PCI_DEVICE_ID_ARECA_1120:
2843 	case PCI_DEVICE_ID_ARECA_1130:
2844 	case PCI_DEVICE_ID_ARECA_1160:
2845 	case PCI_DEVICE_ID_ARECA_1170:
2846 	case PCI_DEVICE_ID_ARECA_1220:
2847 	case PCI_DEVICE_ID_ARECA_1230:
2848 	case PCI_DEVICE_ID_ARECA_1260:
2849 	case PCI_DEVICE_ID_ARECA_1270:
2850 	case PCI_DEVICE_ID_ARECA_1280:
2851 		type = "SATA 3G";
2852 		break;
2853 	case PCI_DEVICE_ID_ARECA_1380:
2854 	case PCI_DEVICE_ID_ARECA_1381:
2855 	case PCI_DEVICE_ID_ARECA_1680:
2856 	case PCI_DEVICE_ID_ARECA_1681:
2857 		type = "SAS 3G";
2858 		break;
2859 	case PCI_DEVICE_ID_ARECA_1880:
2860 		type = "SAS 6G";
2861 		break;
2862 	default:
2863 		type = "X-TYPE";
2864 		arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2865 		goto error_level_0;
2866 	}
2867 
2868 	arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2869 	    type, raid6 ? " (RAID6 capable)" : "");
2870 
2871 	/* we disable iop interrupt here */
2872 	if (arcmsr_initialize(acb) == DDI_FAILURE) {
2873 		arcmsr_warn(acb, "arcmsr_initialize failed");
2874 		goto error_level_1;
2875 	}
2876 
2877 	/* Allocate a transport structure */
2878 	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2879 	if (hba_trans == NULL) {
2880 		arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2881 		goto error_level_2;
2882 	}
2883 	acb->scsi_hba_transport = hba_trans;
2884 	acb->dev_info = dev_info;
2885 	/* init scsi host adapter transport entry */
2886 	hba_trans->tran_hba_private  = acb;
2887 	hba_trans->tran_tgt_private  = NULL;
2888 	/*
2889 	 * If no per-target initialization is required, the HBA can leave
2890 	 * tran_tgt_init set to NULL.
2891 	 */
2892 	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2893 	hba_trans->tran_tgt_probe = scsi_hba_probe;
2894 	hba_trans->tran_tgt_free = NULL;
2895 	hba_trans->tran_start = arcmsr_tran_start;
2896 	hba_trans->tran_abort = arcmsr_tran_abort;
2897 	hba_trans->tran_reset = arcmsr_tran_reset;
2898 	hba_trans->tran_getcap = arcmsr_tran_getcap;
2899 	hba_trans->tran_setcap = arcmsr_tran_setcap;
2900 	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2901 	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2902 	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2903 	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2904 
2905 	hba_trans->tran_reset_notify = NULL;
2906 	hba_trans->tran_get_bus_addr = NULL;
2907 	hba_trans->tran_get_name = NULL;
2908 	hba_trans->tran_quiesce = NULL;
2909 	hba_trans->tran_unquiesce = NULL;
2910 	hba_trans->tran_bus_reset = NULL;
2911 	hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2912 	hba_trans->tran_add_eventcall = NULL;
2913 	hba_trans->tran_get_eventcookie = NULL;
2914 	hba_trans->tran_post_event = NULL;
2915 	hba_trans->tran_remove_eventcall = NULL;
2916 
2917 	/* iop init and enable interrupt here */
2918 	arcmsr_iop_init(acb);
2919 
2920 	/* Get supported interrupt types */
2921 	if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2922 	    DDI_SUCCESS) {
2923 		arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2924 		goto error_level_3;
2925 	}
2926 	if (intr_types & DDI_INTR_TYPE_FIXED) {
2927 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2928 			goto error_level_5;
2929 	} else if (intr_types & DDI_INTR_TYPE_MSI) {
2930 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2931 			goto error_level_5;
2932 	}
2933 
2934 	/*
2935 	 * The driver should attach this instance of the device, and
2936 	 * perform error cleanup if necessary
2937 	 */
2938 	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2939 	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2940 		arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2941 		goto error_level_5;
2942 	}
2943 
2944 	/* Create a taskq for dealing with dr events */
2945 	if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2946 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2947 		arcmsr_warn(acb, "ddi_taskq_create failed");
2948 		goto error_level_8;
2949 	}
2950 
2951 	acb->timeout_count = 0;
2952 	/* active ccbs "timeout" watchdog */
2953 	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2954 	    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2955 	acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2956 	    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
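	/*
	 * Both watchdogs are self-rearming timeout(9F) callbacks:
	 * arcmsr_ccbs_timeout() and arcmsr_devMap_monitor() each
	 * reschedule themselves, so the periods fixed here with
	 * drv_usectohz() (ARCMSR_TIMEOUT_WATCH and ARCMSR_DEV_MAP_WATCH
	 * seconds) become the steady-state polling intervals.
	 */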
2957 
2958 	/* report device info */
2959 	ddi_report_dev(dev_info);
2960 
2961 	return (DDI_SUCCESS);
2962 
2963 error_level_8:
2964 
2965 error_level_7:
2966 error_level_6:
2967 	(void) scsi_hba_detach(dev_info);
2968 
2969 error_level_5:
2970 	arcmsr_remove_intr(acb);
2971 
2972 error_level_3:
2973 error_level_4:
2974 	if (acb->scsi_hba_transport)
2975 		scsi_hba_tran_free(acb->scsi_hba_transport);
2976 
2977 error_level_2:
2978 	if (acb->ccbs_acc_handle)
2979 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2980 	if (acb->ccbs_pool_handle)
2981 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2982 
2983 error_level_1:
2984 	if (acb->pci_acc_handle)
2985 		pci_config_teardown(&acb->pci_acc_handle);
2986 	arcmsr_mutex_destroy(acb);
2987 	ddi_soft_state_free(arcmsr_soft_state, instance);
2988 
2989 error_level_0:
2990 	return (DDI_FAILURE);
2991 }
2992 
2993 
2994 static void
2995 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2996 {
2997 	char	buf[256];
2998 
2999 	if (acb != NULL) {
3000 		(void) snprintf(buf, sizeof (buf), "%s%d: %s",
3001 		    ddi_driver_name(acb->dev_info),
3002 		    ddi_get_instance(acb->dev_info), fmt);
3003 		fmt = buf;
3004 	}
3005 	vcmn_err(level, fmt, ap);
3006 }
3007 
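/*
 * arcmsr_vlog() prepends "driver%instance: " to the caller's format
 * before handing off to vcmn_err(), so a call such as
 * arcmsr_warn(acb, "ddi_dma_getwin failed") would come out roughly as
 * "WARNING: arcmsr0: ddi_dma_getwin failed" on the console.
 */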
3008 static void
3009 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3010 {
3011 	va_list ap;
3012 
3013 	va_start(ap, fmt);
3014 	arcmsr_vlog(acb, level, fmt, ap);
3015 	va_end(ap);
3016 }
3017 
3018 static void
3019 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3020 {
3021 	va_list ap;
3022 
3023 	va_start(ap, fmt);
3024 	arcmsr_vlog(acb, CE_WARN, fmt, ap);
3025 	va_end(ap);
3026 }
3027 
3028 static void
3029 arcmsr_init_list_head(struct list_head *list)
3030 {
3031 	list->next = list;
3032 	list->prev = list;
3033 }
3034 
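/*
 * Minimal Linux-style intrusive list primitives.  A head that points
 * at itself is the empty list, which is exactly the
 * head->next == head test arcmsr_list_get_first() performs below.
 */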
3035 static void
3036 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3037 {
3038 	next->prev = prev;
3039 	prev->next = next;
3040 }
3041 
3042 static void
3043 arcmsr_x_list_add(struct list_head *new_one,  struct list_head *prev,
3044     struct list_head *next)
3045 {
3046 	next->prev = new_one;
3047 	new_one->next = next;
3048 	new_one->prev = prev;
3049 	prev->next = new_one;
3050 }
3051 
3052 static void
3053 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3054     struct list_head *head)
3055 {
3056 	mutex_enter(list_lock);
3057 	arcmsr_x_list_add(new_one, head->prev, head);
3058 	mutex_exit(list_lock);
3059 }
3060 
3061 static struct list_head *
3062 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3063 {
3064 	struct list_head *one = NULL;
3065 
3066 	mutex_enter(list_lock);
3067 	if (head->next == head)	{
3068 		mutex_exit(list_lock);
3069 		return (NULL);
3070 	}
3071 	one = head->next;
3072 	arcmsr_x_list_del(one->prev, one->next);
3073 	arcmsr_init_list_head(one);
3074 	mutex_exit(list_lock);
3075 	return (one);
3076 }
3077 
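/*
 * Recover the CCB embedding a given list node with the usual
 * intrusive-list "container of" arithmetic: subtracting
 * offsetof(struct CCB, complete_queue_pointer) from the node's
 * address yields the enclosing CCB.
 */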
3078 static struct CCB *
3079 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3080 {
3081 	struct list_head *first_complete_ccb_list = NULL;
3082 	struct CCB *ccb;
3083 
3084 	first_complete_ccb_list =
3085 	    arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3086 	    &acb->ccb_complete_list);
3087 	if (first_complete_ccb_list == NULL) {
3088 		return (NULL);
3089 	}
3090 	ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3091 	    offsetof(struct CCB, complete_queue_pointer));
3092 	return (ccb);
3093 }
3094 
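/*
 * The free-CCB pool is a fixed ring: ccb_get_index chases
 * ccb_put_index around ccbworkingQ[].  The slot where the indices
 * would meet is never handed out, so put == get unambiguously means
 * "empty" and at most ARCMSR_MAX_FREECCB_NUM - 1 CCBs can be
 * outstanding at once.
 */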
3095 static struct CCB *
3096 arcmsr_get_freeccb(struct ACB *acb)
3097 {
3098 	struct CCB *ccb;
3099 	int ccb_get_index, ccb_put_index;
3100 
3101 	mutex_enter(&acb->workingQ_mutex);
3102 	ccb_put_index = acb->ccb_put_index;
3103 	ccb_get_index = acb->ccb_get_index;
3104 	ccb = acb->ccbworkingQ[ccb_get_index];
3105 	ccb_get_index++;
3106 	if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3107 		ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3108 	if (ccb_put_index != ccb_get_index) {
3109 		acb->ccb_get_index = ccb_get_index;
3110 		arcmsr_init_list_head(&ccb->complete_queue_pointer);
3111 		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3112 	} else {
3113 		ccb = NULL;
3114 	}
3115 	mutex_exit(&acb->workingQ_mutex);
3116 	return (ccb);
3117 }
3118 
3119 
3120 static void
3121 arcmsr_free_ccb(struct CCB *ccb)
3122 {
3123 	struct ACB *acb = ccb->acb;
3124 
3125 	if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3126 		return;
3127 	}
3128 	mutex_enter(&acb->workingQ_mutex);
3129 	ccb->ccb_state = ARCMSR_CCB_FREE;
3130 	ccb->pkt = NULL;
3131 	ccb->pkt_dma_handle = NULL;
3132 	ccb->ccb_flags = 0;
3133 	acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3134 	acb->ccb_put_index++;
3135 	if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3136 		acb->ccb_put_index =
3137 		    acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3138 	mutex_exit(&acb->workingQ_mutex);
3139 }
3140 
3141 
3142 static void
3143 arcmsr_ccbs_timeout(void* arg)
3144 {
3145 	struct ACB *acb = (struct ACB *)arg;
3146 	struct CCB *ccb;
3147 	int i, timeout_count = 0;
3148 	uint32_t intmask_org;
3149 	time_t current_time = ddi_get_time();
3150 
3151 	intmask_org = arcmsr_disable_allintr(acb);
3152 	mutex_enter(&acb->isr_mutex);
3153 	if (acb->ccboutstandingcount != 0) {
3154 		/* check each ccb */
3155 		i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3156 		    DDI_DMA_SYNC_FORKERNEL);
3157 		if (i != DDI_SUCCESS) {
3158 			if ((acb->timeout_id != 0) &&
3159 			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3160 				/* do pkt timeout check each 60 secs */
3161 				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3162 				    (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3163 				    drv_usectohz(1000000)));
3164 			}
3165 			mutex_exit(&acb->isr_mutex);
3166 			arcmsr_enable_allintr(acb, intmask_org);
3167 			return;
3168 		}
3170 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3171 			ccb = acb->pccb_pool[i];
3172 			if (ccb->acb != acb) {
3173 				break;
3174 			}
3175 			if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3176 				continue;
3177 			}
3178 			if (ccb->pkt == NULL) {
3179 				continue;
3180 			}
3181 			if (ccb->pkt->pkt_time == 0) {
3182 				continue;
3183 			}
3184 			if (ccb->ccb_time >= current_time) {
3185 				continue;
3186 			}
3187 			int id = ccb->pkt->pkt_address.a_target;
3188 			int lun = ccb->pkt->pkt_address.a_lun;
3189 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3190 				uint8_t	*cdb = (uint8_t	*)&ccb->arcmsr_cdb.Cdb;
3191 
3192 				timeout_count++;
3193 				arcmsr_warn(acb,
3194 				    "scsi target %d lun %d cmd=0x%x "
3195 				    "command timeout, ccb=0x%p",
3196 				    id, lun, *cdb, (void *)ccb);
3197 				ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3198 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3199 				ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3200 				/* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3201 				arcmsr_ccb_complete(ccb, 1);
3202 				continue;
3203 			} else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3204 			    ARCMSR_CCB_CAN_BE_FREE) {
3205 				arcmsr_free_ccb(ccb);
3206 			}
3207 		}
3208 	}
3209 	if ((acb->timeout_id != 0) &&
3210 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3211 		/* do pkt timeout check each 60 secs */
3212 		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3213 		    (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3214 	}
3215 	mutex_exit(&acb->isr_mutex);
3216 	arcmsr_enable_allintr(acb, intmask_org);
3217 }
3218 
3219 static void
3220 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3221 {
3222 	struct CCB *ccb;
3223 	uint32_t intmask_org;
3224 	int i;
3225 
3226 	/* disable all outbound interrupts */
3227 	intmask_org = arcmsr_disable_allintr(acb);
3228 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3229 		ccb = acb->pccb_pool[i];
3230 		if (ccb->ccb_state == ARCMSR_CCB_START) {
3231 			if ((target == ccb->pkt->pkt_address.a_target) &&
3232 			    (lun == ccb->pkt->pkt_address.a_lun)) {
3233 				ccb->ccb_state = ARCMSR_CCB_ABORTED;
3234 				ccb->pkt->pkt_reason = CMD_ABORTED;
3235 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3236 				arcmsr_ccb_complete(ccb, 1);
3237 				arcmsr_log(acb, CE_NOTE,
3238 				    "abort T%dL%d ccb", target, lun);
3239 			}
3240 		}
3241 	}
3242 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3243 	arcmsr_enable_allintr(acb, intmask_org);
3244 }
3245 
3246 static int
3247 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3248 {
3249 	struct scsi_device sd;
3250 	dev_info_t *child;
3251 	int rval;
3252 
3253 	bzero(&sd, sizeof (struct scsi_device));
3254 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3255 	sd.sd_address.a_target = (uint16_t)tgt;
3256 	sd.sd_address.a_lun = (uint8_t)lun;
3257 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3258 		rval = scsi_hba_probe(&sd, NULL);
3259 		if (rval == SCSIPROBE_EXISTS) {
3260 			rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3261 			if (rval != NDI_SUCCESS) {
3262 				arcmsr_warn(acb, "unable to online T%dL%d",
3263 				    tgt, lun);
3264 			} else {
3265 				arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3266 				    tgt, lun);
3267 			}
3268 		}
3269 	} else {
3270 		rval = scsi_hba_probe(&sd, NULL);
3271 		if (rval == SCSIPROBE_EXISTS)
3272 			rval = arcmsr_config_child(acb, &sd, NULL);
3273 	}
3274 	scsi_unprobe(&sd);
3275 	return (rval);
3276 }
3277 
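/*
 * arcmsr_dr_handle: dynamic reconfiguration handler.  The firmware keeps a
 * per-target device map at offset 21 of msgcode_rwbuffer; each byte is a
 * LUN bitmask for one target.  XORing each byte against the cached copy in
 * acb->device_map finds LUNs that have appeared or vanished, which are then
 * onlined or offlined through the NDI framework.
 */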
3278 static void
3279 arcmsr_dr_handle(struct ACB *acb)
3280 {
3281 	char *acb_dev_map = (char *)acb->device_map;
3282 	char *devicemap;
3283 	char temp;
3284 	uint16_t target;
3285 	uint8_t lun;
3286 	char diff;
3287 	dev_info_t *dip;
3288 	ddi_acc_handle_t reg;
3289 
3290 	switch (acb->adapter_type) {
3291 	case ACB_ADAPTER_TYPE_A:
3292 	{
3293 		struct HBA_msgUnit *phbamu;
3294 
3295 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3296 		devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3297 		reg = acb->reg_mu_acc_handle0;
3298 		break;
3299 	}
3300 
3301 	case ACB_ADAPTER_TYPE_B:
3302 	{
3303 		struct HBB_msgUnit *phbbmu;
3304 
3305 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3306 		devicemap = (char *)
3307 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3308 		reg = acb->reg_mu_acc_handle1;
3309 		break;
3310 	}
3311 
3312 	case ACB_ADAPTER_TYPE_C:
3313 	{
3314 		struct HBC_msgUnit *phbcmu;
3315 
3316 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3317 		devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3318 		reg = acb->reg_mu_acc_handle0;
3319 		break;
3320 	}
3321 
3322 	}
3323 
3324 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3325 		temp = CHIP_REG_READ8(reg, devicemap);
3326 		diff = (*acb_dev_map) ^ temp;
3327 		if (diff != 0) {
3328 			*acb_dev_map = temp;
3329 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3330 				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3331 					ndi_devi_enter(acb->dev_info);
3332 					acb->devstate[target][lun] =
3333 					    ARECA_RAID_GOOD;
3334 					(void) arcmsr_scsi_device_probe(acb,
3335 					    target, lun);
3336 					ndi_devi_exit(acb->dev_info);
3337 					arcmsr_log(acb, CE_NOTE,
3338 					    "T%dL%d on-line", target, lun);
3339 				} else if ((temp & 0x01) == 0 &&
3340 				    (diff & 0x01) == 1) {
3341 					dip = arcmsr_find_child(acb, target,
3342 					    lun);
3343 					if (dip != NULL) {
3344 						acb->devstate[target][lun] =
3345 						    ARECA_RAID_GONE;
3346 						if (mutex_owned(&acb->
3347 						    isr_mutex)) {
3348 							arcmsr_abort_dr_ccbs(
3349 							    acb, target, lun);
3350 							(void)
3351 							    ndi_devi_offline(
3352 							    dip,
3353 							    NDI_DEVI_REMOVE |
3354 							    NDI_DEVI_OFFLINE);
3355 						} else {
3356 							mutex_enter(&acb->
3357 							    isr_mutex);
3358 							arcmsr_abort_dr_ccbs(
3359 							    acb, target, lun);
3360 							(void)
3361 							    ndi_devi_offline(
3362 							    dip,
3363 							    NDI_DEVI_REMOVE |
3364 							    NDI_DEVI_OFFLINE);
3365 							mutex_exit(&acb->
3366 							    isr_mutex);
3367 						}
3368 					}
3369 					arcmsr_log(acb, CE_NOTE,
3370 					    "T%dL%d off-line", target, lun);
3371 				}
3372 				temp >>= 1;
3373 				diff >>= 1;
3374 			}
3375 		}
3376 		devicemap++;
3377 		acb_dev_map++;
3378 	}
3379 }
3380 
3381 
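/*
 * arcmsr_devMap_monitor: periodic timeout(9F) callback that posts a
 * GET_CONFIG message so the IOP republishes its device map, then
 * reschedules itself every ARCMSR_DEV_MAP_WATCH seconds unless the
 * adapter is being stopped.
 */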
3382 static void
3383 arcmsr_devMap_monitor(void* arg)
3384 {
3385 
3386 	struct ACB *acb = (struct ACB *)arg;
3387 	switch (acb->adapter_type) {
3388 	case ACB_ADAPTER_TYPE_A:
3389 	{
3390 		struct HBA_msgUnit *phbamu;
3391 
3392 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3393 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3394 		    &phbamu->inbound_msgaddr0,
3395 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3396 		break;
3397 	}
3398 
3399 	case ACB_ADAPTER_TYPE_B:
3400 	{
3401 		struct HBB_msgUnit *phbbmu;
3402 
3403 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3404 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3405 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3406 		    ARCMSR_MESSAGE_GET_CONFIG);
3407 		break;
3408 	}
3409 
3410 	case ACB_ADAPTER_TYPE_C:
3411 	{
3412 		struct HBC_msgUnit *phbcmu;
3413 
3414 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3415 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3416 		    &phbcmu->inbound_msgaddr0,
3417 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3418 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3419 		    &phbcmu->inbound_doorbell,
3420 		    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3421 		break;
3422 	}
3423 
3424 	}
3425 
3426 	if ((acb->timeout_id != 0) &&
3427 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3428 		/* check for device map changes each 5 secs */
3429 		acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3430 		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3431 	}
3432 }
3433 
3434 
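/*
 * arcmsr_disable_allintr: mask every outbound interrupt source and return
 * the previous mask so the caller can restore it later with
 * arcmsr_enable_allintr().  Note the polarity difference: the type A/C
 * mask registers disable with 1 bits, while the type B doorbell mask
 * enables with 1 bits and is therefore written to 0 here.
 */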
3435 static uint32_t
3436 arcmsr_disable_allintr(struct ACB *acb)
3437 {
3438 	uint32_t intmask_org;
3439 
3440 	switch (acb->adapter_type) {
3441 	case ACB_ADAPTER_TYPE_A:
3442 	{
3443 		struct HBA_msgUnit *phbamu;
3444 
3445 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3446 		/* disable all outbound interrupt */
3447 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3448 		    &phbamu->outbound_intmask);
3449 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3450 		    &phbamu->outbound_intmask,
3451 		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3452 		break;
3453 	}
3454 
3455 	case ACB_ADAPTER_TYPE_B:
3456 	{
3457 		struct HBB_msgUnit *phbbmu;
3458 
3459 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3460 		/* disable all outbound interrupt */
3461 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3462 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3463 		/* disable all interrupts */
3464 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3465 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3466 		break;
3467 	}
3468 
3469 	case ACB_ADAPTER_TYPE_C:
3470 	{
3471 		struct HBC_msgUnit *phbcmu;
3472 
3473 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3474 		/* disable all outbound interrupt */
3475 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3476 		    &phbcmu->host_int_mask); /* disable outbound message0 int */
3477 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3478 		    &phbcmu->host_int_mask,
3479 		    intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3480 		break;
3481 	}
3482 
3483 	}
3484 	return (intmask_org);
3485 }
3486 
3487 
3488 static void
3489 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3490 {
3491 	int mask;
3492 
3493 	switch (acb->adapter_type) {
3494 	case ACB_ADAPTER_TYPE_A:
3495 	{
3496 		struct HBA_msgUnit *phbamu;
3497 
3498 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3499 		/*
3500 		 * enable outbound Post Queue, outbound doorbell message0
3501 		 * Interrupt
3502 		 */
3503 		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3504 		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3505 		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3506 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3507 		    &phbamu->outbound_intmask, intmask_org & mask);
3508 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3509 		break;
3510 	}
3511 
3512 	case ACB_ADAPTER_TYPE_B:
3513 	{
3514 		struct HBB_msgUnit *phbbmu;
3515 
3516 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3517 		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3518 		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3519 		    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3520 		/* 1=interrupt enable, 0=interrupt disable */
3521 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3522 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3523 		    intmask_org | mask);
3524 		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3525 		break;
3526 	}
3527 
3528 	case ACB_ADAPTER_TYPE_C:
3529 	{
3530 		struct HBC_msgUnit *phbcmu;
3531 
3532 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3533 		/* enable outbound Post Queue,outbound doorbell Interrupt */
3534 		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3535 		    ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3536 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3537 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3538 		    &phbcmu->host_int_mask, intmask_org & mask);
3539 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3540 		break;
3541 	}
3542 
3543 	}
3544 }
3545 
3546 
3547 static void
3548 arcmsr_iop_parking(struct ACB *acb)
3549 {
3550 	/* stop adapter background rebuild */
3551 	if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3552 		uint32_t intmask_org;
3553 
3554 		acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3555 		/* disable all outbound interrupt */
3556 		intmask_org = arcmsr_disable_allintr(acb);
3557 		switch (acb->adapter_type) {
3558 		case ACB_ADAPTER_TYPE_A:
3559 			arcmsr_stop_hba_bgrb(acb);
3560 			arcmsr_flush_hba_cache(acb);
3561 			break;
3562 
3563 		case ACB_ADAPTER_TYPE_B:
3564 			arcmsr_stop_hbb_bgrb(acb);
3565 			arcmsr_flush_hbb_cache(acb);
3566 			break;
3567 
3568 		case ACB_ADAPTER_TYPE_C:
3569 			arcmsr_stop_hbc_bgrb(acb);
3570 			arcmsr_flush_hbc_cache(acb);
3571 			break;
3572 		}
3573 		/*
3574 		 * enable outbound Post Queue
3575 		 * enable outbound doorbell Interrupt
3576 		 */
3577 		arcmsr_enable_allintr(acb, intmask_org);
3578 	}
3579 }
3580 
3581 
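/*
 * The three wait_msgint_ready routines below poll for a message-complete
 * interrupt: 100 polls of 10 ms give the inner loop a one second budget,
 * and up to 20 retries stretch that to roughly 20 seconds.  During a
 * panic they clear the interrupt and return TRUE immediately so the
 * crash dump path cannot stall here.
 */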
3582 static uint8_t
3583 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3584 {
3585 	uint32_t i;
3586 	uint8_t retries = 0x00;
3587 	struct HBA_msgUnit *phbamu;
3588 
3589 
3590 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3591 
3592 	do {
3593 		for (i = 0; i < 100; i++) {
3594 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3595 			    &phbamu->outbound_intstatus) &
3596 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3597 				/* clear interrupt */
3598 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3599 				    &phbamu->outbound_intstatus,
3600 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3601 				return (TRUE);
3602 			}
3603 			drv_usecwait(10000);
3604 			if (ddi_in_panic()) {
3605 				/* clear interrupts */
3606 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3607 				    &phbamu->outbound_intstatus,
3608 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3609 				return (TRUE);
3610 			}
3611 		} /* max 1 second */
3612 	} while (retries++ < 20); /* max 20 seconds */
3613 	return (FALSE);
3614 }
3615 
3616 
3617 static uint8_t
3618 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3619 {
3620 	struct HBB_msgUnit *phbbmu;
3621 	uint32_t i;
3622 	uint8_t retries = 0x00;
3623 
3624 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3625 
3626 	do {
3627 		for (i = 0; i < 100; i++) {
3628 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3629 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3630 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3631 				/* clear interrupt */
3632 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3633 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3634 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3635 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3636 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3637 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3638 				return (TRUE);
3639 			}
3640 			drv_usecwait(10000);
3641 			if (ddi_in_panic()) {
3642 				/* clear interrupts */
3643 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3644 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3645 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3646 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3647 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3648 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3649 				return (TRUE);
3650 			}
3651 		} /* max 1 second */
3652 	} while (retries++ < 20); /* max 20 seconds */
3653 
3654 	return (FALSE);
3655 }
3656 
3657 
3658 static uint8_t
3659 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3660 {
3661 	uint32_t i;
3662 	uint8_t retries = 0x00;
3663 	struct HBC_msgUnit *phbcmu;
3664 	uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3665 
3666 
3667 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3668 
3669 	do {
3670 		for (i = 0; i < 100; i++) {
3671 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3672 			    &phbcmu->outbound_doorbell) &
3673 			    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3674 				/* clear interrupt */
3675 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3676 				    &phbcmu->outbound_doorbell_clear, c);
3677 				return (TRUE);
3678 			}
3679 			drv_usecwait(10000);
3680 			if (ddi_in_panic()) {
3681 				/* clear interrupts */
3682 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3683 				    &phbcmu->outbound_doorbell_clear, c);
3684 				return (TRUE);
3685 			}
3686 		} /* max 1 second */
3687 	} while (retries++ < 20); /* max 20 seconds */
3688 	return (FALSE);
3689 }
3690 
3691 
3692 static void
3693 arcmsr_flush_hba_cache(struct ACB *acb)
3694 {
3695 	struct HBA_msgUnit *phbamu;
3696 	int retry_count = 30;
3697 
3698 	/* enlarge wait flush adapter cache time: 10 minutes */
3699 
3700 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3701 
3702 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3703 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3704 	do {
3705 		if (arcmsr_hba_wait_msgint_ready(acb)) {
3706 			break;
3707 		} else {
3708 			retry_count--;
3709 		}
3710 	} while (retry_count != 0);
3711 }
3712 
3713 static void
3714 arcmsr_flush_hbb_cache(struct ACB *acb)
3715 {
3716 	struct HBB_msgUnit *phbbmu;
3717 	int retry_count = 30;
3718 
3719 	/* enlarge wait flush adapter cache time: 10 minutes */
3720 
3721 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3722 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3723 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3724 	    ARCMSR_MESSAGE_FLUSH_CACHE);
3725 	do {
3726 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
3727 			break;
3728 		} else {
3729 			retry_count--;
3730 		}
3731 	} while (retry_count != 0);
3732 }
3733 
3734 
3735 static void
3736 arcmsr_flush_hbc_cache(struct ACB *acb)
3737 {
3738 	struct HBC_msgUnit *phbcmu;
3739 	int retry_count = 30;
3740 
3741 	/* enlarge wait flush adapter cache time: 10 minutes */
3742 
3743 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3744 
3745 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3746 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3747 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3748 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3749 	do {
3750 		if (arcmsr_hbc_wait_msgint_ready(acb)) {
3751 			break;
3752 		} else {
3753 			retry_count--;
3754 		}
3755 	} while (retry_count != 0);
3756 }
3757 
3758 
3759 
3760 static uint8_t
3761 arcmsr_abort_hba_allcmd(struct ACB *acb)
3762 {
3763 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3764 
3765 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3766 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3767 
3768 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
3769 		arcmsr_warn(acb,
3770 		    "timeout while waiting for 'abort all "
3771 		    "outstanding commands'");
3772 		return (0xff);
3773 	}
3774 	return (0x00);
3775 }
3776 
3777 
3778 
3779 static uint8_t
3780 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3781 {
3782 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3783 
3784 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3785 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3786 
3787 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3788 		arcmsr_warn(acb,
3789 		    "timeout while waiting for 'abort all "
3790 		    "outstanding commands'");
3791 		return (0xff);
3792 	}
3793 	return (0x00);
3794 }
3795 
3796 
3797 static uint8_t
3798 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3799 {
3800 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3801 
3802 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3803 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3804 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3805 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3806 
3807 	if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3808 		arcmsr_warn(acb,
3809 		    "timeout while waiting for 'abort all "
3810 		    "outstanding commands'");
3811 		return (0xff);
3812 	}
3813 	return (0x00);
3814 }
3815 
3816 
3817 static void
3818 arcmsr_done4abort_postqueue(struct ACB *acb)
3819 {
3820 
3821 	struct CCB *ccb;
3822 	uint32_t flag_ccb;
3823 	int i = 0;
3824 	boolean_t error;
3825 
3826 	switch (acb->adapter_type) {
3827 	case ACB_ADAPTER_TYPE_A:
3828 	{
3829 		struct HBA_msgUnit *phbamu;
3830 		uint32_t outbound_intstatus;
3831 
3832 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3833 		/* clear and abort all outbound posted Q */
3834 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3835 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3836 		/* clear interrupt */
3837 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3838 		    &phbamu->outbound_intstatus, outbound_intstatus);
3839 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3840 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3841 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3842 			/* frame must be 32 bytes aligned */
3843 			/* the CDB is the first field of the CCB */
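			/*
			 * The IOP posts a completed CCB as its physical
			 * address shifted right by 5 (frames are 32-byte
			 * aligned), with status flags in the low bits;
			 * shifting left restores the address, and adding
			 * vir2phy_offset converts it back to the driver's
			 * virtual CCB pointer.
			 */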
3844 			ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3845 			/* check if command done with no error */
3846 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3847 			    B_TRUE : B_FALSE;
3848 			arcmsr_drain_donequeue(acb, ccb, error);
3849 		}
3850 		break;
3851 	}
3852 
3853 	case ACB_ADAPTER_TYPE_B:
3854 	{
3855 		struct HBB_msgUnit *phbbmu;
3856 
3857 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3858 		/* clear all outbound posted Q */
3859 		/* clear doorbell interrupt */
3860 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3861 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3862 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3863 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3864 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3865 				phbbmu->done_qbuffer[i] = 0;
3866 				/* frame must be 32 bytes aligned */
3867 				ccb = NumToPtr((acb->vir2phy_offset +
3868 				    (flag_ccb << 5)));
3869 				/* check if command done with no error */
3870 				error =
3871 				    (flag_ccb &
3872 				    ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3873 				    B_TRUE : B_FALSE;
3874 				arcmsr_drain_donequeue(acb, ccb, error);
3875 			}
3876 			phbbmu->post_qbuffer[i] = 0;
3877 		}	/* drain reply FIFO */
3878 		phbbmu->doneq_index = 0;
3879 		phbbmu->postq_index = 0;
3880 		break;
3881 	}
3882 
3883 	case ACB_ADAPTER_TYPE_C:
3884 	{
3885 		struct HBC_msgUnit *phbcmu;
3886 		uint32_t ccb_cdb_phy;
3887 
3888 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3889 		while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3890 		    &phbcmu->host_int_status) &
3891 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3892 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3893 			/* pop one completed CCB from the outbound queue */
3894 			flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3895 			    &phbcmu->outbound_queueport_low);
3896 			/* frame must be 32 bytes aligned */
3897 			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3898 			ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3899 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
3900 			    B_TRUE : B_FALSE;
3901 			arcmsr_drain_donequeue(acb, ccb, error);
3902 		}
3903 		break;
3904 	}
3905 
3906 	}
3907 }
3908 /*
3909  * Routine Description: try to get an echo from the iop by posting a
3910  *           GET_CONFIG message and waiting for the reply interrupt.
3911  *        Return Value: 0 on success, 0xFF if the iop does not respond.
3912  */
3913 static uint8_t
3914 arcmsr_get_echo_from_iop(struct ACB *acb)
3915 {
3916 	uint32_t intmask_org;
3917 	uint8_t rtnval = 0;
3918 
3919 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3920 		struct HBA_msgUnit *phbamu;
3921 
3922 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3923 		intmask_org = arcmsr_disable_allintr(acb);
3924 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3925 		    &phbamu->inbound_msgaddr0,
3926 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3927 		if (!arcmsr_hba_wait_msgint_ready(acb)) {
3928 			arcmsr_warn(acb, "timeout while waiting for "
3929 			    "echo from iop");
3930 			acb->acb_flags |= ACB_F_BUS_HANG_ON;
3931 			rtnval = 0xFF;
3932 		}
3933 		/* enable all outbound interrupt */
3934 		arcmsr_enable_allintr(acb, intmask_org);
3935 	}
3936 	return (rtnval);
3937 }
3938 
3939 /*
3940  * Routine Description: Reset the 80331 iop, aborting any outstanding
3941  *           commands and completing them back to the target layer.
3942  *        Return Value: 0 on success, 0xFF if the abort handshake failed.
3943  */
3944 static uint8_t
3945 arcmsr_iop_reset(struct ACB *acb)
3946 {
3947 	struct CCB *ccb;
3948 	uint32_t intmask_org;
3949 	uint8_t rtnval = 0;
3950 	int i = 0;
3951 
3952 	if (acb->ccboutstandingcount > 0) {
3953 		/* disable all outbound interrupt */
3954 		intmask_org = arcmsr_disable_allintr(acb);
3955 		/* clear and abort all outbound posted Q */
3956 		arcmsr_done4abort_postqueue(acb);
3957 		/* talk to iop 331 outstanding command aborted */
3958 		rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3959 		    0xFF : arcmsr_abort_host_command(acb);
3960 
3961 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3962 			ccb = acb->pccb_pool[i];
3963 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3964 				/* ccb->ccb_state = ARCMSR_CCB_RESET; */
3965 				ccb->pkt->pkt_reason = CMD_RESET;
3966 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3967 				arcmsr_ccb_complete(ccb, 1);
3968 			}
3969 		}
3970 		atomic_and_32(&acb->ccboutstandingcount, 0);
3971 		/* enable all outbound interrupt */
3972 		arcmsr_enable_allintr(acb, intmask_org);
3973 	} else {
3974 		rtnval = arcmsr_get_echo_from_iop(acb);
3975 	}
3976 	return (rtnval);
3977 }
3978 
3979 
3980 static struct QBUFFER *
3981 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3982 {
3983 	struct QBUFFER *qb;
3984 
3985 	switch (acb->adapter_type) {
3986 	case ACB_ADAPTER_TYPE_A:
3987 	{
3988 		struct HBA_msgUnit *phbamu;
3989 
3990 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3991 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3992 		break;
3993 	}
3994 
3995 	case ACB_ADAPTER_TYPE_B:
3996 	{
3997 		struct HBB_msgUnit *phbbmu;
3998 
3999 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4000 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4001 		break;
4002 	}
4003 
4004 	case ACB_ADAPTER_TYPE_C:
4005 	{
4006 		struct HBC_msgUnit *phbcmu;
4007 
4008 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4009 		qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4010 		break;
4011 	}
4012 
4013 	}
4014 	return (qb);
4015 }
4016 
4017 
4018 static struct QBUFFER *
4019 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4020 {
4021 	struct QBUFFER *qbuffer = NULL;
4022 
4023 	switch (acb->adapter_type) {
4024 	case ACB_ADAPTER_TYPE_A:
4025 	{
4026 		struct HBA_msgUnit *phbamu;
4027 
4028 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4029 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4030 		break;
4031 	}
4032 
4033 	case ACB_ADAPTER_TYPE_B:
4034 	{
4035 		struct HBB_msgUnit *phbbmu;
4036 
4037 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4038 		qbuffer = (struct QBUFFER *)
4039 		    &phbbmu->hbb_rwbuffer->message_wbuffer;
4040 		break;
4041 	}
4042 
4043 	case ACB_ADAPTER_TYPE_C:
4044 	{
4045 		struct HBC_msgUnit *phbcmu;
4046 
4047 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4048 		qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4049 		break;
4050 	}
4051 
4052 	}
4053 	return (qbuffer);
4054 }
4055 
4056 
4057 
4058 static void
4059 arcmsr_iop_message_read(struct ACB *acb)
4060 {
4061 	switch (acb->adapter_type) {
4062 	case ACB_ADAPTER_TYPE_A:
4063 	{
4064 		struct HBA_msgUnit *phbamu;
4065 
4066 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4067 		/* let IOP know the data has been read */
4068 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4069 		    &phbamu->inbound_doorbell,
4070 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4071 		break;
4072 	}
4073 
4074 	case ACB_ADAPTER_TYPE_B:
4075 	{
4076 		struct HBB_msgUnit *phbbmu;
4077 
4078 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4079 		/* let IOP know the data has been read */
4080 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4081 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4082 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4083 		break;
4084 	}
4085 
4086 	case ACB_ADAPTER_TYPE_C:
4087 	{
4088 		struct HBC_msgUnit *phbcmu;
4089 
4090 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4091 		/* let IOP know data has been read */
4092 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4093 		    &phbcmu->inbound_doorbell,
4094 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4095 		break;
4096 	}
4097 
4098 	}
4099 }
4100 
4101 
4102 
4103 static void
4104 arcmsr_iop_message_wrote(struct ACB *acb)
4105 {
4106 	switch (acb->adapter_type) {
4107 	case ACB_ADAPTER_TYPE_A: {
4108 		struct HBA_msgUnit *phbamu;
4109 
4110 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4111 		/*
4112 		 * push inbound doorbell tell iop, driver data write ok
4113 		 * and wait reply on next hwinterrupt for next Qbuffer post
4114 		 */
4115 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4116 		    &phbamu->inbound_doorbell,
4117 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4118 		break;
4119 	}
4120 
4121 	case ACB_ADAPTER_TYPE_B:
4122 	{
4123 		struct HBB_msgUnit *phbbmu;
4124 
4125 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4126 		/*
4127 		 * push inbound doorbell to tell iop the driver data was written
4128 		 * successfully, then await reply on next hwinterrupt for
4129 		 * next Qbuffer post
4130 		 */
4131 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4132 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4133 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
4134 		break;
4135 	}
4136 
4137 	case ACB_ADAPTER_TYPE_C:
4138 	{
4139 		struct HBC_msgUnit *phbcmu;
4140 
4141 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4142 		/*
4143 		 * push inbound doorbell tell iop, driver data write ok
4144 		 * and wait reply on next hwinterrupt for next Qbuffer post
4145 		 */
4146 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4147 		    &phbcmu->inbound_doorbell,
4148 		    ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4149 		break;
4150 	}
4151 
4152 	}
4153 }
4154 
4155 
4156 
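/*
 * arcmsr_post_ioctldata2iop: copy queued ioctl write data, one byte at a
 * time, from the circular wqbuffer into the IOP's write queue buffer (at
 * most 124 bytes per post), then ring the doorbell; any remainder goes
 * out from the doorbell interrupt handler on the next Qbuffer post.
 */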
4157 static void
4158 arcmsr_post_ioctldata2iop(struct ACB *acb)
4159 {
4160 	uint8_t *pQbuffer;
4161 	struct QBUFFER *pwbuffer;
4162 	uint8_t *iop_data;
4163 	int32_t allxfer_len = 0;
4164 
4165 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4166 	iop_data = (uint8_t *)pwbuffer->data;
4167 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4168 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4169 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4170 		    (allxfer_len < 124)) {
4171 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4172 			(void) memcpy(iop_data, pQbuffer, 1);
4173 			acb->wqbuf_firstidx++;
4174 			/* if last index number set it to 0 */
4175 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4176 			iop_data++;
4177 			allxfer_len++;
4178 		}
4179 		pwbuffer->data_len = allxfer_len;
4180 		/*
4181 		 * push inbound doorbell and wait reply at hwinterrupt
4182 		 * routine for next Qbuffer post
4183 		 */
4184 		arcmsr_iop_message_wrote(acb);
4185 	}
4186 }
4187 
4188 
4189 
4190 static void
4191 arcmsr_stop_hba_bgrb(struct ACB *acb)
4192 {
4193 	struct HBA_msgUnit *phbamu;
4194 
4195 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4196 
4197 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4198 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4199 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4200 	if (!arcmsr_hba_wait_msgint_ready(acb))
4201 		arcmsr_warn(acb,
4202 		    "timeout while waiting for background rebuild completion");
4203 }
4204 
4205 
4206 static void
4207 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4208 {
4209 	struct HBB_msgUnit *phbbmu;
4210 
4211 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4212 
4213 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4214 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4215 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4216 
4217 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4218 		arcmsr_warn(acb,
4219 		    "timeout while waiting for background rebuild completion");
4220 }
4221 
4222 
4223 static void
4224 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4225 {
4226 	struct HBC_msgUnit *phbcmu;
4227 
4228 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4229 
4230 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4231 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4232 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4233 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4234 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4235 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4236 		arcmsr_warn(acb,
4237 		    "timeout while waiting for background rebuild completion");
4238 }
4239 
4240 
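/*
 * arcmsr_iop_message_xfer: pass-through ioctl path.  The 4-byte Areca
 * control code lives in CDB bytes 5-8; the mapped-in buf(9S) holds a
 * CMD_MESSAGE_FIELD whose payload is staged through the circular
 * rqbuffer (IOP to driver) and wqbuffer (driver to IOP).
 */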
4241 static int
4242 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4243 {
4244 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4245 	struct CCB *ccb = pkt->pkt_ha_private;
4246 	struct buf *bp = ccb->bp;
4247 	uint8_t *pQbuffer;
4248 	int retvalue = 0, transfer_len = 0;
4249 	char *buffer;
4250 	uint32_t controlcode;
4251 
4252 
4253 	/* 4 bytes: Areca io control code */
4254 	controlcode =
4255 	    (uint32_t)pkt->pkt_cdbp[5] << 24 |
4256 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
4257 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
4258 	    (uint32_t)pkt->pkt_cdbp[8];
4259 
4260 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
4261 		bp_mapin(bp);
4262 
4263 	buffer = bp->b_un.b_addr;
4264 	transfer_len = bp->b_bcount;
4265 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4266 		retvalue = ARCMSR_MESSAGE_FAIL;
4267 		goto message_out;
4268 	}
4269 
4270 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4271 	switch (controlcode) {
4272 	case ARCMSR_MESSAGE_READ_RQBUFFER:
4273 	{
4274 		unsigned long *ver_addr;
4275 		uint8_t *ptmpQbuffer;
4276 		int32_t allxfer_len = 0;
4277 
4278 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4279 
4280 		ptmpQbuffer = (uint8_t *)ver_addr;
4281 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4282 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
4283 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4284 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
4285 			acb->rqbuf_firstidx++;
4286 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4287 			ptmpQbuffer++;
4288 			allxfer_len++;
4289 		}
4290 
4291 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4292 			struct QBUFFER *prbuffer;
4293 			uint8_t  *iop_data;
4294 			int32_t iop_len;
4295 
4296 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4297 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
4298 			iop_data = (uint8_t *)prbuffer->data;
4299 			iop_len = (int32_t)prbuffer->data_len;
4300 
4301 			while (iop_len > 0) {
4302 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4303 				(void) memcpy(pQbuffer, iop_data, 1);
4304 				acb->rqbuf_lastidx++;
4305 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4306 				iop_data++;
4307 				iop_len--;
4308 			}
4309 			arcmsr_iop_message_read(acb);
4310 		}
4311 
4312 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
4313 		    (uint8_t *)ver_addr, allxfer_len);
4314 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
4315 		pcmdmessagefld->cmdmessage.ReturnCode =
4316 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4317 		kmem_free(ver_addr, MSGDATABUFLEN);
4318 		break;
4319 	}
4320 
4321 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4322 	{
4323 		uint8_t *ver_addr;
4324 		int32_t my_empty_len, user_len, wqbuf_firstidx,
4325 		    wqbuf_lastidx;
4326 		uint8_t *ptmpuserbuffer;
4327 
4328 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4329 
4330 		ptmpuserbuffer = ver_addr;
4331 		user_len = min(pcmdmessagefld->cmdmessage.Length,
4332 		    MSGDATABUFLEN);
4333 		(void) memcpy(ptmpuserbuffer,
4334 		    pcmdmessagefld->messagedatabuffer, user_len);
4335 		wqbuf_lastidx = acb->wqbuf_lastidx;
4336 		wqbuf_firstidx = acb->wqbuf_firstidx;
4337 		if (wqbuf_lastidx != wqbuf_firstidx) {
4338 			struct scsi_arq_status *arq_status;
4339 
4340 			arcmsr_post_ioctldata2iop(acb);
4341 			arq_status = (struct scsi_arq_status *)
4342 			    (intptr_t)(pkt->pkt_scbp);
4343 			bzero((caddr_t)arq_status,
4344 			    sizeof (struct scsi_arq_status));
4345 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
4346 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4347 			    STATE_GOT_TARGET | STATE_SENT_CMD |
4348 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4349 
4350 			arq_status->sts_rqpkt_statistics =
4351 			    pkt->pkt_statistics;
4352 			arq_status->sts_rqpkt_resid = 0;
4353 
4354 			struct scsi_extended_sense *sts_sensedata;
4355 
4356 			sts_sensedata = &arq_status->sts_sensedata;
4357 
4358 			/* has error report sensedata */
4359 			sts_sensedata->es_code = 0x0;
4360 			sts_sensedata->es_valid = 0x01;
4361 			sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4362 			/* AdditionalSenseLength */
4363 			sts_sensedata->es_add_len = 0x0A;
4364 			/* AdditionalSenseCode */
4365 			sts_sensedata->es_add_code = 0x20;
4366 			retvalue = ARCMSR_MESSAGE_FAIL;
4367 		} else {
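			/*
			 * Free space in the circular wqbuffer, with
			 * ARCMSR_MAX_QBUFFER a power of two: one slot is
			 * kept empty so that firstidx == lastidx always
			 * means "empty", never "full".
			 */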
4368 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
4369 			    (ARCMSR_MAX_QBUFFER - 1);
4370 			if (my_empty_len >= user_len) {
4371 				while (user_len > 0) {
4372 					pQbuffer = &acb->wqbuffer[
4373 					    acb->wqbuf_lastidx];
4374 					(void) memcpy(pQbuffer,
4375 					    ptmpuserbuffer, 1);
4376 					acb->wqbuf_lastidx++;
4377 					acb->wqbuf_lastidx %=
4378 					    ARCMSR_MAX_QBUFFER;
4379 					ptmpuserbuffer++;
4380 					user_len--;
4381 				}
4382 				if (acb->acb_flags &
4383 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4384 					acb->acb_flags &=
4385 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4386 					arcmsr_post_ioctldata2iop(acb);
4387 				}
4388 			} else {
4389 				struct scsi_arq_status *arq_status;
4390 
4391 				/* has error report sensedata */
4392 				arq_status = (struct scsi_arq_status *)
4393 				    (intptr_t)(pkt->pkt_scbp);
4394 				bzero((caddr_t)arq_status,
4395 				    sizeof (struct scsi_arq_status));
4396 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
4397 				arq_status->sts_rqpkt_state =
4398 				    (STATE_GOT_BUS |
4399 				    STATE_GOT_TARGET | STATE_SENT_CMD |
4400 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4401 				arq_status->sts_rqpkt_statistics =
4402 				    pkt->pkt_statistics;
4403 				arq_status->sts_rqpkt_resid = 0;
4404 
4405 				struct scsi_extended_sense *sts_sensedata;
4406 				sts_sensedata = &arq_status->sts_sensedata;
4407 
4408 				/* has error report sensedata */
4409 				sts_sensedata->es_code  = 0x0;
4410 				sts_sensedata->es_valid = 0x01;
4411 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4412 				/* AdditionalSenseLength */
4413 				sts_sensedata->es_add_len = 0x0A;
4414 				/* AdditionalSenseCode */
4415 				sts_sensedata->es_add_code = 0x20;
4416 				retvalue = ARCMSR_MESSAGE_FAIL;
4417 			}
4418 		}
4419 		kmem_free(ver_addr, MSGDATABUFLEN);
4420 		break;
4421 	}
4422 
4423 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4424 		pQbuffer = acb->rqbuffer;
4425 
4426 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4427 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4428 			arcmsr_iop_message_read(acb);
4429 		}
4430 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4431 		acb->rqbuf_firstidx = 0;
4432 		acb->rqbuf_lastidx = 0;
4433 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4434 		pcmdmessagefld->cmdmessage.ReturnCode =
4435 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4436 		break;
4437 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4438 		pQbuffer = acb->wqbuffer;
4439 
4440 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4441 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4442 			arcmsr_iop_message_read(acb);
4443 		}
4444 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4445 		    ACB_F_MESSAGE_WQBUFFER_READ);
4446 		acb->wqbuf_firstidx = 0;
4447 		acb->wqbuf_lastidx = 0;
4448 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4449 		pcmdmessagefld->cmdmessage.ReturnCode =
4450 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4451 		break;
4452 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4453 
4454 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4455 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4456 			arcmsr_iop_message_read(acb);
4457 		}
4458 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4459 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
4460 		    ACB_F_MESSAGE_WQBUFFER_READ);
4461 		acb->rqbuf_firstidx = 0;
4462 		acb->rqbuf_lastidx = 0;
4463 		acb->wqbuf_firstidx = 0;
4464 		acb->wqbuf_lastidx = 0;
4465 		pQbuffer = acb->rqbuffer;
4466 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4467 		pQbuffer = acb->wqbuffer;
4468 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4469 		pcmdmessagefld->cmdmessage.ReturnCode =
4470 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4471 		break;
4472 
4473 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4474 		pcmdmessagefld->cmdmessage.ReturnCode =
4475 		    ARCMSR_MESSAGE_RETURNCODE_3F;
4476 		break;
4477 	/*
4478 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4479 	 */
4480 	case ARCMSR_MESSAGE_SAY_GOODBYE:
4481 		arcmsr_iop_parking(acb);
4482 		break;
4483 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4484 		switch (acb->adapter_type) {
4485 		case ACB_ADAPTER_TYPE_A:
4486 			arcmsr_flush_hba_cache(acb);
4487 			break;
4488 		case ACB_ADAPTER_TYPE_B:
4489 			arcmsr_flush_hbb_cache(acb);
4490 			break;
4491 		case ACB_ADAPTER_TYPE_C:
4492 			arcmsr_flush_hbc_cache(acb);
4493 			break;
4494 		}
4495 		break;
4496 	default:
4497 		retvalue = ARCMSR_MESSAGE_FAIL;
4498 	}
4499 
4500 message_out:
4501 
4502 	return (retvalue);
4503 }
4504 
4505 
4506 
4507 
4508 static void
4509 arcmsr_pcidev_disattach(struct ACB *acb)
4510 {
4511 	struct CCB *ccb;
4512 	int i = 0;
4513 
4514 	/* disable all outbound interrupts */
4515 	(void) arcmsr_disable_allintr(acb);
4516 	/* stop adapter background rebuild */
4517 	switch (acb->adapter_type) {
4518 	case ACB_ADAPTER_TYPE_A:
4519 		arcmsr_stop_hba_bgrb(acb);
4520 		arcmsr_flush_hba_cache(acb);
4521 		break;
4522 	case ACB_ADAPTER_TYPE_B:
4523 		arcmsr_stop_hbb_bgrb(acb);
4524 		arcmsr_flush_hbb_cache(acb);
4525 		break;
4526 	case ACB_ADAPTER_TYPE_C:
4527 		arcmsr_stop_hbc_bgrb(acb);
4528 		arcmsr_flush_hbc_cache(acb);
4529 		break;
4530 	}
4531 	/* abort all outstanding commands */
4532 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4533 	acb->acb_flags &= ~ACB_F_IOP_INITED;
4534 
4535 	if (acb->ccboutstandingcount != 0) {
4536 		/* clear and abort all outbound posted Q */
4537 		arcmsr_done4abort_postqueue(acb);
4538 		/* talk to iop outstanding command aborted */
4539 		(void) arcmsr_abort_host_command(acb);
4540 
4541 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4542 			ccb = acb->pccb_pool[i];
4543 			if (ccb->ccb_state == ARCMSR_CCB_START) {
4544 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4545 				ccb->pkt->pkt_reason = CMD_ABORTED;
4546 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4547 				arcmsr_ccb_complete(ccb, 1);
4548 			}
4549 		}
4550 	}
4551 }
4552 
4553 /* get firmware miscellaneous data */
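/*
 * The GET_CONFIG reply is laid out in msgcode_rwbuffer[]: the model
 * string at word ARCMSR_FW_MODEL_OFFSET (15), the firmware version at
 * word 17, the device map at word 21, and words 1-4 holding the request
 * length, queue depth, SDRAM size and IDE channel count.
 */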
4554 static void
4555 arcmsr_get_hba_config(struct ACB *acb)
4556 {
4557 	struct HBA_msgUnit *phbamu;
4558 
4559 	char *acb_firm_model;
4560 	char *acb_firm_version;
4561 	char *acb_device_map;
4562 	char *iop_firm_model;
4563 	char *iop_firm_version;
4564 	char *iop_device_map;
4565 	int count;
4566 
4567 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4568 	acb_firm_model = acb->firm_model;
4569 	acb_firm_version = acb->firm_version;
4570 	acb_device_map = acb->device_map;
4571 	/* firm_model, 15 */
4572 	iop_firm_model =
4573 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4574 	/* firm_version, 17 */
4575 	iop_firm_version =
4576 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4577 
4578 	/* device_map, 21 */
4579 	iop_device_map =
4580 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4581 
4582 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4583 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4584 
4585 	if (!arcmsr_hba_wait_msgint_ready(acb))
4586 		arcmsr_warn(acb,
4587 		    "timeout while waiting for adapter firmware "
4588 		    "miscellaneous data");
4589 
4590 	count = 8;
4591 	while (count) {
4592 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4593 		    iop_firm_model);
4594 		acb_firm_model++;
4595 		iop_firm_model++;
4596 		count--;
4597 	}
4598 
4599 	count = 16;
4600 	while (count) {
4601 		*acb_firm_version =
4602 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4603 		acb_firm_version++;
4604 		iop_firm_version++;
4605 		count--;
4606 	}
4607 
4608 	count = 16;
4609 	while (count) {
4610 		*acb_device_map =
4611 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4612 		acb_device_map++;
4613 		iop_device_map++;
4614 		count--;
4615 	}
4616 
4617 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4618 	    acb->firm_version);
4619 
4620 	/* firm_request_len, 1 */
4621 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4622 	    &phbamu->msgcode_rwbuffer[1]);
4623 	/* firm_numbers_queue, 2 */
4624 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4625 	    &phbamu->msgcode_rwbuffer[2]);
4626 	/* firm_sdram_size, 3 */
4627 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4628 	    &phbamu->msgcode_rwbuffer[3]);
4629 	/* firm_ide_channels, 4 */
4630 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4631 	    &phbamu->msgcode_rwbuffer[4]);
4632 }
4633 
4634 /* get firmware miscellaneous data */
4635 static void
4636 arcmsr_get_hbb_config(struct ACB *acb)
4637 {
4638 	struct HBB_msgUnit *phbbmu;
4639 	char *acb_firm_model;
4640 	char *acb_firm_version;
4641 	char *acb_device_map;
4642 	char *iop_firm_model;
4643 	char *iop_firm_version;
4644 	char *iop_device_map;
4645 	int count;
4646 
4647 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4648 	acb_firm_model = acb->firm_model;
4649 	acb_firm_version = acb->firm_version;
4650 	acb_device_map = acb->device_map;
4651 	/* firm_model, 15 */
4652 	iop_firm_model = (char *)
4653 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4654 	/* firm_version, 17 */
4655 	iop_firm_version = (char *)
4656 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4657 	/* device_map, 21 */
4658 	iop_device_map = (char *)
4659 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4660 
4661 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4662 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4663 
4664 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4665 		arcmsr_warn(acb,
4666 		    "timeout while waiting for adapter firmware "
4667 		    "miscellaneous data");
4668 
4669 	count = 8;
4670 	while (count) {
4671 		*acb_firm_model =
4672 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4673 		acb_firm_model++;
4674 		iop_firm_model++;
4675 		count--;
4676 	}
4677 	count = 16;
4678 	while (count) {
4679 		*acb_firm_version =
4680 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4681 		acb_firm_version++;
4682 		iop_firm_version++;
4683 		count--;
4684 	}
4685 	count = 16;
4686 	while (count) {
4687 		*acb_device_map =
4688 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4689 		acb_device_map++;
4690 		iop_device_map++;
4691 		count--;
4692 	}
4693 
4694 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4695 	    acb->firm_version);
4696 
4697 	/* firm_request_len, 1 */
4698 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4699 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4700 	/* firm_numbers_queue, 2 */
4701 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4702 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4703 	/* firm_sdram_size, 3 */
4704 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4705 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4706 	/* firm_ide_channels, 4 */
4707 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4708 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4709 }
4710 
4711 
4712 /* get firmware miscellaneous data */
4713 static void
4714 arcmsr_get_hbc_config(struct ACB *acb)
4715 {
4716 	struct HBC_msgUnit *phbcmu;
4717 
4718 	char *acb_firm_model;
4719 	char *acb_firm_version;
4720 	char *acb_device_map;
4721 	char *iop_firm_model;
4722 	char *iop_firm_version;
4723 	char *iop_device_map;
4724 	int count;
4725 
4726 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4727 	acb_firm_model = acb->firm_model;
4728 	acb_firm_version = acb->firm_version;
4729 	acb_device_map = acb->device_map;
4730 	/* firm_model, 15 */
4731 	iop_firm_model =
4732 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4733 	/* firm_version, 17 */
4734 	iop_firm_version =
4735 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4736 	/* device_map, 21 */
4737 	iop_device_map =
4738 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4739 	/* post "get config" instruction */
4740 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4741 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4742 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4743 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4744 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4745 		arcmsr_warn(acb,
4746 		    "timeout while waiting for adapter firmware "
4747 		    "miscellaneous data");
4748 	count = 8;
4749 	while (count) {
4750 		*acb_firm_model =
4751 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4752 		acb_firm_model++;
4753 		iop_firm_model++;
4754 		count--;
4755 	}
4756 
4757 	count = 16;
4758 	while (count) {
4759 		*acb_firm_version =
4760 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4761 		acb_firm_version++;
4762 		iop_firm_version++;
4763 		count--;
4764 	}
4765 
4766 	count = 16;
4767 	while (count) {
4768 		*acb_device_map =
4769 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4770 		acb_device_map++;
4771 		iop_device_map++;
4772 		count--;
4773 	}
4774 
4775 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4776 	    acb->firm_version);
4777 
4778 	/* firm_request_len, 1, 04-07 */
4779 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4780 	    &phbcmu->msgcode_rwbuffer[1]);
4781 	/* firm_numbers_queue, 2, 08-11 */
4782 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4783 	    &phbcmu->msgcode_rwbuffer[2]);
4784 	/* firm_sdram_size, 3, 12-15 */
4785 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4786 	    &phbcmu->msgcode_rwbuffer[3]);
4787 	/* firm_ide_channels, 4, 16-19 */
4788 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4789 	    &phbcmu->msgcode_rwbuffer[4]);
4790 	/* firm_cfg_version, 25, 100-103 */
4791 	acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4792 	    &phbcmu->msgcode_rwbuffer[25]);
4793 }
4794 
4795 
4796 /* start background rebuild */
4797 static void
4798 arcmsr_start_hba_bgrb(struct ACB *acb)
4799 {
4800 	struct HBA_msgUnit *phbamu;
4801 
4802 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4803 
4804 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4805 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4806 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4807 
4808 	if (!arcmsr_hba_wait_msgint_ready(acb))
4809 		arcmsr_warn(acb,
4810 		    "timeout while waiting for background rebuild to start");
4811 }
4812 
4813 
4814 static void
4815 arcmsr_start_hbb_bgrb(struct ACB *acb)
4816 {
4817 	struct HBB_msgUnit *phbbmu;
4818 
4819 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4820 
4821 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4822 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4823 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4824 	    ARCMSR_MESSAGE_START_BGRB);
4825 
4826 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4827 		arcmsr_warn(acb,
4828 		    "timeout while waiting for background rebuild to start");
4829 }
4830 
4831 
4832 static void
4833 arcmsr_start_hbc_bgrb(struct ACB *acb)
4834 {
4835 	struct HBC_msgUnit *phbcmu;
4836 
4837 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4838 
4839 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4840 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4841 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4842 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4843 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4844 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4845 		arcmsr_warn(acb,
4846 		    "timeout while waiting for background rebuild to start");
4847 }
4848 
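/*
 * The three polling_*_ccbdone routines below drain the reply FIFO with
 * interrupts masked (used by the abort/reset and panic-dump paths).  An
 * empty poll sleeps 25 ms; when waiting on a specific poll_ccb the loop
 * gives up after roughly 100 polls, and it exits as soon as no commands
 * remain outstanding.
 */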
4849 static void
4850 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4851 {
4852 	struct HBA_msgUnit *phbamu;
4853 	struct CCB *ccb;
4854 	boolean_t error;
4855 	uint32_t flag_ccb, outbound_intstatus, intmask_org;
4856 	boolean_t poll_ccb_done = B_FALSE;
4857 	uint32_t poll_count = 0;
4858 
4859 
4860 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4861 
4862 polling_ccb_retry:
4863 	/* TODO: Use correct offset and size for syncing? */
4864 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4865 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4866 		return;
4867 	intmask_org = arcmsr_disable_allintr(acb);
4868 
4869 	for (;;) {
4870 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4871 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4872 			if (poll_ccb_done) {
4873 				/* chip FIFO no ccb for completion already */
4874 				break;
4875 			} else {
4876 				drv_usecwait(25000);
4877 				if ((poll_count > 100) && (poll_ccb != NULL)) {
4878 					break;
4879 				}
4880 				if (acb->ccboutstandingcount == 0) {
4881 					break;
4882 				}
4883 				poll_count++;
4884 				outbound_intstatus =
4885 				    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4886 				    &phbamu->outbound_intstatus) &
4887 				    acb->outbound_int_enable;
4888 
4889 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4890 				    &phbamu->outbound_intstatus,
4891 				    outbound_intstatus); /* clear interrupt */
4892 			}
4893 		}
4894 
4895 		/* frame must be 32 bytes aligned */
4896 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4897 
4898 		/* check if command done with no error */
4899 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4900 		    B_TRUE : B_FALSE;
4901 		if (poll_ccb != NULL)
4902 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4903 
4904 		if (ccb->acb != acb) {
4905 			arcmsr_warn(acb, "ccb belongs to a different acb!");
4906 			continue;
4907 		}
4908 		if (ccb->ccb_state != ARCMSR_CCB_START) {
4909 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4910 				ccb->ccb_state |= ARCMSR_CCB_BACK;
4911 				ccb->pkt->pkt_reason = CMD_ABORTED;
4912 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4913 				arcmsr_ccb_complete(ccb, 1);
4914 				continue;
4915 			}
4916 			arcmsr_report_ccb_state(acb, ccb, error);
4917 			arcmsr_warn(acb,
4918 			    "polling op got unexpected ccb command done");
4919 			continue;
4920 		}
4921 		arcmsr_report_ccb_state(acb, ccb, error);
4922 	}	/* drain reply FIFO */
4923 	arcmsr_enable_allintr(acb, intmask_org);
4924 }
4925 
4926 
4927 static void
4928 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4929 {
4930 	struct HBB_msgUnit *phbbmu;
4931 	struct CCB *ccb;
4932 	uint32_t flag_ccb, intmask_org;
4933 	boolean_t error;
4934 	uint32_t poll_count = 0;
4935 	int index;
4936 	boolean_t poll_ccb_done = B_FALSE;
4937 
4938 
4939 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4940 
4941 
4942 polling_ccb_retry:
4943 	/* Use correct offset and size for syncing */
4944 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4945 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4946 		return;
4947 
4948 	intmask_org = arcmsr_disable_allintr(acb);
4949 
4950 	for (;;) {
4951 		index = phbbmu->doneq_index;
4952 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4953 			if (poll_ccb_done) {
4954 				/* chip FIFO no ccb for completion already */
4955 				break;
4956 			} else {
4957 				drv_usecwait(25000);
4958 				if ((poll_count > 100) && (poll_ccb != NULL))
4959 					break;
4960 				if (acb->ccboutstandingcount == 0)
4961 					break;
4962 				poll_count++;
4963 				/* clear doorbell interrupt */
4964 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4965 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4966 				    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4967 			}
4968 		}
4969 
4970 		phbbmu->done_qbuffer[index] = 0;
4971 		index++;
4972 		/* if last index number set it to 0 */
4973 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4974 		phbbmu->doneq_index = index;
4975 		/* check if command done with no error */
4976 		/* frame must be 32 bytes aligned */
4977 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4978 
4979 		/* check if command done with no error */
4980 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4981 		    B_TRUE : B_FALSE;
4982 
4983 		if (poll_ccb != NULL)
4984 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4985 		if (ccb->acb != acb) {
4986 			arcmsr_warn(acb, "ccb belongs to a different acb!");
4987 			continue;
4988 		}
4989 		if (ccb->ccb_state != ARCMSR_CCB_START) {
4990 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4991 				ccb->ccb_state |= ARCMSR_CCB_BACK;
4992 				ccb->pkt->pkt_reason = CMD_ABORTED;
4993 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4994 				arcmsr_ccb_complete(ccb, 1);
4995 				continue;
4996 			}
4997 			arcmsr_report_ccb_state(acb, ccb, error);
4998 			arcmsr_warn(acb,
4999 			    "polling op got unexpected ccb command done");
5000 			continue;
5001 		}
5002 		arcmsr_report_ccb_state(acb, ccb, error);
5003 	}	/* drain reply FIFO */
5004 	arcmsr_enable_allintr(acb, intmask_org);
5005 }
5006 
5007 
5008 static void
5009 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5010 {
5011 
5012 	struct HBC_msgUnit *phbcmu;
5013 	struct CCB *ccb;
5014 	boolean_t error;
5015 	uint32_t ccb_cdb_phy;
5016 	uint32_t flag_ccb, intmask_org;
5017 	boolean_t poll_ccb_done = B_FALSE;
5018 	uint32_t poll_count = 0;
5019 
5020 
5021 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5022 
5023 polling_ccb_retry:
5024 
5025 	/* Use correct offset and size for syncing */
5026 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5027 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5028 		return;
5029 
5030 	intmask_org = arcmsr_disable_allintr(acb);
5031 
5032 	for (;;) {
5033 		if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5034 		    &phbcmu->host_int_status) &
5035 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5036 
5037 			if (poll_ccb_done) {
5038 				/* chip FIFO no ccb for completion already */
5039 				break;
5040 			} else {
5041 				drv_usecwait(25000);
5042 				if ((poll_count > 100) && (poll_ccb != NULL)) {
5043 					break;
5044 				}
5045 				if (acb->ccboutstandingcount == 0) {
5046 					break;
5047 				}
5048 				poll_count++;
5049 			}
5050 		}
5051 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5052 		    &phbcmu->outbound_queueport_low);
5053 		/* frame must be 32 bytes aligned */
5054 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5055 		/* the CDB is the first field of the CCB */
5056 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5057 
5058 		/* check if command done with no error */
5059 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5060 		    B_TRUE : B_FALSE;
5061 		if (poll_ccb != NULL)
5062 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5063 
5064 		if (ccb->acb != acb) {
5065 			arcmsr_warn(acb, "ccb belongs to a different acb!");
5066 			continue;
5067 		}
5068 		if (ccb->ccb_state != ARCMSR_CCB_START) {
5069 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5070 				ccb->ccb_state |= ARCMSR_CCB_BACK;
5071 				ccb->pkt->pkt_reason = CMD_ABORTED;
5072 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
5073 				arcmsr_ccb_complete(ccb, 1);
5074 				continue;
5075 			}
5076 			arcmsr_report_ccb_state(acb, ccb, error);
5077 			arcmsr_warn(acb,
5078 			    "polling op got unexpected ccb command done");
5079 			continue;
5080 		}
5081 		arcmsr_report_ccb_state(acb, ccb, error);
5082 	}	/* drain reply FIFO */
5083 	arcmsr_enable_allintr(acb, intmask_org);
5084 }
5085 
5086 
5087 /*
5088  * Function: arcmsr_hba_hardware_reset()
5089  *           Works around an Intel IOP bug that could hang the
5090  *           firmware and panic the kernel.
5091  */
5092 static void
5093 arcmsr_hba_hardware_reset(struct ACB *acb)
5094 {
5095 	struct HBA_msgUnit *phbamu;
5096 	uint8_t value[64];
5097 	int i;
5098 
5099 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5100 	/* backup pci config data */
5101 	for (i = 0; i < 64; i++) {
5102 		value[i] = pci_config_get8(acb->pci_acc_handle, i);
5103 	}
5104 	/* hardware reset signal */
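	/*
	 * The ARC-1680 takes the reset request through a message-unit
	 * register; other models are reset via a write to PCI config
	 * offset 0x84.
	 */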
5105 	if ((PCI_DEVICE_ID_ARECA_1680 ==
5106 	    pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5107 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5108 		    &phbamu->reserved1[0], 0x00000003);
5109 	} else {
5110 		pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5111 	}
5112 	drv_usecwait(1000000);
5113 	/* write back pci config data */
5114 	for (i = 0; i < 64; i++) {
5115 		pci_config_put8(acb->pci_acc_handle, i, value[i]);
5116 	}
5117 	drv_usecwait(1000000);
5118 }
5119 
5120 /*
5121  * Function: arcmsr_abort_host_command
5122  */
5123 static uint8_t
5124 arcmsr_abort_host_command(struct ACB *acb)
5125 {
5126 	uint8_t rtnval = 0;
5127 
5128 	switch (acb->adapter_type) {
5129 	case ACB_ADAPTER_TYPE_A:
5130 		rtnval = arcmsr_abort_hba_allcmd(acb);
5131 		break;
5132 	case ACB_ADAPTER_TYPE_B:
5133 		rtnval = arcmsr_abort_hbb_allcmd(acb);
5134 		break;
5135 	case ACB_ADAPTER_TYPE_C:
5136 		rtnval = arcmsr_abort_hbc_allcmd(acb);
5137 		break;
5138 	}
5139 	return (rtnval);
5140 }
5141 
5142 /*
5143  * Function: arcmsr_handle_iop_bus_hold
5144  */
5145 static void
5146 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5147 {
5148 
5149 	switch (acb->adapter_type) {
5150 	case ACB_ADAPTER_TYPE_A:
5151 	{
5152 		struct HBA_msgUnit *phbamu;
5153 		int retry_count = 0;
5154 
5155 		acb->timeout_count = 0;
5156 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5157 		arcmsr_hba_hardware_reset(acb);
5158 		acb->acb_flags &= ~ACB_F_IOP_INITED;
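		/*
		 * Poll once a second for the firmware-OK flag; give up
		 * after roughly a minute of retries.
		 */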
5159 	sleep_again:
5160 		drv_usecwait(1000000);
5161 		if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5162 		    &phbamu->outbound_msgaddr1) &
5163 		    ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5164 			if (retry_count > 60) {
5165 				arcmsr_warn(acb,
5166 				    "waiting for hardware "
5167 				    "bus reset return, RETRY TERMINATED!!");
5168 				return;
5169 			}
5170 			retry_count++;
5171 			goto sleep_again;
5172 		}
5173 		arcmsr_iop_init(acb);
5174 		break;
5175 	}
5176 
5177 	}
5178 }
5179 
5180 static void
5181 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb)
5182 {
5183 	struct QBUFFER *prbuffer;
5184 	uint8_t *pQbuffer;
5185 	uint8_t *iop_data;
5186 	int my_empty_len, iop_len;
5187 	int rqbuf_firstidx, rqbuf_lastidx;
5188 
5189 	/* check whether this IOP data would overflow our rqbuffer */
5190 	rqbuf_lastidx = acb->rqbuf_lastidx;
5191 	rqbuf_firstidx = acb->rqbuf_firstidx;
5192 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
5193 	iop_data = (uint8_t *)prbuffer->data;
5194 	iop_len = prbuffer->data_len;
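	/*
	 * Compute the free space in the circular rqbuffer.  With
	 * ARCMSR_MAX_QBUFFER a power of two, (first - last - 1) &
	 * (size - 1) is the usual ring arithmetic: e.g. for size 4096,
	 * first = 10 and last = 20, (10 - 20 - 1) & 4095 = 4085 bytes
	 * may still be written without overtaking the read index.
	 */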
5195 	my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
5196 	    (ARCMSR_MAX_QBUFFER - 1);
5197 
5198 	if (my_empty_len >= iop_len) {
5199 		while (iop_len > 0) {
5200 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5201 			(void) memcpy(pQbuffer, iop_data, 1);
5202 			rqbuf_lastidx++;
5203 			/* wrap the index at the end of the ring */
5204 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5205 			iop_data++;
5206 			iop_len--;
5207 		}
5208 		acb->rqbuf_lastidx = rqbuf_lastidx;
5209 		arcmsr_iop_message_read(acb);
5210 		/* signature, let IOP know data has been read */
5211 	} else {
5212 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5213 	}
5214 }
5215 
5216 
5217 
5218 static void
5219 arcmsr_iop2drv_data_read_handle(struct ACB *acb)
5220 {
5221 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5222 	/*
5223 	 * check whether any mail from the user-space program is waiting in
5224 	 * our post bag; if so, now is the time to send it to Areca's firmware
5225 	 */
5226 
5227 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5228 
5229 		uint8_t *pQbuffer;
5230 		struct QBUFFER *pwbuffer;
5231 		uint8_t *iop_data;
5232 		int allxfer_len = 0;
5233 
5234 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5235 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5236 		iop_data = (uint8_t *)pwbuffer->data;
5237 
5238 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5239 		    (allxfer_len < 124)) {
5240 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5241 			(void) memcpy(iop_data, pQbuffer, 1);
5242 			acb->wqbuf_firstidx++;
5243 			/* wrap the index at the end of the ring */
5244 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5245 			iop_data++;
5246 			allxfer_len++;
5247 		}
5248 		pwbuffer->data_len = allxfer_len;
5249 		/*
5250 		 * push inbound doorbell, tell iop driver data write ok
5251 		 * await reply on next hwinterrupt for next Qbuffer post
5252 		 */
5253 		arcmsr_iop_message_wrote(acb);
5254 	}
5255 
5256 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5257 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5258 }
5259 
5260 
5261 static void
5262 arcmsr_hba_doorbell_isr(struct ACB *acb)
5263 {
5264 	uint32_t outbound_doorbell;
5265 	struct HBA_msgUnit *phbamu;
5266 
5267 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5268 
5269 	/*
5270 	 *  Maybe here we need to check whether wrqbuffer_lock is held
5271 	 *  DOORBELL: ding! dong!
5272 	 *  check whether there is any mail to collect from the firmware
5273 	 */
5274 
5275 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5276 	    &phbamu->outbound_doorbell);
5277 	/* clear doorbell interrupt */
5278 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5279 	    &phbamu->outbound_doorbell, outbound_doorbell);
5280 
5281 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5282 		arcmsr_iop2drv_data_wrote_handle(acb);
5283 
5284 
5285 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5286 		arcmsr_iop2drv_data_read_handle(acb);
5287 }
5288 
5289 
5290 
5291 static void
5292 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5293 {
5294 	uint32_t outbound_doorbell;
5295 	struct HBC_msgUnit *phbcmu;
5296 
5297 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5298 
5299 	/*
5300 	 *  Maybe here we need to check whether wrqbuffer_lock is held
5301 	 *  DOORBELL: ding! dong!
5302 	 *  check whether there is any mail to pick up from the firmware
5303 	 */
5304 
5305 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5306 	    &phbcmu->outbound_doorbell);
5307 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5308 	    &phbcmu->outbound_doorbell_clear,
5309 	    outbound_doorbell); /* clear interrupt */
5310 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5311 		arcmsr_iop2drv_data_wrote_handle(acb);
5312 	}
5313 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5314 		arcmsr_iop2drv_data_read_handle(acb);
5315 	}
5316 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5317 		/* messenger of "driver to iop commands" */
5318 		arcmsr_hbc_message_isr(acb);
5319 	}
5320 }
5321 
5322 
5323 static void
5324 arcmsr_hba_message_isr(struct ACB *acb)
5325 {
5326 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5327 	uint32_t  *signature = (&phbamu->msgcode_rwbuffer[0]);
5328 	uint32_t outbound_message;
5329 
5330 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5331 	    &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5332 
5333 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5334 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5335 		if ((ddi_taskq_dispatch(acb->taskq,
5336 		    (void (*)(void *))arcmsr_dr_handle,
5337 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5338 			arcmsr_warn(acb, "DR task start failed");
5339 		}
5340 }
5341 
5342 static void
5343 arcmsr_hbb_message_isr(struct ACB *acb)
5344 {
5345 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5346 	uint32_t  *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5347 	uint32_t outbound_message;
5348 
5349 	/* clear interrupts */
5350 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5351 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5352 	    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5353 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5354 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5355 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5356 
5357 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5358 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5359 		if ((ddi_taskq_dispatch(acb->taskq,
5360 		    (void (*)(void *))arcmsr_dr_handle,
5361 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5362 			arcmsr_warn(acb, "DR task start failed");
5363 		}
5364 }
5365 
5366 static void
5367 arcmsr_hbc_message_isr(struct ACB *acb)
5368 {
5369 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5370 	uint32_t  *signature = (&phbcmu->msgcode_rwbuffer[0]);
5371 	uint32_t outbound_message;
5372 
5373 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5374 	    &phbcmu->outbound_doorbell_clear,
5375 	    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5376 
5377 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5378 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5379 		if ((ddi_taskq_dispatch(acb->taskq,
5380 		    (void (*)(void *))arcmsr_dr_handle,
5381 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5382 			arcmsr_warn(acb, "DR task start failed");
5383 		}
5384 }
5385 
5386 
5387 static void
5388 arcmsr_hba_postqueue_isr(struct ACB *acb)
5389 {
5390 
5391 	struct HBA_msgUnit *phbamu;
5392 	struct CCB *ccb;
5393 	uint32_t flag_ccb;
5394 	boolean_t error;
5395 
5396 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5397 
5398 	/* areca cdb command done */
5399 	/* a zero offset and length sync the entire CCB pool */
5400 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5401 	    DDI_DMA_SYNC_FORKERNEL);
5402 
5403 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5404 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5405 		/* frame must be 32 bytes aligned */
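		/*
		 * flag_ccb holds the frame address shifted right by 5
		 * (frames are 32-byte aligned), so shift it back to recover
		 * the physical offset before translating to a virtual CCB.
		 */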
5406 		ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5407 		/* check if command done with no error */
5408 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5409 		    B_TRUE : B_FALSE;
5410 		arcmsr_drain_donequeue(acb, ccb, error);
5411 	}	/* drain reply FIFO */
5412 }
5413 
5414 
5415 static void
5416 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5417 {
5418 	struct HBB_msgUnit *phbbmu;
5419 	struct CCB *ccb;
5420 	uint32_t flag_ccb;
5421 	boolean_t error;
5422 	int index;
5423 
5424 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5425 
5426 	/* areca cdb command done */
5427 	index = phbbmu->doneq_index;
5428 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5429 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5430 		return;
5431 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5432 		phbbmu->done_qbuffer[index] = 0;
5433 		/* frame must be 32 bytes aligned */
5434 
5435 		/* the CDB is the first field of the CCB */
5436 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5437 
5438 		/* check if command done with no error */
5439 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5440 		    B_TRUE : B_FALSE;
5441 		arcmsr_drain_donequeue(acb, ccb, error);
5442 		index++;
5443 		/* wrap the index at the end of the ring */
5444 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
5445 		phbbmu->doneq_index = index;
5446 	}	/* drain reply FIFO */
5447 }
5448 
5449 
5450 static void
5451 arcmsr_hbc_postqueue_isr(struct ACB *acb)
5452 {
5453 
5454 	struct HBC_msgUnit *phbcmu;
5455 	struct CCB *ccb;
5456 	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
5457 	boolean_t error;
5458 
5459 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5460 	/* areca cdb command done */
5461 	/* a zero offset and length sync the entire CCB pool */
5462 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5463 	    DDI_DMA_SYNC_FORKERNEL);
5464 
5465 	while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5466 	    &phbcmu->host_int_status) &
5467 	    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5469 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5470 		    &phbcmu->outbound_queueport_low);
5471 		/* frame must be 32 bytes aligned */
5472 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5473 
5474 		/* the CDB is the first field of the CCB */
5475 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5476 
5477 		/* check if command done with no error */
5478 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5479 		    B_TRUE : B_FALSE;
5480 		arcmsr_drain_donequeue(acb, ccb, error);
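		/*
		 * Bound the time spent in this ISR: after draining a
		 * throttling level's worth of completions, notify the IOP
		 * and pick up the remainder on a later interrupt.
		 */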
5481 		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
5482 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5483 			    &phbcmu->inbound_doorbell,
5484 			    ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
5485 			break;
5486 		}
5487 		throttling++;
5488 	}	/* drain reply FIFO */
5489 }
5490 
5491 
5492 static uint_t
5493 arcmsr_handle_hba_isr(struct ACB *acb)
5494 {
5495 	uint32_t outbound_intstatus;
5496 	struct HBA_msgUnit *phbamu;
5497 
5498 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5499 
5500 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5501 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5502 
5503 	if (outbound_intstatus == 0)	/* it must be a shared irq */
5504 		return (DDI_INTR_UNCLAIMED);
5505 
5506 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5507 	    outbound_intstatus); /* clear interrupt */
5508 
5509 	/* MU doorbell interrupts */
5510 
5511 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5512 		arcmsr_hba_doorbell_isr(acb);
5513 
5514 	/* MU post queue interrupts */
5515 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5516 		arcmsr_hba_postqueue_isr(acb);
5517 
5518 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5519 		arcmsr_hba_message_isr(acb);
5520 	}
5521 
5522 	return (DDI_INTR_CLAIMED);
5523 }
5524 
5525 
5526 static uint_t
5527 arcmsr_handle_hbb_isr(struct ACB *acb)
5528 {
5529 	uint32_t outbound_doorbell;
5530 	struct HBB_msgUnit *phbbmu;
5531 
5532 
5533 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5534 
5535 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5536 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5537 
5538 	if (outbound_doorbell == 0)		/* it must be a shared irq */
5539 		return (DDI_INTR_UNCLAIMED);
5540 
5541 	/* clear doorbell interrupt */
5542 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5543 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5544 	/* read back to flush the posted write (wait a cycle) */
5545 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5546 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
5547 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5548 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5549 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5550 
5551 	/* MU ioctl transfer doorbell interrupts */
5552 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5553 		arcmsr_iop2drv_data_wrote_handle(acb);
5554 
5555 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5556 		arcmsr_iop2drv_data_read_handle(acb);
5557 
5558 	/* MU post queue interrupts */
5559 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5560 		arcmsr_hbb_postqueue_isr(acb);
5561 
5562 	/* MU message interrupt */
5563 
5564 	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5565 		arcmsr_hbb_message_isr(acb);
5566 	}
5567 
5568 	return (DDI_INTR_CLAIMED);
5569 }
5570 
5571 static uint_t
5572 arcmsr_handle_hbc_isr(struct ACB *acb)
5573 {
5574 	uint32_t host_interrupt_status;
5575 	struct HBC_msgUnit *phbcmu;
5576 
5577 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5578 	/*  check outbound intstatus */
5579 	host_interrupt_status =
5580 	    CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5581 	if (host_interrupt_status == 0)	/* it must be a shared irq */
5582 		return (DDI_INTR_UNCLAIMED);
5583 	/* MU ioctl transfer doorbell interrupts */
5584 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5585 		/* messenger of "ioctl message read write" */
5586 		arcmsr_hbc_doorbell_isr(acb);
5587 	}
5588 	/* MU post queue interrupts */
5589 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5590 		/* messenger of "scsi commands" */
5591 		arcmsr_hbc_postqueue_isr(acb);
5592 	}
5593 	return (DDI_INTR_CLAIMED);
5594 }
5595 
5596 static uint_t
5597 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5598 {
5599 	struct ACB *acb = (void *)arg;
5600 	struct CCB *ccb;
5601 	uint_t retrn = DDI_INTR_UNCLAIMED;
5602 	_NOTE(ARGUNUSED(arg2))
5603 
5604 	mutex_enter(&acb->isr_mutex);
5605 	switch (acb->adapter_type) {
5606 	case ACB_ADAPTER_TYPE_A:
5607 		retrn = arcmsr_handle_hba_isr(acb);
5608 		break;
5609 
5610 	case ACB_ADAPTER_TYPE_B:
5611 		retrn = arcmsr_handle_hbb_isr(acb);
5612 		break;
5613 
5614 	case ACB_ADAPTER_TYPE_C:
5615 		retrn = arcmsr_handle_hbc_isr(acb);
5616 		break;
5617 
5618 	default:
5619 		/* We should never be here */
5620 		ASSERT(0);
5621 		break;
5622 	}
5623 	mutex_exit(&acb->isr_mutex);
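	/*
	 * Complete the CCBs queued by the handlers only after isr_mutex
	 * has been dropped, so packet completion callbacks run unlocked.
	 */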
5624 	while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5625 		arcmsr_ccb_complete(ccb, 1);
5626 	}
5627 	return (retrn);
5628 }
5629 
5630 
5631 static void
5632 arcmsr_wait_firmware_ready(struct ACB *acb)
5633 {
5634 	uint32_t firmware_state;
5635 
5636 	firmware_state = 0;
5637 
5638 	switch (acb->adapter_type) {
5639 	case ACB_ADAPTER_TYPE_A:
5640 	{
5641 		struct HBA_msgUnit *phbamu;
5642 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5643 		do {
5644 			firmware_state =
5645 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5646 			    &phbamu->outbound_msgaddr1);
5647 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5648 		    == 0);
5649 		break;
5650 	}
5651 
5652 	case ACB_ADAPTER_TYPE_B:
5653 	{
5654 		struct HBB_msgUnit *phbbmu;
5655 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5656 		do {
5657 			firmware_state =
5658 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5659 			    &phbbmu->hbb_doorbell->iop2drv_doorbell);
5660 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5661 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5662 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5663 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5664 		break;
5665 	}
5666 
5667 	case ACB_ADAPTER_TYPE_C:
5668 	{
5669 		struct HBC_msgUnit *phbcmu;
5670 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5671 		do {
5672 			firmware_state =
5673 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5674 			    &phbcmu->outbound_msgaddr1);
5675 		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5676 		    == 0);
5677 		break;
5678 	}
5679 
5680 	}
5681 }
5682 
5683 static void
5684 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
5685 {
5686 	switch (acb->adapter_type) {
5687 	case ACB_ADAPTER_TYPE_A: {
5688 		struct HBA_msgUnit *phbamu;
5689 		uint32_t outbound_doorbell;
5690 
5691 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5692 		/* empty doorbell Qbuffer if the doorbell rang */
5693 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5694 		    &phbamu->outbound_doorbell);
5695 		/* clear doorbell interrupt */
5696 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5697 		    &phbamu->outbound_doorbell, outbound_doorbell);
5698 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5699 		    &phbamu->inbound_doorbell,
5700 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
5701 		break;
5702 	}
5703 
5704 	case ACB_ADAPTER_TYPE_B: {
5705 		struct HBB_msgUnit *phbbmu;
5706 
5707 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5708 		/* clear interrupt and message state */
5709 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5710 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5711 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5712 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5713 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5714 		    ARCMSR_DRV2IOP_DATA_READ_OK);
5715 		/* let IOP know data has been read */
5716 		break;
5717 	}
5718 
5719 	case ACB_ADAPTER_TYPE_C: {
5720 		struct HBC_msgUnit *phbcmu;
5721 		uint32_t outbound_doorbell;
5722 
5723 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5724 		/* empty doorbell Qbuffer if the doorbell rang */
5725 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5726 		    &phbcmu->outbound_doorbell);
5727 		/* clear outbound doorbell isr */
5728 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5729 		    &phbcmu->outbound_doorbell_clear, outbound_doorbell);
5730 		/* let IOP know data has been read */
5731 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5732 		    &phbcmu->inbound_doorbell,
5733 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
5734 		break;
5735 	}
5736 
5737 	}
5738 }
5739 
5740 
5741 static uint32_t
5742 arcmsr_iop_confirm(struct ACB *acb)
5743 {
5744 	uint64_t cdb_phyaddr;
5745 	uint32_t cdb_phyaddr_hi32;
5746 
5747 	/*
5748 	 * here we need to tell the IOP 331 our freeccb.HighPart, but only
5749 	 * if freeccb.HighPart is non-zero
5750 	 */
5751 	cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
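	/*
	 * Extract the upper 32 bits of the CCB pool address; shifting twice
	 * by 16 instead of once by 32 keeps the expression well defined
	 * even when the operand is only 32 bits wide.
	 */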
5752 	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
5753 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
5754 	switch (acb->adapter_type) {
5755 	case ACB_ADAPTER_TYPE_A:
5756 		if (cdb_phyaddr_hi32 != 0) {
5757 			struct HBA_msgUnit *phbamu;
5758 
5759 			phbamu = (struct HBA_msgUnit *)acb->pmu;
5760 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5761 			    &phbamu->msgcode_rwbuffer[0],
5762 			    ARCMSR_SIGNATURE_SET_CONFIG);
5763 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5764 			    &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5765 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5766 			    &phbamu->inbound_msgaddr0,
5767 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5768 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
5769 				arcmsr_warn(acb,
5770 				    "timeout setting ccb "
5771 				    "high physical address");
5772 				return (FALSE);
5773 			}
5774 		}
5775 		break;
5776 
5777 	/* if adapter is type B, set window of "post command queue" */
5778 	case ACB_ADAPTER_TYPE_B: {
5779 		uint32_t post_queue_phyaddr;
5780 		struct HBB_msgUnit *phbbmu;
5781 
5782 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5783 		phbbmu->postq_index = 0;
5784 		phbbmu->doneq_index = 0;
5785 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5786 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5787 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
5788 
5789 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5790 			arcmsr_warn(acb, "timeout setting post command "
5791 			    "queue window");
5792 			return (FALSE);
5793 		}
5794 
5795 		post_queue_phyaddr = (uint32_t)cdb_phyaddr +
5796 		    ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
5797 		    + offsetof(struct HBB_msgUnit, post_qbuffer);
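		/*
		 * Each queue is (256 + 8) entries * 4 bytes = 1056 bytes,
		 * hence the doneQ offset of 1056 below.
		 */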
5798 		/* driver "set config" signature */
5799 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5800 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
5801 		    ARCMSR_SIGNATURE_SET_CONFIG);
5802 		/* high 32 bits; normally zero */
5803 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5804 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
5805 		    cdb_phyaddr_hi32);
5806 		/* postQ size (256+8)*4 */
5807 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5808 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
5809 		    post_queue_phyaddr);
5810 		/* doneQ size (256+8)*4 */
5811 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5812 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
5813 		    post_queue_phyaddr+1056);
5814 		/* ccb maxQ size must be (256 + 8) * 4 = 1056 */
5815 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5816 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
5817 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5818 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5819 		    ARCMSR_MESSAGE_SET_CONFIG);
5820 
5821 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5822 			arcmsr_warn(acb,
5823 			    "timeout setting command queue window");
5824 			return (FALSE);
5825 		}
5826 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5827 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5828 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
5829 
5830 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5831 			arcmsr_warn(acb, "timeout in 'start driver mode'");
5832 			return (FALSE);
5833 		}
5834 		break;
5835 	}
5836 
5837 	case ACB_ADAPTER_TYPE_C:
5838 		if (cdb_phyaddr_hi32 != 0) {
5839 			struct HBC_msgUnit *phbcmu;
5840 
5841 			phbcmu = (struct HBC_msgUnit *)acb->pmu;
5842 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5843 			    &phbcmu->msgcode_rwbuffer[0],
5844 			    ARCMSR_SIGNATURE_SET_CONFIG);
5845 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5846 			    &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5847 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5848 			    &phbcmu->inbound_msgaddr0,
5849 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5850 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5851 			    &phbcmu->inbound_doorbell,
5852 			    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
5853 			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
5854 				arcmsr_warn(acb, "'set ccb "
5855 				    "high part physical address' timeout");
5856 				return (FALSE);
5857 			}
5858 		}
5859 		break;
5860 	}
5861 	return (TRUE);
5862 }
5863 
5864 
5865 /*
5866  * ONLY used for Adapter type B
5867  */
5868 static void
5869 arcmsr_enable_eoi_mode(struct ACB *acb)
5870 {
5871 	struct HBB_msgUnit *phbbmu;
5872 
5873 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5874 
5875 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5876 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5877 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5878 
5879 	if (!arcmsr_hbb_wait_msgint_ready(acb))
5880 		arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5881 }
5882 
5883 /* initialize the IOP and start background rebuild */
5884 static void
5885 arcmsr_iop_init(struct ACB *acb)
5886 {
5887 	uint32_t intmask_org;
5888 
5889 	/* disable all outbound interrupt */
5890 	intmask_org = arcmsr_disable_allintr(acb);
5891 	arcmsr_wait_firmware_ready(acb);
5892 	(void) arcmsr_iop_confirm(acb);
5893 
5894 	/* start background rebuild */
5895 	switch (acb->adapter_type) {
5896 	case ACB_ADAPTER_TYPE_A:
5897 		arcmsr_get_hba_config(acb);
5898 		arcmsr_start_hba_bgrb(acb);
5899 		break;
5900 	case ACB_ADAPTER_TYPE_B:
5901 		arcmsr_get_hbb_config(acb);
5902 		arcmsr_start_hbb_bgrb(acb);
5903 		break;
5904 	case ACB_ADAPTER_TYPE_C:
5905 		arcmsr_get_hbc_config(acb);
5906 		arcmsr_start_hbc_bgrb(acb);
5907 		break;
5908 	}
5909 	/* empty doorbell Qbuffer if door bell rang */
5910 	arcmsr_clear_doorbell_queue_buffer(acb);
5911 
5912 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5913 		arcmsr_enable_eoi_mode(acb);
5914 
5915 	/* enable outbound Post Queue, outbound doorbell Interrupt */
5916 	arcmsr_enable_allintr(acb, intmask_org);
5917 	acb->acb_flags |= ACB_F_IOP_INITED;
5918 }
5919