xref: /illumos-gate/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  *       O.S   : Solaris
3  *  FILE NAME  : arcmsr.c
4  *       BY    : Erich Chen, C.L. Huang
5  *  Description: SCSI RAID Device Driver for
6  *               ARECA RAID Host adapter
7  *
8  *  Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
9  *  Copyright (C) 2002,2010 Erich Chen
10  *	    Web site: www.areca.com.tw
11  *	      E-mail: erich@areca.com.tw; ching2048@areca.com.tw
12  *
13  *	Redistribution and use in source and binary forms, with or without
14  *	modification, are permitted provided that the following conditions
15  *	are met:
16  *	1. Redistributions of source code must retain the above copyright
17  *	   notice, this list of conditions and the following disclaimer.
18  *	2. Redistributions in binary form must reproduce the above copyright
19  *	   notice, this list of conditions and the following disclaimer in the
20  *	   documentation and/or other materials provided with the distribution.
21  *  3. The party using or redistributing the source code and binary forms
22  *     agrees to the disclaimer below and the terms and conditions set forth
23  *     herein.
24  *
25  *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  *  SUCH DAMAGE.
36  *
37  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
38  * Use is subject to license terms.
39  *
40  */
41 /*
42  * This file and its contents are supplied under the terms of the
43  * Common Development and Distribution License ("CDDL"), version 1.0.
44  * You may only use this file in accordance with the terms of version
45  * 1.0 of the CDDL.
46  *
47  * A full copy of the text of the CDDL should have accompanied this
48  * source.  A copy of the CDDL is also available via the Internet at
49  * http://www.illumos.org/license/CDDL.
50  */
51 /*
52  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
53  * Copyright 2023 Oxide Computer Company
54  */
55 #include <sys/types.h>
56 #include <sys/ddidmareq.h>
57 #include <sys/scsi/scsi.h>
58 #include <sys/ddi.h>
59 #include <sys/sunddi.h>
60 #include <sys/file.h>
61 #include <sys/disp.h>
62 #include <sys/signal.h>
63 #include <sys/debug.h>
64 #include <sys/pci.h>
65 #include <sys/policy.h>
66 #include <sys/atomic.h>
67 #include "arcmsr.h"
68 
69 static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
70 static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
71     int mode, cred_t *credp, int *rvalp);
72 static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
73 static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
74 static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
75 static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
76 static int arcmsr_tran_reset(struct scsi_address *ap, int level);
77 static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
78 static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
79     int whom);
80 static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
81     dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
82     struct scsi_device *sd);
83 static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
84 static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
85     struct scsi_pkt *pkt);
86 static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
87     struct scsi_pkt *pkt);
88 static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
89     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
90     int tgtlen, int flags, int (*callback)(), caddr_t arg);
91 static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
92     dev_info_t **dipp);
93 
94 static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
95     dev_info_t **ldip);
96 static uint8_t arcmsr_abort_host_command(struct ACB *acb);
97 static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
98 static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
99 static int arcmsr_initialize(struct ACB *acb);
100 static int arcmsr_dma_alloc(struct ACB *acb,
101     struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
102 static int arcmsr_dma_move(struct ACB *acb,
103     struct scsi_pkt *pkt, struct buf *bp);
104 static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
105 static void arcmsr_hbc_message_isr(struct ACB *acb);
106 static void arcmsr_pcidev_disattach(struct ACB *acb);
107 static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
108 static void arcmsr_iop_init(struct ACB *acb);
109 static void arcmsr_iop_parking(struct ACB *acb);
110 /*PRINTFLIKE3*/
111 static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
112 /*PRINTFLIKE2*/
113 static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
114 static void arcmsr_mutex_init(struct ACB *acb);
115 static void arcmsr_remove_intr(struct ACB *acb);
116 static void arcmsr_ccbs_timeout(void* arg);
117 static void arcmsr_devMap_monitor(void* arg);
118 static void arcmsr_pcidev_disattach(struct ACB *acb);
119 static void arcmsr_iop_message_read(struct ACB *acb);
120 static void arcmsr_free_ccb(struct CCB *ccb);
121 static void arcmsr_post_ioctldata2iop(struct ACB *acb);
122 static void arcmsr_report_sense_info(struct CCB *ccb);
123 static void arcmsr_init_list_head(struct list_head *list);
124 static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
125 static void arcmsr_done4abort_postqueue(struct ACB *acb);
126 static void arcmsr_list_add_tail(kmutex_t *list_lock,
127     struct list_head *new_one, struct list_head *head);
128 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
129 static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
130 static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
131 static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
132 static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
133 static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
134 static uint8_t arcmsr_iop_reset(struct ACB *acb);
135 static uint32_t arcmsr_disable_allintr(struct ACB *acb);
136 static uint32_t arcmsr_iop_confirm(struct ACB *acb);
137 static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
138 static void arcmsr_flush_hba_cache(struct ACB *acb);
139 static void arcmsr_flush_hbb_cache(struct ACB *acb);
140 static void arcmsr_flush_hbc_cache(struct ACB *acb);
141 static void arcmsr_stop_hba_bgrb(struct ACB *acb);
142 static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
143 static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
144 static void arcmsr_start_hba_bgrb(struct ACB *acb);
145 static void arcmsr_start_hbb_bgrb(struct ACB *acb);
146 static void arcmsr_start_hbc_bgrb(struct ACB *acb);
147 static void arcmsr_mutex_destroy(struct ACB *acb);
148 static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
149 static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
150 static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
151 static void arcmsr_build_ccb(struct CCB *ccb);
152 static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
153     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
154 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
155 static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
156     uint8_t lun);
157 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);
158 
159 static int arcmsr_add_intr(struct ACB *, int);
160 
161 static void *arcmsr_soft_state = NULL;
162 
163 static ddi_dma_attr_t arcmsr_dma_attr = {
164 	DMA_ATTR_V0,		/* ddi_dma_attr version */
165 	0,			/* low DMA address range */
166 	0xffffffffffffffffull,	/* high DMA address range */
167 	0x00ffffff,		/* DMA counter register upper bound */
168 	1,			/* DMA address alignment requirements */
169 	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* burst sizes */
170 	1,			/* minimum effective DMA size */
171 	ARCMSR_MAX_XFER_LEN,	/* maximum DMA xfer size */
172 	/*
173 	 * The dma_attr_seg field supplies the limit of each Scatter/Gather
174 	 * list element's "address+length". The Intel IOP331 can not use
175 	 * segments over the 4G boundary due to segment boundary restrictions
176 	 */
177 	0xffffffff,
178 	ARCMSR_MAX_SG_ENTRIES,	/* scatter/gather list count */
179 	1,			/* device granularity */
180 	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
181 };
182 
183 
184 static ddi_dma_attr_t arcmsr_ccb_attr = {
185 	DMA_ATTR_V0,	/* ddi_dma_attr version */
186 	0,		/* low DMA address range */
187 	0xffffffff,	/* high DMA address range */
188 	0x00ffffff,	/* DMA counter register upper bound */
189 	1,		/* default byte alignment */
190 	DEFAULT_BURSTSIZE | BURST32 | BURST64,   /* burst sizes */
191 	1,		/* minimum effective DMA size */
192 	0xffffffff,	/* maximum DMA xfer size */
193 	0x00ffffff,	/* max segment size, segment boundary restrictions */
194 	1,		/* scatter/gather list count */
195 	1,		/* device granularity */
196 	DDI_DMA_FORCE_PHYSICAL	/* Bus specific DMA flags */
197 };
198 
199 
200 static struct cb_ops arcmsr_cb_ops = {
201 	scsi_hba_open,		/* open(9E) */
202 	scsi_hba_close,		/* close(9E) */
203 	nodev,			/* strategy(9E), returns ENXIO */
204 	nodev,			/* print(9E) */
205 	nodev,			/* dump(9E) Cannot be used as a dump device */
206 	nodev,			/* read(9E) */
207 	nodev,			/* write(9E) */
208 	arcmsr_cb_ioctl,	/* ioctl(9E) */
209 	nodev,			/* devmap(9E) */
210 	nodev,			/* mmap(9E) */
211 	nodev,			/* segmap(9E) */
212 	NULL,			/* chpoll(9E) returns ENXIO */
213 	nodev,			/* prop_op(9E) */
214 	NULL,			/* streamtab(9S) */
215 	D_MP,
216 	CB_REV,
217 	nodev,			/* aread(9E) */
218 	nodev			/* awrite(9E) */
219 };
220 
221 static struct dev_ops arcmsr_ops = {
222 	DEVO_REV,		/* devo_rev */
223 	0,			/* reference count */
224 	nodev,			/* getinfo */
225 	nulldev,		/* identify */
226 	nulldev,		/* probe */
227 	arcmsr_attach,		/* attach */
228 	arcmsr_detach,		/* detach */
229 	arcmsr_reset,		/* reset, shutdown, reboot notify */
230 	&arcmsr_cb_ops,		/* driver operations */
231 	NULL,			/* bus operations */
232 	NULL			/* power */
233 };
234 
235 static struct modldrv arcmsr_modldrv = {
236 	&mod_driverops,			/* Type of module. This is a driver. */
237 	"ARECA RAID Controller",	/* module name, from arcmsr.h */
238 	&arcmsr_ops,			/* driver ops */
239 };
240 
241 static struct modlinkage arcmsr_modlinkage = {
242 	MODREV_1,
243 	&arcmsr_modldrv,
244 	NULL
245 };
246 
247 
248 int
249 _init(void)
250 {
251 	int ret;
252 
253 	ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
254 	if (ret != 0) {
255 		return (ret);
256 	}
257 	if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
258 		ddi_soft_state_fini(&arcmsr_soft_state);
259 		return (ret);
260 	}
261 
262 	if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
263 		scsi_hba_fini(&arcmsr_modlinkage);
264 		if (arcmsr_soft_state != NULL) {
265 			ddi_soft_state_fini(&arcmsr_soft_state);
266 		}
267 	}
268 	return (ret);
269 }
270 
271 
272 int
273 _fini(void)
274 {
275 	int ret;
276 
277 	ret = mod_remove(&arcmsr_modlinkage);
278 	if (ret == 0) {
279 		/* if ret == 0, the driver can be removed */
280 		scsi_hba_fini(&arcmsr_modlinkage);
281 		if (arcmsr_soft_state != NULL) {
282 			ddi_soft_state_fini(&arcmsr_soft_state);
283 		}
284 	}
285 	return (ret);
286 }
287 
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&arcmsr_modlinkage, modinfop));
293 }
294 
295 
296 /*
297  *      Function: arcmsr_attach(9E)
298  *   Description: Set up all device state and allocate data structures,
299  *		  mutexes, condition variables, etc. for device operation.
300  *		  Set mt_attr property for driver to indicate MT-safety.
301  *		  Add interrupts needed.
302  *         Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
303  *        Output: Return DDI_SUCCESS if device is ready,
304  *		          else return DDI_FAILURE
305  */
306 static int
307 arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
308 {
309 	scsi_hba_tran_t *hba_trans;
310 	struct ACB *acb;
311 
312 	switch (cmd) {
313 	case DDI_ATTACH:
314 		return (arcmsr_do_ddi_attach(dev_info,
315 		    ddi_get_instance(dev_info)));
316 	case DDI_RESUME:
317 		/*
318 		 * There is no hardware state to restore and no
319 		 * timeouts to restart, since we didn't DDI_SUSPEND with
320 		 * active cmds or active timeouts.  We just need to
321 		 * unblock waiting threads and restart I/O.
322 		 */
323 		hba_trans = ddi_get_driver_private(dev_info);
324 		if (hba_trans == NULL) {
325 			return (DDI_FAILURE);
326 		}
327 		acb = hba_trans->tran_hba_private;
328 		mutex_enter(&acb->acb_mutex);
329 		arcmsr_iop_init(acb);
330 
331 		/* restart ccbs "timeout" watchdog */
332 		acb->timeout_count = 0;
333 		acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
334 		    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
335 		acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
336 		    (caddr_t)acb,
337 		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
338 		mutex_exit(&acb->acb_mutex);
339 		return (DDI_SUCCESS);
340 
341 	default:
342 		return (DDI_FAILURE);
343 	}
344 }
345 
346 /*
347  *    Function:	arcmsr_detach(9E)
348  * Description: Remove all device allocation and system resources, disable
349  *		        device interrupt.
350  *       Input: dev_info_t *dev_info
351  *		        ddi_detach_cmd_t cmd
352  *      Output:	Return DDI_SUCCESS if done,
353  *		        else return DDI_FAILURE
354  */
355 static int
356 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd)
357 {
358 	int instance;
359 	struct ACB *acb;
360 
361 	instance = ddi_get_instance(dev_info);
362 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
363 	if (acb == NULL)
364 		return (DDI_FAILURE);
365 
366 	switch (cmd) {
367 	case DDI_DETACH:
368 		mutex_enter(&acb->acb_mutex);
369 		if (acb->timeout_id != 0) {
370 			mutex_exit(&acb->acb_mutex);
371 			(void) untimeout(acb->timeout_id);
372 			mutex_enter(&acb->acb_mutex);
373 			acb->timeout_id = 0;
374 		}
375 		if (acb->timeout_sc_id != 0) {
376 			mutex_exit(&acb->acb_mutex);
377 			(void) untimeout(acb->timeout_sc_id);
378 			mutex_enter(&acb->acb_mutex);
379 			acb->timeout_sc_id = 0;
380 		}
381 		arcmsr_pcidev_disattach(acb);
382 		/* Remove interrupt set up by ddi_add_intr */
383 		arcmsr_remove_intr(acb);
384 		/* unbind mapping object to handle */
385 		(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
386 		/* Free ccb pool memory */
387 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
388 		/* Free DMA handle */
389 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
390 		ddi_regs_map_free(&acb->reg_mu_acc_handle0);
391 		if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
392 			arcmsr_warn(acb, "Unable to detach instance cleanly "
393 			    "(should not happen)");
394 		/* free scsi_hba_transport from scsi_hba_tran_alloc */
395 		scsi_hba_tran_free(acb->scsi_hba_transport);
396 		ddi_taskq_destroy(acb->taskq);
397 		ddi_prop_remove_all(dev_info);
398 		mutex_exit(&acb->acb_mutex);
399 		arcmsr_mutex_destroy(acb);
400 		pci_config_teardown(&acb->pci_acc_handle);
401 		ddi_set_driver_private(dev_info, NULL);
402 		ddi_soft_state_free(arcmsr_soft_state, instance);
403 		return (DDI_SUCCESS);
404 	case DDI_SUSPEND:
405 		mutex_enter(&acb->acb_mutex);
406 		if (acb->timeout_id != 0) {
407 			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
408 			mutex_exit(&acb->acb_mutex);
409 			(void) untimeout(acb->timeout_id);
410 			(void) untimeout(acb->timeout_sc_id);
411 			mutex_enter(&acb->acb_mutex);
412 			acb->timeout_id = 0;
413 		}
414 
415 		if (acb->timeout_sc_id != 0) {
416 			acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
417 			mutex_exit(&acb->acb_mutex);
418 			(void) untimeout(acb->timeout_sc_id);
419 			mutex_enter(&acb->acb_mutex);
420 			acb->timeout_sc_id = 0;
421 		}
422 
423 		/* disable all outbound interrupt */
424 		(void) arcmsr_disable_allintr(acb);
425 		/* stop adapter background rebuild */
426 		switch (acb->adapter_type) {
427 		case ACB_ADAPTER_TYPE_A:
428 			arcmsr_stop_hba_bgrb(acb);
429 			arcmsr_flush_hba_cache(acb);
430 			break;
431 
432 		case ACB_ADAPTER_TYPE_B:
433 			arcmsr_stop_hbb_bgrb(acb);
434 			arcmsr_flush_hbb_cache(acb);
435 			break;
436 
437 		case ACB_ADAPTER_TYPE_C:
438 			arcmsr_stop_hbc_bgrb(acb);
439 			arcmsr_flush_hbc_cache(acb);
440 			break;
441 		}
442 		mutex_exit(&acb->acb_mutex);
443 		return (DDI_SUCCESS);
444 	default:
445 		return (DDI_FAILURE);
446 	}
447 }
448 
449 static int
450 arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
451 {
452 	struct ACB *acb;
453 	scsi_hba_tran_t *scsi_hba_transport;
454 	_NOTE(ARGUNUSED(cmd));
455 
456 	scsi_hba_transport = ddi_get_driver_private(resetdev);
457 	if (scsi_hba_transport == NULL)
458 		return (DDI_FAILURE);
459 
460 	acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
461 	if (!acb)
462 		return (DDI_FAILURE);
463 
464 	arcmsr_pcidev_disattach(acb);
465 
466 	return (DDI_SUCCESS);
467 }
468 
469 static int
470 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
471     cred_t *credp, int *rvalp)
472 {
473 	struct ACB *acb;
474 	struct CMD_MESSAGE_FIELD *pktioctlfld;
475 	int retvalue = 0;
476 	int instance = MINOR2INST(getminor(dev));
477 
478 	if (instance < 0)
479 		return (ENXIO);
480 
481 	if (secpolicy_sys_config(credp, B_FALSE) != 0)
482 		return (EPERM);
483 
484 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
485 	if (acb == NULL)
486 		return (ENXIO);
487 
488 	pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);
489 
490 	mutex_enter(&acb->ioctl_mutex);
491 	if (ddi_copyin((void *)arg, pktioctlfld,
492 	    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
493 		retvalue = ENXIO;
494 		goto ioctl_out;
495 	}
496 
497 	if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
498 		/* validity check */
499 		retvalue = ENXIO;
500 		goto ioctl_out;
501 	}
502 
503 	switch ((unsigned int)ioctl_cmd) {
504 	case ARCMSR_MESSAGE_READ_RQBUFFER:
505 	{
506 		uint8_t *ver_addr;
507 		uint8_t *pQbuffer, *ptmpQbuffer;
508 		int32_t allxfer_len = 0;
509 
510 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
511 		ptmpQbuffer = ver_addr;
512 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
513 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
514 			/* copy READ QBUFFER to srb */
515 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
516 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
517 			acb->rqbuf_firstidx++;
518 			/* if at last index, wrap it to 0 */
519 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
520 			ptmpQbuffer++;
521 			allxfer_len++;
522 		}
523 
524 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
525 			struct QBUFFER *prbuffer;
526 			uint8_t *pQbuffer;
527 			uint8_t *iop_data;
528 			int32_t iop_len;
529 
530 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
531 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
532 			iop_data = (uint8_t *)prbuffer->data;
533 			iop_len = (int32_t)prbuffer->data_len;
534 			/*
535 			 * this IOP data cannot make the buffer overflow
536 			 * again here, so just copy it in
537 			 */
538 			while (iop_len > 0) {
539 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
540 				(void) memcpy(pQbuffer, iop_data, 1);
541 				acb->rqbuf_lastidx++;
542 				/* if at last index, wrap it to 0 */
543 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
544 				iop_data++;
545 				iop_len--;
546 			}
547 			/* let IOP know data has been read */
548 			arcmsr_iop_message_read(acb);
549 		}
550 		(void) memcpy(pktioctlfld->messagedatabuffer,
551 		    ver_addr, allxfer_len);
552 		pktioctlfld->cmdmessage.Length = allxfer_len;
553 		pktioctlfld->cmdmessage.ReturnCode =
554 		    ARCMSR_MESSAGE_RETURNCODE_OK;
555 
556 		if (ddi_copyout(pktioctlfld, (void *)arg,
557 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
558 			retvalue = ENXIO;
559 
560 		kmem_free(ver_addr, MSGDATABUFLEN);
561 		break;
562 	}
563 
564 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
565 	{
566 		uint8_t *ver_addr;
567 		int32_t my_empty_len, user_len;
568 		int32_t wqbuf_firstidx, wqbuf_lastidx;
569 		uint8_t *pQbuffer, *ptmpuserbuffer;
570 
571 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
572 
573 		ptmpuserbuffer = ver_addr;
574 		user_len = min(pktioctlfld->cmdmessage.Length,
575 		    MSGDATABUFLEN);
576 		(void) memcpy(ptmpuserbuffer,
577 		    pktioctlfld->messagedatabuffer, user_len);
578 		/*
579 		 * check if the data xfer length of this request would
580 		 * overflow the qbuffer array
581 		 */
582 		wqbuf_lastidx = acb->wqbuf_lastidx;
583 		wqbuf_firstidx = acb->wqbuf_firstidx;
584 		if (wqbuf_lastidx != wqbuf_firstidx) {
585 			arcmsr_post_ioctldata2iop(acb);
586 			pktioctlfld->cmdmessage.ReturnCode =
587 			    ARCMSR_MESSAGE_RETURNCODE_ERROR;
588 		} else {
589 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
590 			    & (ARCMSR_MAX_QBUFFER - 1);
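			/*
			 * Free-space calculation for the circular wqbuffer:
			 * masking the index difference with
			 * (ARCMSR_MAX_QBUFFER - 1) assumes the buffer size
			 * is a power of two, and one slot is kept unused so
			 * a full buffer can be told apart from an empty one.
			 */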
591 			if (my_empty_len >= user_len) {
592 				while (user_len > 0) {
593 					/* copy srb data to wqbuffer */
594 					pQbuffer =
595 					    &acb->wqbuffer[acb->wqbuf_lastidx];
596 					(void) memcpy(pQbuffer,
597 					    ptmpuserbuffer, 1);
598 					acb->wqbuf_lastidx++;
599 					/* if at last index, wrap it to 0 */
600 					acb->wqbuf_lastidx %=
601 					    ARCMSR_MAX_QBUFFER;
602 					ptmpuserbuffer++;
603 					user_len--;
604 				}
605 				/* post first Qbuffer */
606 				if (acb->acb_flags &
607 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
608 					acb->acb_flags &=
609 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
610 					arcmsr_post_ioctldata2iop(acb);
611 				}
612 				pktioctlfld->cmdmessage.ReturnCode =
613 				    ARCMSR_MESSAGE_RETURNCODE_OK;
614 			} else {
615 				pktioctlfld->cmdmessage.ReturnCode =
616 				    ARCMSR_MESSAGE_RETURNCODE_ERROR;
617 			}
618 		}
619 		if (ddi_copyout(pktioctlfld, (void *)arg,
620 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
621 			retvalue = ENXIO;
622 
623 		kmem_free(ver_addr, MSGDATABUFLEN);
624 		break;
625 	}
626 
627 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
628 	{
629 		uint8_t *pQbuffer = acb->rqbuffer;
630 
631 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
632 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
633 			arcmsr_iop_message_read(acb);
634 		}
635 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
636 		acb->rqbuf_firstidx = 0;
637 		acb->rqbuf_lastidx = 0;
638 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
639 		/* report success */
640 		pktioctlfld->cmdmessage.ReturnCode =
641 		    ARCMSR_MESSAGE_RETURNCODE_OK;
642 
643 		if (ddi_copyout(pktioctlfld, (void *)arg,
644 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
645 			retvalue = ENXIO;
646 		break;
647 	}
648 
649 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
650 	{
651 		uint8_t *pQbuffer = acb->wqbuffer;
652 
653 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
654 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
655 			arcmsr_iop_message_read(acb);
656 		}
657 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
658 		    ACB_F_MESSAGE_WQBUFFER_READ);
659 		acb->wqbuf_firstidx = 0;
660 		acb->wqbuf_lastidx = 0;
661 		bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
662 		/* report success */
663 		pktioctlfld->cmdmessage.ReturnCode =
664 		    ARCMSR_MESSAGE_RETURNCODE_OK;
665 
666 		if (ddi_copyout(pktioctlfld, (void *)arg,
667 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
668 			retvalue = ENXIO;
669 		break;
670 	}
671 
672 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
673 	{
674 		uint8_t *pQbuffer;
675 
676 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
677 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
678 			arcmsr_iop_message_read(acb);
679 		}
680 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
681 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
682 		    ACB_F_MESSAGE_WQBUFFER_READ);
683 		acb->rqbuf_firstidx = 0;
684 		acb->rqbuf_lastidx = 0;
685 		acb->wqbuf_firstidx = 0;
686 		acb->wqbuf_lastidx = 0;
687 		pQbuffer = acb->rqbuffer;
688 		bzero(pQbuffer, sizeof (struct QBUFFER));
689 		pQbuffer = acb->wqbuffer;
690 		bzero(pQbuffer, sizeof (struct QBUFFER));
691 		/* report success */
692 		pktioctlfld->cmdmessage.ReturnCode =
693 		    ARCMSR_MESSAGE_RETURNCODE_OK;
694 		if (ddi_copyout(pktioctlfld, (void *)arg,
695 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
696 			retvalue = ENXIO;
697 		break;
698 	}
699 
700 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
701 		pktioctlfld->cmdmessage.ReturnCode =
702 		    ARCMSR_MESSAGE_RETURNCODE_3F;
703 		if (ddi_copyout(pktioctlfld, (void *)arg,
704 		    sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
705 			retvalue = ENXIO;
706 		break;
707 
708 	/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
709 	case ARCMSR_MESSAGE_SAY_GOODBYE:
710 		arcmsr_iop_parking(acb);
711 		break;
712 
713 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
714 		switch (acb->adapter_type) {
715 		case ACB_ADAPTER_TYPE_A:
716 			arcmsr_flush_hba_cache(acb);
717 			break;
718 		case ACB_ADAPTER_TYPE_B:
719 			arcmsr_flush_hbb_cache(acb);
720 			break;
721 		case ACB_ADAPTER_TYPE_C:
722 			arcmsr_flush_hbc_cache(acb);
723 			break;
724 		}
725 		break;
726 
727 	default:
728 		mutex_exit(&acb->ioctl_mutex);
729 		kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
730 		return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
731 		    rvalp));
732 	}
733 
734 ioctl_out:
735 	kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
736 	mutex_exit(&acb->ioctl_mutex);
737 
738 	return (retvalue);
739 }
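
/*
 * Hypothetical user-space sketch (not part of the driver): a management
 * tool exchanges CMD_MESSAGE_FIELD buffers with the ioctls handled above,
 * for example to drain the IOP's request queue buffer:
 *
 *	struct CMD_MESSAGE_FIELD msg;
 *
 *	bzero(&msg, sizeof (msg));
 *	bcopy("ARCMSR", msg.cmdmessage.Signature, 6);
 *	if (ioctl(fd, ARCMSR_MESSAGE_READ_RQBUFFER, &msg) == 0 &&
 *	    msg.cmdmessage.ReturnCode == ARCMSR_MESSAGE_RETURNCODE_OK)
 *		(void) write(STDOUT_FILENO, msg.messagedatabuffer,
 *		    msg.cmdmessage.Length);
 *
 * The signature check and field names match the handler above; fd, error
 * handling, and the device path are left out.
 */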
740 
741 
742 /*
743  *    Function:	arcmsr_tran_tgt_init
744  * Description: Called when initializing a target device instance. If
745  *		        no per-target initialization is required, the HBA
746  *		        may leave tran_tgt_init to NULL
747  *       Input:
748  *		        dev_info_t *host_dev_info,
749  *		        dev_info_t *target_dev_info,
750  *		        scsi_hba_tran_t *tran,
751  *		        struct scsi_device *sd
752  *
753  *      Return: DDI_SUCCESS if success, else return DDI_FAILURE
754  *
755  *  entry point enables the HBA to allocate and/or initialize any per-
756  *  target resources.
757  *  It also enables the HBA to qualify the device's address as valid and
758  *  supportable for that particular HBA.
759  *  By returning DDI_FAILURE, the instance of the target driver for that
760  *  device will not be probed or attached.
761  *	This entry point is not required, and if none is supplied,
762  *  the framework will attempt to probe and attach all possible instances
763  *  of the appropriate target drivers.
764  */
765 static int
766 arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
767     scsi_hba_tran_t *tran, struct scsi_device *sd)
768 {
769 	uint16_t  target;
770 	uint8_t  lun;
771 	struct ACB *acb = tran->tran_hba_private;
772 
773 	_NOTE(ARGUNUSED(tran, target_dev_info, host_dev_info))
774 
775 	target = sd->sd_address.a_target;
776 	lun = sd->sd_address.a_lun;
777 	if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
778 		return (DDI_FAILURE);
779 	}
780 
781 
782 	if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
783 		/*
784 		 * If no persistent node exist, we don't allow .conf node
785 		 * If no persistent node exists, we don't allow a .conf node
786 		 */
787 		if (arcmsr_find_child(acb, target, lun) != NULL) {
788 			if ((ndi_merge_node(target_dev_info,
789 			    arcmsr_name_node) != DDI_SUCCESS)) {
790 				return (DDI_SUCCESS);
791 			}
792 		}
793 		return (DDI_FAILURE);
794 	}
795 
796 	return (DDI_SUCCESS);
797 }
798 
799 /*
800  *         Function: arcmsr_tran_getcap(9E)
801  *      Description: Get the named capability and return its value.
802  *    Return Values: current value of the capability, if defined
803  *		             -1 if the capability is not defined
804  * ------------------------------------------------------
805  *         Common Capability Strings Array
806  * ------------------------------------------------------
807  *	#define	SCSI_CAP_DMA_MAX		0
808  *	#define	SCSI_CAP_MSG_OUT		1
809  *	#define	SCSI_CAP_DISCONNECT		2
810  *	#define	SCSI_CAP_SYNCHRONOUS		3
811  *	#define	SCSI_CAP_WIDE_XFER		4
812  *	#define	SCSI_CAP_PARITY			5
813  *	#define	SCSI_CAP_INITIATOR_ID		6
814  *	#define	SCSI_CAP_UNTAGGED_QING		7
815  *	#define	SCSI_CAP_TAGGED_QING		8
816  *	#define	SCSI_CAP_ARQ			9
817  *	#define	SCSI_CAP_LINKED_CMDS		10 a
818  *	#define	SCSI_CAP_SECTOR_SIZE		11 b
819  *	#define	SCSI_CAP_TOTAL_SECTORS		12 c
820  *	#define	SCSI_CAP_GEOMETRY		13 d
821  *	#define	SCSI_CAP_RESET_NOTIFICATION	14 e
822  *	#define	SCSI_CAP_QFULL_RETRIES		15 f
823  *	#define	SCSI_CAP_QFULL_RETRY_INTERVAL	16 10
824  *	#define	SCSI_CAP_SCSI_VERSION		17 11
825  *	#define	SCSI_CAP_INTERCONNECT_TYPE	18 12
826  *	#define	SCSI_CAP_LUN_RESET		19 13
827  */
828 static int
829 arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
830 {
831 	int capability = 0;
832 	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
833 
834 	if (cap == NULL || whom == 0) {
835 		return (DDI_FAILURE);
836 	}
837 
838 	mutex_enter(&acb->acb_mutex);
839 	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
840 		mutex_exit(&acb->acb_mutex);
841 		return (-1);
842 	}
843 	switch (scsi_hba_lookup_capstr(cap)) {
844 	case SCSI_CAP_MSG_OUT:
845 	case SCSI_CAP_DISCONNECT:
846 	case SCSI_CAP_WIDE_XFER:
847 	case SCSI_CAP_TAGGED_QING:
848 	case SCSI_CAP_UNTAGGED_QING:
849 	case SCSI_CAP_PARITY:
850 	case SCSI_CAP_ARQ:
851 		capability = 1;
852 		break;
853 	case SCSI_CAP_SECTOR_SIZE:
854 		capability = ARCMSR_DEV_SECTOR_SIZE;
855 		break;
856 	case SCSI_CAP_DMA_MAX:
857 		/* Limit to 16MB max transfer */
858 		capability = ARCMSR_MAX_XFER_LEN;
859 		break;
860 	case SCSI_CAP_INITIATOR_ID:
861 		capability = ARCMSR_SCSI_INITIATOR_ID;
862 		break;
863 	case SCSI_CAP_GEOMETRY:
864 		/* 255 heads, 63 sectors per track */
865 		capability = (255 << 16) | 63;
866 		break;
867 	default:
868 		capability = -1;
869 		break;
870 	}
871 	mutex_exit(&acb->acb_mutex);
872 	return (capability);
873 }
874 
875 /*
876  *      Function: arcmsr_tran_setcap(9E)
877  *   Description: Set the specific capability.
878  * Return Values: 1 - capability exists and can be set to new value
879  *		          0 - capability could not be set to new value
880  *		         -1 - no such capability
881  */
882 static int
883 arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
884 {
885 	_NOTE(ARGUNUSED(value))
886 
887 	int supported = 0;
888 	struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
889 
890 	if (cap == NULL || whom == 0) {
891 		return (-1);
892 	}
893 
894 	mutex_enter(&acb->acb_mutex);
895 	if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
896 		mutex_exit(&acb->acb_mutex);
897 		return (-1);
898 	}
899 	switch (supported = scsi_hba_lookup_capstr(cap)) {
900 	case SCSI_CAP_ARQ:			/* 9 auto request sense */
901 	case SCSI_CAP_UNTAGGED_QING:		/* 7 */
902 	case SCSI_CAP_TAGGED_QING:		/* 8 */
903 		/* these are always on, and cannot be turned off */
904 		supported = (value == 1) ? 1 : 0;
905 		break;
906 	case SCSI_CAP_TOTAL_SECTORS:		/* c */
907 		supported = 1;
908 		break;
909 	case SCSI_CAP_DISCONNECT:		/* 2 */
910 	case SCSI_CAP_WIDE_XFER:		/* 4 */
911 	case SCSI_CAP_INITIATOR_ID:		/* 6 */
912 	case SCSI_CAP_DMA_MAX:			/* 0 */
913 	case SCSI_CAP_MSG_OUT:			/* 1 */
914 	case SCSI_CAP_PARITY:			/* 5 */
915 	case SCSI_CAP_LINKED_CMDS:		/* a */
916 	case SCSI_CAP_RESET_NOTIFICATION:	/* e */
917 	case SCSI_CAP_SECTOR_SIZE:		/* b */
918 		/* these are not settable */
919 		supported = 0;
920 		break;
921 	default:
922 		supported = -1;
923 		break;
924 	}
925 	mutex_exit(&acb->acb_mutex);
926 	return (supported);
927 }
928 
929 
930 /*
931  *      Function: arcmsr_tran_init_pkt
932  * Return Values: pointer to scsi_pkt, or NULL
933  *   Description: simultaneously allocate both a scsi_pkt(9S) structure and
934  *                DMA resources for that pkt.
935  *                Called by kernel on behalf of a target driver
936  *		          calling scsi_init_pkt(9F).
937  *		          Refer to tran_init_pkt(9E) man page
938  *       Context: Can be called from different kernel process threads.
939  *		          Can be called by interrupt thread.
940  * Allocates SCSI packet and DMA resources
941  */
942 static struct
943 scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
944     register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
945     int tgtlen, int flags, int (*callback)(), caddr_t arg)
946 {
947 	struct CCB *ccb;
948 	struct ARCMSR_CDB *arcmsr_cdb;
949 	struct ACB *acb;
950 	int old_pkt_flag;
951 
952 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
953 
954 	if (acb->acb_flags & ACB_F_BUS_RESET) {
955 		return (NULL);
956 	}
957 	if (pkt == NULL) {
958 		/* get free CCB */
959 		(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
960 		    DDI_DMA_SYNC_FORKERNEL);
961 		ccb = arcmsr_get_freeccb(acb);
962 		if (ccb == (struct CCB *)NULL) {
963 			return (NULL);
964 		}
965 
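		/*
		 * ARQ is always reported as enabled (see
		 * arcmsr_tran_getcap), so make sure the status area is
		 * large enough to also hold the auto request sense data.
		 */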
966 		if (statuslen < sizeof (struct scsi_arq_status)) {
967 			statuslen = sizeof (struct scsi_arq_status);
968 		}
969 		pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
970 		    statuslen, tgtlen, sizeof (void *), callback, arg);
971 		if (pkt == NULL) {
972 			arcmsr_warn(acb, "scsi pkt allocation failed");
973 			arcmsr_free_ccb(ccb);
974 			return (NULL);
975 		}
976 		/* Initialize CCB */
977 		ccb->pkt = pkt;
978 		ccb->pkt_dma_handle = NULL;
979 		/* record how many sg are needed to xfer on this pkt */
980 		ccb->pkt_ncookies = 0;
981 		/* record how many sg we got from this window */
982 		ccb->pkt_cookie = 0;
983 		/* record how many windows have partial dma map set */
984 		ccb->pkt_nwin = 0;
985 		/* record current sg window position */
986 		ccb->pkt_curwin	= 0;
987 		ccb->pkt_dma_len = 0;
988 		ccb->pkt_dma_offset = 0;
989 		ccb->resid_dmacookie.dmac_size = 0;
990 
991 		/*
992 		 * keep this pointer around because we want to fake some
993 		 * information in tran_start
994 		 */
995 		ccb->bp = bp;
996 
997 		/* Initialize arcmsr_cdb */
998 		arcmsr_cdb = &ccb->arcmsr_cdb;
999 		bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
1000 		arcmsr_cdb->Bus = 0;
1001 		arcmsr_cdb->Function = 1;
1002 		arcmsr_cdb->LUN = ap->a_lun;
1003 		arcmsr_cdb->TargetID = ap->a_target;
1004 		arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
1005 		arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;
1006 
1007 		/* Fill in the rest of the structure */
1008 		pkt->pkt_ha_private = ccb;
1009 		pkt->pkt_address = *ap;
1010 		pkt->pkt_comp = NULL;
1011 		pkt->pkt_flags = 0;
1012 		pkt->pkt_time = 0;
1013 		pkt->pkt_resid = 0;
1014 		pkt->pkt_statistics = 0;
1015 		pkt->pkt_reason = 0;
1016 		old_pkt_flag = 0;
1017 	} else {
1018 		ccb = pkt->pkt_ha_private;
1019 		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1020 			if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
1021 				return (NULL);
1022 			}
1023 		}
1024 
1025 		/*
1026 		 * you cannot update CdbLength with cmdlen here, it would
1027 		 * cause a data compare error
1028 		 */
1029 		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
1030 		old_pkt_flag = 1;
1031 	}
1032 
1033 	/* Second step : dma allocation/move */
1034 	if (bp && bp->b_bcount != 0) {
1035 		/*
1036 		 * The system may have a large chunk of data to transfer,
1037 		 * anywhere from 20 bytes up to 819200 bytes.
1038 		 * arcmsr_dma_alloc sets up pkt_dma_handle (non-NULL), and
1039 		 * it stays valid until the whole chunk has been moved;
1040 		 * the transfer is carried out by a series of READ or WRITE
1041 		 * SCSI commands until all of the data has been moved.
1042 		 * arcmsr_dma_move repeats the operation, reusing the same
1043 		 * ccb, until the whole chunk has been transferred.
1044 		 * After arcmsr_tran_init_pkt returns, the solaris kernel
1045 		 * uses pkt_resid and b_bcount to decide which type of SCSI
1046 		 * command descriptor, and what data length, to issue in the
1047 		 * following arcmsr_tran_start scsi cdb.
1048 		 *
1049 		 * Each transfer should be aligned on a 512 byte boundary
1050 		 */
1051 		if (ccb->pkt_dma_handle == NULL) {
1052 			if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
1053 			    DDI_FAILURE) {
1054 				/*
1055 				 * the HBA driver is unable to allocate DMA
1056 				 * resources, it must free the allocated
1057 				 * scsi_pkt(9S) before returning
1058 				 */
1059 				arcmsr_warn(acb, "dma allocation failure");
1060 				if (old_pkt_flag == 0) {
1061 					arcmsr_warn(acb, "dma "
1062 					    "allocation failed to free "
1063 					    "scsi hba pkt");
1064 					arcmsr_free_ccb(ccb);
1065 					scsi_hba_pkt_free(ap, pkt);
1066 				}
1067 				return (NULL);
1068 			}
1069 		} else {
1070 			/* DMA resources to next DMA window, for old pkt */
1071 			if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
1072 				arcmsr_warn(acb, "dma move failed");
1073 				return (NULL);
1074 			}
1075 		}
1076 	} else {
1077 		pkt->pkt_resid = 0;
1078 	}
1079 	return (pkt);
1080 }
1081 
1082 /*
1083  *    Function: arcmsr_tran_start(9E)
1084  * Description: Transport the command in pktp to the target device.
1085  *		The command is not finished when this returns, only
1086  *		sent to the target; arcmsr_intr_handler will call
1087  *		scsi_hba_pkt_comp(pktp) when the target device is done.
1088  *
1089  *       Input: struct scsi_address *ap, struct scsi_pkt *pktp
1090  *      Output:	TRAN_ACCEPT if pkt is OK and the driver is not busy
1091  *		TRAN_BUSY if the driver is busy
1092  *		TRAN_BADPKT if pkt is invalid
1093  */
1094 static int
1095 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1096 {
1097 	struct ACB *acb;
1098 	struct CCB *ccb;
1099 	int target = ap->a_target;
1100 	int lun = ap->a_lun;
1101 
1102 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1103 	ccb = pkt->pkt_ha_private;
1104 	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1105 
1106 	if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
1107 	    (ccb->ccb_flags & DDI_DMA_CONSISTENT))
1108 		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1109 		    DDI_DMA_SYNC_FORDEV);
1110 
1111 	if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
1112 		arcmsr_build_ccb(ccb);
1113 
1114 	if (acb->acb_flags & ACB_F_BUS_RESET) {
1115 		pkt->pkt_reason = CMD_RESET;
1116 		pkt->pkt_statistics |= STAT_BUS_RESET;
1117 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1118 		    STATE_SENT_CMD | STATE_GOT_STATUS);
1119 		if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1120 		    (pkt->pkt_state & STATE_XFERRED_DATA))
1121 			(void) ddi_dma_sync(ccb->pkt_dma_handle,
1122 			    0, 0, DDI_DMA_SYNC_FORCPU);
1123 
1124 		scsi_hba_pkt_comp(pkt);
1125 		return (TRAN_ACCEPT);
1126 	}
1127 
1128 	/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
1129 	if (target == 16) {
1130 
1131 		struct buf *bp = ccb->bp;
1132 		uint8_t scsicmd = pkt->pkt_cdbp[0];
1133 
1134 		switch (scsicmd) {
1135 		case SCMD_INQUIRY: {
1136 			if (lun != 0) {
1137 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
1138 				ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1139 				arcmsr_ccb_complete(ccb, 0);
1140 				return (TRAN_ACCEPT);
1141 			}
1142 
1143 			if (bp && bp->b_un.b_addr && bp->b_bcount) {
1144 				uint8_t inqdata[36];
1145 
1146 				/* EVPD and page code are not supported */
1147 				if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
1148 					inqdata[1] = 0xFF;
1149 					inqdata[2] = 0x00;
1150 				} else {
1151 					/* Periph Qualifier & Periph Dev Type */
1152 					inqdata[0] = DTYPE_PROCESSOR;
1153 					/* rem media bit & Dev Type Modifier */
1154 					inqdata[1] = 0;
1155 					/* ISO, ECMA, & ANSI versions */
1156 					inqdata[2] = 0;
1157 					inqdata[3] = 0;
1158 					/* length of additional data */
1159 					inqdata[4] = 31;
1160 					/* Vendor Identification */
1161 					bcopy("Areca   ", &inqdata[8], VIDLEN);
1162 					/* Product Identification */
1163 					bcopy("RAID controller ", &inqdata[16],
1164 					    PIDLEN);
1165 					/* Product Revision */
1166 					bcopy("R001", &inqdata[32], REVLEN);
1167 					if (bp->b_flags & (B_PHYS | B_PAGEIO))
1168 						bp_mapin(bp);
1169 
1170 					(void) memcpy(bp->b_un.b_addr,
1171 					    inqdata, sizeof (inqdata));
1172 				}
1173 				ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1174 			}
1175 			arcmsr_ccb_complete(ccb, 0);
1176 			return (TRAN_ACCEPT);
1177 		}
1178 		case SCMD_WRITE_BUFFER:
1179 		case SCMD_READ_BUFFER: {
1180 			if (arcmsr_iop_message_xfer(acb, pkt)) {
1181 				/* error just for retry */
1182 				ccb->pkt->pkt_reason = CMD_TRAN_ERR;
1183 				ccb->pkt->pkt_statistics |= STAT_TERMINATED;
1184 			}
1185 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1186 			arcmsr_ccb_complete(ccb, 0);
1187 			return (TRAN_ACCEPT);
1188 		}
1189 		default:
1190 			ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1191 			arcmsr_ccb_complete(ccb, 0);
1192 			return (TRAN_ACCEPT);
1193 		}
1194 	}
1195 
1196 	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1197 		uint8_t block_cmd;
1198 
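			/*
			 * The low opcode nibble is used as a rough match for
			 * the READ (0x08, 0x28, 0x88, 0xa8) and WRITE (0x0a,
			 * 0x2a, 0x8a, 0xaa) command families.
			 */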
1199 		block_cmd = pkt->pkt_cdbp[0] & 0x0f;
1200 		if (block_cmd == 0x08 || block_cmd == 0x0a) {
1201 			pkt->pkt_reason = CMD_TIMEOUT;
1202 			pkt->pkt_statistics |= STAT_TIMEOUT;
1203 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1204 			    STATE_SENT_CMD | STATE_GOT_STATUS);
1205 			if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1206 			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1207 				(void) ddi_dma_sync(ccb->pkt_dma_handle,
1208 				    ccb->pkt_dma_offset,
1209 				    ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
1210 			}
1211 			scsi_hba_pkt_comp(pkt);
1212 			return (TRAN_ACCEPT);
1213 		}
1214 	}
1215 	mutex_enter(&acb->postq_mutex);
1216 	if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
1217 		ccb->ccb_state = ARCMSR_CCB_RETRY;
1218 		mutex_exit(&acb->postq_mutex);
1219 		return (TRAN_BUSY);
1220 	} else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
1221 		arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
1222 		    acb->ccboutstandingcount);
1223 		mutex_exit(&acb->postq_mutex);
1224 		return (TRAN_FATAL_ERROR);
1225 	}
1226 	mutex_exit(&acb->postq_mutex);
1227 	return (TRAN_ACCEPT);
1228 }
1229 
1230 /*
1231  * Function name: arcmsr_tran_destroy_pkt
1232  * Return Values: none
1233  *   Description: Called by kernel on behalf of a target driver
1234  *	          calling scsi_destroy_pkt(9F).
1235  *	          Refer to tran_destroy_pkt(9E) man page
1236  *       Context: Can be called from different kernel process threads.
1237  *	          Can be called by interrupt thread.
1238  */
1239 static void
1240 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1241 {
1242 	struct CCB *ccb = pkt->pkt_ha_private;
1243 	ddi_dma_handle_t pkt_dma_handle;
1244 
1245 	if (ccb == NULL || ccb->pkt != pkt) {
1246 		return;
1247 	}
1248 	/* only read the DMA handle once the CCB has been validated */
1249 	pkt_dma_handle = ccb->pkt_dma_handle;
1250 
1251 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1252 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1253 		if (pkt_dma_handle) {
1254 			(void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
1255 		}
1256 	}
1257 	if (pkt_dma_handle) {
1258 		(void) ddi_dma_free_handle(&pkt_dma_handle);
1259 	}
1260 	pkt->pkt_ha_private = NULL;
1261 	if (ccb)	{
1262 		if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1263 			if (ccb->ccb_state & ARCMSR_CCB_BACK) {
1264 				arcmsr_free_ccb(ccb);
1265 			} else {
1266 				ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
1267 			}
1268 		} else {
1269 			arcmsr_free_ccb(ccb);
1270 		}
1271 	}
1272 	scsi_hba_pkt_free(ap, pkt);
1273 }
1274 
1275 /*
1276  * Function name: arcmsr_tran_dmafree()
1277  * Return Values: none
1278  *   Description: free dvma resources
1279  *       Context: Can be called from different kernel process threads.
1280  *	          Can be called by interrupt thread.
1281  */
1282 static void
1283 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1284 {
1285 	struct CCB *ccb = pkt->pkt_ha_private;
1286 
1287 	if ((ccb == NULL) || (ccb->pkt != pkt)) {
1288 		return;
1289 	}
1290 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1291 		ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1292 		if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
1293 			arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
1294 			    "(target %d lun %d)", ap->a_target, ap->a_lun);
1295 		}
1296 		ddi_dma_free_handle(&ccb->pkt_dma_handle);
1297 		ccb->pkt_dma_handle = NULL;
1298 	}
1299 }
1300 
1301 /*
1302  * Function name: arcmsr_tran_sync_pkt()
1303  * Return Values: none
1304  *   Description: sync dma
1305  *       Context: Can be called from different kernel process threads.
1306  *		  Can be called by interrupt thread.
1307  */
1308 static void
1309 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1310 {
1311 	struct CCB *ccb;
1312 
1313 	ccb = pkt->pkt_ha_private;
1314 	if ((ccb == NULL) || (ccb->pkt != pkt)) {
1315 		return;
1316 	}
1317 	if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1318 		if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1319 		    (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1320 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1321 		    DDI_SUCCESS) {
1322 			arcmsr_warn(ccb->acb,
1323 			    "sync pkt failed for target %d lun %d",
1324 			    ap->a_target, ap->a_lun);
1325 		}
1326 	}
1327 }
1328 
1329 
1330 /*
1331  * Function: arcmsr_tran_abort(9E)
1332  *		SCSA interface routine to abort pkt(s) in progress.
1333  *		Aborts the pkt specified.  If NULL pkt, aborts ALL pkts.
1334  * Output:	Return 1 if success
1335  *		Return 0 if failure
1336  */
1337 static int
1338 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
1339 {
1340 	struct ACB *acb;
1341 	int return_code;
1342 
1343 	acb = ap->a_hba_tran->tran_hba_private;
1344 
1345 	while (acb->ccboutstandingcount != 0) {
1346 		drv_usecwait(10000);
1347 	}
1348 
1349 	mutex_enter(&acb->isr_mutex);
1350 	return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
1351 	mutex_exit(&acb->isr_mutex);
1352 
1353 	if (return_code != DDI_SUCCESS) {
1354 		arcmsr_warn(acb, "abort command failed for target %d lun %d",
1355 		    ap->a_target, ap->a_lun);
1356 		return (0);
1357 	}
1358 	return (1);
1359 }
1360 
1361 /*
1362  * Function: arcmsr_tran_reset(9E)
1363  *           SCSA interface routine to perform scsi resets on either
1364  *           a specified target or the bus (default).
1365  *   Output: Return 1 if success
1366  *	     Return 0 if failure
1367  */
1368 static int
1369 arcmsr_tran_reset(struct scsi_address *ap, int level)
1370 {
1371 	struct ACB *acb;
1372 	int return_code = 1;
1373 	int target = ap->a_target;
1374 	int lun = ap->a_lun;
1375 
1376 	/* Are we in the middle of dumping core? */
1377 	if (ddi_in_panic())
1378 		return (return_code);
1379 
1380 	acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1381 	mutex_enter(&acb->isr_mutex);
1382 	switch (level) {
1383 	case RESET_ALL:		/* 0 */
1384 		acb->num_resets++;
1385 		acb->acb_flags |= ACB_F_BUS_RESET;
1386 		if (acb->timeout_count) {
1387 			if (arcmsr_iop_reset(acb) != 0) {
1388 				arcmsr_handle_iop_bus_hold(acb);
1389 				acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1390 			}
1391 		}
1392 		acb->acb_flags &= ~ACB_F_BUS_RESET;
1393 		break;
1394 	case RESET_TARGET:	/* 1 */
1395 		if (acb->devstate[target][lun] == ARECA_RAID_GONE)
1396 			return_code = 0;
1397 		break;
1398 	case RESET_BUS:		/* 2 */
1399 		return_code = 0;
1400 		break;
1401 	case RESET_LUN:		/* 3 */
1402 		return_code = 0;
1403 		break;
1404 	default:
1405 		return_code = 0;
1406 	}
1407 	mutex_exit(&acb->isr_mutex);
1408 	return (return_code);
1409 }
1410 
1411 static int
1412 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
1413     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1414 {
1415 	struct ACB *acb;
1416 	int rval = NDI_FAILURE;
1417 	int tgt, lun;
1418 
1419 	if ((acb = ddi_get_soft_state(arcmsr_soft_state,
1420 	    ddi_get_instance(parent))) == NULL)
1421 		return (NDI_FAILURE);
1422 
1423 	ndi_devi_enter(parent);
1424 	switch (op) {
1425 	case BUS_CONFIG_ONE:
1426 		if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
1427 			rval = NDI_FAILURE;
1428 			break;
1429 		}
1430 		if (acb->device_map[tgt] & 1 << lun) {
1431 			acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
1432 			rval = arcmsr_config_lun(acb, tgt, lun, childp);
1433 		}
1434 		break;
1435 
1436 	case BUS_CONFIG_DRIVER:
1437 	case BUS_CONFIG_ALL:
1438 		for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
1439 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1440 				if (acb->device_map[tgt] & 1 << lun) {
1441 					acb->devstate[tgt][lun] =
1442 					    ARECA_RAID_GOOD;
1443 					(void) arcmsr_config_lun(acb, tgt,
1444 					    lun, NULL);
1445 				}
1446 
1447 		rval = NDI_SUCCESS;
1448 		break;
1449 	}
1450 	if (rval == NDI_SUCCESS)
1451 		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
1452 	ndi_devi_exit(parent);
1453 	return (rval);
1454 }
1455 
1456 /*
1457  * Function name: arcmsr_dma_alloc
1458  * Return Values: 0 if successful, -1 if failure
1459  *   Description: allocate DMA resources
1460  *       Context: Can only be called from arcmsr_tran_init_pkt()
1461  *     register struct scsi_address	*ap = &((pkt)->pkt_address);
1462  */
1463 static int
1464 arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
1465     struct buf *bp, int flags, int (*callback)())
1466 {
1467 	struct CCB *ccb = pkt->pkt_ha_private;
1468 	int alloc_result, map_method, dma_flags;
1469 	int resid = 0;
1470 	int total_ccb_xferlen = 0;
1471 	int (*cb)(caddr_t);
1472 	uint8_t i;
1473 
1474 	/*
1475 	 * at this point the PKT SCSI CDB is empty, and dma xfer length
1476 	 * is bp->b_bcount
1477 	 */
1478 
1479 	if (bp->b_flags & B_READ) {
1480 		ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
1481 		dma_flags = DDI_DMA_READ;
1482 	} else {
1483 		ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
1484 		dma_flags = DDI_DMA_WRITE;
1485 	}
1486 
1487 	if (flags & PKT_CONSISTENT) {
1488 		ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
1489 		dma_flags |= DDI_DMA_CONSISTENT;
1490 	}
1491 	if (flags & PKT_DMA_PARTIAL) {
1492 		dma_flags |= DDI_DMA_PARTIAL;
1493 	}
1494 
1495 	dma_flags |= DDI_DMA_REDZONE;
1496 	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1497 
1498 	alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
1499 	    cb, 0, &ccb->pkt_dma_handle);
1500 	if (alloc_result != DDI_SUCCESS) {
1501 		arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
1502 		return (DDI_FAILURE);
1503 	}
1504 
1505 	map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
1506 	    bp, dma_flags, cb, 0,
1507 	    &ccb->pkt_dmacookies[0],	/* SG List pointer */
1508 	    &ccb->pkt_ncookies);	/* number of sgl cookies */
1509 
1510 	switch (map_method) {
1511 	case DDI_DMA_PARTIAL_MAP:
1512 		/*
1513 		 * When main memory is larger than 4GB,
1514 		 * DDI_DMA_PARTIAL_MAP may be returned.
1515 		 *
1516 		 * We've already set DDI_DMA_PARTIAL in dma_flags,
1517 		 * so if it's now missing, there's something screwy
1518 		 * happening. We plow on....
1519 		 */
1520 
1521 		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
1522 			arcmsr_warn(acb,
1523 			    "dma partial mapping lost ...impossible case!");
1524 		}
1525 		if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
1526 		    DDI_FAILURE) {
1527 			arcmsr_warn(acb, "ddi_dma_numwin() failed");
1528 		}
1529 
1530 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1531 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1532 		    &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
1533 		    DDI_FAILURE) {
1534 			arcmsr_warn(acb, "ddi_dma_getwin failed");
1535 		}
1536 
1537 		i = 0;
1538 		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
1539 		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1540 		for (;;) {
1541 			i++;
1542 			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1543 			    (i == ccb->pkt_ncookies) ||
1544 			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1545 				break;
1546 			}
1547 			/*
1548 			 * next cookie will be retrieved from
1549 			 * ccb->pkt_dmacookies[i]
1550 			 */
1551 			ddi_dma_nextcookie(ccb->pkt_dma_handle,
1552 			    &ccb->pkt_dmacookies[i]);
1553 			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1554 		}
1555 		ccb->pkt_cookie = i;
1556 		ccb->arcmsr_cdb.sgcount = i;
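		/*
		 * Trim the scatter/gather list so the transfer length is a
		 * multiple of 512 bytes; the trimmed tail is saved in
		 * resid_dmacookie and is picked up again by arcmsr_dma_move
		 * for the next round.
		 */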
1557 		if (total_ccb_xferlen > 512) {
1558 			resid = total_ccb_xferlen % 512;
1559 			if (resid != 0) {
1560 				i--;
1561 				total_ccb_xferlen -= resid;
1562 				/* modify last sg length */
1563 				ccb->pkt_dmacookies[i].dmac_size =
1564 				    ccb->pkt_dmacookies[i].dmac_size - resid;
1565 				ccb->resid_dmacookie.dmac_size = resid;
1566 				ccb->resid_dmacookie.dmac_laddress =
1567 				    ccb->pkt_dmacookies[i].dmac_laddress +
1568 				    ccb->pkt_dmacookies[i].dmac_size;
1569 			}
1570 		}
1571 		ccb->total_dmac_size = total_ccb_xferlen;
1572 		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1573 		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1574 
1575 		return (DDI_SUCCESS);
1576 
1577 	case DDI_DMA_MAPPED:
1578 		ccb->pkt_nwin = 1; /* all mapped, so only one window */
1579 		ccb->pkt_dma_len = 0;
1580 		ccb->pkt_dma_offset = 0;
1581 		i = 0;
1582 		/* first cookie is accessed from ccb->pkt_dmacookies[0] */
1583 		total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1584 		for (;;) {
1585 			i++;
1586 			if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1587 			    (i == ccb->pkt_ncookies) ||
1588 			    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1589 				break;
1590 			}
1591 			/*
1592 			 * next cookie will be retrieved from
1593 			 * ccb->pkt_dmacookies[i]
1594 			 */
1595 			ddi_dma_nextcookie(ccb->pkt_dma_handle,
1596 			    &ccb->pkt_dmacookies[i]);
1597 			total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1598 		}
1599 		ccb->pkt_cookie = i;
1600 		ccb->arcmsr_cdb.sgcount = i;
1601 		if (total_ccb_xferlen > 512) {
1602 			resid = total_ccb_xferlen % 512;
1603 			if (resid != 0) {
1604 				i--;
1605 				total_ccb_xferlen -= resid;
1606 				/* modify last sg length */
1607 				ccb->pkt_dmacookies[i].dmac_size =
1608 				    ccb->pkt_dmacookies[i].dmac_size - resid;
1609 				ccb->resid_dmacookie.dmac_size = resid;
1610 				ccb->resid_dmacookie.dmac_laddress =
1611 				    ccb->pkt_dmacookies[i].dmac_laddress +
1612 				    ccb->pkt_dmacookies[i].dmac_size;
1613 			}
1614 		}
1615 		ccb->total_dmac_size = total_ccb_xferlen;
1616 		ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1617 		pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1618 		return (DDI_SUCCESS);
1619 
1620 	case DDI_DMA_NORESOURCES:
1621 		arcmsr_warn(acb, "dma map got 'no resources'");
1622 		bioerror(bp, ENOMEM);
1623 		break;
1624 
1625 	case DDI_DMA_NOMAPPING:
1626 		arcmsr_warn(acb, "dma map got 'no mapping'");
1627 		bioerror(bp, EFAULT);
1628 		break;
1629 
1630 	case DDI_DMA_TOOBIG:
1631 		arcmsr_warn(acb, "dma map got 'too big'");
1632 		bioerror(bp, EINVAL);
1633 		break;
1634 
1635 	case DDI_DMA_INUSE:
1636 		arcmsr_warn(acb, "dma map got 'in use' "
1637 		    "(should not happen)");
1638 		break;
1639 	default:
1640 		arcmsr_warn(acb, "dma map failed (0x%x)", i);
1641 		break;
1642 	}
1643 
1644 	ddi_dma_free_handle(&ccb->pkt_dma_handle);
1645 	ccb->pkt_dma_handle = NULL;
1646 	ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1647 	return (DDI_FAILURE);
1648 }
1649 
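/*
 * Editorial sketch (not part of the driver): the DMA-binding code above and
 * arcmsr_dma_move() below follow the standard DDI partial-mapping pattern of
 * walking windows and cookies.  The helper name walk_dma_windows() is an
 * illustrative assumption only; the sketch relies on the DDI headers this
 * file already includes and is kept inside "#if 0" so it is never compiled.
 */
#if 0	/* illustrative only */
static void
walk_dma_windows(ddi_dma_handle_t h)
{
	uint_t nwin, ncookies, win, c;
	off_t off;
	size_t len;
	ddi_dma_cookie_t cookie;

	/* How many windows did the partial binding create? */
	(void) ddi_dma_numwin(h, &nwin);
	for (win = 0; win < nwin; win++) {
		/* Activate one window and fetch its first cookie. */
		(void) ddi_dma_getwin(h, win, &off, &len, &cookie, &ncookies);
		for (c = 1; c < ncookies; c++) {
			/* Walk the remaining cookies in this window. */
			ddi_dma_nextcookie(h, &cookie);
		}
	}
}
#endif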
1650 
1651 /*
1652  * Function name: arcmsr_dma_move
1653  * Return Values: DDI_SUCCESS (0) on success, DDI_FAILURE (-1) on failure
1654  *   Description: move DMA resources to the next DMA window
1655  *       Context: Can only be called from arcmsr_tran_init_pkt()
1656  */
1657 static int
1658 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
1659 {
1660 	struct CCB *ccb = pkt->pkt_ha_private;
1661 	uint8_t i = 0;
1662 	int resid = 0;
1663 	int total_ccb_xferlen = 0;
1664 
1665 	if (ccb->resid_dmacookie.dmac_size != 0) {
1666 		total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1667 		ccb->pkt_dmacookies[i].dmac_size =
1668 		    ccb->resid_dmacookie.dmac_size;
1669 		ccb->pkt_dmacookies[i].dmac_laddress =
1670 		    ccb->resid_dmacookie.dmac_laddress;
1671 		i++;
1672 		ccb->resid_dmacookie.dmac_size = 0;
1673 	}
1674 	/*
1675 	 * If there are no more cookies remaining in this window,
1676 	 * move to the next window.
1677 	 */
1678 	if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1679 		/*
1680 		 * Only a "partial" DMA mapping arrives here.
1681 		 */
1682 		if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1683 		    (ccb->pkt_nwin == 1)) {
1684 			return (DDI_SUCCESS);
1685 		}
1686 
1687 		/* At last window, cannot move */
1688 		if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1689 			arcmsr_warn(acb, "dma partial set, numwin exceeded");
1690 			return (DDI_FAILURE);
1691 		}
1692 		if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1693 		    &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1694 		    &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1695 		    DDI_FAILURE) {
1696 			arcmsr_warn(acb, "ddi_dma_getwin failed");
1697 			return (DDI_FAILURE);
1698 		}
1699 		/* reset cookie pointer */
1700 		ccb->pkt_cookie = 0;
1701 	} else {
1702 		/*
1703 		 * Only a fully mapped ("all") DMA binding arrives here.
1704 		 * We still have more cookies remaining in this window,
1705 		 * so get the next one from the pkt_dma_handle and record
1706 		 * it in the ccb->pkt_dmacookies array for the
1707 		 * scatter/gather list.
1708 		 */
1709 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1710 		    &ccb->pkt_dmacookies[i]);
1711 	}
1712 
1713 	/* Get remaining cookies in this window, up to our maximum */
1714 	total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1715 
1716 	/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
1717 	for (;;) {
1718 		i++;
1719 		/* handled cookies count level indicator */
1720 		ccb->pkt_cookie++;
1721 		if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1722 		    (ccb->pkt_cookie == ccb->pkt_ncookies) ||
1723 		    (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1724 			break;
1725 		}
1726 		ddi_dma_nextcookie(ccb->pkt_dma_handle,
1727 		    &ccb->pkt_dmacookies[i]);
1728 		total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1729 	}
1730 
1731 	ccb->arcmsr_cdb.sgcount = i;
1732 	if (total_ccb_xferlen > 512) {
1733 		resid = total_ccb_xferlen % 512;
1734 		if (resid != 0) {
1735 			i--;
1736 			total_ccb_xferlen -= resid;
1737 			/* modify last sg length */
1738 			ccb->pkt_dmacookies[i].dmac_size =
1739 			    ccb->pkt_dmacookies[i].dmac_size - resid;
1740 			ccb->resid_dmacookie.dmac_size = resid;
1741 			ccb->resid_dmacookie.dmac_laddress =
1742 			    ccb->pkt_dmacookies[i].dmac_laddress +
1743 			    ccb->pkt_dmacookies[i].dmac_size;
1744 		}
1745 	}
1746 	ccb->total_dmac_size += total_ccb_xferlen;
1747 	pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1748 
1749 	return (DDI_SUCCESS);
1750 }
1751 
1752 
1753 /*ARGSUSED*/
1754 static void
1755 arcmsr_build_ccb(struct CCB *ccb)
1756 {
1757 	struct scsi_pkt *pkt = ccb->pkt;
1758 	struct ARCMSR_CDB *arcmsr_cdb;
1759 	char *psge;
1760 	uint32_t address_lo, address_hi;
1761 	int arccdbsize = 0x30;
1762 	uint8_t sgcount;
1763 
1764 	arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1765 	psge = (char *)&arcmsr_cdb->sgu;
1766 
1767 	bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1768 	sgcount = ccb->arcmsr_cdb.sgcount;
1769 
1770 	if (sgcount != 0) {
1771 		int length, i;
1772 		int cdb_sgcount = 0;
1773 		int total_xfer_length = 0;
1774 
1775 		/* map stor port SG list to our iop SG List. */
1776 		for (i = 0; i < sgcount; i++) {
1777 			/* Get physaddr of the current data pointer */
1778 			length = ccb->pkt_dmacookies[i].dmac_size;
1779 			total_xfer_length += length;
1780 			address_lo =
1781 			    dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1782 			address_hi =
1783 			    dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1784 
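			/*
			 * Cookies whose physical address fits in 32 bits use
			 * the compact SG32ENTRY form; addresses above 4GB
			 * need an SG64ENTRY, with IS_SG64_ADDR or'd into the
			 * length so the firmware treats the entry as a
			 * 64-bit descriptor.
			 */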
1785 			if (address_hi == 0) {
1786 				struct SG32ENTRY *dma_sg;
1787 
1788 				dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1789 				dma_sg->address = address_lo;
1790 				dma_sg->length = length;
1791 				psge += sizeof (struct SG32ENTRY);
1792 				arccdbsize += sizeof (struct SG32ENTRY);
1793 			} else {
1794 				struct SG64ENTRY *dma_sg;
1795 
1796 				dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1797 				dma_sg->addresshigh = address_hi;
1798 				dma_sg->address = address_lo;
1799 				dma_sg->length = length | IS_SG64_ADDR;
1800 				psge += sizeof (struct SG64ENTRY);
1801 				arccdbsize += sizeof (struct SG64ENTRY);
1802 			}
1803 			cdb_sgcount++;
1804 		}
1805 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1806 		arcmsr_cdb->DataLength = total_xfer_length;
1807 		if (arccdbsize > 256) {
1808 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1809 		}
1810 	} else {
1811 		arcmsr_cdb->DataLength = 0;
1812 	}
1813 
1814 	if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1815 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1816 	ccb->arc_cdb_size = arccdbsize;
1817 }
1818 
1819 /*
1820  * arcmsr_post_ccb - Send a protocol-specific ARC send postcard to an AIOC.
1821  *
1822  * acb:	Adapter control block of the registered ARC adapter
1823  * ccb:	Command control block describing the ARC send postcard
1824  *		to be posted
1825  *
1826  * This routine posts an ARC send postcard to the request post FIFO of a
1827  * specific ARC adapter.
1828  */
1829 static int
1830 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1831 {
1832 	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1833 	struct scsi_pkt *pkt = ccb->pkt;
1834 	struct ARCMSR_CDB *arcmsr_cdb;
1835 	uint_t pkt_flags = pkt->pkt_flags;
1836 
1837 	arcmsr_cdb = &ccb->arcmsr_cdb;
1838 
1839 	/* TODO: Use correct offset and size for syncing? */
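	/*
	 * Note: with both offset and length zero, ddi_dma_sync(9F) flushes
	 * the entire object, i.e. the whole CCB pool, on every post.  That
	 * is correct but conservative, hence the TODO above.
	 */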
1840 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1841 	    DDI_FAILURE)
1842 		return (DDI_FAILURE);
1843 
1844 	atomic_inc_32(&acb->ccboutstandingcount);
1845 	ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1846 
1847 	ccb->ccb_state = ARCMSR_CCB_START;
1848 	switch (acb->adapter_type) {
1849 	case ACB_ADAPTER_TYPE_A:
1850 	{
1851 		struct HBA_msgUnit *phbamu;
1852 
1853 		phbamu = (struct HBA_msgUnit *)acb->pmu;
1854 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1855 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1856 			    &phbamu->inbound_queueport,
1857 			    cdb_phyaddr_pattern |
1858 			    ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1859 		} else {
1860 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1861 			    &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1862 		}
1863 		if (pkt_flags & FLAG_NOINTR)
1864 			arcmsr_polling_hba_ccbdone(acb, ccb);
1865 		break;
1866 	}
1867 
1868 	case ACB_ADAPTER_TYPE_B:
1869 	{
1870 		struct HBB_msgUnit *phbbmu;
1871 		int ending_index, index;
1872 
1873 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
1874 		index = phbbmu->postq_index;
1875 		ending_index = ((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
1876 		phbbmu->post_qbuffer[ending_index] = 0;
1877 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1878 			phbbmu->post_qbuffer[index] =
1879 			    (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1880 		} else {
1881 			phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1882 		}
1883 		index++;
1884 		/* wrap the post queue index back to 0 past the last slot */
1885 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
1886 		phbbmu->postq_index = index;
1887 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1888 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
1889 		    ARCMSR_DRV2IOP_CDB_POSTED);
1890 
1891 		if (pkt_flags & FLAG_NOINTR)
1892 			arcmsr_polling_hbb_ccbdone(acb, ccb);
1893 		break;
1894 	}
1895 
1896 	case ACB_ADAPTER_TYPE_C:
1897 	{
1898 		struct HBC_msgUnit *phbcmu;
1899 		uint32_t ccb_post_stamp, arc_cdb_size;
1900 
1901 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
1902 		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1903 		    ccb->arc_cdb_size;
1904 		ccb_post_stamp = (cdb_phyaddr_pattern |
1905 		    ((arc_cdb_size-1) >> 6) |1);
1906 		if (acb->cdb_phyaddr_hi32) {
1907 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1908 			    &phbcmu->inbound_queueport_high,
1909 			    acb->cdb_phyaddr_hi32);
1910 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1911 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1912 		} else {
1913 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1914 			    &phbcmu->inbound_queueport_low, ccb_post_stamp);
1915 		}
1916 		if (pkt_flags & FLAG_NOINTR)
1917 			arcmsr_polling_hbc_ccbdone(acb, ccb);
1918 		break;
1919 	}
1920 
1921 	}
1922 	return (DDI_SUCCESS);
1923 }
1924 
1925 
1926 static void
1927 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1928 {
1929 	struct ACB *acb = ccb->acb;
1930 	struct scsi_pkt *pkt = ccb->pkt;
1931 
1932 	if (pkt == NULL) {
1933 		return;
1934 	}
1935 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1936 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1937 	    STATE_SENT_CMD | STATE_GOT_STATUS);
1938 
1939 	if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1940 	    (pkt->pkt_state & STATE_XFERRED_DATA)) {
1941 		(void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1942 		    DDI_DMA_SYNC_FORCPU);
1943 	}
1944 	/*
1945 	 * TODO: This represents a potential race condition, and is
1946 	 * ultimately a poor design decision.  Revisit this code
1947 	 * and solve the mutex ownership issue correctly.
1948 	 */
1949 	if (mutex_owned(&acb->isr_mutex)) {
1950 		mutex_exit(&acb->isr_mutex);
1951 		scsi_hba_pkt_comp(pkt);
1952 		mutex_enter(&acb->isr_mutex);
1953 	} else {
1954 		scsi_hba_pkt_comp(pkt);
1955 	}
1956 	if (flag == 1) {
1957 		atomic_dec_32(&acb->ccboutstandingcount);
1958 	}
1959 }
1960 
1961 static void
1962 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1963 {
1964 	int id, lun;
1965 
1966 	ccb->ccb_state |= ARCMSR_CCB_DONE;
1967 	id = ccb->pkt->pkt_address.a_target;
1968 	lun = ccb->pkt->pkt_address.a_lun;
1969 
1970 	if (!error) {
1971 		if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1972 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
1973 		}
1974 		ccb->pkt->pkt_reason = CMD_CMPLT;
1975 		ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1976 		arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1977 		    &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1978 
1979 	} else {
1980 		switch (ccb->arcmsr_cdb.DeviceStatus) {
1981 		case ARCMSR_DEV_SELECT_TIMEOUT:
1982 			if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1983 				arcmsr_warn(acb,
1984 				    "target %d lun %d selection "
1985 				    "timeout", id, lun);
1986 			}
1987 			acb->devstate[id][lun] = ARECA_RAID_GONE;
1988 			ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1989 			ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1990 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1991 			    &ccb->complete_queue_pointer,
1992 			    &acb->ccb_complete_list);
1993 			break;
1994 		case ARCMSR_DEV_ABORTED:
1995 		case ARCMSR_DEV_INIT_FAIL:
1996 			arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED'"
1997 			    " 'ARCMSR_DEV_INIT_FAIL'");
1998 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
1999 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2000 			ccb->pkt->pkt_reason = CMD_DEV_GONE;
2001 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2002 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2003 			    &ccb->complete_queue_pointer,
2004 			    &acb->ccb_complete_list);
2005 			break;
2006 		case SCSISTAT_CHECK_CONDITION:
2007 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
2008 			arcmsr_report_sense_info(ccb);
2009 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2010 			    &ccb->complete_queue_pointer,
2011 			    &acb->ccb_complete_list);
2012 			break;
2013 		default:
2014 			arcmsr_warn(acb,
2015 			    "target %d lun %d isr received CMD_DONE"
2016 			    " with unknown DeviceStatus (0x%x)",
2017 			    id, lun, ccb->arcmsr_cdb.DeviceStatus);
2018 			arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2019 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2020 			/* unknown error or crc error just for retry */
2021 			ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2022 			ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2023 			arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2024 			    &ccb->complete_queue_pointer,
2025 			    &acb->ccb_complete_list);
2026 			break;
2027 		}
2028 	}
2029 }
2030 
2031 
2032 static void
2033 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2034 {
2035 	uint16_t	ccb_state;
2036 
2037 	if (ccb->acb != acb) {
2038 		return;
2039 	}
2040 	if (ccb->ccb_state != ARCMSR_CCB_START) {
2041 		switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2042 		case ARCMSR_CCB_TIMEOUT:
2043 			ccb_state = ccb->ccb_state;
2044 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2045 				arcmsr_free_ccb(ccb);
2046 			else
2047 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2048 			return;
2049 
2050 		case ARCMSR_CCB_ABORTED:
2051 			ccb_state = ccb->ccb_state;
2052 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2053 				arcmsr_free_ccb(ccb);
2054 			else
2055 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2056 			return;
2057 		case ARCMSR_CCB_RESET:
2058 			ccb_state = ccb->ccb_state;
2059 			if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2060 				arcmsr_free_ccb(ccb);
2061 			else
2062 				ccb->ccb_state |= ARCMSR_CCB_BACK;
2063 			return;
2064 		default:
2065 			return;
2066 		}
2067 	}
2068 	arcmsr_report_ccb_state(acb, ccb, error);
2069 }
2070 
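/*
 * Translate the firmware-supplied SENSE_DATA of a failed CCB into the
 * scsi_arq_status area of the packet.  Commands whose failing block address
 * fits in 32 bits get fixed-format sense data; larger LBAs are reported
 * with descriptor-format sense plus an information descriptor.
 */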
2071 static void
2072 arcmsr_report_sense_info(struct CCB *ccb)
2073 {
2074 	struct SENSE_DATA *cdb_sensedata;
2075 	struct scsi_pkt *pkt = ccb->pkt;
2076 	struct scsi_arq_status *arq_status;
2077 	union scsi_cdb *cdbp;
2078 	uint64_t err_blkno;
2079 
2080 	cdbp = (void *)pkt->pkt_cdbp;
2081 	err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2082 
2083 	arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2084 	bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2085 	*pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2086 	arq_status->sts_rqpkt_reason = CMD_CMPLT;
2087 	arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2088 	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2089 	arq_status->sts_rqpkt_statistics = 0;
2090 	arq_status->sts_rqpkt_resid = 0;
2091 
2092 	pkt->pkt_reason = CMD_CMPLT;
2093 	/* auto rqsense took place */
2094 	pkt->pkt_state |= STATE_ARQ_DONE;
2095 
2096 	cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2097 	if (err_blkno <= 0xfffffffful) {
2098 		struct scsi_extended_sense *sts_sensedata;
2099 
2100 		sts_sensedata = &arq_status->sts_sensedata;
2101 		sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2102 		/* must eq CLASS_EXTENDED_SENSE (0x07) */
2103 		sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2104 		sts_sensedata->es_valid = cdb_sensedata->Valid;
2105 		sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2106 		sts_sensedata->es_key = cdb_sensedata->SenseKey;
2107 		sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2108 		sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2109 		sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2110 		sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2111 		sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2112 		sts_sensedata->es_info_3 = (err_blkno >>  8) & 0xFF;
2113 		sts_sensedata->es_info_4 = err_blkno & 0xFF;
2114 		sts_sensedata->es_add_len =
2115 		    cdb_sensedata->AdditionalSenseLength;
2116 		sts_sensedata->es_cmd_info[0] =
2117 		    cdb_sensedata->CommandSpecificInformation[0];
2118 		sts_sensedata->es_cmd_info[1] =
2119 		    cdb_sensedata->CommandSpecificInformation[1];
2120 		sts_sensedata->es_cmd_info[2] =
2121 		    cdb_sensedata->CommandSpecificInformation[2];
2122 		sts_sensedata->es_cmd_info[3] =
2123 		    cdb_sensedata->CommandSpecificInformation[3];
2124 		sts_sensedata->es_add_code =
2125 		    cdb_sensedata->AdditionalSenseCode;
2126 		sts_sensedata->es_qual_code =
2127 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2128 		sts_sensedata->es_fru_code =
2129 		    cdb_sensedata->FieldReplaceableUnitCode;
2130 	} else { /* 64-bit LBA */
2131 		struct scsi_descr_sense_hdr *dsp;
2132 		struct scsi_information_sense_descr *isd;
2133 
2134 		dsp = (struct scsi_descr_sense_hdr *)
2135 		    &arq_status->sts_sensedata;
2136 		dsp->ds_class = CLASS_EXTENDED_SENSE;
2137 		dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2138 		dsp->ds_key = cdb_sensedata->SenseKey;
2139 		dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2140 		dsp->ds_qual_code =
2141 		    cdb_sensedata->AdditionalSenseCodeQualifier;
2142 		dsp->ds_addl_sense_length =
2143 		    sizeof (struct scsi_information_sense_descr);
2144 
2145 		isd = (struct scsi_information_sense_descr *)(dsp+1);
2146 		isd->isd_descr_type = DESCR_INFORMATION;
2147 		isd->isd_valid = 1;
2148 		isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2149 		isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2150 		isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2151 		isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2152 		isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2153 		isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2154 		isd->isd_information[6] = (err_blkno >>  8) & 0xFF;
2155 		isd->isd_information[7] = (err_blkno) & 0xFF;
2156 	}
2157 }
2158 
2159 
2160 static int
2161 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2162 {
2163 	struct CCB *ccb;
2164 	uint32_t intmask_org = 0;
2165 	int i = 0;
2166 
2167 	acb->num_aborts++;
2168 
2169 	if (abortpkt != NULL) {
2170 		/*
2171 		 * We don't support abort of a single packet.  All
2172 		 * callers in our kernel always do a global abort, so
2173 		 * there is no point in having code to support it
2174 		 * here.
2175 		 */
2176 		return (DDI_FAILURE);
2177 	}
2178 
2179 	/*
2180 	 * if abortpkt is NULL, the upper layer needs us
2181 	 * to abort all commands
2182 	 */
2183 	if (acb->ccboutstandingcount != 0) {
2184 		/* disable all outbound interrupt */
2185 		intmask_org = arcmsr_disable_allintr(acb);
2186 		/* clear and abort all outbound posted Q */
2187 		arcmsr_done4abort_postqueue(acb);
2188 		/* talk to iop 331 outstanding command aborted */
2189 		(void) arcmsr_abort_host_command(acb);
2190 
2191 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2192 			ccb = acb->pccb_pool[i];
2193 			if (ccb->ccb_state == ARCMSR_CCB_START) {
2194 				/*
2195 				 * this ccb will complete at
2196 				 * hwinterrupt
2197 				 */
2198 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2199 				ccb->pkt->pkt_reason = CMD_ABORTED;
2200 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
2201 				arcmsr_ccb_complete(ccb, 1);
2202 			}
2203 		}
2204 		/*
2205 		 * enable outbound Post Queue, outbound
2206 		 * doorbell Interrupt
2207 		 */
2208 		arcmsr_enable_allintr(acb, intmask_org);
2209 	}
2210 	return (DDI_SUCCESS);
2211 }
2212 
2213 
2214 /*
2215  * Autoconfiguration support
2216  */
2217 static int
2218 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2219 {
2220 	char devbuf[SCSI_MAXNAMELEN];
2221 	char *addr;
2222 	char *p,  *tp, *lp;
2223 	long num;
2224 
2225 	/* Parse dev name and address */
2226 	(void) strlcpy(devbuf, devnm, sizeof (devbuf));
2227 	addr = "";
2228 	for (p = devbuf; *p != '\0'; p++) {
2229 		if (*p == '@') {
2230 			addr = p + 1;
2231 			*p = '\0';
2232 		} else if (*p == ':') {
2233 			*p = '\0';
2234 			break;
2235 		}
2236 	}
2237 
2238 	/* Parse target and lun */
2239 	for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2240 		if (*p == ',') {
2241 			lp = p + 1;
2242 			*p = '\0';
2243 			break;
2244 		}
2245 	}
2246 	if ((tgt != NULL) && (tp != NULL)) {
2247 		if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2248 			return (-1);
2249 		*tgt = (int)num;
2250 	}
2251 	if ((lun != NULL) && (lp != NULL)) {
2252 		if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2253 			return (-1);
2254 		*lun = (int)num;
2255 	}
2256 	return (0);
2257 }
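/*
 * Example (editorial note): for a name such as "sd@1,0:a" the loops above
 * split out the address "1,0", and ddi_strtol() with radix 16 yields
 * tgt = 0x1 and lun = 0x0.
 */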
2258 
2259 static int
2260 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2261 {
2262 	int tgt, lun;
2263 
2264 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2265 	    -1);
2266 	if (tgt == -1)
2267 		return (DDI_FAILURE);
2268 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2269 	    -1);
2270 	if (lun == -1)
2271 		return (DDI_FAILURE);
2272 	(void) snprintf(name, len, "%x,%x", tgt, lun);
2273 	return (DDI_SUCCESS);
2274 }
2275 
2276 static dev_info_t *
2277 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2278 {
2279 	dev_info_t *child = NULL;
2280 	char addr[SCSI_MAXNAMELEN];
2281 	char tmp[SCSI_MAXNAMELEN];
2282 
2283 	(void) sprintf(addr, "%x,%x", tgt, lun);
2284 
2285 	for (child = ddi_get_child(acb->dev_info);
2286 	    child;
2287 	    child = ddi_get_next_sibling(child)) {
2288 		/* We don't care about non-persistent nodes */
2289 		if (ndi_dev_is_persistent_node(child) == 0)
2290 			continue;
2291 		if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2292 		    DDI_SUCCESS)
2293 			continue;
2294 		if (strcmp(addr, tmp) == 0)
2295 			break;
2296 	}
2297 	return (child);
2298 }
2299 
2300 static int
2301 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2302 {
2303 	char *nodename = NULL;
2304 	char **compatible = NULL;
2305 	int ncompatible = 0;
2306 	dev_info_t *ldip = NULL;
2307 	int tgt = sd->sd_address.a_target;
2308 	int lun = sd->sd_address.a_lun;
2309 	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2310 	int rval;
2311 
2312 	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2313 	    NULL, &nodename, &compatible, &ncompatible);
2314 	if (nodename == NULL) {
2315 		arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2316 		    tgt, lun);
2317 		rval = NDI_FAILURE;
2318 		goto finish;
2319 	}
2320 	/* Create dev node */
2321 	rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2322 	if (rval == NDI_SUCCESS) {
2323 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2324 		    DDI_PROP_SUCCESS) {
2325 			arcmsr_warn(acb,
2326 			    "unable to create target property for T%dL%d",
2327 			    tgt, lun);
2328 			rval = NDI_FAILURE;
2329 			goto finish;
2330 		}
2331 		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2332 		    DDI_PROP_SUCCESS) {
2333 			arcmsr_warn(acb,
2334 			    "unable to create lun property for T%dL%d",
2335 			    tgt, lun);
2336 			rval = NDI_FAILURE;
2337 			goto finish;
2338 		}
2339 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2340 		    "compatible", compatible, ncompatible) !=
2341 		    DDI_PROP_SUCCESS) {
2342 			arcmsr_warn(acb,
2343 			    "unable to create compatible property for T%dL%d",
2344 			    tgt, lun);
2345 			rval = NDI_FAILURE;
2346 			goto finish;
2347 		}
2348 		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2349 		if (rval != NDI_SUCCESS) {
2350 			arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2351 			ndi_prop_remove_all(ldip);
2352 			(void) ndi_devi_free(ldip);
2353 		} else {
2354 			arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2355 		}
2356 	}
2357 finish:
2358 	if (dipp)
2359 		*dipp = ldip;
2360 
2361 	scsi_hba_nodename_compatible_free(nodename, compatible);
2362 	return (rval);
2363 }
2364 
2365 static int
2366 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2367 {
2368 	struct scsi_device sd;
2369 	dev_info_t *child;
2370 	int rval;
2371 
2372 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2373 		if (ldip) {
2374 			*ldip = child;
2375 		}
2376 		return (NDI_SUCCESS);
2377 	}
2378 	bzero(&sd, sizeof (struct scsi_device));
2379 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2380 	sd.sd_address.a_target = tgt;
2381 	sd.sd_address.a_lun = lun;
2382 
2383 	rval = scsi_hba_probe(&sd, NULL);
2384 	if (rval == SCSIPROBE_EXISTS)
2385 		rval = arcmsr_config_child(acb, &sd, ldip);
2386 	scsi_unprobe(&sd);
2387 	return (rval);
2388 }
2389 
2390 
2391 static int
2392 arcmsr_add_intr(struct ACB *acb, int intr_type)
2393 {
2394 	int	rc, count;
2395 	dev_info_t *dev_info;
2396 	const char *type_str;
2397 
2398 	switch (intr_type) {
2399 	case DDI_INTR_TYPE_MSI:
2400 		type_str = "MSI";
2401 		break;
2402 	case DDI_INTR_TYPE_MSIX:
2403 		type_str = "MSIX";
2404 		break;
2405 	case DDI_INTR_TYPE_FIXED:
2406 		type_str = "FIXED";
2407 		break;
2408 	default:
2409 		type_str = "unknown";
2410 		break;
2411 	}
2412 
2413 	dev_info = acb->dev_info;
2414 	/* Determine number of supported interrupts */
2415 	rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2416 	if ((rc != DDI_SUCCESS) || (count == 0)) {
2417 		arcmsr_warn(acb,
2418 		    "no interrupts of type %s, rc=0x%x, count=%d",
2419 		    type_str, rc, count);
2420 		return (DDI_FAILURE);
2421 	}
2422 	acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2423 	acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2424 	rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2425 	    count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2426 	if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2427 		arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2428 		    type_str, rc);
2429 		return (DDI_FAILURE);
2430 	}
2431 	if (acb->intr_count < count) {
2432 		arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2433 		    acb->intr_count, count);
2434 	}
2435 	/*
2436 	 * Get priority for first msi, assume remaining are all the same
2437 	 */
2438 	if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2439 		arcmsr_warn(acb, "ddi_intr_get_pri failed");
2440 		return (DDI_FAILURE);
2441 	}
2442 	if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2443 		arcmsr_warn(acb,  "high level interrupt not supported");
2444 		return (DDI_FAILURE);
2445 	}
2446 
2447 	for (int x = 0; x < acb->intr_count; x++) {
2448 		if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2449 		    (caddr_t)acb, NULL) != DDI_SUCCESS) {
2450 			arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2451 			    type_str);
2452 			return (DDI_FAILURE);
2453 		}
2454 	}
2455 	(void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2456 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2457 		/* Call ddi_intr_block_enable() for MSI */
2458 		(void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2459 	} else {
2460 		/* Call ddi_intr_enable() for MSI non block enable */
2461 		for (int x = 0; x < acb->intr_count; x++) {
2462 			(void) ddi_intr_enable(acb->phandle[x]);
2463 		}
2464 	}
2465 	return (DDI_SUCCESS);
2466 }
2467 
2468 static void
2469 arcmsr_remove_intr(struct ACB *acb)
2470 {
2471 	int x;
2472 
2473 	if (acb->phandle == NULL)
2474 		return;
2475 
2476 	/* Disable all interrupts */
2477 	if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2478 		/* Call ddi_intr_block_disable() */
2479 		(void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2480 	} else {
2481 		for (x = 0; x < acb->intr_count; x++) {
2482 			(void) ddi_intr_disable(acb->phandle[x]);
2483 		}
2484 	}
2485 	/* Call ddi_intr_remove_handler() */
2486 	for (x = 0; x < acb->intr_count; x++) {
2487 		(void) ddi_intr_remove_handler(acb->phandle[x]);
2488 		(void) ddi_intr_free(acb->phandle[x]);
2489 	}
2490 	kmem_free(acb->phandle, acb->intr_size);
2491 	acb->phandle = NULL;
2492 }
2493 
2494 static void
2495 arcmsr_mutex_init(struct ACB *acb)
2496 {
2497 	mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2498 	mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2499 	mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2500 	mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2501 	mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2502 }
2503 
2504 static void
2505 arcmsr_mutex_destroy(struct ACB *acb)
2506 {
2507 	mutex_destroy(&acb->isr_mutex);
2508 	mutex_destroy(&acb->acb_mutex);
2509 	mutex_destroy(&acb->postq_mutex);
2510 	mutex_destroy(&acb->workingQ_mutex);
2511 	mutex_destroy(&acb->ioctl_mutex);
2512 }
2513 
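/*
 * arcmsr_initialize() maps the adapter registers for the detected chip
 * family and carves one DMA-consistent allocation into the fixed pool of
 * CCBs.  Each CCB's posting address (cdb_phyaddr_pattern) is pre-computed
 * here so commands can later be handed to the firmware without further
 * translation.
 */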
2514 static int
2515 arcmsr_initialize(struct ACB *acb)
2516 {
2517 	struct CCB *pccb_tmp;
2518 	size_t allocated_length;
2519 	uint16_t wval;
2520 	uint_t intmask_org, count;
2521 	caddr_t	arcmsr_ccbs_area;
2522 	uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2523 	int32_t dma_sync_size;
2524 	int i, id, lun, instance;
2525 
2526 	instance = ddi_get_instance(acb->dev_info);
2527 	wlval = pci_config_get32(acb->pci_acc_handle, 0);
2528 	wval = (uint16_t)((wlval >> 16) & 0xffff);
2529 	realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2530 	switch (wval) {
2531 	case PCI_DEVICE_ID_ARECA_1880:
2532 	case PCI_DEVICE_ID_ARECA_1882:
2533 	{
2534 		uint32_t *iop_mu_regs_map0;
2535 
2536 		acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2537 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2538 		if (ddi_regs_map_setup(acb->dev_info, 2,
2539 		    (caddr_t *)&iop_mu_regs_map0, 0,
2540 		    sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2541 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2542 			arcmsr_warn(acb, "unable to map registers");
2543 			return (DDI_FAILURE);
2544 		}
2545 
2546 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2547 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2548 		    DDI_SUCCESS) {
2549 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2550 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2551 			return (DDI_FAILURE);
2552 		}
2553 
2554 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2555 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2556 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2557 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2558 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2559 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2560 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2561 			return (DDI_FAILURE);
2562 		}
2563 
2564 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2565 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2566 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2567 		    &count) != DDI_DMA_MAPPED) {
2568 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2569 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2570 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2571 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2572 			return (DDI_FAILURE);
2573 		}
2574 		bzero(arcmsr_ccbs_area, dma_sync_size);
2575 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2576 		    - PtrToNum(arcmsr_ccbs_area));
2577 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2578 		/* ioport base */
2579 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2580 		break;
2581 	}
2582 
2583 	case PCI_DEVICE_ID_ARECA_1201:
2584 	{
2585 		uint32_t *iop_mu_regs_map0;
2586 		uint32_t *iop_mu_regs_map1;
2587 		struct HBB_msgUnit *phbbmu;
2588 
2589 		acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2590 		dma_sync_size =
2591 		    (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2592 		    sizeof (struct HBB_msgUnit);
2593 		/* Allocate memory for the ccb */
2594 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2595 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2596 		    DDI_SUCCESS) {
2597 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2598 			return (DDI_FAILURE);
2599 		}
2600 
2601 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2602 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2603 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2604 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2605 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2606 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2607 			return (DDI_FAILURE);
2608 		}
2609 
2610 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2611 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2612 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2613 		    NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2614 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2615 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2616 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2617 			return (DDI_FAILURE);
2618 		}
2619 		bzero(arcmsr_ccbs_area, dma_sync_size);
2620 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2621 		    - PtrToNum(arcmsr_ccbs_area));
2622 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2623 		acb->pmu = (struct msgUnit *)
2624 		    NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2625 		    (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2626 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
2627 
2628 		/* setup device register */
2629 		if (ddi_regs_map_setup(acb->dev_info, 1,
2630 		    (caddr_t *)&iop_mu_regs_map0, 0,
2631 		    sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2632 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2633 			arcmsr_warn(acb, "unable to map base0 registers");
2634 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2635 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2636 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2637 			return (DDI_FAILURE);
2638 		}
2639 
2640 		/* ARCMSR_DRV2IOP_DOORBELL */
2641 		phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2642 		if (ddi_regs_map_setup(acb->dev_info, 2,
2643 		    (caddr_t *)&iop_mu_regs_map1, 0,
2644 		    sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2645 		    &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2646 			arcmsr_warn(acb, "unable to map base1 registers");
2647 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2648 			(void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2649 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2650 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2651 			return (DDI_FAILURE);
2652 		}
2653 
2654 		/* ARCMSR_MSGCODE_RWBUFFER */
2655 		phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2656 		break;
2657 	}
2658 
2659 	case	PCI_DEVICE_ID_ARECA_1110:
2660 	case	PCI_DEVICE_ID_ARECA_1120:
2661 	case	PCI_DEVICE_ID_ARECA_1130:
2662 	case	PCI_DEVICE_ID_ARECA_1160:
2663 	case	PCI_DEVICE_ID_ARECA_1170:
2664 	case	PCI_DEVICE_ID_ARECA_1210:
2665 	case	PCI_DEVICE_ID_ARECA_1220:
2666 	case	PCI_DEVICE_ID_ARECA_1230:
2667 	case	PCI_DEVICE_ID_ARECA_1231:
2668 	case	PCI_DEVICE_ID_ARECA_1260:
2669 	case	PCI_DEVICE_ID_ARECA_1261:
2670 	case	PCI_DEVICE_ID_ARECA_1270:
2671 	case	PCI_DEVICE_ID_ARECA_1280:
2672 	case	PCI_DEVICE_ID_ARECA_1212:
2673 	case	PCI_DEVICE_ID_ARECA_1222:
2674 	case	PCI_DEVICE_ID_ARECA_1380:
2675 	case	PCI_DEVICE_ID_ARECA_1381:
2676 	case	PCI_DEVICE_ID_ARECA_1680:
2677 	case	PCI_DEVICE_ID_ARECA_1681:
2678 	{
2679 		uint32_t *iop_mu_regs_map0;
2680 
2681 		acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2682 		dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2683 		if (ddi_regs_map_setup(acb->dev_info, 1,
2684 		    (caddr_t *)&iop_mu_regs_map0, 0,
2685 		    sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2686 		    &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2687 			arcmsr_warn(acb, "unable to map registers");
2688 			return (DDI_FAILURE);
2689 		}
2690 
2691 		if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2692 		    DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2693 		    DDI_SUCCESS) {
2694 			arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2695 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2696 			return (DDI_FAILURE);
2697 		}
2698 
2699 		if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2700 		    &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2701 		    DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2702 		    &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2703 			arcmsr_warn(acb, "ddi_dma_mem_alloc failed", instance);
2704 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2705 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2706 			return (DDI_FAILURE);
2707 		}
2708 
2709 		if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2710 		    (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2711 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2712 		    &count) != DDI_DMA_MAPPED) {
2713 			arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2714 			ddi_dma_mem_free(&acb->ccbs_acc_handle);
2715 			ddi_dma_free_handle(&acb->ccbs_pool_handle);
2716 			ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2717 			return (DDI_FAILURE);
2718 		}
2719 		bzero(arcmsr_ccbs_area, dma_sync_size);
2720 		offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2721 		    - PtrToNum(arcmsr_ccbs_area));
2722 		arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2723 		/* ioport base */
2724 		acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2725 		break;
2726 	}
2727 
2728 	default:
2729 		arcmsr_warn(acb, "Unknown RAID adapter type!");
2730 		return (DDI_FAILURE);
2731 	}
2732 	arcmsr_init_list_head(&acb->ccb_complete_list);
2733 	/* from here on we cannot access PCI configuration space again */
2734 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2735 	    ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2736 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2737 	/* physical address of acb->pccb_pool */
2738 	cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2739 
2740 	pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2741 
2742 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2743 		pccb_tmp->cdb_phyaddr_pattern =
2744 		    (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2745 		    cdb_phyaddr : (cdb_phyaddr >> 5);
2746 		pccb_tmp->acb = acb;
2747 		acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2748 		cdb_phyaddr = cdb_phyaddr + realccb_size;
2749 		pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2750 		    realccb_size);
2751 	}
2752 	acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2753 
2754 	/* disable all outbound interrupt */
2755 	intmask_org = arcmsr_disable_allintr(acb);
2756 
2757 	if (!arcmsr_iop_confirm(acb)) {
2758 		arcmsr_warn(acb, "arcmsr_iop_confirm error", instance);
2759 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2760 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2761 		return (DDI_FAILURE);
2762 	}
2763 
2764 	for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2765 		for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2766 			acb->devstate[id][lun] = ARECA_RAID_GONE;
2767 		}
2768 	}
2769 
2770 	/* enable outbound Post Queue, outbound doorbell Interrupt */
2771 	arcmsr_enable_allintr(acb, intmask_org);
2772 
2773 	return (0);
2774 }
2775 
2776 static int
2777 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2778 {
2779 	scsi_hba_tran_t *hba_trans;
2780 	ddi_device_acc_attr_t dev_acc_attr;
2781 	struct ACB *acb;
2782 	uint16_t wval;
2783 	int raid6 = 1;
2784 	char *type;
2785 	int intr_types;
2786 
2787 
2788 	/*
2789 	 * Soft State Structure
2790 	 * The driver should allocate the per-device-instance
2791 	 * soft state structure, being careful to clean up properly if
2792 	 * an error occurs.
2793 	 */
2794 	if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2795 		arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2796 		return (DDI_FAILURE);
2797 	}
2798 
2799 	acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2800 	ASSERT(acb);
2801 
2802 	arcmsr_mutex_init(acb);
2803 
2804 	/* acb is already zalloc()d so we don't need to bzero() it */
2805 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2806 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2807 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2808 
2809 	acb->dev_info = dev_info;
2810 	acb->dev_acc_attr = dev_acc_attr;
2811 
2812 	/*
2813 	 * The driver, if providing DMA, should also check that its hardware is
2814 	 * installed in a DMA-capable slot
2815 	 */
2816 	if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2817 		arcmsr_warn(acb, "hardware is not installed in"
2818 		    " a DMA-capable slot");
2819 		goto error_level_0;
2820 	}
2821 	if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2822 		arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2823 		goto error_level_0;
2824 	}
2825 
2826 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2827 	if (wval != PCI_VENDOR_ID_ARECA) {
2828 		arcmsr_warn(acb,
2829 		    "vendorid (0x%04x) does not match 0x%04x "
2830 		    "(PCI_VENDOR_ID_ARECA)",
2831 		    wval, PCI_VENDOR_ID_ARECA);
2832 		goto error_level_0;
2833 	}
2834 
2835 	wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2836 	switch (wval) {
2837 	case PCI_DEVICE_ID_ARECA_1110:
2838 	case PCI_DEVICE_ID_ARECA_1210:
2839 	case PCI_DEVICE_ID_ARECA_1201:
2840 		raid6 = 0;
2841 		/*FALLTHRU*/
2842 	case PCI_DEVICE_ID_ARECA_1120:
2843 	case PCI_DEVICE_ID_ARECA_1130:
2844 	case PCI_DEVICE_ID_ARECA_1160:
2845 	case PCI_DEVICE_ID_ARECA_1170:
2846 	case PCI_DEVICE_ID_ARECA_1220:
2847 	case PCI_DEVICE_ID_ARECA_1230:
2848 	case PCI_DEVICE_ID_ARECA_1260:
2849 	case PCI_DEVICE_ID_ARECA_1270:
2850 	case PCI_DEVICE_ID_ARECA_1280:
2851 		type = "SATA 3G";
2852 		break;
2853 	case PCI_DEVICE_ID_ARECA_1380:
2854 	case PCI_DEVICE_ID_ARECA_1381:
2855 	case PCI_DEVICE_ID_ARECA_1680:
2856 	case PCI_DEVICE_ID_ARECA_1681:
2857 		type = "SAS 3G";
2858 		break;
2859 	case PCI_DEVICE_ID_ARECA_1880:
2860 		type = "SAS 6G";
2861 		break;
2862 	default:
2863 		type = "X-TYPE";
2864 		arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2865 		goto error_level_0;
2866 	}
2867 
2868 	arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2869 	    type, raid6 ? " (RAID6 capable)" : "");
2870 
2871 	/* we disable iop interrupt here */
2872 	if (arcmsr_initialize(acb) == DDI_FAILURE) {
2873 		arcmsr_warn(acb, "arcmsr_initialize failed");
2874 		goto error_level_1;
2875 	}
2876 
2877 	/* Allocate a transport structure */
2878 	hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2879 	if (hba_trans == NULL) {
2880 		arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2881 		goto error_level_2;
2882 	}
2883 	acb->scsi_hba_transport = hba_trans;
2884 	acb->dev_info = dev_info;
2885 	/* init scsi host adapter transport entry */
2886 	hba_trans->tran_hba_private  = acb;
2887 	hba_trans->tran_tgt_private  = NULL;
2888 	/*
2889 	 * If no per-target initialization is required, the HBA can leave
2890 	 * tran_tgt_init set to NULL.
2891 	 */
2892 	hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2893 	hba_trans->tran_tgt_probe = scsi_hba_probe;
2894 	hba_trans->tran_tgt_free = NULL;
2895 	hba_trans->tran_start = arcmsr_tran_start;
2896 	hba_trans->tran_abort = arcmsr_tran_abort;
2897 	hba_trans->tran_reset = arcmsr_tran_reset;
2898 	hba_trans->tran_getcap = arcmsr_tran_getcap;
2899 	hba_trans->tran_setcap = arcmsr_tran_setcap;
2900 	hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2901 	hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2902 	hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2903 	hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2904 
2905 	hba_trans->tran_reset_notify = NULL;
2906 	hba_trans->tran_get_bus_addr = NULL;
2907 	hba_trans->tran_get_name = NULL;
2908 	hba_trans->tran_quiesce = NULL;
2909 	hba_trans->tran_unquiesce = NULL;
2910 	hba_trans->tran_bus_reset = NULL;
2911 	hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2912 	hba_trans->tran_add_eventcall = NULL;
2913 	hba_trans->tran_get_eventcookie = NULL;
2914 	hba_trans->tran_post_event = NULL;
2915 	hba_trans->tran_remove_eventcall = NULL;
2916 
2917 	/* iop init and enable interrupt here */
2918 	arcmsr_iop_init(acb);
2919 
2920 	/* Get supported interrupt types */
2921 	if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2922 	    DDI_SUCCESS) {
2923 		arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2924 		goto error_level_3;
2925 	}
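	/* Prefer a FIXED interrupt when supported; otherwise try MSI. */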
2926 	if (intr_types & DDI_INTR_TYPE_FIXED) {
2927 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2928 			goto error_level_5;
2929 	} else if (intr_types & DDI_INTR_TYPE_MSI) {
2930 		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2931 			goto error_level_5;
2932 	}
2933 
2934 	/*
2935 	 * The driver should attach this instance of the device, and
2936 	 * perform error cleanup if necessary
2937 	 */
2938 	if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2939 	    hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2940 		arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2941 		goto error_level_5;
2942 	}
2943 
2944 	/* Create a taskq for dealing with dr events */
2945 	if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2946 	    TASKQ_DEFAULTPRI, 0)) == NULL) {
2947 		arcmsr_warn(acb, "ddi_taskq_create failed");
2948 		goto error_level_8;
2949 	}
2950 
2951 	acb->timeout_count = 0;
2952 	/* active ccbs "timeout" watchdog */
2953 	acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2954 	    (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2955 	acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2956 	    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2957 
2958 	/* report device info */
2959 	ddi_report_dev(dev_info);
2960 
2961 	return (DDI_SUCCESS);
2962 
2963 error_level_8:
2964 	(void) scsi_hba_detach(dev_info);
2965 
2966 error_level_5:
2967 	arcmsr_remove_intr(acb);
2968 
2969 error_level_3:
2970 	if (acb->scsi_hba_transport)
2971 		scsi_hba_tran_free(acb->scsi_hba_transport);
2972 
2973 error_level_2:
2974 	if (acb->ccbs_acc_handle)
2975 		ddi_dma_mem_free(&acb->ccbs_acc_handle);
2976 	if (acb->ccbs_pool_handle)
2977 		ddi_dma_free_handle(&acb->ccbs_pool_handle);
2978 
2979 error_level_1:
2980 	if (acb->pci_acc_handle)
2981 		pci_config_teardown(&acb->pci_acc_handle);
2982 	arcmsr_mutex_destroy(acb);
2983 	ddi_soft_state_free(arcmsr_soft_state, instance);
2984 
2985 error_level_0:
2986 	return (DDI_FAILURE);
2987 }
2988 
2989 
2990 static void
2991 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2992 {
2993 	char	buf[256];
2994 
2995 	if (acb != NULL) {
2996 		(void) snprintf(buf, sizeof (buf), "%s%d: %s",
2997 		    ddi_driver_name(acb->dev_info),
2998 		    ddi_get_instance(acb->dev_info), fmt);
2999 		fmt = buf;
3000 	}
3001 	vcmn_err(level, fmt, ap);
3002 }
3003 
3004 static void
3005 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3006 {
3007 	va_list ap;
3008 
3009 	va_start(ap, fmt);
3010 	arcmsr_vlog(acb, level, fmt, ap);
3011 	va_end(ap);
3012 }
3013 
3014 static void
3015 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3016 {
3017 	va_list ap;
3018 
3019 	va_start(ap, fmt);
3020 	arcmsr_vlog(acb, CE_WARN, fmt, ap);
3021 	va_end(ap);
3022 }
3023 
3024 static void
3025 arcmsr_init_list_head(struct list_head *list)
3026 {
3027 	list->next = list;
3028 	list->prev = list;
3029 }
3030 
3031 static void
3032 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3033 {
3034 	next->prev = prev;
3035 	prev->next = next;
3036 }
3037 
3038 static void
3039 arcmsr_x_list_add(struct list_head *new_one,  struct list_head *prev,
3040     struct list_head *next)
3041 {
3042 	next->prev = new_one;
3043 	new_one->next = next;
3044 	new_one->prev = prev;
3045 	prev->next = new_one;
3046 }
3047 
3048 static void
3049 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3050     struct list_head *head)
3051 {
3052 	mutex_enter(list_lock);
3053 	arcmsr_x_list_add(new_one, head->prev, head);
3054 	mutex_exit(list_lock);
3055 }
3056 
3057 static struct list_head *
3058 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3059 {
3060 	struct list_head *one = NULL;
3061 
3062 	mutex_enter(list_lock);
3063 	if (head->next == head)	{
3064 		mutex_exit(list_lock);
3065 		return (NULL);
3066 	}
3067 	one = head->next;
3068 	arcmsr_x_list_del(one->prev, one->next);
3069 	arcmsr_init_list_head(one);
3070 	mutex_exit(list_lock);
3071 	return (one);
3072 }
3073 
3074 static struct CCB *
3075 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3076 {
3077 	struct list_head *first_complete_ccb_list = NULL;
3078 	struct CCB *ccb;
3079 
3080 	first_complete_ccb_list =
3081 	    arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3082 	    &acb->ccb_complete_list);
3083 	if (first_complete_ccb_list == NULL) {
3084 		return (NULL);
3085 	}
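	/*
	 * The list node is embedded in the CCB, so back up by its offset
	 * to recover the containing CCB (the usual container-of idiom).
	 */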
3086 	ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3087 	    offsetof(struct CCB, complete_queue_pointer));
3088 	return (ccb);
3089 }
3090 
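/*
 * The free CCB pool is managed as a ring over ccbworkingQ[]: ccb_get_index
 * chases ccb_put_index, and the pool is treated as empty when advancing the
 * get index would make it equal the put index.
 */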
3091 static struct CCB *
3092 arcmsr_get_freeccb(struct ACB *acb)
3093 {
3094 	struct CCB *ccb;
3095 	int ccb_get_index, ccb_put_index;
3096 
3097 	mutex_enter(&acb->workingQ_mutex);
3098 	ccb_put_index = acb->ccb_put_index;
3099 	ccb_get_index = acb->ccb_get_index;
3100 	ccb = acb->ccbworkingQ[ccb_get_index];
3101 	ccb_get_index++;
3102 	if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3103 		ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3104 	if (ccb_put_index != ccb_get_index) {
3105 		acb->ccb_get_index = ccb_get_index;
3106 		arcmsr_init_list_head(&ccb->complete_queue_pointer);
3107 		ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3108 	} else {
3109 		ccb = NULL;
3110 	}
3111 	mutex_exit(&acb->workingQ_mutex);
3112 	return (ccb);
3113 }
3114 
3115 
3116 static void
3117 arcmsr_free_ccb(struct CCB *ccb)
3118 {
3119 	struct ACB *acb = ccb->acb;
3120 
3121 	if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3122 		return;
3123 	}
3124 	mutex_enter(&acb->workingQ_mutex);
3125 	ccb->ccb_state = ARCMSR_CCB_FREE;
3126 	ccb->pkt = NULL;
3127 	ccb->pkt_dma_handle = NULL;
3128 	ccb->ccb_flags = 0;
3129 	acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3130 	acb->ccb_put_index++;
3131 	if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3132 		acb->ccb_put_index =
3133 		    acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3134 	mutex_exit(&acb->workingQ_mutex);
3135 }
3136 
3137 
3138 static void
3139 arcmsr_ccbs_timeout(void* arg)
3140 {
3141 	struct ACB *acb = (struct ACB *)arg;
3142 	struct CCB *ccb;
3143 	int i, instance, timeout_count = 0;
3144 	uint32_t intmask_org;
3145 	time_t current_time = ddi_get_time();
3146 
3147 	intmask_org = arcmsr_disable_allintr(acb);
3148 	mutex_enter(&acb->isr_mutex);
3149 	if (acb->ccboutstandingcount != 0) {
3150 		/* check each ccb */
3151 		i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3152 		    DDI_DMA_SYNC_FORKERNEL);
3153 		if (i != DDI_SUCCESS) {
3154 			if ((acb->timeout_id != 0) &&
3155 			    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3156 				/* do pkt timeout check each 60 secs */
3157 				acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3158 				    (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3159 				    drv_usectohz(1000000)));
3160 			}
3161 			mutex_exit(&acb->isr_mutex);
3162 			arcmsr_enable_allintr(acb, intmask_org);
3163 			return;
3164 		}
3165 		instance = ddi_get_instance(acb->dev_info);
3166 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3167 			ccb = acb->pccb_pool[i];
3168 			if (ccb->acb != acb) {
3169 				break;
3170 			}
3171 			if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3172 				continue;
3173 			}
3174 			if (ccb->pkt == NULL) {
3175 				continue;
3176 			}
3177 			if (ccb->pkt->pkt_time == 0) {
3178 				continue;
3179 			}
3180 			if (ccb->ccb_time >= current_time) {
3181 				continue;
3182 			}
3183 			int id = ccb->pkt->pkt_address.a_target;
3184 			int lun = ccb->pkt->pkt_address.a_lun;
3185 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3186 				uint8_t	*cdb = (uint8_t	*)&ccb->arcmsr_cdb.Cdb;
3187 
3188 				timeout_count++;
3189 				arcmsr_warn(acb,
3190 				    "scsi target %d lun %d cmd=0x%x "
3191 				    "command timeout, ccb=0x%p",
3192 				    id, lun, *cdb, (void *)ccb);
3193 				ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3194 				ccb->pkt->pkt_reason = CMD_TIMEOUT;
3195 				ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3196 				/* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3197 				arcmsr_ccb_complete(ccb, 1);
3198 				continue;
3199 			} else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3200 			    ARCMSR_CCB_CAN_BE_FREE) {
3201 				arcmsr_free_ccb(ccb);
3202 			}
3203 		}
3204 	}
3205 	if ((acb->timeout_id != 0) &&
3206 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3207 		/* do pkt timeout check each 60 secs */
3208 		acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3209 		    (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3210 	}
3211 	mutex_exit(&acb->isr_mutex);
3212 	arcmsr_enable_allintr(acb, intmask_org);
3213 }
3214 
3215 static void
3216 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3217 {
3218 	struct CCB *ccb;
3219 	uint32_t intmask_org;
3220 	int i;
3221 
3222 	/* disable all outbound interrupts */
3223 	intmask_org = arcmsr_disable_allintr(acb);
3224 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3225 		ccb = acb->pccb_pool[i];
3226 		if (ccb->ccb_state == ARCMSR_CCB_START) {
3227 			if ((target == ccb->pkt->pkt_address.a_target) &&
3228 			    (lun == ccb->pkt->pkt_address.a_lun)) {
3229 				ccb->ccb_state = ARCMSR_CCB_ABORTED;
3230 				ccb->pkt->pkt_reason = CMD_ABORTED;
3231 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
3232 				arcmsr_ccb_complete(ccb, 1);
3233 				arcmsr_log(acb, CE_NOTE,
3234 				    "abort T%dL%d ccb", target, lun);
3235 			}
3236 		}
3237 	}
3238 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3239 	arcmsr_enable_allintr(acb, intmask_org);
3240 }
3241 
3242 static int
3243 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3244 {
3245 	struct scsi_device sd;
3246 	dev_info_t *child;
3247 	int rval;
3248 
3249 	bzero(&sd, sizeof (struct scsi_device));
3250 	sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3251 	sd.sd_address.a_target = (uint16_t)tgt;
3252 	sd.sd_address.a_lun = (uint8_t)lun;
3253 	if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3254 		rval = scsi_hba_probe(&sd, NULL);
3255 		if (rval == SCSIPROBE_EXISTS) {
3256 			rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3257 			if (rval != NDI_SUCCESS) {
3258 				arcmsr_warn(acb, "unable to online T%dL%d",
3259 				    tgt, lun);
3260 			} else {
3261 				arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3262 				    tgt, lun);
3263 			}
3264 		}
3265 	} else {
3266 		rval = scsi_hba_probe(&sd, NULL);
3267 		if (rval == SCSIPROBE_EXISTS)
3268 			rval = arcmsr_config_child(acb, &sd, NULL);
3269 	}
3270 	scsi_unprobe(&sd);
3271 	return (rval);
3272 }
3273 
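/*
 * Compare the firmware's device map against the cached copy and
 * online or offline any target/lun whose state has changed.
 */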
3274 static void
3275 arcmsr_dr_handle(struct ACB *acb)
3276 {
3277 	char *acb_dev_map = (char *)acb->device_map;
3278 	char *devicemap;
3279 	char temp;
3280 	uint16_t target;
3281 	uint8_t lun;
3282 	char diff;
3283 	dev_info_t *dip;
3284 	ddi_acc_handle_t reg;
3285 
3286 	switch (acb->adapter_type) {
3287 	case ACB_ADAPTER_TYPE_A:
3288 	{
3289 		struct HBA_msgUnit *phbamu;
3290 
3291 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3292 		devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3293 		reg = acb->reg_mu_acc_handle0;
3294 		break;
3295 	}
3296 
3297 	case ACB_ADAPTER_TYPE_B:
3298 	{
3299 		struct HBB_msgUnit *phbbmu;
3300 
3301 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3302 		devicemap = (char *)
3303 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3304 		reg = acb->reg_mu_acc_handle1;
3305 		break;
3306 	}
3307 
3308 	case ACB_ADAPTER_TYPE_C:
3309 	{
3310 		struct HBC_msgUnit *phbcmu;
3311 
3312 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3313 		devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3314 		reg = acb->reg_mu_acc_handle0;
3315 		break;
3316 	}
3317 
3318 	}
3319 
3320 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3321 		temp = CHIP_REG_READ8(reg, devicemap);
3322 		diff = (*acb_dev_map) ^ temp;
3323 		if (diff != 0) {
3324 			*acb_dev_map = temp;
3325 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3326 				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3327 					ndi_devi_enter(acb->dev_info);
3328 					acb->devstate[target][lun] =
3329 					    ARECA_RAID_GOOD;
3330 					(void) arcmsr_scsi_device_probe(acb,
3331 					    target, lun);
3332 					ndi_devi_exit(acb->dev_info);
3333 					arcmsr_log(acb, CE_NOTE,
3334 					    "T%dL%d on-line", target, lun);
3335 				} else if ((temp & 0x01) == 0 &&
3336 				    (diff & 0x01) == 1) {
3337 					dip = arcmsr_find_child(acb, target,
3338 					    lun);
3339 					if (dip != NULL) {
3340 						acb->devstate[target][lun] =
3341 						    ARECA_RAID_GONE;
3342 						if (mutex_owned(&acb->
3343 						    isr_mutex)) {
3344 							arcmsr_abort_dr_ccbs(
3345 							    acb, target, lun);
3346 							(void)
3347 							    ndi_devi_offline(
3348 							    dip,
3349 							    NDI_DEVI_REMOVE |
3350 							    NDI_DEVI_OFFLINE);
3351 						} else {
3352 							mutex_enter(&acb->
3353 							    isr_mutex);
3354 							arcmsr_abort_dr_ccbs(
3355 							    acb, target, lun);
3356 							(void)
3357 							    ndi_devi_offline(
3358 							    dip,
3359 							    NDI_DEVI_REMOVE |
3360 							    NDI_DEVI_OFFLINE);
3361 							mutex_exit(&acb->
3362 							    isr_mutex);
3363 						}
3364 					}
3365 					arcmsr_log(acb, CE_NOTE,
3366 					    "T%dL%d off-line", target, lun);
3367 				}
3368 				temp >>= 1;
3369 				diff >>= 1;
3370 			}
3371 		}
3372 		devicemap++;
3373 		acb_dev_map++;
3374 	}
3375 }
3376 
3377 
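/* timeout(9F) handler: ask the IOP for its current device map */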
3378 static void
3379 arcmsr_devMap_monitor(void *arg)
3380 {
3381 
3382 	struct ACB *acb = (struct ACB *)arg;
3383 	switch (acb->adapter_type) {
3384 	case ACB_ADAPTER_TYPE_A:
3385 	{
3386 		struct HBA_msgUnit *phbamu;
3387 
3388 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3389 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3390 		    &phbamu->inbound_msgaddr0,
3391 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3392 		break;
3393 	}
3394 
3395 	case ACB_ADAPTER_TYPE_B:
3396 	{
3397 		struct HBB_msgUnit *phbbmu;
3398 
3399 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3400 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3401 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3402 		    ARCMSR_MESSAGE_GET_CONFIG);
3403 		break;
3404 	}
3405 
3406 	case ACB_ADAPTER_TYPE_C:
3407 	{
3408 		struct HBC_msgUnit *phbcmu;
3409 
3410 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3411 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3412 		    &phbcmu->inbound_msgaddr0,
3413 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3414 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3415 		    &phbcmu->inbound_doorbell,
3416 		    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3417 		break;
3418 	}
3419 
3420 	}
3421 
3422 	if ((acb->timeout_id != 0) &&
3423 	    ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3424 		/* poll the IOP device map every 5 seconds */
3425 		acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3426 		    (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3427 	}
3428 }
3429 
3430 
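/*
 * Mask all outbound interrupts and return the previous mask so the
 * caller can restore it later via arcmsr_enable_allintr().
 */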
3431 static uint32_t
3432 arcmsr_disable_allintr(struct ACB *acb)
3433 {
3434 	uint32_t intmask_org;
3435 
3436 	switch (acb->adapter_type) {
3437 	case ACB_ADAPTER_TYPE_A:
3438 	{
3439 		struct HBA_msgUnit *phbamu;
3440 
3441 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3442 		/* disable all outbound interrupt */
3443 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3444 		    &phbamu->outbound_intmask);
3445 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3446 		    &phbamu->outbound_intmask,
3447 		    intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3448 		break;
3449 	}
3450 
3451 	case ACB_ADAPTER_TYPE_B:
3452 	{
3453 		struct HBB_msgUnit *phbbmu;
3454 
3455 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3456 		/* disable all outbound interrupt */
3457 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3458 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3459 		/* disable all interrupts */
3460 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3461 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3462 		break;
3463 	}
3464 
3465 	case ACB_ADAPTER_TYPE_C:
3466 	{
3467 		struct HBC_msgUnit *phbcmu;
3468 
3469 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3470 		/* disable all outbound interrupt */
3471 		intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3472 		    &phbcmu->host_int_mask); /* disable outbound message0 int */
3473 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3474 		    &phbcmu->host_int_mask,
3475 		    intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3476 		break;
3477 	}
3478 
3479 	}
3480 	return (intmask_org);
3481 }
3482 
3483 
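/*
 * Restore the interrupt mask saved by arcmsr_disable_allintr() and
 * re-enable the outbound post queue and doorbell interrupts.
 */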
3484 static void
3485 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3486 {
3487 	int mask;
3488 
3489 	switch (acb->adapter_type) {
3490 	case ACB_ADAPTER_TYPE_A:
3491 	{
3492 		struct HBA_msgUnit *phbamu;
3493 
3494 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3495 		/*
3496 		 * enable outbound Post Queue, outbound doorbell message0
3497 		 * Interrupt
3498 		 */
3499 		mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3500 		    ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3501 		    ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3502 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3503 		    &phbamu->outbound_intmask, intmask_org & mask);
3504 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3505 		break;
3506 	}
3507 
3508 	case ACB_ADAPTER_TYPE_B:
3509 	{
3510 		struct HBB_msgUnit *phbbmu;
3511 
3512 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3513 		mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3514 		    ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3515 		    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3516 		/* 1=interrupt enable, 0=interrupt disable */
3517 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3518 		    &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3519 		    intmask_org | mask);
3520 		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3521 		break;
3522 	}
3523 
3524 	case ACB_ADAPTER_TYPE_C:
3525 	{
3526 		struct HBC_msgUnit *phbcmu;
3527 
3528 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3529 		/* enable outbound Post Queue,outbound doorbell Interrupt */
3530 		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3531 		    ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3532 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3533 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3534 		    &phbcmu->host_int_mask, intmask_org & mask);
3535 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3536 		break;
3537 	}
3538 
3539 	}
3540 }
3541 
3542 
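/* park the IOP: stop background rebuild and flush the adapter cache */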
3543 static void
3544 arcmsr_iop_parking(struct ACB *acb)
3545 {
3546 	/* stop adapter background rebuild */
3547 	if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3548 		uint32_t intmask_org;
3549 
3550 		acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3551 		/* disable all outbound interrupt */
3552 		intmask_org = arcmsr_disable_allintr(acb);
3553 		switch (acb->adapter_type) {
3554 		case ACB_ADAPTER_TYPE_A:
3555 			arcmsr_stop_hba_bgrb(acb);
3556 			arcmsr_flush_hba_cache(acb);
3557 			break;
3558 
3559 		case ACB_ADAPTER_TYPE_B:
3560 			arcmsr_stop_hbb_bgrb(acb);
3561 			arcmsr_flush_hbb_cache(acb);
3562 			break;
3563 
3564 		case ACB_ADAPTER_TYPE_C:
3565 			arcmsr_stop_hbc_bgrb(acb);
3566 			arcmsr_flush_hbc_cache(acb);
3567 			break;
3568 		}
3569 		/*
3570 		 * enable outbound Post Queue
3571 		 * enable outbound doorbell Interrupt
3572 		 */
3573 		arcmsr_enable_allintr(acb, intmask_org);
3574 	}
3575 }
3576 
3577 
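/*
 * Poll for up to 20 seconds for the IOP to acknowledge the last
 * message posted to it (type A adapters).
 */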
3578 static uint8_t
3579 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3580 {
3581 	uint32_t i;
3582 	uint8_t retries = 0x00;
3583 	struct HBA_msgUnit *phbamu;
3584 
3585 
3586 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3587 
3588 	do {
3589 		for (i = 0; i < 100; i++) {
3590 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3591 			    &phbamu->outbound_intstatus) &
3592 			    ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3593 				/* clear interrupt */
3594 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3595 				    &phbamu->outbound_intstatus,
3596 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3597 				return (TRUE);
3598 			}
3599 			drv_usecwait(10000);
3600 			if (ddi_in_panic()) {
3601 				/* clear interrupts */
3602 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3603 				    &phbamu->outbound_intstatus,
3604 				    ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3605 				return (TRUE);
3606 			}
3607 		} /* max 1 second */
3608 	} while (retries++ < 20); /* max 20 seconds */
3609 	return (FALSE);
3610 }
3611 
3612 
3613 static uint8_t
3614 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3615 {
3616 	struct HBB_msgUnit *phbbmu;
3617 	uint32_t i;
3618 	uint8_t retries = 0x00;
3619 
3620 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3621 
3622 	do {
3623 		for (i = 0; i < 100; i++) {
3624 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3625 			    &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3626 			    ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3627 				/* clear interrupt */
3628 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3629 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3630 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3631 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3632 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3633 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3634 				return (TRUE);
3635 			}
3636 			drv_usecwait(10000);
3637 			if (ddi_in_panic()) {
3638 				/* clear interrupts */
3639 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3640 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3641 				    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3642 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3643 				    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3644 				    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3645 				return (TRUE);
3646 			}
3647 		} /* max 1 second */
3648 	} while (retries++ < 20); /* max 20 seconds */
3649 
3650 	return (FALSE);
3651 }
3652 
3653 
3654 static uint8_t
3655 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3656 {
3657 	uint32_t i;
3658 	uint8_t retries = 0x00;
3659 	struct HBC_msgUnit *phbcmu;
3660 	uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3661 
3662 
3663 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3664 
3665 	do {
3666 		for (i = 0; i < 100; i++) {
3667 			if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3668 			    &phbcmu->outbound_doorbell) &
3669 			    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3670 				/* clear interrupt */
3671 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3672 				    &phbcmu->outbound_doorbell_clear, c);
3673 				return (TRUE);
3674 			}
3675 			drv_usecwait(10000);
3676 			if (ddi_in_panic()) {
3677 				/* clear interrupts */
3678 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3679 				    &phbcmu->outbound_doorbell_clear, c);
3680 				return (TRUE);
3681 			}
3682 		} /* max 1 second */
3683 	} while (retries++ < 20); /* max 20 seconds */
3684 	return (FALSE);
3685 }
3686 
3687 
3688 static void
3689 arcmsr_flush_hba_cache(struct ACB *acb)
3690 {
3691 	struct HBA_msgUnit *phbamu;
3692 	int retry_count = 30;
3693 
3694 	/* allow up to 10 minutes for the adapter cache flush */
3695 
3696 	phbamu = (struct HBA_msgUnit *)acb->pmu;
3697 
3698 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3699 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3700 	do {
3701 		if (arcmsr_hba_wait_msgint_ready(acb)) {
3702 			break;
3703 		} else {
3704 			retry_count--;
3705 		}
3706 	} while (retry_count != 0);
3707 }
3708 
3709 static void
3710 arcmsr_flush_hbb_cache(struct ACB *acb)
3711 {
3712 	struct HBB_msgUnit *phbbmu;
3713 	int retry_count = 30;
3714 
3715 	/* allow up to 10 minutes for the adapter cache flush */
3716 
3717 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
3718 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3719 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
3720 	    ARCMSR_MESSAGE_FLUSH_CACHE);
3721 	do {
3722 		if (arcmsr_hbb_wait_msgint_ready(acb)) {
3723 			break;
3724 		} else {
3725 			retry_count--;
3726 		}
3727 	} while (retry_count != 0);
3728 }
3729 
3730 
3731 static void
3732 arcmsr_flush_hbc_cache(struct ACB *acb)
3733 {
3734 	struct HBC_msgUnit *phbcmu;
3735 	int retry_count = 30;
3736 
3737 	/* allow up to 10 minutes for the adapter cache flush */
3738 
3739 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
3740 
3741 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3742 	    ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3743 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3744 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3745 	do {
3746 		if (arcmsr_hbc_wait_msgint_ready(acb)) {
3747 			break;
3748 		} else {
3749 			retry_count--;
3750 		}
3751 	} while (retry_count != 0);
3752 }
3753 
3754 
3755 
3756 static uint8_t
3757 arcmsr_abort_hba_allcmd(struct ACB *acb)
3758 {
3759 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3760 
3761 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3762 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3763 
3764 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
3765 		arcmsr_warn(acb,
3766 		    "timeout while waiting for 'abort all "
3767 		    "outstanding commands'");
3768 		return (0xff);
3769 	}
3770 	return (0x00);
3771 }
3772 
3773 
3774 
3775 static uint8_t
3776 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3777 {
3778 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3779 
3780 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3781 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3782 
3783 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3784 		arcmsr_warn(acb,
3785 		    "timeout while waiting for 'abort all "
3786 		    "outstanding commands'");
3787 		return (0xff);
3788 	}
3789 	return (0x00);
3790 }
3791 
3792 
3793 static uint8_t
3794 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3795 {
3796 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3797 
3798 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3799 	    ARCMSR_INBOUND_MESG0_ABORT_CMD);
3800 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3801 	    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3802 
3803 	if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3804 		arcmsr_warn(acb,
3805 		    "timeout while waiting for 'abort all "
3806 		    "outstanding commands'");
3807 		return (0xff);
3808 	}
3809 	return (0x00);
3810 }
3811 
3812 
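/*
 * Drain the outbound post queue after an abort and complete every
 * CCB found there.
 */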
3813 static void
3814 arcmsr_done4abort_postqueue(struct ACB *acb)
3815 {
3816 
3817 	struct CCB *ccb;
3818 	uint32_t flag_ccb;
3819 	int i = 0;
3820 	boolean_t error;
3821 
3822 	switch (acb->adapter_type) {
3823 	case ACB_ADAPTER_TYPE_A:
3824 	{
3825 		struct HBA_msgUnit *phbamu;
3826 		uint32_t outbound_intstatus;
3827 
3828 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3829 		/* clear and abort all outbound posted Q */
3830 		outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3831 		    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3832 		/* clear interrupt */
3833 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3834 		    &phbamu->outbound_intstatus, outbound_intstatus);
3835 		while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3836 		    &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3837 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3838 			/* frame must be 32 bytes aligned */
3839 			/* the CDB is the first field of the CCB */
3840 			ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3841 			/* check if command done with no error */
3842 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3843 			    B_TRUE : B_FALSE;
3844 			arcmsr_drain_donequeue(acb, ccb, error);
3845 		}
3846 		break;
3847 	}
3848 
3849 	case ACB_ADAPTER_TYPE_B:
3850 	{
3851 		struct HBB_msgUnit *phbbmu;
3852 
3853 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3854 		/* clear all outbound posted Q */
3855 		/* clear doorbell interrupt */
3856 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3857 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
3858 		    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3859 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3860 			if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3861 				phbbmu->done_qbuffer[i] = 0;
3862 				/* frame must be 32 bytes aligned */
3863 				ccb = NumToPtr((acb->vir2phy_offset +
3864 				    (flag_ccb << 5)));
3865 				/* check if command done with no error */
3866 				error =
3867 				    (flag_ccb &
3868 				    ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3869 				    B_TRUE : B_FALSE;
3870 				arcmsr_drain_donequeue(acb, ccb, error);
3871 			}
3872 			phbbmu->post_qbuffer[i] = 0;
3873 		}	/* drain reply FIFO */
3874 		phbbmu->doneq_index = 0;
3875 		phbbmu->postq_index = 0;
3876 		break;
3877 	}
3878 
3879 	case ACB_ADAPTER_TYPE_C:
3880 	{
3881 		struct HBC_msgUnit *phbcmu;
3882 		uint32_t ccb_cdb_phy;
3883 
3884 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
3885 		while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3886 		    &phbcmu->host_int_status) &
3887 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3888 		    (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3889 			/* pop the completed CCB from the outbound post queue */
3890 			flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3891 			    &phbcmu->outbound_queueport_low);
3892 			/* frame must be 32 bytes aligned */
3893 			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3894 			ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3895 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
3896 			    B_TRUE : B_FALSE;
3897 			arcmsr_drain_donequeue(acb, ccb, error);
3898 		}
3899 		break;
3900 	}
3901 
3902 	}
3903 }
3904 /*
3905  * Routine Description: try to get an echo from the IOP.
3906  *           Arguments:
3907  *        Return Value: 0 on success, 0xFF if the IOP does not respond.
3908  */
3909 static uint8_t
3910 arcmsr_get_echo_from_iop(struct ACB *acb)
3911 {
3912 	uint32_t intmask_org;
3913 	uint8_t rtnval = 0;
3914 
3915 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3916 		struct HBA_msgUnit *phbamu;
3917 
3918 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3919 		intmask_org = arcmsr_disable_allintr(acb);
3920 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3921 		    &phbamu->inbound_msgaddr0,
3922 		    ARCMSR_INBOUND_MESG0_GET_CONFIG);
3923 		if (!arcmsr_hba_wait_msgint_ready(acb)) {
3924 			arcmsr_warn(acb, "timeout while trying to get "
3925 			    "an echo from the iop");
3926 			acb->acb_flags |= ACB_F_BUS_HANG_ON;
3927 			rtnval = 0xFF;
3928 		}
3929 		/* enable all outbound interrupt */
3930 		arcmsr_enable_allintr(acb, intmask_org);
3931 	}
3932 	return (rtnval);
3933 }
3934 
3935 /*
3936  * Routine Description: Reset the 80331 IOP.
3937  *           Arguments:
3938  *        Return Value: 0 on success, 0xFF on failure.
3939  */
3940 static uint8_t
3941 arcmsr_iop_reset(struct ACB *acb)
3942 {
3943 	struct CCB *ccb;
3944 	uint32_t intmask_org;
3945 	uint8_t rtnval = 0;
3946 	int i = 0;
3947 
3948 	if (acb->ccboutstandingcount > 0) {
3949 		/* disable all outbound interrupt */
3950 		intmask_org = arcmsr_disable_allintr(acb);
3951 		/* clear and abort all outbound posted Q */
3952 		arcmsr_done4abort_postqueue(acb);
3953 		/* tell the iop 331 that its outstanding commands were aborted */
3954 		rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3955 		    0xFF : arcmsr_abort_host_command(acb);
3956 
3957 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3958 			ccb = acb->pccb_pool[i];
3959 			if (ccb->ccb_state == ARCMSR_CCB_START) {
3960 				/* ccb->ccb_state = ARCMSR_CCB_RESET; */
3961 				ccb->pkt->pkt_reason = CMD_RESET;
3962 				ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3963 				arcmsr_ccb_complete(ccb, 1);
3964 			}
3965 		}
3966 		atomic_and_32(&acb->ccboutstandingcount, 0);
3967 		/* enable all outbound interrupt */
3968 		arcmsr_enable_allintr(acb, intmask_org);
3969 	} else {
3970 		rtnval = arcmsr_get_echo_from_iop(acb);
3971 	}
3972 	return (rtnval);
3973 }
3974 
3975 
3976 static struct QBUFFER *
3977 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3978 {
3979 	struct QBUFFER *qb;
3980 
3981 	switch (acb->adapter_type) {
3982 	case ACB_ADAPTER_TYPE_A:
3983 	{
3984 		struct HBA_msgUnit *phbamu;
3985 
3986 		phbamu = (struct HBA_msgUnit *)acb->pmu;
3987 		qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3988 		break;
3989 	}
3990 
3991 	case ACB_ADAPTER_TYPE_B:
3992 	{
3993 		struct HBB_msgUnit *phbbmu;
3994 
3995 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
3996 		qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
3997 		break;
3998 	}
3999 
4000 	case ACB_ADAPTER_TYPE_C:
4001 	{
4002 		struct HBC_msgUnit *phbcmu;
4003 
4004 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4005 		qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4006 		break;
4007 	}
4008 
4009 	}
4010 	return (qb);
4011 }
4012 
4013 
4014 static struct QBUFFER *
4015 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4016 {
4017 	struct QBUFFER *qbuffer = NULL;
4018 
4019 	switch (acb->adapter_type) {
4020 	case ACB_ADAPTER_TYPE_A:
4021 	{
4022 		struct HBA_msgUnit *phbamu;
4023 
4024 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4025 		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4026 		break;
4027 	}
4028 
4029 	case ACB_ADAPTER_TYPE_B:
4030 	{
4031 		struct HBB_msgUnit *phbbmu;
4032 
4033 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4034 		qbuffer = (struct QBUFFER *)
4035 		    &phbbmu->hbb_rwbuffer->message_wbuffer;
4036 		break;
4037 	}
4038 
4039 	case ACB_ADAPTER_TYPE_C:
4040 	{
4041 		struct HBC_msgUnit *phbcmu;
4042 
4043 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4044 		qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4045 		break;
4046 	}
4047 
4048 	}
4049 	return (qbuffer);
4050 }
4051 
4052 
4053 
4054 static void
4055 arcmsr_iop_message_read(struct ACB *acb)
4056 {
4057 	switch (acb->adapter_type) {
4058 	case ACB_ADAPTER_TYPE_A:
4059 	{
4060 		struct HBA_msgUnit *phbamu;
4061 
4062 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4063 		/* let IOP know the data has been read */
4064 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4065 		    &phbamu->inbound_doorbell,
4066 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4067 		break;
4068 	}
4069 
4070 	case ACB_ADAPTER_TYPE_B:
4071 	{
4072 		struct HBB_msgUnit *phbbmu;
4073 
4074 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4075 		/* let IOP know the data has been read */
4076 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4077 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4078 		    ARCMSR_DRV2IOP_DATA_READ_OK);
4079 		break;
4080 	}
4081 
4082 	case ACB_ADAPTER_TYPE_C:
4083 	{
4084 		struct HBC_msgUnit *phbcmu;
4085 
4086 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4087 		/* let IOP know data has been read */
4088 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4089 		    &phbcmu->inbound_doorbell,
4090 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4091 		break;
4092 	}
4093 
4094 	}
4095 }
4096 
4097 
4098 
4099 static void
4100 arcmsr_iop_message_wrote(struct ACB *acb)
4101 {
4102 	switch (acb->adapter_type) {
4103 	case ACB_ADAPTER_TYPE_A: {
4104 		struct HBA_msgUnit *phbamu;
4105 
4106 		phbamu = (struct HBA_msgUnit *)acb->pmu;
4107 		/*
4108 		 * ring the inbound doorbell to tell the IOP the data was
4109 		 * written OK; await the reply at the next hw interrupt
4110 		 */
4111 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4112 		    &phbamu->inbound_doorbell,
4113 		    ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4114 		break;
4115 	}
4116 
4117 	case ACB_ADAPTER_TYPE_B:
4118 	{
4119 		struct HBB_msgUnit *phbbmu;
4120 
4121 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
4122 		/*
4123 		 * ring the inbound doorbell to tell the IOP the driver data
4124 		 * was written successfully, then await the reply at the next
4125 		 * hw interrupt before posting the next Qbuffer
4126 		 */
4127 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4128 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4129 		    ARCMSR_DRV2IOP_DATA_WRITE_OK);
4130 		break;
4131 	}
4132 
4133 	case ACB_ADAPTER_TYPE_C:
4134 	{
4135 		struct HBC_msgUnit *phbcmu;
4136 
4137 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
4138 		/*
4139 		 * ring the inbound doorbell to tell the IOP the data was
4140 		 * written OK; await the reply at the next hw interrupt
4141 		 */
4142 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4143 		    &phbcmu->inbound_doorbell,
4144 		    ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4145 		break;
4146 	}
4147 
4148 	}
4149 }
4150 
4151 
4152 
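/*
 * Copy pending ioctl data from the driver's write queue buffer into
 * the IOP message buffer and notify the IOP that it was written.
 */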
4153 static void
4154 arcmsr_post_ioctldata2iop(struct ACB *acb)
4155 {
4156 	uint8_t *pQbuffer;
4157 	struct QBUFFER *pwbuffer;
4158 	uint8_t *iop_data;
4159 	int32_t allxfer_len = 0;
4160 
4161 	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4162 	iop_data = (uint8_t *)pwbuffer->data;
4163 	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4164 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4165 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4166 		    (allxfer_len < 124)) {
4167 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4168 			(void) memcpy(iop_data, pQbuffer, 1);
4169 			acb->wqbuf_firstidx++;
4170 			/* if last index number set it to 0 */
4171 			acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4172 			iop_data++;
4173 			allxfer_len++;
4174 		}
4175 		pwbuffer->data_len = allxfer_len;
4176 		/*
4177 		 * push inbound doorbell and wait reply at hwinterrupt
4178 		 * routine for next Qbuffer post
4179 		 */
4180 		arcmsr_iop_message_wrote(acb);
4181 	}
4182 }
4183 
4184 
4185 
4186 static void
4187 arcmsr_stop_hba_bgrb(struct ACB *acb)
4188 {
4189 	struct HBA_msgUnit *phbamu;
4190 
4191 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4192 
4193 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4194 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4195 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4196 	if (!arcmsr_hba_wait_msgint_ready(acb))
4197 		arcmsr_warn(acb,
4198 		    "timeout while waiting for background rebuild completion");
4199 }
4200 
4201 
4202 static void
4203 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4204 {
4205 	struct HBB_msgUnit *phbbmu;
4206 
4207 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4208 
4209 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4210 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4211 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4212 
4213 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4214 		arcmsr_warn(acb,
4215 		    "timeout while waiting for background rebuild completion");
4216 }
4217 
4218 
4219 static void
4220 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4221 {
4222 	struct HBC_msgUnit *phbcmu;
4223 
4224 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4225 
4226 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4227 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4228 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4229 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4230 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4231 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4232 		arcmsr_warn(acb,
4233 		    "timeout while waiting for background rebuild completion");
4234 }
4235 
4236 
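/*
 * Handle an Areca pass-through request embedded in a SCSI CDB: read,
 * write or clear the message queue buffers exchanged with the IOP,
 * plus related housekeeping requests.
 */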
4237 static int
4238 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4239 {
4240 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4241 	struct CCB *ccb = pkt->pkt_ha_private;
4242 	struct buf *bp = ccb->bp;
4243 	uint8_t *pQbuffer;
4244 	int retvalue = 0, transfer_len = 0;
4245 	char *buffer;
4246 	uint32_t controlcode;
4247 
4248 
4249 	/* 4 bytes: Areca io control code */
4250 	controlcode =
4251 	    (uint32_t)pkt->pkt_cdbp[5] << 24 |
4252 	    (uint32_t)pkt->pkt_cdbp[6] << 16 |
4253 	    (uint32_t)pkt->pkt_cdbp[7] << 8 |
4254 	    (uint32_t)pkt->pkt_cdbp[8];
4255 
4256 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
4257 		bp_mapin(bp);
4258 
4259 	buffer = bp->b_un.b_addr;
4260 	transfer_len = bp->b_bcount;
4261 	if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4262 		retvalue = ARCMSR_MESSAGE_FAIL;
4263 		goto message_out;
4264 	}
4265 
4266 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4267 	switch (controlcode) {
4268 	case ARCMSR_MESSAGE_READ_RQBUFFER:
4269 	{
4270 		unsigned long *ver_addr;
4271 		uint8_t *ptmpQbuffer;
4272 		int32_t allxfer_len = 0;
4273 
4274 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4275 
4276 		ptmpQbuffer = (uint8_t *)ver_addr;
4277 		while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4278 		    (allxfer_len < (MSGDATABUFLEN - 1))) {
4279 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4280 			(void) memcpy(ptmpQbuffer, pQbuffer, 1);
4281 			acb->rqbuf_firstidx++;
4282 			acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4283 			ptmpQbuffer++;
4284 			allxfer_len++;
4285 		}
4286 
4287 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4288 			struct QBUFFER *prbuffer;
4289 			uint8_t  *iop_data;
4290 			int32_t iop_len;
4291 
4292 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4293 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
4294 			iop_data = (uint8_t *)prbuffer->data;
4295 			iop_len = (int32_t)prbuffer->data_len;
4296 
4297 			while (iop_len > 0) {
4298 				pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4299 				(void) memcpy(pQbuffer, iop_data, 1);
4300 				acb->rqbuf_lastidx++;
4301 				acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4302 				iop_data++;
4303 				iop_len--;
4304 			}
4305 			arcmsr_iop_message_read(acb);
4306 		}
4307 
4308 		(void) memcpy(pcmdmessagefld->messagedatabuffer,
4309 		    (uint8_t *)ver_addr, allxfer_len);
4310 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
4311 		pcmdmessagefld->cmdmessage.ReturnCode =
4312 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4313 		kmem_free(ver_addr, MSGDATABUFLEN);
4314 		break;
4315 	}
4316 
4317 	case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4318 	{
4319 		uint8_t *ver_addr;
4320 		int32_t my_empty_len, user_len, wqbuf_firstidx,
4321 		    wqbuf_lastidx;
4322 		uint8_t *ptmpuserbuffer;
4323 
4324 		ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4325 
4326 		ptmpuserbuffer = ver_addr;
4327 		user_len = min(pcmdmessagefld->cmdmessage.Length,
4328 		    MSGDATABUFLEN);
4329 		(void) memcpy(ptmpuserbuffer,
4330 		    pcmdmessagefld->messagedatabuffer, user_len);
4331 		wqbuf_lastidx = acb->wqbuf_lastidx;
4332 		wqbuf_firstidx = acb->wqbuf_firstidx;
4333 		if (wqbuf_lastidx != wqbuf_firstidx) {
4334 			struct scsi_arq_status *arq_status;
4335 
4336 			arcmsr_post_ioctldata2iop(acb);
4337 			arq_status = (struct scsi_arq_status *)
4338 			    (intptr_t)(pkt->pkt_scbp);
4339 			bzero((caddr_t)arq_status,
4340 			    sizeof (struct scsi_arq_status));
4341 			arq_status->sts_rqpkt_reason = CMD_CMPLT;
4342 			arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4343 			    STATE_GOT_TARGET | STATE_SENT_CMD |
4344 			    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4345 
4346 			arq_status->sts_rqpkt_statistics =
4347 			    pkt->pkt_statistics;
4348 			arq_status->sts_rqpkt_resid = 0;
4349 
4350 			struct scsi_extended_sense *sts_sensedata;
4351 
4352 			sts_sensedata = &arq_status->sts_sensedata;
4353 
4354 			/* report the error through request-sense data */
4355 			sts_sensedata->es_code = 0x0;
4356 			sts_sensedata->es_valid = 0x01;
4357 			sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4358 			/* AdditionalSenseLength */
4359 			sts_sensedata->es_add_len = 0x0A;
4360 			/* AdditionalSenseCode */
4361 			sts_sensedata->es_add_code = 0x20;
4362 			retvalue = ARCMSR_MESSAGE_FAIL;
4363 		} else {
4364 			my_empty_len = (wqbuf_firstidx-wqbuf_lastidx - 1) &
4365 			    (ARCMSR_MAX_QBUFFER - 1);
4366 			if (my_empty_len >= user_len) {
4367 				while (user_len > 0) {
4368 					pQbuffer = &acb->wqbuffer[
4369 					    acb->wqbuf_lastidx];
4370 					(void) memcpy(pQbuffer,
4371 					    ptmpuserbuffer, 1);
4372 					acb->wqbuf_lastidx++;
4373 					acb->wqbuf_lastidx %=
4374 					    ARCMSR_MAX_QBUFFER;
4375 					ptmpuserbuffer++;
4376 					user_len--;
4377 				}
4378 				if (acb->acb_flags &
4379 				    ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4380 					acb->acb_flags &=
4381 					    ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4382 					arcmsr_post_ioctldata2iop(acb);
4383 				}
4384 			} else {
4385 				struct scsi_arq_status *arq_status;
4386 
4387 				/* report the error through request-sense data */
4388 				arq_status = (struct scsi_arq_status *)
4389 				    (intptr_t)(pkt->pkt_scbp);
4390 				bzero((caddr_t)arq_status,
4391 				    sizeof (struct scsi_arq_status));
4392 				arq_status->sts_rqpkt_reason = CMD_CMPLT;
4393 				arq_status->sts_rqpkt_state =
4394 				    (STATE_GOT_BUS |
4395 				    STATE_GOT_TARGET | STATE_SENT_CMD |
4396 				    STATE_XFERRED_DATA | STATE_GOT_STATUS);
4397 				arq_status->sts_rqpkt_statistics =
4398 				    pkt->pkt_statistics;
4399 				arq_status->sts_rqpkt_resid = 0;
4400 
4401 				struct scsi_extended_sense *sts_sensedata;
4402 				sts_sensedata = &arq_status->sts_sensedata;
4403 
4404 				/* report the error through request-sense data */
4405 				sts_sensedata->es_code  = 0x0;
4406 				sts_sensedata->es_valid = 0x01;
4407 				sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4408 				/* AdditionalSenseLength */
4409 				sts_sensedata->es_add_len = 0x0A;
4410 				/* AdditionalSenseCode */
4411 				sts_sensedata->es_add_code = 0x20;
4412 				retvalue = ARCMSR_MESSAGE_FAIL;
4413 			}
4414 		}
4415 		kmem_free(ver_addr, MSGDATABUFLEN);
4416 		break;
4417 	}
4418 
4419 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4420 		pQbuffer = acb->rqbuffer;
4421 
4422 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4423 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4424 			arcmsr_iop_message_read(acb);
4425 		}
4426 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4427 		acb->rqbuf_firstidx = 0;
4428 		acb->rqbuf_lastidx = 0;
4429 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4430 		pcmdmessagefld->cmdmessage.ReturnCode =
4431 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4432 		break;
4433 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4434 		pQbuffer = acb->wqbuffer;
4435 
4436 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4437 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4438 			arcmsr_iop_message_read(acb);
4439 		}
4440 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4441 		    ACB_F_MESSAGE_WQBUFFER_READ);
4442 		acb->wqbuf_firstidx = 0;
4443 		acb->wqbuf_lastidx = 0;
4444 		(void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4445 		pcmdmessagefld->cmdmessage.ReturnCode =
4446 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4447 		break;
4448 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4449 
4450 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4451 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4452 			arcmsr_iop_message_read(acb);
4453 		}
4454 		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4455 		    ACB_F_MESSAGE_RQBUFFER_CLEARED |
4456 		    ACB_F_MESSAGE_WQBUFFER_READ);
4457 		acb->rqbuf_firstidx = 0;
4458 		acb->rqbuf_lastidx = 0;
4459 		acb->wqbuf_firstidx = 0;
4460 		acb->wqbuf_lastidx = 0;
4461 		pQbuffer = acb->rqbuffer;
4462 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4463 		pQbuffer = acb->wqbuffer;
4464 		(void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4465 		pcmdmessagefld->cmdmessage.ReturnCode =
4466 		    ARCMSR_MESSAGE_RETURNCODE_OK;
4467 		break;
4468 
4469 	case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4470 		pcmdmessagefld->cmdmessage.ReturnCode =
4471 		    ARCMSR_MESSAGE_RETURNCODE_3F;
4472 		break;
4473 	/*
4474 	 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4475 	 */
4476 	case ARCMSR_MESSAGE_SAY_GOODBYE:
4477 		arcmsr_iop_parking(acb);
4478 		break;
4479 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4480 		switch (acb->adapter_type) {
4481 		case ACB_ADAPTER_TYPE_A:
4482 			arcmsr_flush_hba_cache(acb);
4483 			break;
4484 		case ACB_ADAPTER_TYPE_B:
4485 			arcmsr_flush_hbb_cache(acb);
4486 			break;
4487 		case ACB_ADAPTER_TYPE_C:
4488 			arcmsr_flush_hbc_cache(acb);
4489 			break;
4490 		}
4491 		break;
4492 	default:
4493 		retvalue = ARCMSR_MESSAGE_FAIL;
4494 	}
4495 
4496 message_out:
4497 
4498 	return (retvalue);
4499 }
4500 
4501 
4502 
4503 
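/*
 * Quiesce the adapter for detach: stop background rebuild, flush the
 * cache and abort any commands still outstanding.
 */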
4504 static void
4505 arcmsr_pcidev_disattach(struct ACB *acb)
4506 {
4507 	struct CCB *ccb;
4508 	int i = 0;
4509 
4510 	/* disable all outbound interrupts */
4511 	(void) arcmsr_disable_allintr(acb);
4512 	/* stop adapter background rebuild */
4513 	switch (acb->adapter_type) {
4514 	case ACB_ADAPTER_TYPE_A:
4515 		arcmsr_stop_hba_bgrb(acb);
4516 		arcmsr_flush_hba_cache(acb);
4517 		break;
4518 	case ACB_ADAPTER_TYPE_B:
4519 		arcmsr_stop_hbb_bgrb(acb);
4520 		arcmsr_flush_hbb_cache(acb);
4521 		break;
4522 	case ACB_ADAPTER_TYPE_C:
4523 		arcmsr_stop_hbc_bgrb(acb);
4524 		arcmsr_flush_hbc_cache(acb);
4525 		break;
4526 	}
4527 	/* abort all outstanding commands */
4528 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4529 	acb->acb_flags &= ~ACB_F_IOP_INITED;
4530 
4531 	if (acb->ccboutstandingcount != 0) {
4532 		/* clear and abort all outbound posted Q */
4533 		arcmsr_done4abort_postqueue(acb);
4534 		/* tell the iop that its outstanding commands were aborted */
4535 		(void) arcmsr_abort_host_command(acb);
4536 
4537 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4538 			ccb = acb->pccb_pool[i];
4539 			if (ccb->ccb_state == ARCMSR_CCB_START) {
4540 				/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4541 				ccb->pkt->pkt_reason = CMD_ABORTED;
4542 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4543 				arcmsr_ccb_complete(ccb, 1);
4544 			}
4545 		}
4546 	}
4547 }
4548 
4549 /* get firmware miscellaneous data */
4550 static void
4551 arcmsr_get_hba_config(struct ACB *acb)
4552 {
4553 	struct HBA_msgUnit *phbamu;
4554 
4555 	char *acb_firm_model;
4556 	char *acb_firm_version;
4557 	char *acb_device_map;
4558 	char *iop_firm_model;
4559 	char *iop_firm_version;
4560 	char *iop_device_map;
4561 	int count;
4562 
4563 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4564 	acb_firm_model = acb->firm_model;
4565 	acb_firm_version = acb->firm_version;
4566 	acb_device_map = acb->device_map;
4567 	/* firm_model, 15 */
4568 	iop_firm_model =
4569 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4570 	/* firm_version, 17 */
4571 	iop_firm_version =
4572 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4573 
4574 	/* device_map, 21 */
4575 	iop_device_map =
4576 	    (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4577 
4578 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4579 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4580 
4581 	if (!arcmsr_hba_wait_msgint_ready(acb))
4582 		arcmsr_warn(acb,
4583 		    "timeout while waiting for adapter firmware "
4584 		    "miscellaneous data");
4585 
4586 	count = 8;
4587 	while (count) {
4588 		*acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4589 		    iop_firm_model);
4590 		acb_firm_model++;
4591 		iop_firm_model++;
4592 		count--;
4593 	}
4594 
4595 	count = 16;
4596 	while (count) {
4597 		*acb_firm_version =
4598 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4599 		acb_firm_version++;
4600 		iop_firm_version++;
4601 		count--;
4602 	}
4603 
4604 	count = 16;
4605 	while (count) {
4606 		*acb_device_map =
4607 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4608 		acb_device_map++;
4609 		iop_device_map++;
4610 		count--;
4611 	}
4612 
4613 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4614 	    acb->firm_version);
4615 
4616 	/* firm_request_len, 1 */
4617 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4618 	    &phbamu->msgcode_rwbuffer[1]);
4619 	/* firm_numbers_queue, 2 */
4620 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4621 	    &phbamu->msgcode_rwbuffer[2]);
4622 	/* firm_sdram_size, 3 */
4623 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4624 	    &phbamu->msgcode_rwbuffer[3]);
4625 	/* firm_ide_channels, 4 */
4626 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4627 	    &phbamu->msgcode_rwbuffer[4]);
4628 }
4629 
4630 /* get firmware miscellaneous data */
4631 static void
4632 arcmsr_get_hbb_config(struct ACB *acb)
4633 {
4634 	struct HBB_msgUnit *phbbmu;
4635 	char *acb_firm_model;
4636 	char *acb_firm_version;
4637 	char *acb_device_map;
4638 	char *iop_firm_model;
4639 	char *iop_firm_version;
4640 	char *iop_device_map;
4641 	int count;
4642 
4643 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4644 	acb_firm_model = acb->firm_model;
4645 	acb_firm_version = acb->firm_version;
4646 	acb_device_map = acb->device_map;
4647 	/* firm_model, 15 */
4648 	iop_firm_model = (char *)
4649 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4650 	/* firm_version, 17 */
4651 	iop_firm_version = (char *)
4652 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4653 	/* device_map, 21 */
4654 	iop_device_map = (char *)
4655 	    (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4656 
4657 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4658 	    &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4659 
4660 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4661 		arcmsr_warn(acb,
4662 		    "timeout while waiting for adapter firmware "
4663 		    "miscellaneous data");
4664 
4665 	count = 8;
4666 	while (count) {
4667 		*acb_firm_model =
4668 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4669 		acb_firm_model++;
4670 		iop_firm_model++;
4671 		count--;
4672 	}
4673 	count = 16;
4674 	while (count) {
4675 		*acb_firm_version =
4676 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4677 		acb_firm_version++;
4678 		iop_firm_version++;
4679 		count--;
4680 	}
4681 	count = 16;
4682 	while (count) {
4683 		*acb_device_map =
4684 		    CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4685 		acb_device_map++;
4686 		iop_device_map++;
4687 		count--;
4688 	}
4689 
4690 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4691 	    acb->firm_version);
4692 
4693 	/* firm_request_len, 1 */
4694 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4695 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4696 	/* firm_numbers_queue, 2 */
4697 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4698 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4699 	/* firm_sdram_size, 3 */
4700 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4701 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4702 	/* firm_ide_channels, 4 */
4703 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4704 	    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4705 }
4706 
4707 
4708 /* get firmware miscellaneous data */
4709 static void
4710 arcmsr_get_hbc_config(struct ACB *acb)
4711 {
4712 	struct HBC_msgUnit *phbcmu;
4713 
4714 	char *acb_firm_model;
4715 	char *acb_firm_version;
4716 	char *acb_device_map;
4717 	char *iop_firm_model;
4718 	char *iop_firm_version;
4719 	char *iop_device_map;
4720 	int count;
4721 
4722 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4723 	acb_firm_model = acb->firm_model;
4724 	acb_firm_version = acb->firm_version;
4725 	acb_device_map = acb->device_map;
4726 	/* firm_model, 15 */
4727 	iop_firm_model =
4728 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4729 	/* firm_version, 17 */
4730 	iop_firm_version =
4731 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4732 	/* device_map, 21 */
4733 	iop_device_map =
4734 	    (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4735 	/* post "get config" instruction */
4736 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4737 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4738 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4739 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4740 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4741 		arcmsr_warn(acb,
4742 		    "timeout while waiting for adapter firmware "
4743 		    "miscellaneous data");
4744 	count = 8;
4745 	while (count) {
4746 		*acb_firm_model =
4747 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4748 		acb_firm_model++;
4749 		iop_firm_model++;
4750 		count--;
4751 	}
4752 
4753 	count = 16;
4754 	while (count) {
4755 		*acb_firm_version =
4756 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4757 		acb_firm_version++;
4758 		iop_firm_version++;
4759 		count--;
4760 	}
4761 
4762 	count = 16;
4763 	while (count) {
4764 		*acb_device_map =
4765 		    CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4766 		acb_device_map++;
4767 		iop_device_map++;
4768 		count--;
4769 	}
4770 
4771 	arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4772 	    acb->firm_version);
4773 
4774 	/* firm_request_len, 1, 04-07 */
4775 	acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4776 	    &phbcmu->msgcode_rwbuffer[1]);
4777 	/* firm_numbers_queue, 2, 08-11 */
4778 	acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4779 	    &phbcmu->msgcode_rwbuffer[2]);
4780 	/* firm_sdram_size, 3, 12-15 */
4781 	acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4782 	    &phbcmu->msgcode_rwbuffer[3]);
4783 	/* firm_ide_channels, 4, 16-19 */
4784 	acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4785 	    &phbcmu->msgcode_rwbuffer[4]);
4786 	/* firm_cfg_version, 25, 100-103 */
4787 	acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4788 	    &phbcmu->msgcode_rwbuffer[25]);
4789 }
4790 
4791 
4792 /* start background rebuild */
4793 static void
4794 arcmsr_start_hba_bgrb(struct ACB *acb)
4795 {
4796 	struct HBA_msgUnit *phbamu;
4797 
4798 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4799 
4800 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4801 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4802 	    &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4803 
4804 	if (!arcmsr_hba_wait_msgint_ready(acb))
4805 		arcmsr_warn(acb,
4806 		    "timeout while waiting for background rebuild to start");
4807 }
4808 
4809 
4810 static void
4811 arcmsr_start_hbb_bgrb(struct ACB *acb)
4812 {
4813 	struct HBB_msgUnit *phbbmu;
4814 
4815 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4816 
4817 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4818 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4819 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
4820 	    ARCMSR_MESSAGE_START_BGRB);
4821 
4822 	if (!arcmsr_hbb_wait_msgint_ready(acb))
4823 		arcmsr_warn(acb,
4824 		    "timeout while waiting for background rebuild to start");
4825 }
4826 
4827 
4828 static void
4829 arcmsr_start_hbc_bgrb(struct ACB *acb)
4830 {
4831 	struct HBC_msgUnit *phbcmu;
4832 
4833 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
4834 
4835 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
4836 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4837 	    &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4838 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4839 	    &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4840 	if (!arcmsr_hbc_wait_msgint_ready(acb))
4841 		arcmsr_warn(acb,
4842 		    "timeout while waiting for background rebuild to start");
4843 }
4844 
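/*
 * Poll the type A outbound queue with interrupts masked until poll_ccb
 * completes (or the queue drains), completing each CCB found.
 */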
4845 static void
4846 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4847 {
4848 	struct HBA_msgUnit *phbamu;
4849 	struct CCB *ccb;
4850 	boolean_t error;
4851 	uint32_t flag_ccb, outbound_intstatus, intmask_org;
4852 	boolean_t poll_ccb_done = B_FALSE;
4853 	uint32_t poll_count = 0;
4854 
4855 
4856 	phbamu = (struct HBA_msgUnit *)acb->pmu;
4857 
4858 	/* TODO: Use correct offset and size for syncing? */
4859 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4860 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4861 		return;
4862 	intmask_org = arcmsr_disable_allintr(acb);
4863 
4864 	for (;;) {
4865 		if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4866 		    &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4867 			if (poll_ccb_done) {
4868 				/* no more completed CCBs in the chip FIFO */
4869 				break;
4870 			} else {
4871 				drv_usecwait(25000);
4872 				if ((poll_count > 100) && (poll_ccb != NULL)) {
4873 					break;
4874 				}
4875 				if (acb->ccboutstandingcount == 0) {
4876 					break;
4877 				}
4878 				poll_count++;
4879 				outbound_intstatus =
4880 				    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4881 				    &phbamu->outbound_intstatus) &
4882 				    acb->outbound_int_enable;
4883 
4884 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4885 				    &phbamu->outbound_intstatus,
4886 				    outbound_intstatus); /* clear interrupt */
4887 			}
4888 		}
4889 
4890 		/* frame must be 32 bytes aligned */
4891 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4892 
4893 		/* check if command done with no error */
4894 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4895 		    B_TRUE : B_FALSE;
4896 		if (poll_ccb != NULL)
4897 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4898 
4899 		if (ccb->acb != acb) {
4900 			arcmsr_warn(acb, "ccb got a wrong acb!");
4901 			continue;
4902 		}
4903 		if (ccb->ccb_state != ARCMSR_CCB_START) {
4904 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4905 				ccb->ccb_state |= ARCMSR_CCB_BACK;
4906 				ccb->pkt->pkt_reason = CMD_ABORTED;
4907 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4908 				arcmsr_ccb_complete(ccb, 1);
4909 				continue;
4910 			}
4911 			arcmsr_report_ccb_state(acb, ccb, error);
4912 			arcmsr_warn(acb,
4913 			    "polling op got unexpected ccb command done");
4914 			continue;
4915 		}
4916 		arcmsr_report_ccb_state(acb, ccb, error);
4917 	}	/* drain reply FIFO */
4918 	arcmsr_enable_allintr(acb, intmask_org);
4919 }
4920 
4921 
4922 static void
4923 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4924 {
4925 	struct HBB_msgUnit *phbbmu;
4926 	struct CCB *ccb;
4927 	uint32_t flag_ccb, intmask_org;
4928 	boolean_t error;
4929 	uint32_t poll_count = 0;
4930 	int index;
4931 	boolean_t poll_ccb_done = B_FALSE;
4932 
4933 
4934 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
4935 
4936 	/* Use correct offset and size for syncing */
4937 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4938 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4939 		return;
4940 
4941 	intmask_org = arcmsr_disable_allintr(acb);
4942 
4943 	for (;;) {
4944 		index = phbbmu->doneq_index;
4945 		if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4946 			if (poll_ccb_done) {
4947 				/* no more completed CCBs in the chip FIFO */
4948 				break;
4949 			} else {
4950 				drv_usecwait(25000);
4951 				if ((poll_count > 100) && (poll_ccb != NULL))
4952 					break;
4953 				if (acb->ccboutstandingcount == 0)
4954 					break;
4955 				poll_count++;
4956 				/* clear doorbell interrupt */
4957 				CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4958 				    &phbbmu->hbb_doorbell->iop2drv_doorbell,
4959 				    ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4960 			}
4961 		}
4962 
4963 		phbbmu->done_qbuffer[index] = 0;
4964 		index++;
4965 		/* if last index number set it to 0 */
4966 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
4967 		phbbmu->doneq_index = index;
4969 		/* frame must be 32 bytes aligned */
4970 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4971 
4972 		/* check if command done with no error */
4973 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4974 		    B_TRUE : B_FALSE;
4975 
4976 		if (poll_ccb != NULL)
4977 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4978 		if (ccb->acb != acb) {
4979 			arcmsr_warn(acb, "ccb got a wrong acb!");
4980 			continue;
4981 		}
4982 		if (ccb->ccb_state != ARCMSR_CCB_START) {
4983 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4984 				ccb->ccb_state |= ARCMSR_CCB_BACK;
4985 				ccb->pkt->pkt_reason = CMD_ABORTED;
4986 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
4987 				arcmsr_ccb_complete(ccb, 1);
4988 				continue;
4989 			}
4990 			arcmsr_report_ccb_state(acb, ccb, error);
4991 			arcmsr_warn(acb,
4992 			    "polling op got unexpected ccb command done");
4993 			continue;
4994 		}
4995 		arcmsr_report_ccb_state(acb, ccb, error);
4996 	}	/* drain reply FIFO */
4997 	arcmsr_enable_allintr(acb, intmask_org);
4998 }
4999 
5000 
5001 static void
5002 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5003 {
5004 
5005 	struct HBC_msgUnit *phbcmu;
5006 	struct CCB *ccb;
5007 	boolean_t error;
5008 	uint32_t ccb_cdb_phy;
5009 	uint32_t flag_ccb, intmask_org;
5010 	boolean_t poll_ccb_done = B_FALSE;
5011 	uint32_t poll_count = 0;
5012 
5013 
5014 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5015 
5016 	/* Use correct offset and size for syncing */
5017 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5018 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5019 		return;
5020 
5021 	intmask_org = arcmsr_disable_allintr(acb);
5022 
5023 	for (;;) {
5024 		if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5025 		    &phbcmu->host_int_status) &
5026 		    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5027 
5028 			if (poll_ccb_done) {
5029 				/* no more completed CCBs in the chip FIFO */
5030 				break;
5031 			} else {
5032 				drv_usecwait(25000);
5033 				if ((poll_count > 100) && (poll_ccb != NULL)) {
5034 					break;
5035 				}
5036 				if (acb->ccboutstandingcount == 0) {
5037 					break;
5038 				}
5039 				poll_count++;
5040 			}
5041 		}
5042 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5043 		    &phbcmu->outbound_queueport_low);
5044 		/* frame must be 32 bytes aligned */
5045 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5046 		/* the CDB is the first field of the CCB */
5047 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5048 
5049 		/* check if command done with no error */
5050 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5051 		    B_TRUE : B_FALSE;
5052 		if (poll_ccb != NULL)
5053 			poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5054 
5055 		if (ccb->acb != acb) {
5056 			arcmsr_warn(acb, "ccb got a wrong acb!");
5057 			continue;
5058 		}
5059 		if (ccb->ccb_state != ARCMSR_CCB_START) {
5060 			if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5061 				ccb->ccb_state |= ARCMSR_CCB_BACK;
5062 				ccb->pkt->pkt_reason = CMD_ABORTED;
5063 				ccb->pkt->pkt_statistics |= STAT_ABORTED;
5064 				arcmsr_ccb_complete(ccb, 1);
5065 				continue;
5066 			}
5067 			arcmsr_report_ccb_state(acb, ccb, error);
5068 			arcmsr_warn(acb,
5069 			    "polling op got unexpected ccb command done");
5070 			continue;
5071 		}
5072 		arcmsr_report_ccb_state(acb, ccb, error);
5073 	}	/* drain reply FIFO */
5074 	arcmsr_enable_allintr(acb, intmask_org);
5075 }
5076 
5077 
5078 /*
5079  * Function: arcmsr_hba_hardware_reset()
5080  *           Bug fix for an Intel IOP issue that causes the firmware
5081  *           to hang and the kernel to panic.
5082  */
5083 static void
5084 arcmsr_hba_hardware_reset(struct ACB *acb)
5085 {
5086 	struct HBA_msgUnit *phbamu;
5087 	uint8_t value[64];
5088 	int i;
5089 
5090 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5091 	/* backup pci config data */
5092 	for (i = 0; i < 64; i++) {
5093 		value[i] = pci_config_get8(acb->pci_acc_handle, i);
5094 	}
5095 	/* hardware reset signal */
5096 	if ((PCI_DEVICE_ID_ARECA_1680 ==
5097 	    pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5098 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5099 		    &phbamu->reserved1[0], 0x00000003);
5100 	} else {
5101 		pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5102 	}
5103 	drv_usecwait(1000000);
5104 	/* write back pci config data */
5105 	for (i = 0; i < 64; i++) {
5106 		pci_config_put8(acb->pci_acc_handle, i, value[i]);
5107 	}
5108 	drv_usecwait(1000000);
5109 }
5110 
5111 /*
5112  * Function: arcmsr_abort_host_command
5113  */
5114 static uint8_t
5115 arcmsr_abort_host_command(struct ACB *acb)
5116 {
5117 	uint8_t rtnval = 0;
5118 
5119 	switch (acb->adapter_type) {
5120 	case ACB_ADAPTER_TYPE_A:
5121 		rtnval = arcmsr_abort_hba_allcmd(acb);
5122 		break;
5123 	case ACB_ADAPTER_TYPE_B:
5124 		rtnval = arcmsr_abort_hbb_allcmd(acb);
5125 		break;
5126 	case ACB_ADAPTER_TYPE_C:
5127 		rtnval = arcmsr_abort_hbc_allcmd(acb);
5128 		break;
5129 	}
5130 	return (rtnval);
5131 }
5132 
5133 /*
5134  * Function: arcmsr_handle_iop_bus_hold
5135  */
5136 static void
5137 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5138 {
5139 
5140 	switch (acb->adapter_type) {
5141 	case ACB_ADAPTER_TYPE_A:
5142 	{
5143 		struct HBA_msgUnit *phbamu;
5144 		int retry_count = 0;
5145 
5146 		acb->timeout_count = 0;
5147 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5148 		arcmsr_hba_hardware_reset(acb);
5149 		acb->acb_flags &= ~ACB_F_IOP_INITED;
5150 	sleep_again:
5151 		drv_usecwait(1000000);
5152 		if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5153 		    &phbamu->outbound_msgaddr1) &
5154 		    ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5155 			if (retry_count > 60) {
5156 				arcmsr_warn(acb,
5157 				    "waiting for hardware "
5158 				    "bus reset to return, RETRY TERMINATED!!");
5159 				return;
5160 			}
5161 			retry_count++;
5162 			goto sleep_again;
5163 		}
5164 		arcmsr_iop_init(acb);
5165 		break;
5166 	}
5167 
5168 	}
5169 }
5170 
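/*
 * The IOP has written ioctl message data into its request Qbuffer.  Copy it
 * into the driver's rqbuffer ring when there is room and acknowledge the
 * read; otherwise set ACB_F_IOPDATA_OVERFLOW so the data is drained later.
 */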
5171 static void
5172 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb)
5173 {
5174 	struct QBUFFER *prbuffer;
5175 	uint8_t *pQbuffer;
5176 	uint8_t *iop_data;
5177 	int my_empty_len, iop_len;
5178 	int rqbuf_firstidx, rqbuf_lastidx;
5179 
5180 	/* check whether this IOP data would overflow our rqbuffer */
5181 	rqbuf_lastidx = acb->rqbuf_lastidx;
5182 	rqbuf_firstidx = acb->rqbuf_firstidx;
5183 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
5184 	iop_data = (uint8_t *)prbuffer->data;
5185 	iop_len = prbuffer->data_len;
5186 	my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
5187 	    (ARCMSR_MAX_QBUFFER - 1);
5188 
5189 	if (my_empty_len >= iop_len) {
5190 		while (iop_len > 0) {
5191 			pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5192 			(void) memcpy(pQbuffer, iop_data, 1);
5193 			rqbuf_lastidx++;
5194 			/* wrap the index to 0 at the end of the ring */
5195 			rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5196 			iop_data++;
5197 			iop_len--;
5198 		}
5199 		acb->rqbuf_lastidx = rqbuf_lastidx;
5200 		arcmsr_iop_message_read(acb);
5201 		/* signature, let IOP know data has been read */
5202 	} else {
5203 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5204 	}
5205 }
5206 
5207 
5208 
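/*
 * The IOP has consumed the write Qbuffer we last posted.  If more ioctl data
 * from user space is waiting in the driver's wqbuffer ring, copy up to 124
 * bytes into the IOP's write Qbuffer and ring the doorbell again; mark the
 * write queue cleared once it has fully drained.
 */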
5209 static void
5210 arcmsr_iop2drv_data_read_handle(struct ACB *acb)
5211 {
5212 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5213 	/*
5214 	 * check whether the user space program left any mail packages in
5215 	 * our post bag; now is the time to send them to Areca's firmware
5216 	 */
5217 
5218 	if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5219 
5220 		uint8_t *pQbuffer;
5221 		struct QBUFFER *pwbuffer;
5222 		uint8_t *iop_data;
5223 		int allxfer_len = 0;
5224 
5225 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5226 		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5227 		iop_data = (uint8_t *)pwbuffer->data;
5228 
5229 		while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5230 		    (allxfer_len < 124)) {
5231 			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5232 			(void) memcpy(iop_data, pQbuffer, 1);
5233 			acb->wqbuf_firstidx++;
5234 			/* if last index number set it to 0 */
5235 			/* wrap the index to 0 at the end of the ring */
5236 			iop_data++;
5237 			allxfer_len++;
5238 		}
5239 		pwbuffer->data_len = allxfer_len;
5240 		/*
5241 		 * push inbound doorbell, tell iop driver data write ok
5242 		 * await reply on next hwinterrupt for next Qbuffer post
5243 		 */
5244 		arcmsr_iop_message_wrote(acb);
5245 	}
5246 
5247 	if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5248 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5249 }
5250 
5251 
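/*
 * Type A doorbell interrupt: read and clear the outbound doorbell, then call
 * the data-written and/or data-read handlers as the doorbell bits indicate.
 */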
5252 static void
5253 arcmsr_hba_doorbell_isr(struct ACB *acb)
5254 {
5255 	uint32_t outbound_doorbell;
5256 	struct HBA_msgUnit *phbamu;
5257 
5258 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5259 
5260 	/*
5261 	 *  Maybe here we need to check whether wrqbuffer_lock is held
5262 	 *  DOORBELL: ding! dong!
5263 	 *  check if there is any mail we need to pick up from the firmware
5264 	 */
5265 
5266 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5267 	    &phbamu->outbound_doorbell);
5268 	/* clear doorbell interrupt */
5269 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5270 	    &phbamu->outbound_doorbell, outbound_doorbell);
5271 
5272 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5273 		arcmsr_iop2drv_data_wrote_handle(acb);
5274 
5275 
5276 	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5277 		arcmsr_iop2drv_data_read_handle(acb);
5278 }
5279 
5280 
5281 
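/*
 * Type C doorbell interrupt: read the outbound doorbell, acknowledge it via
 * the clear register, then call the data-written, data-read and message-done
 * handlers as the doorbell bits indicate.
 */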
5282 static void
5283 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5284 {
5285 	uint32_t outbound_doorbell;
5286 	struct HBC_msgUnit *phbcmu;
5287 
5288 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5289 
5290 	/*
5291 	 *  Maybe here we need to check whether wrqbuffer_lock is held
5292 	 *  DOORBELL: ding! dong!
5293 	 *  check if there is any mail we need to pick up from the firmware
5294 	 */
5295 
5296 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5297 	    &phbcmu->outbound_doorbell);
5298 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5299 	    &phbcmu->outbound_doorbell_clear,
5300 	    outbound_doorbell); /* clear interrupt */
5301 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5302 		arcmsr_iop2drv_data_wrote_handle(acb);
5303 	}
5304 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5305 		arcmsr_iop2drv_data_read_handle(acb);
5306 	}
5307 	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5308 		/* messenger of "driver to iop commands" */
5309 		arcmsr_hbc_message_isr(acb);
5310 	}
5311 }
5312 
5313 
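/*
 * Message interrupt handlers (one per adapter type): acknowledge the message
 * interrupt and, if the firmware posted the ARCMSR_SIGNATURE_GET_CONFIG code,
 * dispatch the dynamic reconfiguration handler arcmsr_dr_handle() on the
 * taskq.
 */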
5314 static void
5315 arcmsr_hba_message_isr(struct ACB *acb)
5316 {
5317 	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5318 	uint32_t  *signature = (&phbamu->msgcode_rwbuffer[0]);
5319 	uint32_t outbound_message;
5320 
5321 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5322 	    &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5323 
5324 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5325 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5326 		if ((ddi_taskq_dispatch(acb->taskq,
5327 		    (void (*)(void *))arcmsr_dr_handle,
5328 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5329 			arcmsr_warn(acb, "DR task start failed");
5330 		}
5331 }
5332 
5333 static void
5334 arcmsr_hbb_message_isr(struct ACB *acb)
5335 {
5336 	struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5337 	uint32_t  *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5338 	uint32_t outbound_message;
5339 
5340 	/* clear interrupts */
5341 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5342 	    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5343 	    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5344 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5345 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5346 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5347 
5348 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5349 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5350 		if ((ddi_taskq_dispatch(acb->taskq,
5351 		    (void (*)(void *))arcmsr_dr_handle,
5352 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5353 			arcmsr_warn(acb, "DR task start failed");
5354 		}
5355 }
5356 
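/* Type C variant of the message interrupt handler. */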
5357 static void
5358 arcmsr_hbc_message_isr(struct ACB *acb)
5359 {
5360 	struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5361 	uint32_t  *signature = (&phbcmu->msgcode_rwbuffer[0]);
5362 	uint32_t outbound_message;
5363 
5364 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5365 	    &phbcmu->outbound_doorbell_clear,
5366 	    ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5367 
5368 	outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5369 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5370 		if ((ddi_taskq_dispatch(acb->taskq,
5371 		    (void (*)(void *))arcmsr_dr_handle,
5372 		    acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5373 			arcmsr_warn(acb, "DR task start failed");
5374 		}
5375 }
5376 
5377 
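/*
 * Type A post queue interrupt: pop completed CCBs from the outbound queue
 * port until it reads 0xFFFFFFFF (empty).  Each reply word carries the CCB
 * frame address (shifted right by 5) plus an error flag
 * (ARCMSR_CCBREPLY_FLAG_ERROR_MODE0).
 */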
5378 static void
5379 arcmsr_hba_postqueue_isr(struct ACB *acb)
5380 {
5381 
5382 	struct HBA_msgUnit *phbamu;
5383 	struct CCB *ccb;
5384 	uint32_t flag_ccb;
5385 	boolean_t error;
5386 
5387 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5388 
5389 	/* areca cdb command done */
5390 	/* Use correct offset and size for syncing */
5391 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5392 	    DDI_DMA_SYNC_FORKERNEL);
5393 
5394 	while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5395 	    &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5396 		/* frame must be 32 bytes aligned */
5397 		ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5398 		/* check if command done with no error */
5399 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5400 		    B_TRUE : B_FALSE;
5401 		arcmsr_drain_donequeue(acb, ccb, error);
5402 	}	/* drain reply FIFO */
5403 }
5404 
5405 
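/*
 * Type B post queue interrupt: completions arrive in the done_qbuffer ring
 * in host memory rather than a register FIFO; walk the ring from
 * doneq_index, handing each completed CCB to arcmsr_drain_donequeue().
 */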
5406 static void
5407 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5408 {
5409 	struct HBB_msgUnit *phbbmu;
5410 	struct CCB *ccb;
5411 	uint32_t flag_ccb;
5412 	boolean_t error;
5413 	int index;
5414 
5415 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5416 
5417 	/* areca cdb command done */
5418 	index = phbbmu->doneq_index;
5419 	if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5420 	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5421 		return;
5422 	while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5423 		phbbmu->done_qbuffer[index] = 0;
5424 		/* frame must be 32 bytes aligned */
5425 
5426 		/* the CDB is the first field of the CCB */
5427 		ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5428 
5429 		/* check if command done with no error */
5430 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5431 		    B_TRUE : B_FALSE;
5432 		arcmsr_drain_donequeue(acb, ccb, error);
5433 		index++;
5434 		/* wrap the index to 0 at the end of the ring */
5435 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
5436 		phbbmu->doneq_index = index;
5437 	}	/* drain reply FIFO */
5438 }
5439 
5440 
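/*
 * Type C post queue interrupt: drain replies from the low outbound queue
 * port while the post queue ISR bit stays set; after
 * ARCMSR_HBC_ISR_THROTTLING_LEVEL replies in one pass, ask the IOP to
 * throttle and return.
 */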
5441 static void
5442 arcmsr_hbc_postqueue_isr(struct ACB *acb)
5443 {
5444 
5445 	struct HBC_msgUnit *phbcmu;
5446 	struct CCB *ccb;
5447 	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
5448 	boolean_t error;
5449 
5450 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5451 	/* areca cdb command done */
5452 	/* Use correct offset and size for syncing */
5453 	(void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5454 	    DDI_DMA_SYNC_FORKERNEL);
5455 
5456 	while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5457 	    &phbcmu->host_int_status) &
5458 	    ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5459 		/* check if command done with no error */
5460 		flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5461 		    &phbcmu->outbound_queueport_low);
5462 		/* frame must be 32 bytes aligned */
5463 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5464 
5465 		/* the CDB is the first field of the CCB */
5466 		ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5467 
5468 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5469 		    B_TRUE : B_FALSE;
5470 		/* check if command done with no error */
5471 		arcmsr_drain_donequeue(acb, ccb, error);
5472 		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
5473 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5474 			    &phbcmu->inbound_doorbell,
5475 			    ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
5476 			break;
5477 		}
5478 		throttling++;
5479 	}	/* drain reply FIFO */
5480 }
5481 
5482 
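/*
 * Per-adapter-type interrupt service routines: each checks its controller's
 * outbound interrupt status, returns DDI_INTR_UNCLAIMED when nothing it
 * enabled is pending (a shared interrupt), and otherwise dispatches the
 * appropriate doorbell, post queue and message sub-handlers before claiming
 * the interrupt.
 */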
5483 static uint_t
5484 arcmsr_handle_hba_isr(struct ACB *acb)
5485 {
5486 	uint32_t outbound_intstatus;
5487 	struct HBA_msgUnit *phbamu;
5488 
5489 	phbamu = (struct HBA_msgUnit *)acb->pmu;
5490 
5491 	outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5492 	    &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5493 
5494 	if (outbound_intstatus == 0)	/* it must be a shared irq */
5495 		return (DDI_INTR_UNCLAIMED);
5496 
5497 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5498 	    outbound_intstatus); /* clear interrupt */
5499 
5500 	/* MU doorbell interrupts */
5501 
5502 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5503 		arcmsr_hba_doorbell_isr(acb);
5504 
5505 	/* MU post queue interrupts */
5506 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5507 		arcmsr_hba_postqueue_isr(acb);
5508 
5509 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5510 		arcmsr_hba_message_isr(acb);
5511 	}
5512 
5513 	return (DDI_INTR_CLAIMED);
5514 }
5515 
5516 
5517 static uint_t
5518 arcmsr_handle_hbb_isr(struct ACB *acb)
5519 {
5520 	uint32_t outbound_doorbell;
5521 	struct HBB_msgUnit *phbbmu;
5522 
5523 
5524 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5525 
5526 	outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5527 	    &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5528 
5529 	if (outbound_doorbell == 0)		/* it must be a shared irq */
5530 		return (DDI_INTR_UNCLAIMED);
5531 
5532 	/* clear doorbell interrupt */
5533 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5534 	    &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5535 	/* wait a cycle */
5536 	(void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5537 	    &phbbmu->hbb_doorbell->iop2drv_doorbell);
5538 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5539 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5540 	    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5541 
5542 	/* MU ioctl transfer doorbell interrupts */
5543 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5544 		arcmsr_iop2drv_data_wrote_handle(acb);
5545 
5546 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5547 		arcmsr_iop2drv_data_read_handle(acb);
5548 
5549 	/* MU post queue interrupts */
5550 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5551 		arcmsr_hbb_postqueue_isr(acb);
5552 
5553 	/* MU message interrupt */
5554 
5555 	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5556 		arcmsr_hbb_message_isr(acb);
5557 	}
5558 
5559 	return (DDI_INTR_CLAIMED);
5560 }
5561 
5562 static uint_t
5563 arcmsr_handle_hbc_isr(struct ACB *acb)
5564 {
5565 	uint32_t host_interrupt_status;
5566 	struct HBC_msgUnit *phbcmu;
5567 
5568 	phbcmu = (struct HBC_msgUnit *)acb->pmu;
5569 	/*  check outbound intstatus */
5570 	host_interrupt_status =
5571 	    CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5572 	if (host_interrupt_status == 0)	/* it must be a shared irq */
5573 		return (DDI_INTR_UNCLAIMED);
5574 	/* MU ioctl transfer doorbell interrupts */
5575 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5576 		/* messenger of "ioctl message read write" */
5577 		arcmsr_hbc_doorbell_isr(acb);
5578 	}
5579 	/* MU post queue interrupts */
5580 	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5581 		/* messenger of "scsi commands" */
5582 		arcmsr_hbc_postqueue_isr(acb);
5583 	}
5584 	return (DDI_INTR_CLAIMED);
5585 }
5586 
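/*
 * Top-level interrupt handler: under isr_mutex, hand off to the
 * adapter-type specific ISR, then complete any CCBs that were placed on the
 * complete list once the mutex has been dropped.
 */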
5587 static uint_t
5588 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5589 {
5590 	struct ACB *acb = (void *)arg;
5591 	struct CCB *ccb;
5592 	uint_t retrn = DDI_INTR_UNCLAIMED;
5593 	_NOTE(ARGUNUSED(arg2))
5594 
5595 	mutex_enter(&acb->isr_mutex);
5596 	switch (acb->adapter_type) {
5597 	case ACB_ADAPTER_TYPE_A:
5598 		retrn = arcmsr_handle_hba_isr(acb);
5599 		break;
5600 
5601 	case ACB_ADAPTER_TYPE_B:
5602 		retrn = arcmsr_handle_hbb_isr(acb);
5603 		break;
5604 
5605 	case ACB_ADAPTER_TYPE_C:
5606 		retrn = arcmsr_handle_hbc_isr(acb);
5607 		break;
5608 
5609 	default:
5610 		/* We should never be here */
5611 		ASSERT(0);
5612 		break;
5613 	}
5614 	mutex_exit(&acb->isr_mutex);
5615 	while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5616 		arcmsr_ccb_complete(ccb, 1);
5617 	}
5618 	return (retrn);
5619 }
5620 
5621 
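/*
 * Spin until the controller firmware reports the FIRMWARE_OK bit for this
 * adapter type; note that this polls with no timeout.
 */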
5622 static void
5623 arcmsr_wait_firmware_ready(struct ACB *acb)
5624 {
5625 	uint32_t firmware_state;
5626 
5627 	firmware_state = 0;
5628 
5629 	switch (acb->adapter_type) {
5630 	case ACB_ADAPTER_TYPE_A:
5631 	{
5632 		struct HBA_msgUnit *phbamu;
5633 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5634 		do {
5635 			firmware_state =
5636 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5637 			    &phbamu->outbound_msgaddr1);
5638 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5639 		    == 0);
5640 		break;
5641 	}
5642 
5643 	case ACB_ADAPTER_TYPE_B:
5644 	{
5645 		struct HBB_msgUnit *phbbmu;
5646 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5647 		do {
5648 			firmware_state =
5649 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5650 			    &phbbmu->hbb_doorbell->iop2drv_doorbell);
5651 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5652 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5653 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5654 		    ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5655 		break;
5656 	}
5657 
5658 	case ACB_ADAPTER_TYPE_C:
5659 	{
5660 		struct HBC_msgUnit *phbcmu;
5661 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5662 		do {
5663 			firmware_state =
5664 			    CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5665 			    &phbcmu->outbound_msgaddr1);
5666 		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5667 		    == 0);
5668 		break;
5669 	}
5670 
5671 	}
5672 }
5673 
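/*
 * Acknowledge any doorbell state left pending in the message unit and tell
 * the IOP its outbound Qbuffer data has been read, so both sides start with
 * an empty doorbell queue.
 */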
5674 static void
5675 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
5676 {
5677 	switch (acb->adapter_type) {
5678 	case ACB_ADAPTER_TYPE_A: {
5679 		struct HBA_msgUnit *phbamu;
5680 		uint32_t outbound_doorbell;
5681 
5682 		phbamu = (struct HBA_msgUnit *)acb->pmu;
5683 		/* empty doorbell Qbuffer if the doorbell rang */
5684 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5685 		    &phbamu->outbound_doorbell);
5686 		/* clear doorbell interrupt */
5687 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5688 		    &phbamu->outbound_doorbell, outbound_doorbell);
5689 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5690 		    &phbamu->inbound_doorbell,
5691 		    ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
5692 		break;
5693 	}
5694 
5695 	case ACB_ADAPTER_TYPE_B: {
5696 		struct HBB_msgUnit *phbbmu;
5697 
5698 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5699 		/* clear interrupt and message state */
5700 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5701 		    &phbbmu->hbb_doorbell->iop2drv_doorbell,
5702 		    ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5703 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5704 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5705 		    ARCMSR_DRV2IOP_DATA_READ_OK);
5706 		/* let IOP know data has been read */
5707 		break;
5708 	}
5709 
5710 	case ACB_ADAPTER_TYPE_C: {
5711 		struct HBC_msgUnit *phbcmu;
5712 		uint32_t outbound_doorbell;
5713 
5714 		phbcmu = (struct HBC_msgUnit *)acb->pmu;
5715 		/* empty doorbell Qbuffer if the doorbell rang */
5716 		outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5717 		    &phbcmu->outbound_doorbell);
5718 		/* clear outbound doorbell isr */
5719 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5720 		    &phbcmu->outbound_doorbell_clear, outbound_doorbell);
5721 		/* let IOP know data has been read */
5722 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5723 		    &phbcmu->inbound_doorbell,
5724 		    ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
5725 		break;
5726 	}
5727 
5728 	}
5729 }
5730 
5731 
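/*
 * Tell the IOP where the CCB pool lives.  Types A and C only need the upper
 * 32 bits of the pool's physical address (and only when non-zero); type B
 * additionally has its post/done command queue window configured and is
 * switched into driver mode.  Returns TRUE on success, FALSE if a message
 * handshake times out.
 */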
5732 static uint32_t
5733 arcmsr_iop_confirm(struct ACB *acb)
5734 {
5735 	uint64_t cdb_phyaddr;
5736 	uint32_t cdb_phyaddr_hi32;
5737 
5738 	/*
5739 	 * here we need to tell iop 331 about our freeccb.HighPart
5740 	 * if freeccb.HighPart is non-zero
5741 	 */
5742 	cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
5743 	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
5744 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
5745 	switch (acb->adapter_type) {
5746 	case ACB_ADAPTER_TYPE_A:
5747 		if (cdb_phyaddr_hi32 != 0) {
5748 			struct HBA_msgUnit *phbamu;
5749 
5750 			phbamu = (struct HBA_msgUnit *)acb->pmu;
5751 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5752 			    &phbamu->msgcode_rwbuffer[0],
5753 			    ARCMSR_SIGNATURE_SET_CONFIG);
5754 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5755 			    &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5756 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5757 			    &phbamu->inbound_msgaddr0,
5758 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5759 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
5760 				arcmsr_warn(acb,
5761 				    "timeout setting ccb "
5762 				    "high physical address");
5763 				return (FALSE);
5764 			}
5765 		}
5766 		break;
5767 
5768 	/* if adapter is type B, set window of "post command queue" */
5769 	case ACB_ADAPTER_TYPE_B: {
5770 		uint32_t post_queue_phyaddr;
5771 		struct HBB_msgUnit *phbbmu;
5772 
5773 		phbbmu = (struct HBB_msgUnit *)acb->pmu;
5774 		phbbmu->postq_index = 0;
5775 		phbbmu->doneq_index = 0;
5776 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5777 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5778 		    ARCMSR_MESSAGE_SET_POST_WINDOW);
5779 
5780 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5781 			arcmsr_warn(acb, "timeout setting post command "
5782 			    "queue window");
5783 			return (FALSE);
5784 		}
5785 
5786 		post_queue_phyaddr = (uint32_t)cdb_phyaddr +
5787 		    ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
5788 		    + offsetof(struct HBB_msgUnit, post_qbuffer);
5789 		/* driver "set config" signature */
5790 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5791 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
5792 		    ARCMSR_SIGNATURE_SET_CONFIG);
5793 		/* normally this should be zero */
5794 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5795 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
5796 		    cdb_phyaddr_hi32);
5797 		/* postQ size (256+8)*4 */
5798 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5799 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
5800 		    post_queue_phyaddr);
5801 		/* doneQ size (256+8)*4 */
5802 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5803 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
5804 		    post_queue_phyaddr+1056);
5805 		/* ccb maxQ size must be --> [(256+8)*4] */
5806 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5807 		    &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
5808 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5809 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5810 		    ARCMSR_MESSAGE_SET_CONFIG);
5811 
5812 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5813 			arcmsr_warn(acb,
5814 			    "timeout setting command queue window");
5815 			return (FALSE);
5816 		}
5817 		CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5818 		    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5819 		    ARCMSR_MESSAGE_START_DRIVER_MODE);
5820 
5821 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5822 			arcmsr_warn(acb, "timeout in 'start driver mode'");
5823 			return (FALSE);
5824 		}
5825 		break;
5826 	}
5827 
5828 	case ACB_ADAPTER_TYPE_C:
5829 		if (cdb_phyaddr_hi32 != 0) {
5830 			struct HBC_msgUnit *phbcmu;
5831 
5832 			phbcmu = (struct HBC_msgUnit *)acb->pmu;
5833 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5834 			    &phbcmu->msgcode_rwbuffer[0],
5835 			    ARCMSR_SIGNATURE_SET_CONFIG);
5836 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5837 			    &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5838 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5839 			    &phbcmu->inbound_msgaddr0,
5840 			    ARCMSR_INBOUND_MESG0_SET_CONFIG);
5841 			CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5842 			    &phbcmu->inbound_doorbell,
5843 			    ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
5844 			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
5845 				arcmsr_warn(acb, "'set ccb "
5846 				    "high part physical address' timeout");
5847 				return (FALSE);
5848 			}
5849 		}
5850 		break;
5851 	}
5852 	return (TRUE);
5853 }
5854 
5855 
5856 /*
5857  * ONLY used for Adapter type B
5858  */
5859 static void
5860 arcmsr_enable_eoi_mode(struct ACB *acb)
5861 {
5862 	struct HBB_msgUnit *phbbmu;
5863 
5864 	phbbmu = (struct HBB_msgUnit *)acb->pmu;
5865 
5866 	CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5867 	    &phbbmu->hbb_doorbell->drv2iop_doorbell,
5868 	    ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5869 
5870 	if (!arcmsr_hbb_wait_msgint_ready(acb))
5871 		arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5872 }
5873 
5874 /* initialize the IOP and start the background rebuild */
5875 static void
5876 arcmsr_iop_init(struct ACB *acb)
5877 {
5878 	uint32_t intmask_org;
5879 
5880 	/* disable all outbound interrupt */
5881 	intmask_org = arcmsr_disable_allintr(acb);
5882 	arcmsr_wait_firmware_ready(acb);
5883 	(void) arcmsr_iop_confirm(acb);
5884 
5885 	/* start background rebuild */
5886 	switch (acb->adapter_type) {
5887 	case ACB_ADAPTER_TYPE_A:
5888 		arcmsr_get_hba_config(acb);
5889 		arcmsr_start_hba_bgrb(acb);
5890 		break;
5891 	case ACB_ADAPTER_TYPE_B:
5892 		arcmsr_get_hbb_config(acb);
5893 		arcmsr_start_hbb_bgrb(acb);
5894 		break;
5895 	case ACB_ADAPTER_TYPE_C:
5896 		arcmsr_get_hbc_config(acb);
5897 		arcmsr_start_hbc_bgrb(acb);
5898 		break;
5899 	}
5900 	/* empty doorbell Qbuffer if the doorbell rang */
5901 	arcmsr_clear_doorbell_queue_buffer(acb);
5902 
5903 	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5904 		arcmsr_enable_eoi_mode(acb);
5905 
5906 	/* enable outbound Post Queue, outbound doorbell Interrupt */
5907 	arcmsr_enable_allintr(acb, intmask_org);
5908 	acb->acb_flags |= ACB_F_IOP_INITED;
5909 }
5910