/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/int_types.h>
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/scsi/targets/sddef.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/byteorder.h>

#include "amrreg.h"
#include "amrvar.h"

/* dynamic debug symbol */
int	amr_debug_var = 0;

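/*
 * Polling helpers.  Both re-test `cond' every 100 microseconds, up to
 * `count' iterations, and set `done_flag' to 0 if the condition never
 * became true.  AMR_DELAY() sleeps via delay() and so may only be used
 * where blocking is allowed; AMR_BUSYWAIT() spins via drv_usecwait()
 * and is safe in contexts that cannot block (e.g. during panic).
 */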
#define	AMR_DELAY(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			delay(drv_usectohz(100)); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

#define	AMR_BUSYWAIT(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			drv_usecwait(100); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

/*
 * driver interfaces
 */
char _depends_on[] = "misc/scsi";

static uint_t amr_intr(caddr_t arg);
static void amr_done(struct amr_softs *softs);

static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
			void *arg, void **result);
static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
static int amr_detach(dev_info_t *, ddi_detach_cmd_t);

static int amr_setup_mbox(struct amr_softs *softs);
static int amr_setup_sg(struct amr_softs *softs);

/*
 * Command wrappers
 */
static int amr_query_controller(struct amr_softs *softs);
static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
			uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
static int amr_flush(struct amr_softs *softs);

/*
 * Command processing.
 */
static void amr_rw_command(struct amr_softs *softs,
			struct scsi_pkt *pkt, int lun);
static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
			unsigned int capacity);
static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
static void amr_enquiry_unmapcmd(struct amr_command *ac);
static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
static void amr_unmapcmd(struct amr_command *ac);

/*
 * Status monitoring
 */
static void amr_periodic(void *data);

/*
 * Interface-specific shims
 */
static int amr_poll_command(struct amr_command *ac);
static void amr_start_waiting_queue(void *softp);
static void amr_call_pkt_comp(struct amr_command *head);

/*
 * SCSI interface
 */
static int amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int amr_tran_reset(struct scsi_address *ap, int level);
static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);

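/*
 * Two DMA attribute sets are used below: buffer_dma_attr constrains data
 * transfer mappings (up to AMR_NSEG segments, AMR_BLKSIZE granularity),
 * while addr_dma_attr is for single-segment control structures such as
 * the mailbox and the scatter/gather tables.
 */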
static ddi_dma_attr_t buffer_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x00ffffffull,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		AMR_NSEG,	/* maximum number of segments */
		AMR_BLKSIZE,	/* granularity */
		0,		/* flags (reserved) */
};

static ddi_dma_attr_t addr_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x7fffffff,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		1,		/* maximum number of segments */
		1,		/* granularity */
		0,		/* flags (reserved) */
};


static struct dev_ops   amr_ops = {
	DEVO_REV,	/* devo_rev, */
	0,		/* refcnt  */
	amr_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	amr_attach,	/* attach */
	amr_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	0		/* power */
};


extern struct mod_ops mod_driverops;
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. driver here */
	"AMR Driver",		/* Name of the module. */
	&amr_ops,		/* Driver ops vector */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/* DMA access attributes */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static struct amr_softs  *amr_softstatep;


int
_init(void)
{
	int		error;

	error = ddi_soft_state_init((void *)&amr_softstatep,
			sizeof (struct amr_softs), 0);

	if (error != 0)
		goto error_out;

	if ((error = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini((void*)&amr_softstatep);
		goto error_out;
	}

	error = mod_install(&modlinkage);
	if (error != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini((void*)&amr_softstatep);
		goto error_out;
	}

	return (error);

error_out:
	cmn_err(CE_NOTE, "_init failed");
	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int	error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini((void*)&amr_softstatep);
	return (error);
}


static int
amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			error;
	uint32_t		command, i;
	int			instance;
	caddr_t			cfgaddr;

	instance = ddi_get_instance(dev);

	switch (cmd) {
		case DDI_ATTACH:
			break;

		case DDI_RESUME:
			return (DDI_FAILURE);

		default:
			return (DDI_FAILURE);
	}

	/*
	 * Initialize softs.
	 */
	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	softs = ddi_get_soft_state(amr_softstatep, instance);
	softs->state |= AMR_STATE_SOFT_STATE_SETUP;

	softs->dev_info_p = dev;

	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
		(void *)softs, (void *)&(softs->amr_busyslots)));

	if (pci_config_setup(dev, &(softs->pciconfig_handle))
		!= DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;

	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
		&accattr, &(softs->regsmap_handle));
	if (error != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_MEM_MAPPED;

	/*
	 * Determine board type.
	 */
	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);

	/*
	 * Make sure we are going to be able to talk to this board.
	 */
	if ((command & PCI_COMM_MAE) == 0) {
		AMRDB_PRINT((CE_NOTE,  "memory window not available"));
		goto error_out;
	}

	/* force the busmaster enable bit on */
	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(softs->pciconfig_handle,
				PCI_CONF_COMM, command);
		command = pci_config_get16(softs->pciconfig_handle,
				PCI_CONF_COMM);
		if (!(command & PCI_COMM_ME))
			goto error_out;
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if (ddi_intr_hilevel(dev, 0) != 0) {
	    AMRDB_PRINT((CE_NOTE,  "High level interrupt is not supported!"));
	    goto error_out;
	}

	if (ddi_get_iblock_cookie(dev, 0,  &softs->iblock_cookiep)
		!= DDI_SUCCESS) {
		goto error_out;
	}

	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
		softs->iblock_cookiep); /* should be used in interrupt */
	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* should be used in interrupt */
	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* should be used in interrupt */
	/* synchronize waits for the busy slots via this cv */
	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
	softs->state |= AMR_STATE_KMUTEX_INITED;

	/*
	 * Do bus-independent initialisation, bring controller online.
	 */
	if (amr_setup_mbox(softs) != DDI_SUCCESS)
		goto error_out;
	softs->state |= AMR_STATE_MAILBOX_SETUP;

	if (amr_setup_sg(softs) != DDI_SUCCESS)
		goto error_out;

	softs->state |= AMR_STATE_SG_TABLES_SETUP;

	if (amr_query_controller(softs) != DDI_SUCCESS)
		goto error_out;

	/*
	 * A taskq is created for dispatching the waiting-queue processing
	 * threads. The number of threads equals the number of logical
	 * drives, or 1 if no logical drive is configured for this
	 * instance.
	 */
	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
		MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
		goto error_out;
	}
	softs->state |= AMR_STATE_TASKQ_SETUP;

	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
		amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_INTR_SETUP;

	/* set up the tran interface */
	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
		goto error_out;
	}
	softs->state |= AMR_STATE_TRAN_SETUP;

	/* schedule a thread for periodic check */
	mutex_enter(&softs->periodic_mutex);
	softs->timeout_t = timeout(amr_periodic, (void *)softs,
				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
	mutex_exit(&softs->periodic_mutex);

	/* print firmware information in verbose mode */
	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
		softs->amr_product_info.pi_product_name,
		softs->amr_product_info.pi_firmware_ver);

	/* clear any interrupts */
	AMR_QCLEAR_INTR(softs);
	return (DDI_SUCCESS);

error_out:
	if (softs->state & AMR_STATE_INTR_SETUP) {
		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
	}
	if (softs->state & AMR_STATE_TASKQ_SETUP) {
		ddi_taskq_destroy(softs->amr_taskq);
	}
	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
		for (i = 0; i < softs->sg_max_count; i++) {
			(void) ddi_dma_unbind_handle(
				softs->sg_items[i].sg_handle);
			(void) ddi_dma_mem_free(
				&((softs->sg_items[i]).sg_acc_handle));
			(void) ddi_dma_free_handle(
				&(softs->sg_items[i].sg_handle));
		}
	}
	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
	}
	if (softs->state & AMR_STATE_KMUTEX_INITED) {
		mutex_destroy(&softs->queue_mutex);
		mutex_destroy(&softs->cmd_mutex);
		mutex_destroy(&softs->periodic_mutex);
		cv_destroy(&softs->cmd_cv);
	}
	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->regsmap_handle);
	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
		pci_config_teardown(&softs->pciconfig_handle);
	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
		ddi_soft_state_free(amr_softstatep, instance);
	return (DDI_FAILURE);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called during detach and system shutdown.
 *
 * Note that we can assume that the bufq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
/*ARGSUSED*/
static int amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			instance;
	uint32_t		i, done_flag;

	instance = ddi_get_instance(dev);
	softs = ddi_get_soft_state(amr_softstatep, instance);

	/* flush the controller */
	if (amr_flush(softs) != 0) {
		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
		return (EIO);
	}

	/* release the amr timer */
	mutex_enter(&softs->periodic_mutex);
	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
	if (softs->timeout_t) {
		(void) untimeout(softs->timeout_t);
		softs->timeout_t = 0;
	}
	mutex_exit(&softs->periodic_mutex);

	for (i = 0; i < softs->sg_max_count; i++) {
		(void) ddi_dma_unbind_handle(
			softs->sg_items[i].sg_handle);
		(void) ddi_dma_mem_free(
			&((softs->sg_items[i]).sg_acc_handle));
		(void) ddi_dma_free_handle(
			&(softs->sg_items[i].sg_handle));
	}

	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);

	/* disconnect the interrupt handler */
	ddi_remove_intr(softs->dev_info_p,  0, softs->iblock_cookiep);

	/* wait for the completion of any in-progress interrupts */
	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
	if (!done_flag) {
		cmn_err(CE_WARN, "Suspicious interrupts still in progress.");
	}

	ddi_taskq_destroy(softs->amr_taskq);

	(void) scsi_hba_detach(dev);
	scsi_hba_tran_free(softs->hba_tran);
	ddi_regs_map_free(&softs->regsmap_handle);
	pci_config_teardown(&softs->pciconfig_handle);

	mutex_destroy(&softs->queue_mutex);
	mutex_destroy(&softs->cmd_mutex);
	mutex_destroy(&softs->periodic_mutex);
	cv_destroy(&softs->cmd_cv);

	/* print firmware information in verbose mode */
	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
		softs->amr_product_info.pi_product_name,
		softs->amr_product_info.pi_firmware_ver);

	ddi_soft_state_free(amr_softstatep, instance);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result)
{
	struct amr_softs	*softs;
	int			instance;

	instance = ddi_get_instance(dip);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softs = ddi_get_soft_state(amr_softstatep, instance);
			if (softs != NULL) {
				*result = softs->dev_info_p;
				return (DDI_SUCCESS);
			} else {
				*result = NULL;
				return (DDI_FAILURE);
			}
		case DDI_INFO_DEVT2INSTANCE:
			*(int *)result = instance;
			break;
		default:
			break;
	}
	return (DDI_SUCCESS);
}

/*
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
 */
static uint_t
amr_intr(caddr_t arg)
{
	struct amr_softs *softs = (struct amr_softs *)arg;

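	/*
	 * amr_interrupts_counter tracks handlers that are still on-CPU;
	 * amr_detach() waits for it to drain back to zero after removing
	 * the interrupt, before tearing down the soft state.
	 */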
	softs->amr_interrupts_counter++;

	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
		softs->amr_interrupts_counter--;
		return (DDI_INTR_UNCLAIMED);
	}

	/* collect finished commands, queue anything waiting */
	amr_done(softs);

	softs->amr_interrupts_counter--;

	return (DDI_INTR_CLAIMED);
}

/*
 * Setup the amr mailbox
 */
static int
amr_setup_mbox(struct amr_softs *softs)
{
	uint32_t	move;
	size_t		mbox_len;

	if (ddi_dma_alloc_handle(
		softs->dev_info_p,
		&addr_dma_attr,
		DDI_DMA_SLEEP,
		NULL,
		&softs->mbox_dma_handle) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(
		softs->mbox_dma_handle,
		sizeof (struct amr_mailbox) + 16,
		&accattr,
		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		DDI_DMA_SLEEP,
		NULL,
		(caddr_t *)(&softs->mbox),
		&mbox_len,
		&softs->mbox_acc_handle) !=
		DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
		goto error_out;
	}

	if (ddi_dma_addr_bind_handle(
		softs->mbox_dma_handle,
		NULL,
		(caddr_t)softs->mbox,
		mbox_len,
		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		DDI_DMA_SLEEP,
		NULL,
		&softs->mbox_dma_cookie,
		&softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
		goto error_out;
	}

	if (softs->mbox_dma_cookien != 1)
		goto error_out;

	/* The physical address of the mailbox must be 16-byte aligned */
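	/*
	 * The buffer was allocated 16 bytes oversize above, so we can shift
	 * forward by `move' bytes (1 through 16) to the next 16-byte
	 * boundary and stay within the allocation.  For example, a cookie
	 * address ending in ...34 gives move = 16 - 4 = 12, yielding an
	 * aligned address ending in ...40; an already-aligned address
	 * simply skips a full 16 bytes.
	 */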
	move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address)&0xf);
	softs->mbox_phyaddr =
		(softs->mbox_dma_cookie.dmac_address + move);

	softs->mailbox =
		(struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);

	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
		softs->mbox_phyaddr, (void *)softs->mailbox,
		softs->mbox, move));

	return (DDI_SUCCESS);

error_out:
	if (softs->mbox_dma_cookien)
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	if (softs->mbox_acc_handle) {
		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
		softs->mbox_acc_handle = NULL;
	}
	if (softs->mbox_dma_handle) {
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
		softs->mbox_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Perform a periodic check of the controller status
 */
static void
amr_periodic(void *data)
{
	uint32_t		i;
	struct amr_softs	*softs = (struct amr_softs *)data;
	struct scsi_pkt	*pkt;
	register struct amr_command	*ac;

	for (i = 0; i < softs->sg_max_count; i++) {
		if (softs->busycmd[i] == NULL)
			continue;

		mutex_enter(&softs->cmd_mutex);

		if (softs->busycmd[i] == NULL) {
			mutex_exit(&softs->cmd_mutex);
			continue;
		}

		pkt = softs->busycmd[i]->pkt;

		if ((pkt->pkt_time != 0) &&
			(ddi_get_time() -
			softs->busycmd[i]->ac_timestamp >
			pkt->pkt_time)) {

			cmn_err(CE_WARN,
				"!timed out packet detected, "
				"sc = %p, pkt = %p, index = %d, ac = %p",
				(void *)softs,
				(void *)pkt,
				i,
				(void *)softs->busycmd[i]);

			ac = softs->busycmd[i];
			ac->ac_next = NULL;

			/* pull command from the busy index */
			softs->busycmd[i] = NULL;
			if (softs->amr_busyslots > 0)
				softs->amr_busyslots--;
			if (softs->amr_busyslots == 0)
				cv_broadcast(&softs->cmd_cv);

			mutex_exit(&softs->cmd_mutex);

			pkt = ac->pkt;
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_reason = CMD_TIMEOUT;
			if (!(pkt->pkt_flags &
			FLAG_NOINTR) && pkt->pkt_comp) {
				/* call pkt callback */
				(*pkt->pkt_comp)(pkt);
			}

		} else {
			mutex_exit(&softs->cmd_mutex);
		}
	}

	/* restart the amr timer */
	mutex_enter(&softs->periodic_mutex);
	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
		softs->timeout_t = timeout(amr_periodic, (void *)softs,
				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	mutex_exit(&softs->periodic_mutex);
}

/*
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softs *softs)
{
	struct amr_enquiry3	*aex;
	struct amr_prodinfo	*ap;
	struct amr_enquiry	*ae;
	uint32_t		ldrv;
	int			instance;

	/*
	 * If we haven't found the real limit yet, let us have a couple of
	 * commands in order to be able to probe.
	 */
	if (softs->maxio == 0)
		softs->maxio = 2;

	instance = ddi_get_instance(softs->dev_info_p);

	/*
	 * Try to issue an ENQUIRY3 command
	 */
	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
		AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {

		AMRDB_PRINT((CE_NOTE, "First enquiry"));

		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
		    softs->logic_drive[ldrv].al_size =
						aex->ae_drivesize[ldrv];
		    softs->logic_drive[ldrv].al_state =
						aex->ae_drivestate[ldrv];
		    softs->logic_drive[ldrv].al_properties =
						aex->ae_driveprop[ldrv];
		    AMRDB_PRINT((CE_NOTE,
			"  drive %d: size: %d state %x properties %x\n",
			ldrv,
			softs->logic_drive[ldrv].al_size,
			softs->logic_drive[ldrv].al_state,
			softs->logic_drive[ldrv].al_properties));

		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
				instance, ldrv);
		    else
			softs->amr_nlogdrives++;
		}
		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);

		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
			AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
			AMRDB_PRINT((CE_NOTE,
				"Cannot obtain product data from controller"));
			return (EIO);
		}

		softs->maxdrives = AMR_40LD_MAXDRIVES;
		softs->maxchan = ap->ap_nschan;
		softs->maxio = ap->ap_maxio;

		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
			AMR_FIRMWARE_VER_SIZE);
		softs->amr_product_info.
			pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;

		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
			AMR_PRODUCT_INFO_SIZE);
		softs->amr_product_info.
			pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;

		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
	} else {

		AMRDB_PRINT((CE_NOTE,
			"First enquiry failed, so try another way"));

		/* failed, try the 8LD ENQUIRY commands */
		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
			AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
			== NULL) {

			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
				AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
				== NULL) {
				AMRDB_PRINT((CE_NOTE,
					"Cannot obtain configuration data"));
				return (EIO);
			}
			ae->ae_signature = 0;
		}

		/*
		 * Fetch current state of logical drives.
		 */
		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
		    softs->logic_drive[ldrv].al_size =
						ae->ae_ldrv.al_size[ldrv];
		    softs->logic_drive[ldrv].al_state =
						ae->ae_ldrv.al_state[ldrv];
		    softs->logic_drive[ldrv].al_properties =
						ae->ae_ldrv.al_properties[ldrv];
		    AMRDB_PRINT((CE_NOTE,
			" ********* drive %d: %d state %x properties %x",
			ldrv,
			softs->logic_drive[ldrv].al_size,
			softs->logic_drive[ldrv].al_state,
			softs->logic_drive[ldrv].al_properties));

		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
				instance, ldrv);
		    else
			softs->amr_nlogdrives++;
		}

		softs->maxdrives = AMR_8LD_MAXDRIVES;
		softs->maxchan = ae->ae_adapter.aa_channels;
		softs->maxio = ae->ae_adapter.aa_maxio;
		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
	}

	/*
	 * Mark remaining drives as unused.
	 */
	for (; ldrv < AMR_MAXLD; ldrv++)
		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;

	/*
	 * Cap the maximum number of outstanding I/Os.  AMI's driver
	 * doesn't trust the controller's reported value, and lockups have
	 * been seen when we do.
	 */
	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);

	return (DDI_SUCCESS);
}

/*
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
				uint8_t cmdsub, uint8_t cmdqual)
{
	struct amr_command	ac;
	void			*result;

	result = NULL;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	/* set command flags */
	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command	= cmd;
	ac.mailbox.mb_cmdsub	= cmdsub;
	ac.mailbox.mb_cmdqual	= cmdqual;

	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
		return (NULL);

	if (amr_poll_command(&ac) || ac.ac_status != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll command, bailing out"));
		amr_enquiry_unmapcmd(&ac);
		return (NULL);
	}

	/* allocate the response structure */
	result = kmem_zalloc(bufsize, KM_SLEEP);

	bcopy(ac.ac_data, result, bufsize);

	amr_enquiry_unmapcmd(&ac);
	return (result);
}

/*
 * Flush the controller's internal cache, return status.
 */
static int
amr_flush(struct amr_softs *softs)
{
	struct amr_command	ac;
	int			error = 0;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = AMR_CMD_FLUSH;

	/* have to poll, as the system may be going down or otherwise damaged */
	if (error = amr_poll_command(&ac)) {
		AMRDB_PRINT((CE_NOTE, "cannot poll this cmd"));
		return (error);
	}

	return (error);
}

/*
 * Take a command, submit it to the controller and wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
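 *
 * The polled handshake, as implemented below: copy the mailbox in, set
 * mb_busy and ring the inbound doorbell with AMR_QIDB_SUBMIT; wait for
 * the firmware to update mb_nstatus; latch mb_status; wait for mb_poll
 * to become AMR_POLL_ACK; write the ack back and ring the doorbell with
 * AMR_QIDB_ACK; finally wait for the hardware to clear the ack bit.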
 */
static int
amr_poll_command(struct amr_command *ac)
{
	struct amr_softs	*softs = ac->ac_softs;
	volatile uint32_t	done_flag;

	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
			(void *)&ac->mailbox,
			(void *)softs->mailbox,
			(uint32_t)AMR_MBOX_CMDSIZE));

	mutex_enter(&softs->cmd_mutex);

	while (softs->amr_busyslots != 0)
		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);

	/*
	 * For read/write commands, the scatter/gather table has been
	 * filled in; the last scatter/gather table slot, reserved for
	 * polled I/O, is used.
	 */
	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
		bcopy(ac->sgtable,
			softs->sg_items[softs->sg_max_count - 1].sg_table,
			sizeof (struct amr_sgentry) * AMR_NSEG);

		(void) ddi_dma_sync(
			softs->sg_items[softs->sg_max_count - 1].sg_handle,
			0, 0, DDI_DMA_SYNC_FORDEV);

		ac->mailbox.mb_physaddr =
			softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
	}

	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the poll/ack fields in the mailbox */
	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = 0;
	softs->mailbox->mb_busy = 1;

	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
			1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	ac->ac_status = softs->mailbox->mb_status;

	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = AMR_POLL_ACK;

	/* acknowledge that we have the commands */
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	mutex_exit(&softs->cmd_mutex);
	return (ac->ac_status != AMR_STATUS_SUCCESS);
}

/*
 * setup the scatter/gather table
 */
static int
amr_setup_sg(struct amr_softs *softs)
{
	uint32_t		i;
	size_t			len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookien;

	softs->sg_max_count = 0;

	for (i = 0; i < AMR_MAXCMD; i++) {

		/* reset the cookien */
		cookien = 0;

		(softs->sg_items[i]).sg_handle = NULL;
		if (ddi_dma_alloc_handle(
			softs->dev_info_p,
			&addr_dma_attr,
			DDI_DMA_SLEEP,
			NULL,
			&((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			"Cannot alloc dma handle for s/g table"));
			goto error_out;
		}

		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
			sizeof (struct amr_sgentry) * AMR_NSEG,
			&accattr,
			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
			DDI_DMA_SLEEP, NULL,
			(caddr_t *)(&(softs->sg_items[i]).sg_table),
			&len,
			&(softs->sg_items[i]).sg_acc_handle)
			!= DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			"Cannot allocate DMA memory"));
			goto error_out;
		}

		if (ddi_dma_addr_bind_handle(
			(softs->sg_items[i]).sg_handle,
			NULL,
			(caddr_t)((softs->sg_items[i]).sg_table),
			len,
			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
			DDI_DMA_SLEEP,
			NULL,
			&cookie,
			&cookien) != DDI_DMA_MAPPED) {

			AMRDB_PRINT((CE_WARN,
			"Cannot bind communication area for s/g table"));
			goto error_out;
		}

		if (cookien != 1)
			goto error_out;

		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
		softs->sg_max_count++;
	}

	return (DDI_SUCCESS);

error_out:
	/*
	 * Couldn't allocate/initialize all of the sg table entries.
	 * Clean up the partially-initialized entry before returning.
	 */
	if (cookien) {
		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
	}
	if ((softs->sg_items[i]).sg_acc_handle) {
		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
		(softs->sg_items[i]).sg_acc_handle = NULL;
	}
	if ((softs->sg_items[i]).sg_handle) {
		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
		(softs->sg_items[i]).sg_handle = NULL;
	}

	/*
	 * At least two sg table entries are needed. One is for regular data
	 * I/O commands, the other is for poll I/O commands.
	 */
	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
		int nsegments)
{
	struct amr_sgentry	*sg;
	uint32_t		i, size;

	sg = ac->sgtable;

	size = 0;

	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
	for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = buffer_dma_cookiep->dmac_address;
		sg->sg_count = buffer_dma_cookiep->dmac_size;
		size += sg->sg_count;

		/*
		 * Don't fetch the next cookie once the last cookie of
		 * the current window has been consumed; there is none.
		 */
		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
			ddi_dma_nextcookie(ac->buffer_dma_handle,
				buffer_dma_cookiep);
	}

	ac->transfer_size = size;
	ac->data_transfered += size;
}


/*
 * map the amr command for enquiry, allocate the DMA resource
 */
static int
amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
{
	struct amr_softs	*softs = ac->ac_softs;
	size_t			len;
	uint_t			dma_flags;

	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
			(void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	dma_flags |= DDI_DMA_CONSISTENT;

	/* process the DMA by address bind mode */
	if (ddi_dma_alloc_handle(softs->dev_info_p,
		&addr_dma_attr, DDI_DMA_SLEEP, NULL,
		&ac->buffer_dma_handle) !=
		DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		"Cannot allocate addr DMA tag"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
		data_size,
		&accattr,
		dma_flags,
		DDI_DMA_SLEEP,
		NULL,
		(caddr_t *)&ac->ac_data,
		&len,
		&ac->buffer_acc_handle) !=
		DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		"Cannot allocate DMA memory"));
		goto error_out;
	}

	if ((ddi_dma_addr_bind_handle(
		ac->buffer_dma_handle,
		NULL, ac->ac_data, len, dma_flags,
		DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
		&ac->num_of_cookie)) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_WARN,
			"Cannot bind addr for dma"));
		goto error_out;
	}

	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;

	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
	ac->mailbox.mb_nsgelem = 0;
	ac->mailbox.mb_physaddr = ac->ac_dataphys;

	ac->ac_flags |= AMR_CMD_MAPPED;

	return (DDI_SUCCESS);

error_out:
	if (ac->num_of_cookie)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
	if (ac->buffer_acc_handle) {
		ddi_dma_mem_free(&ac->buffer_acc_handle);
		ac->buffer_acc_handle = NULL;
	}
	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * unmap the amr command for enquiry, free the DMA resource
 */
static void
amr_enquiry_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
			(void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
		if (ac->buffer_dma_handle)
			(void) ddi_dma_unbind_handle(
				ac->buffer_dma_handle);
		if (ac->buffer_acc_handle) {
			ddi_dma_mem_free(&ac->buffer_acc_handle);
			ac->buffer_acc_handle = NULL;
		}
		if (ac->buffer_dma_handle) {
			(void) ddi_dma_free_handle(
				&ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
		}
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

/*
 * map the amr command, allocate the DMA resource
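 *
 * A note on direction flags: AMR_CMD_DATAOUT means data flows out of the
 * controller into host memory, so it maps to DDI_DMA_READ (a host read);
 * AMR_CMD_DATAIN likewise maps to DDI_DMA_WRITE.  Each call here loads at
 * most AMR_NSEG cookies into the s/g table; current_cookie tracks the
 * position within the current DMA window, and AMR_LAST_COOKIE_TAG marks
 * a window as exhausted so the next call moves on to the next window via
 * ddi_dma_getwin().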
 */
static int
amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
{
	uint_t	dma_flags;
	off_t	off;
	size_t	len;
	int	error;
	int	(*cb)(caddr_t);

	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
			(void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
		ac->ac_flags |= AMR_CMD_MAPPED;
		return (DDI_SUCCESS);
	}

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* if the command involves data at all, and hasn't been mapped */
	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
		/* process the DMA by buffer bind mode */
		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
			ac->ac_buf,
			dma_flags,
			cb,
			arg,
			&ac->buffer_dma_cookie,
			&ac->num_of_cookie);
		switch (error) {
		case DDI_DMA_PARTIAL_MAP:
			if (ddi_dma_numwin(ac->buffer_dma_handle,
				&ac->num_of_win) == DDI_FAILURE) {

				AMRDB_PRINT((CE_WARN,
					"Cannot get dma num win"));
				(void) ddi_dma_unbind_handle(
					ac->buffer_dma_handle);
				(void) ddi_dma_free_handle(
					&ac->buffer_dma_handle);
				ac->buffer_dma_handle = NULL;
				return (DDI_FAILURE);
			}
			ac->current_win = 0;
			break;

		case DDI_DMA_MAPPED:
			ac->num_of_win = 1;
			ac->current_win = 0;
			break;

		default:
			AMRDB_PRINT((CE_WARN,
				"Cannot bind buf for dma"));

			(void) ddi_dma_free_handle(
				&ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		ac->current_cookie = 0;

		ac->ac_flags |= AMR_CMD_MAPPED;
	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
		/* get the next window */
		ac->current_win++;
		(void) ddi_dma_getwin(ac->buffer_dma_handle,
			ac->current_win, &off, &len,
			&ac->buffer_dma_cookie,
			&ac->num_of_cookie);
		ac->current_cookie = 0;
	}

	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
		ac->current_cookie += AMR_NSEG;
	} else {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
		ac->num_of_cookie - ac->current_cookie);
		ac->current_cookie = AMR_LAST_COOKIE_TAG;
	}

	return (DDI_SUCCESS);
}

/*
 * unmap the amr command, free the DMA resource
 */
static void
amr_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
			(void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
		ac->ac_buf && ac->buffer_dma_handle)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

static int
amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp)
{
	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/*
	 * hba_private always points to the amr_softs struct
	 */
	softp->hba_tran->tran_hba_private	= softp;
	softp->hba_tran->tran_tgt_init		= amr_tran_tgt_init;
	softp->hba_tran->tran_tgt_probe		= scsi_hba_probe;
	softp->hba_tran->tran_start		= amr_tran_start;
	softp->hba_tran->tran_reset		= amr_tran_reset;
	softp->hba_tran->tran_getcap		= amr_tran_getcap;
	softp->hba_tran->tran_setcap		= amr_tran_setcap;
	softp->hba_tran->tran_init_pkt		= amr_tran_init_pkt;
	softp->hba_tran->tran_destroy_pkt	= amr_tran_destroy_pkt;
	softp->hba_tran->tran_dmafree		= amr_tran_dmafree;
	softp->hba_tran->tran_sync_pkt		= amr_tran_sync_pkt;
	softp->hba_tran->tran_abort		= NULL;
	softp->hba_tran->tran_tgt_free		= NULL;
	softp->hba_tran->tran_quiesce		= NULL;
	softp->hba_tran->tran_unquiesce		= NULL;
	softp->hba_tran->tran_sd		= NULL;

	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
		SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		scsi_hba_tran_free(softp->hba_tran);
		softp->hba_tran = NULL;
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*ARGSUSED*/
static int
amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct amr_softs	*softs;
	ushort_t		target = sd->sd_address.a_target;
	uchar_t			lun = sd->sd_address.a_lun;

	softs = (struct amr_softs *)
		(sd->sd_address.a_hba_tran->tran_hba_private);

	if ((lun == 0) && (target < AMR_MAXLD))
		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
			return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_softs	*softs;
	struct buf		*bp = NULL;
	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	int			ret;
	uint32_t		capacity;
	struct amr_command	*ac;

	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
		cdbp->scc_cmd, ap->a_target, ap->a_lun));

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
		(softs->logic_drive[ap->a_target].al_state ==
			AMR_LDRV_OFFLINE)) {
		cmn_err(CE_WARN, "target or lun is invalid!");
		ret = TRAN_BADPKT;
		return (ret);
	}

	ac = (struct amr_command *)pkt->pkt_ha_private;
	bp = ac->ac_buf;

	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));

	switch (cdbp->scc_cmd) {
	case SCMD_READ:		/* read		*/
	case SCMD_READ_G1:	/* read	g1	*/
	case SCMD_READ_BUFFER:	/* read buffer	*/
	case SCMD_WRITE:	/* write	*/
	case SCMD_WRITE_G1:	/* write g1	*/
	case SCMD_WRITE_BUFFER:	/* write buffer	*/
		amr_rw_command(softs, pkt, ap->a_target);

		if (pkt->pkt_flags & FLAG_NOINTR) {
			(void) amr_poll_command(ac);
			pkt->pkt_state |= (STATE_GOT_BUS
					| STATE_GOT_TARGET
					| STATE_SENT_CMD
					| STATE_XFERRED_DATA);
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_SYNC;
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			mutex_enter(&softs->queue_mutex);
			if (softs->waiting_q_head == NULL) {
				ac->ac_prev = NULL;
				ac->ac_next = NULL;
				softs->waiting_q_head = ac;
				softs->waiting_q_tail = ac;
			} else {
				ac->ac_next = NULL;
				ac->ac_prev = softs->waiting_q_tail;
				softs->waiting_q_tail->ac_next = ac;
				softs->waiting_q_tail = ac;
			}
			mutex_exit(&softs->queue_mutex);
			amr_start_waiting_queue((void *)softs);
		}
		ret = TRAN_ACCEPT;
		break;

	case SCMD_INQUIRY: /* inquiry */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_inquiry inqp;
			uint8_t *sinq_p = (uint8_t *)&inqp;

			bzero(&inqp, sizeof (struct scsi_inquiry));

			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
				/*
				 * EVPD and page code are not
				 * supported
				 */
				sinq_p[1] = 0xFF;
				sinq_p[2] = 0x0;
			} else {
				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
				inqp.inq_ansi = AMR_INQ_ANSI_VER;
				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
				/* Enable Tag Queue */
				inqp.inq_cmdque = 1;
				bcopy("MegaRaid", inqp.inq_vid,
					sizeof (inqp.inq_vid));
				bcopy(softs->amr_product_info.pi_product_name,
					inqp.inq_pid,
					AMR_PRODUCT_INFO_SIZE);
				bcopy(softs->amr_product_info.pi_firmware_ver,
					inqp.inq_revision,
					AMR_FIRMWARE_VER_SIZE);
			}

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&inqp, bp->b_un.b_addr,
				sizeof (struct scsi_inquiry));

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_READ_CAPACITY: /* read capacity */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_capacity cp;

			capacity = softs->logic_drive[ap->a_target].al_size - 1;
			cp.capacity = BE_32(capacity);
			cp.lbasize = BE_32(512);

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&cp, bp->b_un.b_addr, 8);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD
				| STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_MODE_SENSE:		/* mode sense */
	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
		amr_unmapcmd(ac);

		capacity = softs->logic_drive[ap->a_target].al_size - 1;
		amr_mode_sense(cdbp, bp, capacity);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD
				| STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_TEST_UNIT_READY:	/* test unit ready */
	case SCMD_REQUEST_SENSE:	/* request sense */
	case SCMD_FORMAT:		/* format */
	case SCMD_START_STOP:		/* start stop */
	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bzero(bp->b_un.b_addr, bp->b_bcount);

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	default: /* any other commands */
		amr_unmapcmd(ac);
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD
				| STATE_GOT_STATUS
				| STATE_ARQ_DONE);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;
	}

	return (ret);
}

/*
 * tran_reset() is meant to reset the bus/target/adapter to support fault
 * recovery, according to the "level" argument. However, we have
 * confirmation from LSI that these HBA cards do not support any commands
 * to reset the bus/target/adapter/channel.
 *
 * If tran_reset() returns FAILURE to sd, the system will not continue to
 * dump core. But a core dump is a crucial means of analyzing problems at
 * panic time. We therefore adopt a workaround: return a fake SUCCESS to
 * sd during panic, which forces the system to continue dumping core, even
 * though the dump may be flawed in some situations because in-flight
 * commands keep DMAing data into memory. In addition, this workaround may
 * not succeed if the panic was caused by the HBA itself. So the
 * workaround is not a good model for implementing tran_reset(); the most
 * reasonable approach would be to send a reset command to the adapter.
 */
/*ARGSUSED*/
static int
amr_tran_reset(struct scsi_address *ap, int level)
{
	struct amr_softs	*softs;
	volatile uint32_t	done_flag;

	if (ddi_in_panic()) {
		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

		/* Drain and acknowledge any outstanding commands */
		while (softs->amr_busyslots > 0) {
			AMR_DELAY((softs->mailbox->mb_busy == 0),
					AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * command not completed; report the
				 * problem and give up
				 */
				cmn_err(CE_WARN,
					"AMR command is not completed");
				return (0);
			}

			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

			/* wait for the acknowledge from hardware */
			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
					AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * no answer from the hardware; report
				 * the problem and give up
				 */
				cmn_err(CE_WARN, "No answer from the hardware");

				return (0);
			}

			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
		}

		/* flush the controller */
		(void) amr_flush(softs);

		/*
		 * If the system is in panic, tran_reset() returns a fake
		 * SUCCESS to sd, and the system continues to dump core
		 * using polled commands. This is a workaround for dumping
		 * core in panic.
		 *
		 * Note: Some on-the-fly commands will continue DMAing data
		 *	 into memory while the core is dumping, which may
		 *	 cause some flaws in the dumped core file, so a
		 *	 cmn_err() is printed to warn users. For most cases,
		 *	 however, the core file will be fine.
		 */
		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
				"that doesn't support software reset. This "
				"means that memory being used by the HBA for "
				"DMA based reads could have been updated after "
				"we panic'd.");
		return (1);
	} else {
		/* return failure to sd */
		return (0);
	}
}

/*ARGSUSED*/
static int
amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct amr_softs	*softs;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0)
		return (-1);

	softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_GEOMETRY:
		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
	case SCSI_CAP_SECTOR_SIZE:
		return (AMR_DEFAULT_SECTORS);
	case SCSI_CAP_TOTAL_SECTORS:
		/* number of sectors */
		return (softs->logic_drive[ap->a_target].al_size);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*ARGSUSED*/
static int
amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
		int whom)
{
	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		AMRDB_PRINT((CE_NOTE,
			"Set Cap not supported, string = %s, whom=%d",
			cap, whom));
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_TOTAL_SECTORS:
		return (1);
	case SCSI_CAP_SECTOR_SIZE:
		return (1);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return ((value == 1) ? 1 : 0);
	default:
		return (0);
	}
}
1835 
1836 static struct scsi_pkt *
1837 amr_tran_init_pkt(struct scsi_address *ap,
1838     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
1839     int tgtlen, int flags, int (*callback)(), caddr_t arg)
1840 {
1841 	struct amr_softs	*softs;
1842 	struct amr_command	*ac;
1843 	uint32_t		slen;
1844 
1845 	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1846 
1847 	if ((ap->a_lun != 0)||(ap->a_target >= AMR_MAXLD)||
1848 		(softs->logic_drive[ap->a_target].al_state ==
1849 			AMR_LDRV_OFFLINE)) {
1850 		return (NULL);
1851 	}
1852 
1853 	if (pkt == NULL) {
1854 		/* force auto request sense */
1855 		slen = MAX(statuslen, sizeof (struct scsi_arq_status));
1856 
1857 		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
1858 			slen, tgtlen, sizeof (struct amr_command),
1859 			callback, arg);
1860 		if (pkt == NULL) {
1861 			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
1862 			return (NULL);
1863 		}
1864 		pkt->pkt_address	= *ap;
1865 		pkt->pkt_comp		= (void (*)())NULL;
1866 		pkt->pkt_time		= 0;
1867 		pkt->pkt_resid		= 0;
1868 		pkt->pkt_statistics	= 0;
1869 		pkt->pkt_reason		= 0;
1870 
1871 		ac = (struct amr_command *)pkt->pkt_ha_private;
1872 		ac->ac_buf = bp;
1873 		ac->cmdlen = cmdlen;
1874 		ac->ac_softs = softs;
1875 		ac->pkt = pkt;
1876 		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
1877 		ac->ac_flags &= ~AMR_CMD_BUSY;
1878 
1879 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1880 			return (pkt);
1881 		}
1882 
1883 		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
1884 			DDI_DMA_SLEEP, NULL,
1885 			&ac->buffer_dma_handle) != DDI_SUCCESS) {
1886 
1887 			AMRDB_PRINT((CE_WARN,
1888 				"Cannot allocate buffer DMA handle"));
1889 			scsi_hba_pkt_free(ap, pkt);
1890 			return (NULL);
1891 
1892 		}
1893 
1894 	} else {
1895 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1896 			return (pkt);
1897 		}
1898 		ac = (struct amr_command *)pkt->pkt_ha_private;
1899 	}
1900 
1901 	ASSERT(ac != NULL);
1902 
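	/*
	 * The AMR_CMD_DATAIN/DATAOUT flags are named from the
	 * controller's point of view: a host read (B_READ) moves data
	 * out of the controller, hence AMR_CMD_DATAOUT (see
	 * amr_rw_command(), which maps DATAOUT to AMR_CMD_LREAD).
	 */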
1903 	if (bp->b_flags & B_READ) {
1904 		ac->ac_flags |= AMR_CMD_DATAOUT;
1905 	} else {
1906 		ac->ac_flags |= AMR_CMD_DATAIN;
1907 	}
1908 
1909 	if (flags & PKT_CONSISTENT) {
1910 		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
1911 	}
1912 
1913 	if (flags & PKT_DMA_PARTIAL) {
1914 		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
1915 	}
1916 
1917 	if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
1918 		scsi_hba_pkt_free(ap, pkt);
1919 		return (NULL);
1920 	}
1921 
1922 	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;
1923 
1924 	AMRDB_PRINT((CE_NOTE,
1925 		"init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
1926 		(uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
1927 		ac->data_transfered));
1928 
1929 	ASSERT(pkt->pkt_resid >= 0);
1930 
1931 	return (pkt);
1932 }
1933 
1934 static void
1935 amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1936 {
1937 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1938 
1939 	amr_unmapcmd(ac);
1940 
1941 	if (ac->buffer_dma_handle) {
1942 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1943 		ac->buffer_dma_handle = NULL;
1944 	}
1945 
1946 	scsi_hba_pkt_free(ap, pkt);
1947 	AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1948 }
1949 
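/*
 * SCSA tran_sync_pkt(9E) entry point.
 *
 * Syncs the data buffer for the direction of the transfer; with the
 * controller-centric flag naming above, AMR_CMD_DATAIN (a host write)
 * syncs for the device and AMR_CMD_DATAOUT (a host read) syncs for
 * the CPU.
 */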
1950 /*ARGSUSED*/
1951 static void
1952 amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1953 {
1954 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1955 
1956 	if (ac->buffer_dma_handle) {
1957 		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
1958 			(ac->ac_flags & AMR_CMD_DATAIN) ?
1959 			DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1960 	}
1961 }
1962 
1963 /*ARGSUSED*/
1964 static void
1965 amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1966 {
1967 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1968 
1969 	if (ac->ac_flags & AMR_CMD_MAPPED) {
1970 		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1971 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1972 		ac->buffer_dma_handle = NULL;
1973 		ac->ac_flags &= ~AMR_CMD_MAPPED;
1974 	}
1976 }
1977 
1978 /*ARGSUSED*/
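/*
 * Translate a SCSI READ/WRITE CDB into an AMR mailbox command.
 *
 * The block count is rounded up to whole AMR_BLKSIZE blocks, and the
 * LBA is extracted from the group 1 (10-byte) or group 0 (6-byte)
 * CDB layout according to the CDB length recorded at init_pkt time.
 */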
1979 static void
1980 amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
1981 {
1982 	struct amr_command	*ac = (struct amr_command *)pkt->pkt_ha_private;
1983 	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1984 	uint8_t			cmd;
1985 
1986 	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1987 		cmd = AMR_CMD_LREAD;
1988 	} else {
1989 		cmd = AMR_CMD_LWRITE;
1990 	}
1991 
1992 	ac->mailbox.mb_command = cmd;
1993 	ac->mailbox.mb_blkcount =
1994 		(ac->transfer_size + AMR_BLKSIZE - 1)/AMR_BLKSIZE;
1995 	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
1996 				GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
1997 	ac->mailbox.mb_drive = (uint8_t)target;
1998 }
1999 
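/*
 * Synthesize MODE SENSE page 3 (format) and page 4 (geometry) data
 * for a logical drive, since the controller hides the physical
 * geometry.  Multi-byte fields are stored big-endian as SCSI
 * requires; unsupported pages are returned zero-filled.
 */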
2000 static void
2001 amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
2002 {
2003 	uchar_t			pagecode;
2004 	struct mode_format	*page3p;
2005 	struct mode_geometry	*page4p;
2006 	struct mode_header	*headerp;
2007 	uint32_t		ncyl;
2008 
2009 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
2010 		return;
2011 
2012 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2013 		bp_mapin(bp);
2014 
2015 	pagecode = cdbp->cdb_un.sg.scsi[0];
2016 	switch (pagecode) {
2017 	case SD_MODE_SENSE_PAGE3_CODE:
2018 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2019 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2020 
2021 		page3p = (struct mode_format *)((caddr_t)headerp +
2022 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2023 		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
2024 		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
2025 		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
2026 		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);
2027 
2028 		return;
2029 
2030 	case SD_MODE_SENSE_PAGE4_CODE:
2031 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2032 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2033 
2034 		page4p = (struct mode_geometry *)((caddr_t)headerp +
2035 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2036 		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
2037 		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
2038 		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
2039 		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);
2040 
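		/*
		 * Derive the cylinder count from the capacity; as in
		 * amr_tran_getcap(), AMR_DEFAULT_CYLINDERS appears to
		 * act as the sectors-per-track value here.
		 */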
2041 		ncyl = capacity / (AMR_DEFAULT_HEADS*AMR_DEFAULT_CYLINDERS);
2042 		page4p->cyl_lb = BE_8(ncyl & 0xff);
2043 		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
2044 		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);
2045 
2046 		return;
2047 	default:
2048 		bzero(bp->b_un.b_addr, bp->b_bcount);
2049 		return;
2050 	}
2051 }
2052 
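/*
 * Fabricate auto-request-sense data for a failed command: flag the
 * packet status with CHECK CONDITION and fill in a synthetic
 * extended-sense block carrying the given sense key.
 */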
2053 static void
2054 amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
2055 {
2056 	struct scsi_arq_status *arqstat;
2057 
2058 	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
2059 	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
2060 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
2061 	arqstat->sts_rqpkt_resid = 0;
2062 	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2063 				STATE_SENT_CMD | STATE_XFERRED_DATA;
2064 	arqstat->sts_rqpkt_statistics = 0;
2065 	arqstat->sts_sensedata.es_valid = 1;
2066 	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2067 	arqstat->sts_sensedata.es_key = key;
2068 }
2069 
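/*
 * Drain the waiting queue, submitting commands to the controller.
 *
 * For each command this routine finds a free slot (the last slot is
 * reserved for polled I/O), copies the scatter/gather table into the
 * slot's DMA area, waits for the shared mailbox to become free,
 * copies in the command's mailbox image, and rings the inbound
 * doorbell with AMR_QIDB_SUBMIT.  Both queue_mutex and cmd_mutex are
 * held to serialize submission.
 */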
2070 static void
2071 amr_start_waiting_queue(void *softp)
2072 {
2073 	uint32_t		slot;
2074 	struct amr_command	*ac;
2075 	volatile uint32_t	done_flag;
2076 	struct amr_softs	*softs = (struct amr_softs *)softp;
2077 
2078 	/* serialize submission: only one command at a time */
2079 	mutex_enter(&softs->queue_mutex);
2080 	mutex_enter(&softs->cmd_mutex);
2081 
2082 	while ((ac = softs->waiting_q_head) != NULL) {
2083 		/*
2084 		 * Find an available slot; the last slot is
2085 		 * reserved for polled I/O commands.
2086 		 */
2087 		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
2088 			if (softs->busycmd[slot] == NULL) {
2089 				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
2090 					/*
2091 					 * the previous submission is still
2092 					 * pending; try again later
2093 					 */
2094 					mutex_exit(&softs->cmd_mutex);
2095 					mutex_exit(&softs->queue_mutex);
2096 					return;
2097 				}
2098 
2099 				ac->ac_timestamp = ddi_get_time();
2100 
2101 				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {
2102 
2103 					softs->busycmd[slot] = ac;
2104 					ac->ac_slot = slot;
2105 					softs->amr_busyslots++;
2106 
2107 					bcopy(ac->sgtable,
2108 					softs->sg_items[slot].sg_table,
2109 					sizeof (struct amr_sgentry) * AMR_NSEG);
2110 
2111 					(void) ddi_dma_sync(
2112 					softs->sg_items[slot].sg_handle,
2113 					0, 0, DDI_DMA_SYNC_FORDEV);
2114 
2115 					ac->mailbox.mb_physaddr =
2116 					softs->sg_items[slot].sg_phyaddr;
2117 				}
2118 
2119 				/* take the cmd from the queue */
2120 				softs->waiting_q_head = ac->ac_next;
2121 
2122 				ac->mailbox.mb_ident = ac->ac_slot + 1;
2123 				ac->mailbox.mb_busy = 1;
2124 				ac->ac_next = NULL;
2125 				ac->ac_prev = NULL;
2126 				ac->ac_flags |= AMR_CMD_GOT_SLOT;
2127 
2128 				/* clear the poll/ack fields in the mailbox */
2129 				softs->mailbox->mb_poll = 0;
2130 				softs->mailbox->mb_ack = 0;
2131 
2132 				AMR_DELAY((softs->mailbox->mb_busy == 0),
2133 					AMR_RETRYCOUNT, done_flag);
2134 				if (!done_flag) {
2135 					/*
2136 					 * the mailbox did not become free;
2137 					 * give up on submitting this ac
2138 					 */
2139 					cmn_err(CE_WARN,
2140 						"AMR command is not completed");
2141 					break;
2142 				}
2143 
2144 				bcopy(&ac->mailbox, (void *)softs->mailbox,
2145 					AMR_MBOX_CMDSIZE);
2146 				ac->ac_flags |= AMR_CMD_BUSY;
2147 
2148 				(void) ddi_dma_sync(softs->mbox_dma_handle,
2149 					0, 0, DDI_DMA_SYNC_FORDEV);
2150 
2151 				AMR_QPUT_IDB(softs,
2152 					softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
2153 
2154 				/*
2155 				 * the current ac has been submitted; leave
2156 				 * the slot loop and fetch the next one
2157 				 */
2158 				break;
2159 			}
2160 		}
2161 
2162 		/* no free slot was found; stop for now */
2163 		if (slot == softs->maxio)
2164 			break;
2165 	}
2166 
2167 	/* done submitting; drop the serialization locks */
2168 	mutex_exit(&softs->cmd_mutex);
2169 	mutex_exit(&softs->queue_mutex);
2170 }
2171 
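/*
 * Interrupt-time completion handling.
 *
 * Acknowledges the outbound doorbell, snapshots the mailbox (which
 * carries the list of completed command identifiers), acknowledges
 * the controller through the inbound doorbell, and reaps each
 * completed command from busycmd[].  Completion callbacks run after
 * cmd_mutex is dropped, and a taskq is dispatched to push any
 * pending I/O.
 */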
2172 static void
2173 amr_done(struct amr_softs *softs)
2174 {
2176 	uint32_t		i, idx;
2177 	volatile uint32_t	done_flag;
2178 	struct amr_mailbox	*mbox, mbsave;
2179 	struct amr_command	*ac, *head, *tail;
2180 
2181 	head = tail = NULL;
2182 
2183 	AMR_QPUT_ODB(softs, AMR_QODB_READY);
2184 
2185 	/* acknowledge interrupt */
2186 	(void) AMR_QGET_ODB(softs);
2187 
2188 	mutex_enter(&softs->cmd_mutex);
2189 
2190 	if (softs->mailbox->mb_nstatus != 0) {
2191 		(void) ddi_dma_sync(softs->mbox_dma_handle,
2192 			0, 0, DDI_DMA_SYNC_FORCPU);
2193 
2194 		/* save mailbox, which contains a list of completed commands */
2195 		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
2196 				&mbsave, sizeof (mbsave));
2197 
2198 		mbox = &mbsave;
2199 
2200 		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
2201 
2202 		/* wait for the acknowledgement from the hardware */
2203 		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
2204 				AMR_RETRYCOUNT, done_flag);
2205 		if (!done_flag) {
2206 			/*
2207 			 * the controller did not acknowledge; return from
2208 			 * this interrupt and wait for the next one
2209 			 */
2210 			cmn_err(CE_WARN, "No answer from the hardware");
2211 
2212 			mutex_exit(&softs->cmd_mutex);
2213 			return;
2214 		}
2215 
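		/*
		 * mb_ident values are slot numbers biased by one (set
		 * in amr_start_waiting_queue()), so subtract one to
		 * recover the busycmd[] index.
		 */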
2216 		for (i = 0; i < mbox->mb_nstatus; i++) {
2217 			idx = mbox->mb_completed[i] - 1;
2218 			ac = softs->busycmd[idx];
2219 
2220 			if (ac != NULL) {
2221 				/* pull the command from the busy index */
2222 				softs->busycmd[idx] = NULL;
2223 				if (softs->amr_busyslots > 0)
2224 					softs->amr_busyslots--;
2225 				if (softs->amr_busyslots == 0)
2226 					cv_broadcast(&softs->cmd_cv);
2227 
2228 				ac->ac_flags &= ~AMR_CMD_BUSY;
2229 				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
2230 				ac->ac_status = mbox->mb_status;
2231 
2232 				/* append ac to the local completion list */
2233 				if (head) {
2234 					tail->ac_next = ac;
2235 					tail = ac;
2236 					tail->ac_next = NULL;
2237 				} else {
2238 					tail = head = ac;
2239 					ac->ac_next = NULL;
2240 				}
2241 			} else {
2242 				AMRDB_PRINT((CE_WARN,
2243 					"ac in mailbox is NULL!"));
2244 			}
2245 		}
2246 	} else {
2247 		AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
2248 	}
2249 
2250 	mutex_exit(&softs->cmd_mutex);
2251 
2252 	if (head != NULL) {
2253 		amr_call_pkt_comp(head);
2254 	}
2255 
2256 	/* dispatch a thread to process the pending I/O if there is any */
2257 	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
2258 		(void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
2259 		cmn_err(CE_WARN, "No memory available to dispatch taskq");
2260 	}
2261 }
2262 
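/*
 * Walk a list of completed commands and invoke each packet's
 * completion callback.  Successful commands are marked CMD_CMPLT
 * with the full set of state flags; failures are marked
 * CMD_INCOMPLETE and given synthetic ARQ data with
 * KEY_HARDWARE_ERROR.  Packets submitted with FLAG_NOINTR are not
 * called back here.
 */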
2263 static void
2264 amr_call_pkt_comp(register struct amr_command *head)
2265 {
2266 	register struct scsi_pkt	*pkt;
2267 	register struct amr_command	*ac, *localhead;
2268 
2269 	localhead = head;
2270 
2271 	while (localhead) {
2272 		ac = localhead;
2273 		localhead = ac->ac_next;
2274 		ac->ac_next = NULL;
2275 
2276 		pkt = ac->pkt;
2277 		*pkt->pkt_scbp = 0;
2278 
2279 		if (ac->ac_status == AMR_STATUS_SUCCESS) {
2280 			pkt->pkt_state |= (STATE_GOT_BUS
2281 					| STATE_GOT_TARGET
2282 					| STATE_SENT_CMD
2283 					| STATE_XFERRED_DATA);
2284 			pkt->pkt_reason = CMD_CMPLT;
2285 		} else {
2286 			pkt->pkt_state |= STATE_GOT_BUS
2287 					| STATE_ARQ_DONE;
2288 			pkt->pkt_reason = CMD_INCOMPLETE;
2289 			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
2290 		}
2291 
2292 		if (!(pkt->pkt_flags & FLAG_NOINTR) &&
2293 			pkt->pkt_comp) {
2294 			(*pkt->pkt_comp)(pkt);
2295 		}
2296 	}
2297 }
2298