xref: /titanic_52/usr/src/uts/intel/io/amr/amr.c (revision 28406508b3a9fa1994317b410fa4afd93b3f8cc4)
1 /*
2  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 /*
6  * Copyright (c) 1999,2000 Michael Smith
7  * Copyright (c) 2000 BSDi
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 /*
32  * Copyright (c) 2002 Eric Moore
33  * Copyright (c) 2002 LSI Logic Corporation
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. The party using or redistributing the source code and binary forms
45  *    agrees to the disclaimer below and the terms and conditions set forth
46  *    herein.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 #pragma ident	"%Z%%M%	%I%	%E% SMI"
61 
62 #include <sys/int_types.h>
63 #include <sys/scsi/scsi.h>
64 #include <sys/dkbad.h>
65 #include <sys/dklabel.h>
66 #include <sys/dkio.h>
67 #include <sys/cdio.h>
68 #include <sys/mhd.h>
69 #include <sys/vtoc.h>
70 #include <sys/dktp/fdisk.h>
71 #include <sys/scsi/targets/sddef.h>
72 #include <sys/debug.h>
73 #include <sys/pci.h>
74 #include <sys/ksynch.h>
75 #include <sys/ddi.h>
76 #include <sys/sunddi.h>
77 #include <sys/modctl.h>
78 #include <sys/byteorder.h>
79 
80 #include "amrreg.h"
81 #include "amrvar.h"
82 
83 /* dynamic debug symbol */
84 int	amr_debug_var = 0;
85 
86 #define	AMR_DELAY(cond, count, done_flag) { \
87 		int local_counter = 0; \
88 		done_flag = 1; \
89 		while (!(cond)) { \
90 			delay(drv_usectohz(100)); \
91 			if ((local_counter) > count) { \
92 				done_flag = 0; \
93 				break; \
94 			} \
95 			(local_counter)++; \
96 		} \
97 	}
98 
99 #define	AMR_BUSYWAIT(cond, count, done_flag) { \
100 		int local_counter = 0; \
101 		done_flag = 1; \
102 		while (!(cond)) { \
103 			drv_usecwait(100); \
104 			if ((local_counter) > count) { \
105 				done_flag = 0; \
106 				break; \
107 			} \
108 			(local_counter)++; \
109 		} \
110 	}
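
/*
 * Both macros poll for (cond) in 100-microsecond steps, giving up after
 * (count) iterations and recording the outcome in done_flag
 * (1 = condition met, 0 = timed out).  AMR_DELAY() yields between checks
 * by sleeping in delay(); AMR_BUSYWAIT() busy-waits in drv_usecwait()
 * instead, for callers that should not (or cannot) sleep.
 * Typical use, taken from amr_poll_command() below:
 *
 *	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
 *	if (!done_flag)
 *		return (1);
 */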
111 
112 /*
113  * driver interfaces
114  */
115 char _depends_on[] = "misc/scsi";
116 
117 static uint_t amr_intr(caddr_t arg);
118 static void amr_done(struct amr_softs *softs);
119 
120 static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
121 			void *arg, void **result);
122 static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
123 static int amr_detach(dev_info_t *, ddi_detach_cmd_t);
124 
125 static int amr_setup_mbox(struct amr_softs *softs);
126 static int amr_setup_sg(struct amr_softs *softs);
127 
128 /*
129  * Command wrappers
130  */
131 static int amr_query_controller(struct amr_softs *softs);
132 static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
133 			uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
134 static int amr_flush(struct amr_softs *softs);
135 
136 /*
137  * Command processing.
138  */
139 static void amr_rw_command(struct amr_softs *softs,
140 			struct scsi_pkt *pkt, int lun);
141 static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
142 			unsigned int capacity);
143 static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
144 static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
145 static void amr_enquiry_unmapcmd(struct amr_command *ac);
146 static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
147 static void amr_unmapcmd(struct amr_command *ac);
148 
149 /*
150  * Status monitoring
151  */
152 static void amr_periodic(void *data);
153 
154 /*
155  * Interface-specific shims
156  */
157 static int amr_poll_command(struct amr_command *ac);
158 static void amr_start_waiting_queue(void *softp);
159 static void amr_call_pkt_comp(struct amr_command *head);
160 
161 /*
162  * SCSI interface
163  */
164 static int amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp);
165 
166 /*
167  * Function prototypes
168  *
169  * SCSA functions exported by means of the transport table
170  */
171 static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
172 	scsi_hba_tran_t *tran, struct scsi_device *sd);
173 static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
174 static int amr_tran_reset(struct scsi_address *ap, int level);
175 static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
176 static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
177     int whom);
178 static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
179     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
180     int tgtlen, int flags, int (*callback)(), caddr_t arg);
181 static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
182 static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
183 static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
184 
185 static ddi_dma_attr_t buffer_dma_attr = {
186 		DMA_ATTR_V0,	/* version of this structure */
187 		0,		/* lowest usable address */
188 		0xffffffffull,	/* highest usable address */
189 		0x00ffffffull,	/* maximum DMAable byte count */
190 		4,		/* alignment */
191 		1,		/* burst sizes */
192 		1,		/* minimum transfer */
193 		0xffffffffull,	/* maximum transfer */
194 		0xffffffffull,	/* maximum segment length */
195 		AMR_NSEG,	/* maximum number of segments */
196 		AMR_BLKSIZE,	/* granularity */
197 		0,		/* flags (reserved) */
198 };
199 
200 static ddi_dma_attr_t addr_dma_attr = {
201 		DMA_ATTR_V0,	/* version of this structure */
202 		0,		/* lowest usable address */
203 		0xffffffffull,	/* highest usable address */
204 		0x7fffffff,	/* maximum DMAable byte count */
205 		4,		/* alignment */
206 		1,		/* burst sizes */
207 		1,		/* minimum transfer */
208 		0xffffffffull,	/* maximum transfer */
209 		0xffffffffull,	/* maximum segment length */
210 		1,		/* maximum number of segments */
211 		1,		/* granularity */
212 		0,		/* flags (reserved) */
213 };
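
/*
 * Of the two attribute sets above, buffer_dma_attr governs user I/O
 * buffers and permits up to AMR_NSEG scatter/gather segments, while
 * addr_dma_attr is used for the driver's own control structures (the
 * mailbox and the s/g tables), which the controller addresses through a
 * single physical address -- hence the setup code below insists on
 * bindings that yield exactly one DMA cookie.
 */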
214 
215 
216 static struct dev_ops   amr_ops = {
217 	DEVO_REV,	/* devo_rev, */
218 	0,		/* refcnt  */
219 	amr_info,	/* info */
220 	nulldev,	/* identify */
221 	nulldev,	/* probe */
222 	amr_attach,	/* attach */
223 	amr_detach,	/* detach */
224 	nodev,		/* reset */
225 	NULL,		/* driver operations */
226 	(struct bus_ops *)0,	/* bus operations */
227 	0		/* power */
228 };
229 
230 
231 extern struct mod_ops mod_driverops;
232 static struct modldrv modldrv = {
233 	&mod_driverops,		/* Type of module. driver here */
234 	"AMR Driver V%I%",	/* Name of the module. */
235 	&amr_ops,		/* Driver ops vector */
236 };
237 
238 static struct modlinkage modlinkage = {
239 	MODREV_1,
240 	&modldrv,
241 	NULL
242 };
243 
244 /* DMA access attributes */
245 static ddi_device_acc_attr_t accattr = {
246 	DDI_DEVICE_ATTR_V0,
247 	DDI_NEVERSWAP_ACC,
248 	DDI_STRICTORDER_ACC
249 };
250 
251 static struct amr_softs  *amr_softstatep;
252 
253 
254 int
255 _init(void)
256 {
257 	int		error;
258 
259 	error = ddi_soft_state_init((void *)&amr_softstatep,
260 			sizeof (struct amr_softs), 0);
261 
262 	if (error != 0)
263 		goto error_out;
264 
265 	if ((error = scsi_hba_init(&modlinkage)) != 0) {
266 		ddi_soft_state_fini((void*)&amr_softstatep);
267 		goto error_out;
268 	}
269 
270 	error = mod_install(&modlinkage);
271 	if (error != 0) {
272 		scsi_hba_fini(&modlinkage);
273 		ddi_soft_state_fini((void*)&amr_softstatep);
274 		goto error_out;
275 	}
276 
277 	return (error);
278 
279 error_out:
280 	cmn_err(CE_NOTE, "_init failed");
281 	return (error);
282 }
283 
284 int
285 _info(struct modinfo *modinfop)
286 {
287 	return (mod_info(&modlinkage, modinfop));
288 }
289 
290 int
291 _fini(void)
292 {
293 	int	error;
294 
295 	if ((error = mod_remove(&modlinkage)) != 0) {
296 		return (error);
297 	}
298 
299 	scsi_hba_fini(&modlinkage);
300 
301 	ddi_soft_state_fini((void*)&amr_softstatep);
302 	return (error);
303 }
304 
305 
306 static int
307 amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
308 {
309 	struct amr_softs	*softs;
310 	int			error;
311 	uint32_t		command, i;
312 	int			instance;
313 	caddr_t			cfgaddr;
314 
315 	instance = ddi_get_instance(dev);
316 
317 	switch (cmd) {
318 		case DDI_ATTACH:
319 			break;
320 
321 		case DDI_RESUME:
322 			return (DDI_FAILURE);
323 
324 		default:
325 			return (DDI_FAILURE);
326 	}
327 
328 	/*
329 	 * Initialize softs.
330 	 */
331 	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
332 		return (DDI_FAILURE);
333 	softs = ddi_get_soft_state(amr_softstatep, instance);
334 	softs->state |= AMR_STATE_SOFT_STATE_SETUP;
335 
336 	softs->dev_info_p = dev;
337 
338 	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
339 		(void *)softs, (void *)&(softs->amr_busyslots)));
340 
341 	if (pci_config_setup(dev, &(softs->pciconfig_handle))
342 		!= DDI_SUCCESS) {
343 		goto error_out;
344 	}
345 	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;
346 
347 	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
348 		&accattr, &(softs->regsmap_handle));
349 	if (error != DDI_SUCCESS) {
350 		goto error_out;
351 	}
352 	softs->state |= AMR_STATE_PCI_MEM_MAPPED;
353 
354 	/*
355 	 * Determine board type.
356 	 */
357 	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);
358 
359 	/*
360 	 * Make sure we are going to be able to talk to this board.
361 	 */
362 	if ((command & PCI_COMM_MAE) == 0) {
363 		AMRDB_PRINT((CE_NOTE,  "memory window not available"));
364 		goto error_out;
365 	}
366 
367 	/* force the busmaster enable bit on */
368 	if (!(command & PCI_COMM_ME)) {
369 		command |= PCI_COMM_ME;
370 		pci_config_put16(softs->pciconfig_handle,
371 				PCI_CONF_COMM, command);
372 		command = pci_config_get16(softs->pciconfig_handle,
373 				PCI_CONF_COMM);
374 		if (!(command & PCI_COMM_ME))
375 			goto error_out;
376 	}
377 
378 	/*
379 	 * Allocate and connect our interrupt.
380 	 */
381 	if (ddi_intr_hilevel(dev, 0) != 0) {
382 		AMRDB_PRINT((CE_NOTE, "High-level interrupts are not supported!"));
383 		goto error_out;
384 	}
385 
386 	if (ddi_get_iblock_cookie(dev, 0,  &softs->iblock_cookiep)
387 		!= DDI_SUCCESS) {
388 		goto error_out;
389 	}
390 
391 	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
392 		softs->iblock_cookiep); /* may be taken in interrupt context */
393 	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
394 	    softs->iblock_cookiep); /* may be taken in interrupt context */
395 	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
396 	    softs->iblock_cookiep); /* may be taken in interrupt context */
397 	/* synchronize waits for the busy slots via this cv */
398 	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
399 	softs->state |= AMR_STATE_KMUTEX_INITED;
400 
401 	/*
402 	 * Do bus-independent initialisation, bring controller online.
403 	 */
404 	if (amr_setup_mbox(softs) != DDI_SUCCESS)
405 		goto error_out;
406 	softs->state |= AMR_STATE_MAILBOX_SETUP;
407 
408 	if (amr_setup_sg(softs) != DDI_SUCCESS)
409 		goto error_out;
410 
411 	softs->state |= AMR_STATE_SG_TABLES_SETUP;
412 
413 	if (amr_query_controller(softs) != DDI_SUCCESS)
414 		goto error_out;
415 
416 	/*
417 	 * A taskq is created for dispatching the waiting-queue processing
418 	 * threads. The number of threads equals the number of logical
419 	 * drives, or 1 if no logical drive is configured for this
420 	 * instance.
421 	 */
422 	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
423 		MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
424 		goto error_out;
425 	}
426 	softs->state |= AMR_STATE_TASKQ_SETUP;
427 
428 	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
429 		amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
430 		goto error_out;
431 	}
432 	softs->state |= AMR_STATE_INTR_SETUP;
433 
434 	/* set up the tran interface */
435 	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
436 		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
437 		goto error_out;
438 	}
439 	softs->state |= AMR_STATE_TRAN_SETUP;
440 
441 	/* schedule a thread for periodic check */
442 	mutex_enter(&softs->periodic_mutex);
443 	softs->timeout_t = timeout(amr_periodic, (void *)softs,
444 				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
445 	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
446 	mutex_exit(&softs->periodic_mutex);
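
	/*
	 * The watchdog scheduled above fires every
	 * drv_usectohz(500000 * AMR_PERIODIC_TIMEOUT) ticks, i.e.
	 * AMR_PERIODIC_TIMEOUT half-second units.  amr_periodic()
	 * re-arms itself for as long as AMR_STATE_TIMEOUT_ENABLED stays
	 * set; amr_detach() clears that flag before calling untimeout().
	 */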
447 
448 	/* print firmware information in verbose mode */
449 	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
450 		softs->amr_product_info.pi_product_name,
451 		softs->amr_product_info.pi_firmware_ver);
452 
453 	/* clear any interrupts */
454 	AMR_QCLEAR_INTR(softs);
455 	return (DDI_SUCCESS);
456 
457 error_out:
458 	if (softs->state & AMR_STATE_INTR_SETUP) {
459 		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
460 	}
461 	if (softs->state & AMR_STATE_TASKQ_SETUP) {
462 		ddi_taskq_destroy(softs->amr_taskq);
463 	}
464 	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
465 		for (i = 0; i < softs->sg_max_count; i++) {
466 			(void) ddi_dma_unbind_handle(
467 				softs->sg_items[i].sg_handle);
468 			(void) ddi_dma_mem_free(
469 				&((softs->sg_items[i]).sg_acc_handle));
470 			(void) ddi_dma_free_handle(
471 				&(softs->sg_items[i].sg_handle));
472 		}
473 	}
474 	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
475 		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
476 		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
477 		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
478 	}
479 	if (softs->state & AMR_STATE_KMUTEX_INITED) {
480 		mutex_destroy(&softs->queue_mutex);
481 		mutex_destroy(&softs->cmd_mutex);
482 		mutex_destroy(&softs->periodic_mutex);
483 		cv_destroy(&softs->cmd_cv);
484 	}
485 	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
486 		ddi_regs_map_free(&softs->regsmap_handle);
487 	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
488 		pci_config_teardown(&softs->pciconfig_handle);
489 	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
490 		ddi_soft_state_free(amr_softstatep, instance);
491 	return (DDI_FAILURE);
492 }
493 
494 /*
495  * Bring the controller down to a dormant state and detach all child devices.
496  * This function is called during detach and at system shutdown.
497  *
498  * Note that we can assume that the bufq on the controller is empty, as we won't
499  * allow shutdown if any device is open.
500  */
501 /*ARGSUSED*/
502 static int amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
503 {
504 	struct amr_softs	*softs;
505 	int			instance;
506 	uint32_t		i, done_flag;
507 
508 	instance = ddi_get_instance(dev);
509 	softs = ddi_get_soft_state(amr_softstatep, instance);
510 
511 	/* flush the controller */
512 	if (amr_flush(softs) != 0) {
513 		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
514 		return (EIO);
515 	}
516 
517 	/* release the amr timer */
518 	mutex_enter(&softs->periodic_mutex);
519 	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
520 	if (softs->timeout_t) {
521 		(void) untimeout(softs->timeout_t);
522 		softs->timeout_t = 0;
523 	}
524 	mutex_exit(&softs->periodic_mutex);
525 
526 	for (i = 0; i < softs->sg_max_count; i++) {
527 		(void) ddi_dma_unbind_handle(
528 			softs->sg_items[i].sg_handle);
529 		(void) ddi_dma_mem_free(
530 			&((softs->sg_items[i]).sg_acc_handle));
531 		(void) ddi_dma_free_handle(
532 			&(softs->sg_items[i].sg_handle));
533 	}
534 
535 	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
536 	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
537 	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
538 
539 	/* disconnect the interrupt handler */
540 	ddi_remove_intr(softs->dev_info_p,  0, softs->iblock_cookiep);
541 
542 	/* wait for the completion of any in-progress interrupts */
543 	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
544 	if (!done_flag) {
545 		cmn_err(CE_WARN, "Suspicious interrupts in-progress.");
546 	}
547 
548 	ddi_taskq_destroy(softs->amr_taskq);
549 
550 	(void) scsi_hba_detach(dev);
551 	scsi_hba_tran_free(softs->hba_tran);
552 	ddi_regs_map_free(&softs->regsmap_handle);
553 	pci_config_teardown(&softs->pciconfig_handle);
554 
555 	mutex_destroy(&softs->queue_mutex);
556 	mutex_destroy(&softs->cmd_mutex);
557 	mutex_destroy(&softs->periodic_mutex);
558 	cv_destroy(&softs->cmd_cv);
559 
560 	/* print firmware information in verbose mode */
561 	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
562 		softs->amr_product_info.pi_product_name,
563 		softs->amr_product_info.pi_firmware_ver);
564 
565 	ddi_soft_state_free(amr_softstatep, instance);
566 
567 	return (DDI_SUCCESS);
568 }
569 
570 
571 /*ARGSUSED*/
572 static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
573 	void *arg, void **result)
574 {
575 	struct amr_softs	*softs;
576 	int			instance;
577 
578 	instance = ddi_get_instance(dip);
579 
580 	switch (infocmd) {
581 		case DDI_INFO_DEVT2DEVINFO:
582 			softs = ddi_get_soft_state(amr_softstatep, instance);
583 			if (softs != NULL) {
584 				*result = softs->dev_info_p;
585 				return (DDI_SUCCESS);
586 			} else {
587 				*result = NULL;
588 				return (DDI_FAILURE);
589 			}
590 		case DDI_INFO_DEVT2INSTANCE:
591 			*(int *)result = instance;
592 			break;
593 		default:
594 			break;
595 	}
596 	return (DDI_SUCCESS);
597 }
598 
599 /*
600  * Take an interrupt, or be poked by other code to look for interrupt-worthy
601  * status.
602  */
603 static uint_t
604 amr_intr(caddr_t arg)
605 {
606 	struct amr_softs *softs = (struct amr_softs *)arg;
607 
608 	softs->amr_interrupts_counter++;
609 
610 	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
611 		softs->amr_interrupts_counter--;
612 		return (DDI_INTR_UNCLAIMED);
613 	}
614 
615 	/* collect finished commands, queue anything waiting */
616 	amr_done(softs);
617 
618 	softs->amr_interrupts_counter--;
619 
620 	return (DDI_INTR_CLAIMED);
621 
622 }
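
/*
 * amr_interrupts_counter above is not just a statistic: amr_detach()
 * spins (via AMR_DELAY) until it drops to zero after removing the
 * interrupt, so the soft state is not torn down under an in-flight
 * handler.
 */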
623 
624 /*
625  * Set up the amr mailbox
626  */
627 static int
628 amr_setup_mbox(struct amr_softs *softs)
629 {
630 	uint32_t	move;
631 	size_t		mbox_len;
632 
633 	if (ddi_dma_alloc_handle(
634 		softs->dev_info_p,
635 		&addr_dma_attr,
636 		DDI_DMA_SLEEP,
637 		NULL,
638 		&softs->mbox_dma_handle) != DDI_SUCCESS) {
639 		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
640 		goto error_out;
641 	}
642 
643 	if (ddi_dma_mem_alloc(
644 		softs->mbox_dma_handle,
645 		sizeof (struct amr_mailbox) + 16,
646 		&accattr,
647 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
648 		DDI_DMA_SLEEP,
649 		NULL,
650 		(caddr_t *)(&softs->mbox),
651 		&mbox_len,
652 		&softs->mbox_acc_handle) !=
653 		DDI_SUCCESS) {
654 
655 		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
656 		goto error_out;
657 	}
658 
659 	if (ddi_dma_addr_bind_handle(
660 		softs->mbox_dma_handle,
661 		NULL,
662 		(caddr_t)softs->mbox,
663 		mbox_len,
664 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
665 		DDI_DMA_SLEEP,
666 		NULL,
667 		&softs->mbox_dma_cookie,
668 		&softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {
669 
670 		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
671 		goto error_out;
672 	}
673 
674 	if (softs->mbox_dma_cookien != 1)
675 		goto error_out;
676 
677 	/* The physical address of the mailbox must be 16-byte aligned */
678 	move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address)&0xf);
679 	softs->mbox_phyaddr =
680 		(softs->mbox_dma_cookie.dmac_address + move);
681 
682 	softs->mailbox =
683 		(struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);
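
	/*
	 * Worked example: with dmac_address == 0x12345674,
	 * move = 16 - (0x12345674 & 0xf) = 12, giving a 16-byte-aligned
	 * mbox_phyaddr of 0x12345680.  An already-aligned address yields
	 * move = 16 rather than 0; that is harmless, since the buffer
	 * was allocated with 16 bytes of slack above.
	 */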
684 
685 	AMRDB_PRINT((CE_NOTE, "phraddy=%x, mailbox=%p, softs->mbox=%p, move=%x",
686 		softs->mbox_phyaddr, (void *)softs->mailbox,
687 		softs->mbox, move));
688 
689 	return (DDI_SUCCESS);
690 
691 error_out:
692 	if (softs->mbox_dma_cookien)
693 		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
694 	if (softs->mbox_acc_handle) {
695 		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
696 		softs->mbox_acc_handle = NULL;
697 	}
698 	if (softs->mbox_dma_handle) {
699 		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
700 		softs->mbox_dma_handle = NULL;
701 	}
702 
703 	return (DDI_FAILURE);
704 }
705 
706 /*
707  * Perform a periodic check of the controller status
708  */
709 static void
710 amr_periodic(void *data)
711 {
712 	uint32_t		i;
713 	struct amr_softs	*softs = (struct amr_softs *)data;
714 	struct scsi_pkt 	*pkt;
715 	register struct amr_command	*ac;
716 
717 	for (i = 0; i < softs->sg_max_count; i++) {
718 		if (softs->busycmd[i] == NULL)
719 			continue;
720 
721 		mutex_enter(&softs->cmd_mutex);
722 
723 		if (softs->busycmd[i] == NULL) {
724 			mutex_exit(&softs->cmd_mutex);
725 			continue;
726 		}
727 
728 		pkt = softs->busycmd[i]->pkt;
729 
730 		if ((pkt->pkt_time != 0) &&
731 			(ddi_get_time() -
732 			softs->busycmd[i]->ac_timestamp >
733 			pkt->pkt_time)) {
734 
735 			cmn_err(CE_WARN,
736 				"!timed out packet detected,\
737 				sc = %p, pkt = %p, index = %d, ac = %p",
738 				(void *)softs,
739 				(void *)pkt,
740 				i,
741 				(void *)softs->busycmd[i]);
742 
743 			ac = softs->busycmd[i];
744 			ac->ac_next = NULL;
745 
746 			/* pull command from the busy index */
747 			softs->busycmd[i] = NULL;
748 			if (softs->amr_busyslots > 0)
749 				softs->amr_busyslots--;
750 			if (softs->amr_busyslots == 0)
751 				cv_broadcast(&softs->cmd_cv);
752 
753 			mutex_exit(&softs->cmd_mutex);
754 
755 			pkt = ac->pkt;
756 			*pkt->pkt_scbp = 0;
757 			pkt->pkt_statistics |= STAT_TIMEOUT;
758 			pkt->pkt_reason = CMD_TIMEOUT;
759 			if (!(pkt->pkt_flags &
760 			FLAG_NOINTR) && pkt->pkt_comp) {
761 				/* call pkt callback */
762 				(*pkt->pkt_comp)(pkt);
763 			}
764 
765 		} else {
766 			mutex_exit(&softs->cmd_mutex);
767 		}
768 	}
769 
770 	/* restart the amr timer */
771 	mutex_enter(&softs->periodic_mutex);
772 	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
773 		softs->timeout_t = timeout(amr_periodic, (void *)softs,
774 				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
775 	mutex_exit(&softs->periodic_mutex);
776 }
777 
778 /*
779  * Interrogate the controller for the operational parameters we require.
780  */
781 static int
782 amr_query_controller(struct amr_softs *softs)
783 {
784 	struct amr_enquiry3	*aex;
785 	struct amr_prodinfo	*ap;
786 	struct amr_enquiry	*ae;
787 	uint32_t		ldrv;
788 	int			instance;
789 
790 	/*
791 	 * If we haven't found the real limit yet, let us have a couple of
792 	 * commands in order to be able to probe.
793 	 */
794 	if (softs->maxio == 0)
795 		softs->maxio = 2;
796 
797 	instance = ddi_get_instance(softs->dev_info_p);
798 
799 	/*
800 	 * Try to issue an ENQUIRY3 command
801 	 */
802 	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
803 		AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {
804 
805 		AMRDB_PRINT((CE_NOTE, "First enquiry"));
806 
807 		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
808 		    softs->logic_drive[ldrv].al_size =
809 						aex->ae_drivesize[ldrv];
810 		    softs->logic_drive[ldrv].al_state =
811 						aex->ae_drivestate[ldrv];
812 		    softs->logic_drive[ldrv].al_properties =
813 						aex->ae_driveprop[ldrv];
814 		    AMRDB_PRINT((CE_NOTE,
815 			"  drive %d: size: %d state %x properties %x\n",
816 			ldrv,
817 			softs->logic_drive[ldrv].al_size,
818 			softs->logic_drive[ldrv].al_state,
819 			softs->logic_drive[ldrv].al_properties));
820 
821 		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
822 			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
823 				instance, ldrv);
824 		    else
825 			softs->amr_nlogdrives++;
826 		}
827 		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);
828 
829 		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
830 			AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
831 			AMRDB_PRINT((CE_NOTE,
832 				"Cannot obtain product data from controller"));
833 			return (EIO);
834 		}
835 
836 		softs->maxdrives = AMR_40LD_MAXDRIVES;
837 		softs->maxchan = ap->ap_nschan;
838 		softs->maxio = ap->ap_maxio;
839 
840 		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
841 			AMR_FIRMWARE_VER_SIZE);
842 		softs->amr_product_info.
843 			pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;
844 
845 		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
846 			AMR_PRODUCT_INFO_SIZE);
847 		softs->amr_product_info.
848 			pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;
849 
850 		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
851 		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
852 	} else {
853 
854 		AMRDB_PRINT((CE_NOTE, "First enquiry failed, \
855 				so try another way"));
856 
857 		/* failed, try the 8LD ENQUIRY commands */
858 		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
859 			AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
860 			== NULL) {
861 
862 			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
863 				AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
864 				== NULL) {
865 				AMRDB_PRINT((CE_NOTE,
866 					"Cannot obtain configuration data"));
867 				return (EIO);
868 			}
869 			ae->ae_signature = 0;
870 		}
871 
872 		/*
873 		 * Fetch current state of logical drives.
874 		 */
875 		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
876 		    softs->logic_drive[ldrv].al_size =
877 						ae->ae_ldrv.al_size[ldrv];
878 		    softs->logic_drive[ldrv].al_state =
879 						ae->ae_ldrv.al_state[ldrv];
880 		    softs->logic_drive[ldrv].al_properties =
881 						ae->ae_ldrv.al_properties[ldrv];
882 		    AMRDB_PRINT((CE_NOTE,
883 			" ********* drive %d: %d state %x properties %x",
884 			ldrv,
885 			softs->logic_drive[ldrv].al_size,
886 			softs->logic_drive[ldrv].al_state,
887 			softs->logic_drive[ldrv].al_properties));
888 
889 		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
890 			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
891 				instance, ldrv);
892 		    else
893 			softs->amr_nlogdrives++;
894 		}
895 
896 		softs->maxdrives = AMR_8LD_MAXDRIVES;
897 		softs->maxchan = ae->ae_adapter.aa_channels;
898 		softs->maxio = ae->ae_adapter.aa_maxio;
899 		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
900 	}
901 
902 	/*
903 	 * Mark remaining drives as unused.
904 	 */
905 	for (; ldrv < AMR_MAXLD; ldrv++)
906 		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;
907 
908 	/*
909 	 * Cap the maximum number of outstanding I/Os.  AMI's driver
910 	 * doesn't trust the controller's reported value, and lockups have
911 	 * been seen when we do.
912 	 */
913 	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);
914 
915 	return (DDI_SUCCESS);
916 }
917 
918 /*
919  * Run a generic enquiry-style command.
920  */
921 static void *
922 amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
923 				uint8_t cmdsub, uint8_t cmdqual)
924 {
925 	struct amr_command	ac;
926 	void			*result;
927 
928 	result = NULL;
929 
930 	bzero(&ac, sizeof (struct amr_command));
931 	ac.ac_softs = softs;
932 
933 	/* set command flags */
934 	ac.ac_flags |= AMR_CMD_DATAOUT;
935 
936 	/* build the command proper */
937 	ac.mailbox.mb_command	= cmd;
938 	ac.mailbox.mb_cmdsub	= cmdsub;
939 	ac.mailbox.mb_cmdqual	= cmdqual;
940 
941 	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
942 		return (NULL);
943 
944 	if (amr_poll_command(&ac) || ac.ac_status != 0) {
945 		AMRDB_PRINT((CE_NOTE, "can not poll command, goto out"));
946 		amr_enquiry_unmapcmd(&ac);
947 		return (NULL);
948 	}
949 
950 	/* allocate the response structure */
951 	result = kmem_zalloc(bufsize, KM_SLEEP);
952 
953 	bcopy(ac.ac_data, result, bufsize);
954 
955 	amr_enquiry_unmapcmd(&ac);
956 	return (result);
957 }
958 
959 /*
960  * Flush the controller's internal cache, return status.
961  */
962 static int
963 amr_flush(struct amr_softs *softs)
964 {
965 	struct amr_command	ac;
966 	int			error = 0;
967 
968 	bzero(&ac, sizeof (struct amr_command));
969 	ac.ac_softs = softs;
970 
971 	ac.ac_flags |= AMR_CMD_DATAOUT;
972 
973 	/* build the command proper */
974 	ac.mailbox.mb_command = AMR_CMD_FLUSH;
975 
976 	/* have to poll, as the system may be going down or otherwise damaged */
977 	if (error = amr_poll_command(&ac)) {
978 		AMRDB_PRINT((CE_NOTE, "can not poll this cmd"));
979 		return (error);
980 	}
981 
982 	return (error);
983 }
984 
985 /*
986  * Take a command, submit it to the controller and wait for it to return.
987  * Returns nonzero on error.  Can be safely called with interrupts enabled.
988  */
989 static int
990 amr_poll_command(struct amr_command *ac)
991 {
992 	struct amr_softs	*softs = ac->ac_softs;
993 	volatile uint32_t	done_flag;
994 
995 	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
996 			(void *)&ac->mailbox,
997 			(void *)softs->mailbox,
998 			(uint32_t)AMR_MBOX_CMDSIZE));
999 
1000 	mutex_enter(&softs->cmd_mutex);
1001 
1002 	while (softs->amr_busyslots != 0)
1003 		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);
1004 
1005 	/*
1006 	 * For read/write commands the caller has filled in the s/g table;
1007 	 * copy it into the last s/g slot, which is reserved for polled I/O.
1008 	 */
1009 	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
1010 	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
1011 		bcopy(ac->sgtable,
1012 			softs->sg_items[softs->sg_max_count - 1].sg_table,
1013 			sizeof (struct amr_sgentry) * AMR_NSEG);
1014 
1015 		(void) ddi_dma_sync(
1016 			softs->sg_items[softs->sg_max_count - 1].sg_handle,
1017 			0, 0, DDI_DMA_SYNC_FORDEV);
1018 
1019 		ac->mailbox.mb_physaddr =
1020 			softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
1021 	}
1022 
1023 	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);
1024 
1025 	/* sync the dma memory */
1026 	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
1027 
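	/*
	 * Polled submission handshake, as implemented below: seed the
	 * ident/poll/ack/status fields, mark the mailbox busy, and ring
	 * the inbound doorbell with AMR_QIDB_SUBMIT; wait for the
	 * firmware to change mb_nstatus, latch mb_status, wait for
	 * mb_poll to read AMR_POLL_ACK, then acknowledge by setting
	 * mb_ack and ringing the doorbell with AMR_QIDB_ACK until the
	 * doorbell bit clears.
	 */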
1028 	/* clear the poll/ack fields in the mailbox */
1029 	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
1030 	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
1031 	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
1032 	softs->mailbox->mb_poll = 0;
1033 	softs->mailbox->mb_ack = 0;
1034 	softs->mailbox->mb_busy = 1;
1035 
1036 	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
1037 
1038 	/* sync the dma memory */
1039 	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1040 
1041 	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
1042 			1000, done_flag);
1043 	if (!done_flag) {
1044 		mutex_exit(&softs->cmd_mutex);
1045 		return (1);
1046 	}
1047 
1048 	ac->ac_status = softs->mailbox->mb_status;
1049 
1050 	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
1051 	if (!done_flag) {
1052 		mutex_exit(&softs->cmd_mutex);
1053 		return (1);
1054 	}
1055 
1056 	softs->mailbox->mb_poll = 0;
1057 	softs->mailbox->mb_ack = AMR_POLL_ACK;
1058 
1059 	/* acknowledge that we have the commands */
1060 	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
1061 
1062 	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
1063 	if (!done_flag) {
1064 		mutex_exit(&softs->cmd_mutex);
1065 		return (1);
1066 	}
1067 
1068 	mutex_exit(&softs->cmd_mutex);
1069 	return (ac->ac_status != AMR_STATUS_SUCCESS);
1070 }
1071 
1072 /*
1073  * Set up the scatter/gather tables
1074  */
1075 static int
1076 amr_setup_sg(struct amr_softs *softs)
1077 {
1078 	uint32_t		i;
1079 	size_t			len;
1080 	ddi_dma_cookie_t	cookie;
1081 	uint_t			cookien;
1082 
1083 	softs->sg_max_count = 0;
1084 
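	/*
	 * Pre-allocate one physically contiguous s/g table (AMR_NSEG
	 * entries) per command slot.  Each binding must collapse to a
	 * single DMA cookie because the controller is handed one
	 * physical address (sg_phyaddr) for the entire table.
	 */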
1085 	for (i = 0; i < AMR_MAXCMD; i++) {
1086 
1087 		/* reset the cookien */
1088 		cookien = 0;
1089 
1090 		(softs->sg_items[i]).sg_handle = NULL;
1091 		if (ddi_dma_alloc_handle(
1092 			softs->dev_info_p,
1093 			&addr_dma_attr,
1094 			DDI_DMA_SLEEP,
1095 			NULL,
1096 			&((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {
1097 
1098 			AMRDB_PRINT((CE_WARN,
1099 			"Cannot alloc dma handle for s/g table"));
1100 			goto error_out;
1101 		}
1102 
1103 		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
1104 			sizeof (struct amr_sgentry) * AMR_NSEG,
1105 			&accattr,
1106 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1107 			DDI_DMA_SLEEP, NULL,
1108 			(caddr_t *)(&(softs->sg_items[i]).sg_table),
1109 			&len,
1110 			&(softs->sg_items[i]).sg_acc_handle)
1111 			!= DDI_SUCCESS) {
1112 
1113 			AMRDB_PRINT((CE_WARN,
1114 			"Cannot allocate DMA memory"));
1115 			goto error_out;
1116 		}
1117 
1118 		if (ddi_dma_addr_bind_handle(
1119 			(softs->sg_items[i]).sg_handle,
1120 			NULL,
1121 			(caddr_t)((softs->sg_items[i]).sg_table),
1122 			len,
1123 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1124 			DDI_DMA_SLEEP,
1125 			NULL,
1126 			&cookie,
1127 			&cookien) != DDI_DMA_MAPPED) {
1128 
1129 			AMRDB_PRINT((CE_WARN,
1130 			"Cannot bind communication area for s/g table"));
1131 			goto error_out;
1132 		}
1133 
1134 		if (cookien != 1)
1135 			goto error_out;
1136 
1137 		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
1138 		softs->sg_max_count++;
1139 	}
1140 
1141 	return (DDI_SUCCESS);
1142 
1143 error_out:
1144 	/*
1145 	 * Couldn't allocate/initialize all of the sg table entries.
1146 	 * Clean up the partially-initialized entry before returning.
1147 	 */
1148 	if (cookien) {
1149 		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
1150 	}
1151 	if ((softs->sg_items[i]).sg_acc_handle) {
1152 		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
1153 		(softs->sg_items[i]).sg_acc_handle = NULL;
1154 	}
1155 	if ((softs->sg_items[i]).sg_handle) {
1156 		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
1157 		(softs->sg_items[i]).sg_handle = NULL;
1158 	}
1159 
1160 	/*
1161 	 * At least two sg table entries are needed. One is for regular data
1162 	 * I/O commands, the other is for poll I/O commands.
1163 	 */
1164 	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
1165 }
1166 
1167 /*
1168  * Map/unmap (ac)'s data in the controller's addressable space as required.
1169  *
1170  * These functions may be safely called multiple times on a given command.
1171  */
1172 static void
1173 amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
1174 		int nsegments)
1175 {
1176 	struct amr_sgentry	*sg;
1177 	uint32_t		i, size;
1178 
1179 	sg = ac->sgtable;
1180 
1181 	size = 0;
1182 
1183 	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
1184 	for (i = 0; i < nsegments; i++, sg++) {
1185 		sg->sg_addr = buffer_dma_cookiep->dmac_address;
1186 		sg->sg_count = buffer_dma_cookiep->dmac_size;
1187 		size += sg->sg_count;
1188 
1189 		/*
1190 		 * Advance to the next cookie unless we have just
1191 		 * consumed the last cookie of the current DMA
1192 		 * window.
1193 		 */
1194 		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
1195 			ddi_dma_nextcookie(ac->buffer_dma_handle,
1196 				buffer_dma_cookiep);
1197 	}
1198 
1199 	ac->transfer_size = size;
1200 	ac->data_transfered += size;
1201 }
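
/*
 * transfer_size is the byte count of the chunk just mapped, while
 * data_transfered accumulates across chunks; amr_tran_init_pkt() uses
 * the accumulated value to compute pkt_resid for partial-DMA transfers.
 */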
1202 
1203 
1204 /*
1205  * map the amr command for enquiry, allocate the DMA resource
1206  */
1207 static int
1208 amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
1209 {
1210 	struct amr_softs	*softs = ac->ac_softs;
1211 	size_t			len;
1212 	uint_t			dma_flags;
1213 
1214 	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
1215 			(void *)ac, ac->ac_flags));
1216 
1217 	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1218 		dma_flags = DDI_DMA_READ;
1219 	} else {
1220 		dma_flags = DDI_DMA_WRITE;
1221 	}
1222 
1223 	dma_flags |= DDI_DMA_CONSISTENT;
1224 
1225 	/* process the DMA by address bind mode */
1226 	if (ddi_dma_alloc_handle(softs->dev_info_p,
1227 		&addr_dma_attr, DDI_DMA_SLEEP, NULL,
1228 		&ac->buffer_dma_handle) !=
1229 		DDI_SUCCESS) {
1230 
1231 		AMRDB_PRINT((CE_WARN,
1232 		"Cannot allocate addr DMA tag"));
1233 		goto error_out;
1234 	}
1235 
1236 	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
1237 		data_size,
1238 		&accattr,
1239 		dma_flags,
1240 		DDI_DMA_SLEEP,
1241 		NULL,
1242 		(caddr_t *)&ac->ac_data,
1243 		&len,
1244 		&ac->buffer_acc_handle) !=
1245 		DDI_SUCCESS) {
1246 
1247 		AMRDB_PRINT((CE_WARN,
1248 		"Cannot allocate DMA memory"));
1249 		goto error_out;
1250 	}
1251 
1252 	if ((ddi_dma_addr_bind_handle(
1253 		ac->buffer_dma_handle,
1254 		NULL, ac->ac_data, len, dma_flags,
1255 		DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
1256 		&ac->num_of_cookie)) != DDI_DMA_MAPPED) {
1257 
1258 		AMRDB_PRINT((CE_WARN,
1259 			"Cannot bind addr for dma"));
1260 		goto error_out;
1261 	}
1262 
1263 	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;
1264 
1265 	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
1266 	ac->mailbox.mb_nsgelem = 0;
1267 	ac->mailbox.mb_physaddr = ac->ac_dataphys;
1268 
1269 	ac->ac_flags |= AMR_CMD_MAPPED;
1270 
1271 	return (DDI_SUCCESS);
1272 
1273 error_out:
1274 	if (ac->num_of_cookie)
1275 		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1276 	if (ac->buffer_acc_handle) {
1277 		ddi_dma_mem_free(&ac->buffer_acc_handle);
1278 		ac->buffer_acc_handle = NULL;
1279 	}
1280 	if (ac->buffer_dma_handle) {
1281 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1282 		ac->buffer_dma_handle = NULL;
1283 	}
1284 
1285 	return (DDI_FAILURE);
1286 }
1287 
1288 /*
1289  * unmap the amr command for enquiry, free the DMA resource
1290  */
1291 static void
1292 amr_enquiry_unmapcmd(struct amr_command *ac)
1293 {
1294 	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
1295 			(void *)ac));
1296 
1297 	/* if the command involved data at all and was mapped */
1298 	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
1299 		if (ac->buffer_dma_handle)
1300 			(void) ddi_dma_unbind_handle(
1301 				ac->buffer_dma_handle);
1302 		if (ac->buffer_acc_handle) {
1303 			ddi_dma_mem_free(&ac->buffer_acc_handle);
1304 			ac->buffer_acc_handle = NULL;
1305 		}
1306 		if (ac->buffer_dma_handle) {
1307 			(void) ddi_dma_free_handle(
1308 				&ac->buffer_dma_handle);
1309 			ac->buffer_dma_handle = NULL;
1310 		}
1311 	}
1312 
1313 	ac->ac_flags &= ~AMR_CMD_MAPPED;
1314 }
1315 
1316 /*
1317  * map the amr command, allocate the DMA resource
1318  */
1319 static int
1320 amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
1321 {
1322 	uint_t	dma_flags;
1323 	off_t	off;
1324 	size_t	len;
1325 	int	error;
1326 	int	(*cb)(caddr_t);
1327 
1328 	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
1329 			(void *)ac, ac->ac_flags));
1330 
1331 	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1332 		dma_flags = DDI_DMA_READ;
1333 	} else {
1334 		dma_flags = DDI_DMA_WRITE;
1335 	}
1336 
1337 	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
1338 		dma_flags |= DDI_DMA_CONSISTENT;
1339 	}
1340 	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
1341 		dma_flags |= DDI_DMA_PARTIAL;
1342 	}
1343 
1344 	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
1345 		ac->ac_flags |= AMR_CMD_MAPPED;
1346 		return (DDI_SUCCESS);
1347 	}
1348 
1349 	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1350 
1351 	/* if the command involves data at all, and hasn't been mapped */
1352 	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
1353 		/* process the DMA by buffer bind mode */
1354 		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
1355 			ac->ac_buf,
1356 			dma_flags,
1357 			cb,
1358 			arg,
1359 			&ac->buffer_dma_cookie,
1360 			&ac->num_of_cookie);
1361 		switch (error) {
1362 		case DDI_DMA_PARTIAL_MAP:
1363 			if (ddi_dma_numwin(ac->buffer_dma_handle,
1364 				&ac->num_of_win) == DDI_FAILURE) {
1365 
1366 				AMRDB_PRINT((CE_WARN,
1367 					"Cannot get dma num win"));
1368 				(void) ddi_dma_unbind_handle(
1369 					ac->buffer_dma_handle);
1370 				(void) ddi_dma_free_handle(
1371 					&ac->buffer_dma_handle);
1372 				ac->buffer_dma_handle = NULL;
1373 				return (DDI_FAILURE);
1374 			}
1375 			ac->current_win = 0;
1376 			break;
1377 
1378 		case DDI_DMA_MAPPED:
1379 			ac->num_of_win = 1;
1380 			ac->current_win = 0;
1381 			break;
1382 
1383 		default:
1384 			AMRDB_PRINT((CE_WARN,
1385 				"Cannot bind buf for dma"));
1386 
1387 			(void) ddi_dma_free_handle(
1388 				&ac->buffer_dma_handle);
1389 			ac->buffer_dma_handle = NULL;
1390 			return (DDI_FAILURE);
1391 		}
1392 
1393 		ac->current_cookie = 0;
1394 
1395 		ac->ac_flags |= AMR_CMD_MAPPED;
1396 	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
1397 		/* get the next window */
1398 		ac->current_win++;
1399 		(void) ddi_dma_getwin(ac->buffer_dma_handle,
1400 			ac->current_win, &off, &len,
1401 			&ac->buffer_dma_cookie,
1402 			&ac->num_of_cookie);
1403 		ac->current_cookie = 0;
1404 	}
1405 
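	/*
	 * Hand the controller at most AMR_NSEG cookies per call.  When
	 * more cookies remain, current_cookie records how far we got so
	 * the next invocation resumes there; once a window is exhausted
	 * it is set to AMR_LAST_COOKIE_TAG, and the branch above then
	 * steps to the next DMA window.
	 */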
1406 	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
1407 		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
1408 		ac->current_cookie += AMR_NSEG;
1409 	} else {
1410 		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
1411 		ac->num_of_cookie - ac->current_cookie);
1412 		ac->current_cookie = AMR_LAST_COOKIE_TAG;
1413 	}
1414 
1415 	return (DDI_SUCCESS);
1416 }
1417 
1418 /*
1419  * unmap the amr command, free the DMA resource
1420  */
1421 static void
1422 amr_unmapcmd(struct amr_command *ac)
1423 {
1424 	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
1425 			(void *)ac));
1426 
1427 	/* if the command involved data at all and was mapped */
1428 	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
1429 		ac->ac_buf && ac->buffer_dma_handle)
1430 		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1431 
1432 	ac->ac_flags &= ~AMR_CMD_MAPPED;
1433 }
1434 
1435 static int
1436 amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp)
1437 {
1438 	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
1439 
1440 	/*
1441 	 * hba_private always points to the amr_softs struct
1442 	 */
1443 	softp->hba_tran->tran_hba_private	= softp;
1444 	softp->hba_tran->tran_tgt_init		= amr_tran_tgt_init;
1445 	softp->hba_tran->tran_tgt_probe		= scsi_hba_probe;
1446 	softp->hba_tran->tran_start		= amr_tran_start;
1447 	softp->hba_tran->tran_reset		= amr_tran_reset;
1448 	softp->hba_tran->tran_getcap		= amr_tran_getcap;
1449 	softp->hba_tran->tran_setcap		= amr_tran_setcap;
1450 	softp->hba_tran->tran_init_pkt		= amr_tran_init_pkt;
1451 	softp->hba_tran->tran_destroy_pkt	= amr_tran_destroy_pkt;
1452 	softp->hba_tran->tran_dmafree		= amr_tran_dmafree;
1453 	softp->hba_tran->tran_sync_pkt		= amr_tran_sync_pkt;
1454 	softp->hba_tran->tran_abort		= NULL;
1455 	softp->hba_tran->tran_tgt_free		= NULL;
1456 	softp->hba_tran->tran_quiesce		= NULL;
1457 	softp->hba_tran->tran_unquiesce		= NULL;
1458 	softp->hba_tran->tran_sd		= NULL;
1459 
1460 	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
1461 		SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
1462 		scsi_hba_tran_free(softp->hba_tran);
1463 		softp->hba_tran = NULL;
1464 		return (DDI_FAILURE);
1465 	} else {
1466 		return (DDI_SUCCESS);
1467 	}
1468 }
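
/*
 * Because scsi_hba_attach_setup() is called with SCSI_HBA_TRAN_CLONE,
 * each target receives its own clone of the tran structure; every
 * clone's tran_hba_private still points at the shared per-instance
 * amr_softs, which is how the tran_* entry points above recover their
 * soft state.
 */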
1469 
1470 /*ARGSUSED*/
1471 static int
1472 amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1473 	scsi_hba_tran_t *tran, struct scsi_device *sd)
1474 {
1475 	struct amr_softs	*softs;
1476 	ushort_t		target = sd->sd_address.a_target;
1477 	uchar_t			lun = sd->sd_address.a_lun;
1478 
1479 	softs = (struct amr_softs *)
1480 		(sd->sd_address.a_hba_tran->tran_hba_private);
1481 
1482 	if ((lun == 0) && (target < AMR_MAXLD))
1483 		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
1484 			return (DDI_SUCCESS);
1485 
1486 	return (DDI_FAILURE);
1487 }
1488 
1489 static int
1490 amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1491 {
1492 	struct amr_softs	*softs;
1493 	struct buf		*bp = NULL;
1494 	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1495 	int			ret;
1496 	uint32_t		capacity;
1497 	struct amr_command	*ac;
1498 
1499 	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
1500 		cdbp->scc_cmd, ap->a_target, ap->a_lun));
1501 
1502 	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1503 	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
1504 		(softs->logic_drive[ap->a_target].al_state ==
1505 			AMR_LDRV_OFFLINE)) {
1506 		cmn_err(CE_WARN, "target or lun is not correct!");
1507 		ret = TRAN_BADPKT;
1508 		return (ret);
1509 	}
1510 
1511 	ac = (struct amr_command *)pkt->pkt_ha_private;
1512 	bp = ac->ac_buf;
1513 
1514 	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));
1515 
1516 	switch (cdbp->scc_cmd) {
1517 	case SCMD_READ:		/* read		*/
1518 	case SCMD_READ_G1:	/* read	g1	*/
1519 	case SCMD_READ_BUFFER:	/* read buffer	*/
1520 	case SCMD_WRITE:	/* write	*/
1521 	case SCMD_WRITE_G1:	/* write g1	*/
1522 	case SCMD_WRITE_BUFFER:	/* write buffer	*/
1523 		amr_rw_command(softs, pkt, ap->a_target);
1524 
1525 		if (pkt->pkt_flags & FLAG_NOINTR) {
1526 			(void) amr_poll_command(ac);
1527 			pkt->pkt_state |= (STATE_GOT_BUS
1528 					| STATE_GOT_TARGET
1529 					| STATE_SENT_CMD
1530 					| STATE_XFERRED_DATA);
1531 			*pkt->pkt_scbp = 0;
1532 			pkt->pkt_statistics |= STAT_SYNC;
1533 			pkt->pkt_reason = CMD_CMPLT;
1534 		} else {
1535 			mutex_enter(&softs->queue_mutex);
1536 			if (softs->waiting_q_head == NULL) {
1537 				ac->ac_prev = NULL;
1538 				ac->ac_next = NULL;
1539 				softs->waiting_q_head = ac;
1540 				softs->waiting_q_tail = ac;
1541 			} else {
1542 				ac->ac_next = NULL;
1543 				ac->ac_prev = softs->waiting_q_tail;
1544 				softs->waiting_q_tail->ac_next = ac;
1545 				softs->waiting_q_tail = ac;
1546 			}
1547 			mutex_exit(&softs->queue_mutex);
1548 			amr_start_waiting_queue((void *)softs);
1549 		}
1550 		ret = TRAN_ACCEPT;
1551 		break;
1552 
1553 	case SCMD_INQUIRY: /* inquiry */
1554 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
1555 			struct scsi_inquiry inqp;
1556 			uint8_t *sinq_p = (uint8_t *)&inqp;
1557 
1558 			bzero(&inqp, sizeof (struct scsi_inquiry));
1559 
1560 			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
1561 				/*
1562 				 * EVPD and page code are
1563 				 * not supported
1564 				 */
1565 				sinq_p[1] = 0xFF;
1566 				sinq_p[2] = 0x0;
1567 			} else {
1568 				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
1569 				inqp.inq_ansi = AMR_INQ_ANSI_VER;
1570 				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
1571 				/* Enable Tag Queue */
1572 				inqp.inq_cmdque = 1;
1573 				bcopy("MegaRaid", inqp.inq_vid,
1574 					sizeof (inqp.inq_vid));
1575 				bcopy(softs->amr_product_info.pi_product_name,
1576 					inqp.inq_pid,
1577 					AMR_PRODUCT_INFO_SIZE);
1578 				bcopy(softs->amr_product_info.pi_firmware_ver,
1579 					inqp.inq_revision,
1580 					AMR_FIRMWARE_VER_SIZE);
1581 			}
1582 
1583 			amr_unmapcmd(ac);
1584 
1585 			if (bp->b_flags & (B_PHYS | B_PAGEIO))
1586 				bp_mapin(bp);
1587 			bcopy(&inqp, bp->b_un.b_addr,
1588 				sizeof (struct scsi_inquiry));
1589 
1590 			pkt->pkt_state |= STATE_XFERRED_DATA;
1591 		}
1592 		pkt->pkt_reason = CMD_CMPLT;
1593 		pkt->pkt_state |= (STATE_GOT_BUS
1594 				| STATE_GOT_TARGET
1595 				| STATE_SENT_CMD);
1596 		*pkt->pkt_scbp = 0;
1597 		ret = TRAN_ACCEPT;
1598 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1599 			(*pkt->pkt_comp)(pkt);
1600 		break;
1601 
1602 	case SCMD_READ_CAPACITY: /* read capacity */
1603 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
1604 			struct scsi_capacity cp;
1605 
1606 			capacity = softs->logic_drive[ap->a_target].al_size - 1;
1607 			cp.capacity = BE_32(capacity);
1608 			cp.lbasize = BE_32(512);
1609 
1610 			amr_unmapcmd(ac);
1611 
1612 			if (bp->b_flags & (B_PHYS | B_PAGEIO))
1613 				bp_mapin(bp);
1614 			bcopy(&cp, bp->b_un.b_addr, 8);
1615 		}
1616 		pkt->pkt_reason = CMD_CMPLT;
1617 		pkt->pkt_state |= (STATE_GOT_BUS
1618 				| STATE_GOT_TARGET
1619 				| STATE_SENT_CMD
1620 				| STATE_XFERRED_DATA);
1621 		*pkt->pkt_scbp = 0;
1622 		ret = TRAN_ACCEPT;
1623 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1624 			(*pkt->pkt_comp)(pkt);
1625 		break;
1626 
1627 	case SCMD_MODE_SENSE:		/* mode sense */
1628 	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
1629 		amr_unmapcmd(ac);
1630 
1631 		capacity = softs->logic_drive[ap->a_target].al_size - 1;
1632 		amr_mode_sense(cdbp, bp, capacity);
1633 
1634 		pkt->pkt_reason = CMD_CMPLT;
1635 		pkt->pkt_state |= (STATE_GOT_BUS
1636 				| STATE_GOT_TARGET
1637 				| STATE_SENT_CMD
1638 				| STATE_XFERRED_DATA);
1639 		*pkt->pkt_scbp = 0;
1640 		ret = TRAN_ACCEPT;
1641 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1642 			(*pkt->pkt_comp)(pkt);
1643 		break;
1644 
1645 	case SCMD_TEST_UNIT_READY:	/* test unit ready */
1646 	case SCMD_REQUEST_SENSE:	/* request sense */
1647 	case SCMD_FORMAT:		/* format */
1648 	case SCMD_START_STOP:		/* start stop */
1649 	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
1650 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
1651 			amr_unmapcmd(ac);
1652 
1653 			if (bp->b_flags & (B_PHYS | B_PAGEIO))
1654 				bp_mapin(bp);
1655 			bzero(bp->b_un.b_addr, bp->b_bcount);
1656 
1657 			pkt->pkt_state |= STATE_XFERRED_DATA;
1658 		}
1659 		pkt->pkt_reason = CMD_CMPLT;
1660 		pkt->pkt_state |= (STATE_GOT_BUS
1661 				| STATE_GOT_TARGET
1662 				| STATE_SENT_CMD);
1663 		ret = TRAN_ACCEPT;
1664 		*pkt->pkt_scbp = 0;
1665 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1666 			(*pkt->pkt_comp)(pkt);
1667 		break;
1668 
1669 	default: /* any other commands */
1670 		amr_unmapcmd(ac);
1671 		pkt->pkt_reason = CMD_INCOMPLETE;
1672 		pkt->pkt_state = (STATE_GOT_BUS
1673 				| STATE_GOT_TARGET
1674 				| STATE_SENT_CMD
1675 				| STATE_GOT_STATUS
1676 				| STATE_ARQ_DONE);
1677 		ret = TRAN_ACCEPT;
1678 		*pkt->pkt_scbp = 0;
1679 		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
1680 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1681 			(*pkt->pkt_comp)(pkt);
1682 		break;
1683 	}
1684 
1685 	return (ret);
1686 }
1687 
1688 /*
1689  * tran_reset() is supposed to reset the bus, target, or adapter, as selected
1690  * by the "level" argument, in support of fault recovery. However, we have
1691  * confirmation from LSI that these HBA cards do not support any commands to
1692  * reset the bus/target/adapter/channel.
1693  *
1694  * If tran_reset() returned FAILURE to sd, the system would not continue to
1695  * dump core. Since a core dump is a crucial aid in analyzing a panic, we
1696  * adopt a workaround: return a fake SUCCESS to sd during panic, which forces
1697  * the system to continue dumping core even though the dump may be flawed in
1698  * some situations, because in-flight commands keep DMAing data into memory.
1699  * In addition, the workaround may not succeed at all if the panic was caused
1700  * by the HBA itself. The workaround is therefore not a good model for
1701  * implementing tran_reset(); the most reasonable approach would be to send
1702  * a reset command to the adapter.
1704  */
1705 /*ARGSUSED*/
1706 static int
1707 amr_tran_reset(struct scsi_address *ap, int level)
1708 {
1709 	struct amr_softs	*softs;
1710 	volatile uint32_t	done_flag;
1711 
1712 	if (ddi_in_panic()) {
1713 		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1714 
1715 		/* drain and acknowledge any outstanding commands */
1716 		while (softs->amr_busyslots > 0) {
1717 			AMR_DELAY((softs->mailbox->mb_busy == 0),
1718 					AMR_RETRYCOUNT, done_flag);
1719 			if (!done_flag) {
1720 				/*
1721 				 * command not completed; report the
1722 				 * problem and give up
1723 				 */
1724 				cmn_err(CE_WARN,
1725 					"AMR command is not completed");
1726 				return (0);
1727 			}
1728 
1729 			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
1730 
1731 			/* wait for the acknowledge from hardware */
1732 			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
1733 					AMR_RETRYCOUNT, done_flag);
1734 			if (!done_flag) {
1735 				/*
1736 				 * the hardware never acknowledged the
1737 				 * doorbell; report the problem and give up
1738 				 */
1739 				cmn_err(CE_WARN, "No answer from the hardware");
1740 
1741 				return (0);
1743 			}
1744 
1745 			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
1746 		}
1747 
1748 		/* flush the controller */
1749 		(void) amr_flush(softs);
1750 
1751 		/*
1752 		 * While the system is panicking, tran_reset() returns a fake
1753 		 * SUCCESS to sd so that the system can continue dumping core
1754 		 * via polled commands. This is a workaround for dumping core
1755 		 * during panic.
1756 		 *
1757 		 * Note: in-flight commands may continue DMAing data into
1758 		 *	 memory while the core is being dumped, which can
1759 		 *	 introduce flaws into the core file, so a cmn_err()
1760 		 *	 is printed to warn users. In most cases, however,
1761 		 *	 the core file will be fine.
1762 		 */
1763 		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
1764 				"that doesn't support software reset. This "
1765 				"means that memory being used by the HBA for "
1766 				"DMA based reads could have been updated after "
1767 				"we panic'd.");
1768 		return (1);
1769 	} else {
1770 		/* return failure to sd */
1771 		return (0);
1772 	}
1773 }
1774 
1775 /*ARGSUSED*/
1776 static int
1777 amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1778 {
1779 	struct amr_softs	*softs;
1780 
1781 	/*
1782 	 * We don't allow inquiring about capabilities for other targets
1783 	 */
1784 	if (cap == NULL || whom == 0)
1785 		return (-1);
1786 
1787 	softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);
1788 
1789 	switch (scsi_hba_lookup_capstr(cap)) {
1790 	case SCSI_CAP_ARQ:
1791 		return (1);
1792 	case SCSI_CAP_GEOMETRY:
1793 		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
1794 	case SCSI_CAP_SECTOR_SIZE:
1795 		return (AMR_DEFAULT_SECTORS);
1796 	case SCSI_CAP_TOTAL_SECTORS:
1797 		/* number of sectors */
1798 		return (softs->logic_drive[ap->a_target].al_size);
1799 	case SCSI_CAP_UNTAGGED_QING:
1800 	case SCSI_CAP_TAGGED_QING:
1801 		return (1);
1802 	default:
1803 		return (-1);
1804 	}
1805 }
1806 
1807 /*ARGSUSED*/
1808 static int
1809 amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
1810 		int whom)
1811 {
1812 	/*
1813 	 * We don't allow setting capabilities for other targets
1814 	 */
1815 	if (cap == NULL || whom == 0) {
1816 		AMRDB_PRINT((CE_NOTE,
1817 			"Set Cap not supported, string = %s, whom=%d",
1818 			cap, whom));
1819 		return (-1);
1820 	}
1821 
1822 	switch (scsi_hba_lookup_capstr(cap)) {
1823 	case SCSI_CAP_ARQ:
1824 		return (1);
1825 	case SCSI_CAP_TOTAL_SECTORS:
1826 		return (1);
1827 	case SCSI_CAP_SECTOR_SIZE:
1828 		return (1);
1829 	case SCSI_CAP_UNTAGGED_QING:
1830 	case SCSI_CAP_TAGGED_QING:
1831 		return ((value == 1) ? 1 : 0);
1832 	default:
1833 		return (0);
1834 	}
1835 }
1836 
1837 static struct scsi_pkt *
1838 amr_tran_init_pkt(struct scsi_address *ap,
1839     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
1840     int tgtlen, int flags, int (*callback)(), caddr_t arg)
1841 {
1842 	struct amr_softs	*softs;
1843 	struct amr_command	*ac;
1844 	uint32_t		slen;
1845 
1846 	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1847 
1848 	if ((ap->a_lun != 0)||(ap->a_target >= AMR_MAXLD)||
1849 		(softs->logic_drive[ap->a_target].al_state ==
1850 			AMR_LDRV_OFFLINE)) {
1851 		return (NULL);
1852 	}
1853 
1854 	if (pkt == NULL) {
1855 		/* force auto request sense */
1856 		slen = MAX(statuslen, sizeof (struct scsi_arq_status));
1857 
1858 		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
1859 			slen, tgtlen, sizeof (struct amr_command),
1860 			callback, arg);
1861 		if (pkt == NULL) {
1862 			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
1863 			return (NULL);
1864 		}
1865 		pkt->pkt_address	= *ap;
1866 		pkt->pkt_comp		= (void (*)())NULL;
1867 		pkt->pkt_time		= 0;
1868 		pkt->pkt_resid		= 0;
1869 		pkt->pkt_statistics	= 0;
1870 		pkt->pkt_reason		= 0;
1871 
1872 		ac = (struct amr_command *)pkt->pkt_ha_private;
1873 		ac->ac_buf = bp;
1874 		ac->cmdlen = cmdlen;
1875 		ac->ac_softs = softs;
1876 		ac->pkt = pkt;
1877 		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
1878 		ac->ac_flags &= ~AMR_CMD_BUSY;
1879 
1880 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1881 			return (pkt);
1882 		}
1883 
1884 		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
1885 			DDI_DMA_SLEEP, NULL,
1886 			&ac->buffer_dma_handle) != DDI_SUCCESS) {
1887 
1888 			AMRDB_PRINT((CE_WARN,
1889 				"Cannot allocate buffer DMA handle"));
1890 			scsi_hba_pkt_free(ap, pkt);
1891 			return (NULL);
1892 
1893 		}
1894 
1895 	} else {
1896 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1897 			return (pkt);
1898 		}
1899 		ac = (struct amr_command *)pkt->pkt_ha_private;
1900 	}
1901 
1902 	ASSERT(ac != NULL);
1903 
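	/*
	 * The AMR_CMD_DATAIN/DATAOUT flags are named from the controller's
	 * point of view: a host read (B_READ) moves data out of the
	 * controller, which is why amr_rw_command() maps AMR_CMD_DATAOUT
	 * to AMR_CMD_LREAD and amr_tran_sync_pkt() syncs AMR_CMD_DATAIN
	 * transfers for the device.
	 */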
1904 	if (bp->b_flags & B_READ) {
1905 		ac->ac_flags |= AMR_CMD_DATAOUT;
1906 	} else {
1907 		ac->ac_flags |= AMR_CMD_DATAIN;
1908 	}
1909 
1910 	if (flags & PKT_CONSISTENT) {
1911 		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
1912 	}
1913 
1914 	if (flags & PKT_DMA_PARTIAL) {
1915 		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
1916 	}
1917 
1918 	if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
1919 		scsi_hba_pkt_free(ap, pkt);
1920 		return (NULL);
1921 	}
1922 
1923 	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;
1924 
1925 	AMRDB_PRINT((CE_NOTE,
1926 		"init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
1927 		(uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
1928 		ac->data_transfered));
1929 
1930 	ASSERT(pkt->pkt_resid >= 0);
1931 
1932 	return (pkt);
1933 }
1934 
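/*
 * tran_destroy_pkt(9E) entry point: release the DMA resources bound in
 * amr_tran_init_pkt() and free the packet itself.
 */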
1935 static void
1936 amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1937 {
1938 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1939 
1940 	amr_unmapcmd(ac);
1941 
1942 	if (ac->buffer_dma_handle) {
1943 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1944 		ac->buffer_dma_handle = NULL;
1945 	}
1946 
1947 	scsi_hba_pkt_free(ap, pkt);
1948 	AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1949 }
1950 
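/*
 * tran_sync_pkt(9E) entry point: synchronize the CPU's and the device's
 * views of the data buffer, using the transfer direction recorded in
 * the command flags.
 */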
1951 /*ARGSUSED*/
1952 static void
1953 amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1954 {
1955 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1956 
1957 	if (ac->buffer_dma_handle) {
1958 		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
1959 			(ac->ac_flags & AMR_CMD_DATAIN) ?
1960 			DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1961 	}
1962 }
1963 
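/*
 * tran_dmafree(9E) entry point: unbind and free the buffer DMA handle
 * if the command is still mapped.
 */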
1964 /*ARGSUSED*/
1965 static void
1966 amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1967 {
1968 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1969 
1970 	if (ac->ac_flags & AMR_CMD_MAPPED) {
1971 		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1972 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1973 		ac->buffer_dma_handle = NULL;
1974 		ac->ac_flags &= ~AMR_CMD_MAPPED;
1975 	}
1976 
1977 }
1978 
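/*
 * Build the controller mailbox for a logical-drive read or write: the
 * transfer size is rounded up to whole blocks, and the LBA is taken
 * from the group-1 (10-byte) or group-0 (6-byte) CDB as appropriate;
 * group-0 CDBs carry only a 21-bit LBA.
 */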
1979 /*ARGSUSED*/
1980 static void
1981 amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
1982 {
1983 	struct amr_command	*ac = (struct amr_command *)pkt->pkt_ha_private;
1984 	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1985 	uint8_t			cmd;
1986 
1987 	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1988 		cmd = AMR_CMD_LREAD;
1989 	} else {
1990 		cmd = AMR_CMD_LWRITE;
1991 	}
1992 
1993 	ac->mailbox.mb_command = cmd;
1994 	ac->mailbox.mb_blkcount =
1995 		(ac->transfer_size + AMR_BLKSIZE - 1)/AMR_BLKSIZE;
1996 	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
1997 				GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
1998 	ac->mailbox.mb_drive = (uint8_t)target;
1999 }
2000 
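/*
 * Synthesize MODE SENSE data for the format (page 3) and rigid disk
 * geometry (page 4) pages from the driver's fixed default geometry; the
 * cylinder count is derived as capacity / (AMR_DEFAULT_HEADS *
 * AMR_DEFAULT_CYLINDERS). Unsupported pages are returned zero-filled.
 */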
2001 static void
2002 amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
2003 {
2004 	uchar_t			pagecode;
2005 	struct mode_format	*page3p;
2006 	struct mode_geometry	*page4p;
2007 	struct mode_header	*headerp;
2008 	uint32_t		ncyl;
2009 
2010 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
2011 		return;
2012 
2013 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2014 		bp_mapin(bp);
2015 
2016 	pagecode = cdbp->cdb_un.sg.scsi[0];
2017 	switch (pagecode) {
2018 	case SD_MODE_SENSE_PAGE3_CODE:
2019 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2020 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2021 
2022 		page3p = (struct mode_format *)((caddr_t)headerp +
2023 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2024 		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
2025 		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
2026 		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
2027 		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);
2028 
2029 		return;
2030 
2031 	case SD_MODE_SENSE_PAGE4_CODE:
2032 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2033 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2034 
2035 		page4p = (struct mode_geometry *)((caddr_t)headerp +
2036 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2037 		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
2038 		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
2039 		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
2040 		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);
2041 
2042 		ncyl = capacity / (AMR_DEFAULT_HEADS*AMR_DEFAULT_CYLINDERS);
2043 		page4p->cyl_lb = BE_8(ncyl & 0xff);
2044 		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
2045 		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);
2046 
2047 		return;
2048 	default:
2049 		bzero(bp->b_un.b_addr, bp->b_bcount);
2050 		return;
2051 	}
2052 }
2053 
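/*
 * Fabricate auto-request-sense data for a failed command: a CHECK
 * CONDITION status with extended sense carrying the given sense key.
 */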
2054 static void
2055 amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
2056 {
2057 	struct scsi_arq_status *arqstat;
2058 
2059 	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
2060 	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
2061 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
2062 	arqstat->sts_rqpkt_resid = 0;
2063 	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2064 				STATE_SENT_CMD | STATE_XFERRED_DATA;
2065 	arqstat->sts_rqpkt_statistics = 0;
2066 	arqstat->sts_sensedata.es_valid = 1;
2067 	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2068 	arqstat->sts_sensedata.es_key = key;
2069 }
2070 
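/*
 * Drain the waiting queue: for each queued command, find a free slot
 * (the last slot is reserved for polled I/O), copy its scatter/gather
 * table and mailbox into place, and submit it through the inbound
 * doorbell. The mailbox ident is the slot number plus one, which is
 * how completions are matched back to commands in amr_done(). Only one
 * submission may be outstanding at a time, so we bail out if
 * AMR_QIDB_SUBMIT is still set.
 */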
2071 static void
2072 amr_start_waiting_queue(void *softp)
2073 {
2074 	uint32_t		slot;
2075 	struct amr_command	*ac;
2076 	volatile uint32_t	done_flag;
2077 	struct amr_softs	*softs = (struct amr_softs *)softp;
2078 
2079 	/* serialize access to the waiting queue and the mailbox */
2080 	mutex_enter(&softs->queue_mutex);
2081 	mutex_enter(&softs->cmd_mutex);
2082 
2083 	while ((ac = softs->waiting_q_head) != NULL) {
2084 		/*
2085 		 * Find an available slot; the last slot is
2086 		 * reserved for polled I/O commands.
2087 		 */
2088 		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
2089 			if (softs->busycmd[slot] == NULL) {
2090 				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
2091 					/*
2092 					 * a submission is still outstanding;
2093 					 * only one is allowed at a time
2094 					 */
2095 					mutex_exit(&softs->cmd_mutex);
2096 					mutex_exit(&softs->queue_mutex);
2097 					return;
2098 				}
2099 
2100 				ac->ac_timestamp = ddi_get_time();
2101 
2102 				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {
2103 
2104 					softs->busycmd[slot] = ac;
2105 					ac->ac_slot = slot;
2106 					softs->amr_busyslots++;
2107 
2108 					bcopy(ac->sgtable,
2109 					softs->sg_items[slot].sg_table,
2110 					sizeof (struct amr_sgentry) * AMR_NSEG);
2111 
2112 					(void) ddi_dma_sync(
2113 					softs->sg_items[slot].sg_handle,
2114 					0, 0, DDI_DMA_SYNC_FORDEV);
2115 
2116 					ac->mailbox.mb_physaddr =
2117 					softs->sg_items[slot].sg_phyaddr;
2118 				}
2119 
2120 				/* take the cmd from the queue */
2121 				softs->waiting_q_head = ac->ac_next;
2122 
2123 				ac->mailbox.mb_ident = ac->ac_slot + 1;
2124 				ac->mailbox.mb_busy = 1;
2125 				ac->ac_next = NULL;
2126 				ac->ac_prev = NULL;
2127 				ac->ac_flags |= AMR_CMD_GOT_SLOT;
2128 
2129 				/* clear the poll/ack fields in the mailbox */
2130 				softs->mailbox->mb_poll = 0;
2131 				softs->mailbox->mb_ack = 0;
2132 
2133 				AMR_DELAY((softs->mailbox->mb_busy == 0),
2134 					AMR_RETRYCOUNT, done_flag);
2135 				if (!done_flag) {
2136 					/*
2137 					 * mailbox still busy; report the
2138 					 * problem and retry this command
2139 					 */
2140 					cmn_err(CE_WARN,
2141 						"AMR command is not completed");
2142 					break;
2143 				}
2144 
2145 				bcopy(&ac->mailbox, (void *)softs->mailbox,
2146 					AMR_MBOX_CMDSIZE);
2147 				ac->ac_flags |= AMR_CMD_BUSY;
2148 
2149 				(void) ddi_dma_sync(softs->mbox_dma_handle,
2150 					0, 0, DDI_DMA_SYNC_FORDEV);
2151 
2152 				AMR_QPUT_IDB(softs,
2153 					softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
2154 
2155 				/*
2156 				 * current ac is submitted
2157 				 * so quit 'for-loop' to get next ac
2158 				 */
2159 				break;
2160 			}
2161 		}
2162 
2163 		/* no free slot was found, finish our task */
2164 		if (slot == (softs->sg_max_count - 1))
2165 			break;
2166 	}
2167 
2168 	/* done; release the queue and mailbox locks */
2169 	mutex_exit(&softs->cmd_mutex);
2170 	mutex_exit(&softs->queue_mutex);
2171 }
2172 
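/*
 * Interrupt-time completion handling: acknowledge the outbound doorbell,
 * snapshot the mailbox (which lists the idents of completed commands),
 * acknowledge the controller, and collect the finished commands on a
 * local list. The packets are completed outside cmd_mutex, and a taskq
 * thread is dispatched to restart the waiting queue.
 */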
2173 static void
2174 amr_done(struct amr_softs *softs)
2175 {
2176 
2177 	uint32_t		i, idx;
2178 	volatile uint32_t	done_flag;
2179 	struct amr_mailbox	*mbox, mbsave;
2180 	struct amr_command	*ac, *head, *tail;
2181 
2182 	head = tail = NULL;
2183 
2184 	AMR_QPUT_ODB(softs, AMR_QODB_READY);
2185 
2186 	/* acknowledge interrupt */
2187 	(void) AMR_QGET_ODB(softs);
2188 
2189 	mutex_enter(&softs->cmd_mutex);
2190 
2191 	if (softs->mailbox->mb_nstatus != 0) {
2192 		(void) ddi_dma_sync(softs->mbox_dma_handle,
2193 			0, 0, DDI_DMA_SYNC_FORCPU);
2194 
2195 		/* save mailbox, which contains a list of completed commands */
2196 		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
2197 				&mbsave, sizeof (mbsave));
2198 
2199 		mbox = &mbsave;
2200 
2201 		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
2202 
2203 		/* wait for the acknowledge from hardware */
2204 		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
2205 				AMR_RETRYCOUNT, done_flag);
2206 		if (!done_flag) {
2207 			/*
2208 			 * no acknowledge from the hardware; return from the
2209 			 * current interrupt and wait for the next one
2210 			 */
2211 			cmn_err(CE_WARN, "No answer from the hardware");
2212 
2213 			mutex_exit(&softs->cmd_mutex);
2214 			return;
2215 		}
2216 
2217 		for (i = 0; i < mbox->mb_nstatus; i++) {
2218 			idx = mbox->mb_completed[i] - 1;
2219 			ac = softs->busycmd[idx];
2220 
2221 			if (ac != NULL) {
2222 				/* pull the command from the busy index */
2223 				softs->busycmd[idx] = NULL;
2224 				if (softs->amr_busyslots > 0)
2225 					softs->amr_busyslots--;
2226 				if (softs->amr_busyslots == 0)
2227 					cv_broadcast(&softs->cmd_cv);
2228 
2229 				ac->ac_flags &= ~AMR_CMD_BUSY;
2230 				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
2231 				ac->ac_status = mbox->mb_status;
2232 
2233 				/* enqueue here */
2234 				if (head) {
2235 					tail->ac_next = ac;
2236 					tail = ac;
2237 					tail->ac_next = NULL;
2238 				} else {
2239 					tail = head = ac;
2240 					ac->ac_next = NULL;
2241 				}
2242 			} else {
2243 				AMRDB_PRINT((CE_WARN,
2244 					"ac in mailbox is NULL!"));
2245 			}
2246 		}
2247 	} else {
2248 		AMRDB_PRINT((CE_WARN, "no completed commands in the mailbox"));
2249 	}
2250 
2251 	mutex_exit(&softs->cmd_mutex);
2252 
2253 	if (head != NULL) {
2254 		amr_call_pkt_comp(head);
2255 	}
2256 
2257 	/* dispatch a thread to process the pending I/O if there is any */
2258 	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
2259 		(void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
2260 		cmn_err(CE_WARN, "No memory available to dispatch taskq");
2261 	}
2262 }
2263 
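/*
 * Complete a chain of finished commands: successes are marked CMD_CMPLT
 * with all transport states set, failures are reported as CMD_INCOMPLETE
 * with fabricated hardware-error sense data, and each packet's
 * completion callback is invoked unless FLAG_NOINTR is set.
 */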
2264 static void
2265 amr_call_pkt_comp(register struct amr_command *head)
2266 {
2267 	register struct scsi_pkt	*pkt;
2268 	register struct amr_command	*ac, *localhead;
2269 
2270 	localhead = head;
2271 
2272 	while (localhead) {
2273 		ac = localhead;
2274 		localhead = ac->ac_next;
2275 		ac->ac_next = NULL;
2276 
2277 		pkt = ac->pkt;
2278 		*pkt->pkt_scbp = 0;
2279 
2280 		if (ac->ac_status == AMR_STATUS_SUCCESS) {
2281 			pkt->pkt_state |= (STATE_GOT_BUS
2282 					| STATE_GOT_TARGET
2283 					| STATE_SENT_CMD
2284 					| STATE_XFERRED_DATA);
2285 			pkt->pkt_reason = CMD_CMPLT;
2286 		} else {
2287 			pkt->pkt_state |= STATE_GOT_BUS
2288 					| STATE_ARQ_DONE;
2289 			pkt->pkt_reason = CMD_INCOMPLETE;
2290 			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
2291 		}
2292 
2293 		if (!(pkt->pkt_flags & FLAG_NOINTR) &&
2294 			pkt->pkt_comp) {
2295 			(*pkt->pkt_comp)(pkt);
2296 		}
2297 	}
2298 }
2299