/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/int_types.h>
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/scsi/targets/sddef.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/byteorder.h>

#include "amrreg.h"
#include "amrvar.h"

/* dynamic debug symbol */
int	amr_debug_var = 0;

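/*
 * Both macros below poll "cond" roughly every 100 microseconds, giving up
 * after "count" iterations and reporting the outcome through "done_flag"
 * (1 = condition met, 0 = timed out).  AMR_DELAY sleeps between polls via
 * delay(), so it may only be used where blocking is legal; AMR_BUSYWAIT
 * spins with drv_usecwait() and is therefore also usable at panic time,
 * as in amr_tran_reset() below.
 */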
#define	AMR_DELAY(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			delay(drv_usectohz(100)); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

#define	AMR_BUSYWAIT(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			drv_usecwait(100); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

/*
 * driver interfaces
 */
char _depends_on[] = "misc/scsi";

static uint_t amr_intr(caddr_t arg);
static void amr_done(struct amr_softs *softs);

static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
			void *arg, void **result);
static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
static int amr_detach(dev_info_t *, ddi_detach_cmd_t);

static int amr_setup_mbox(struct amr_softs *softs);
static int amr_setup_sg(struct amr_softs *softs);

/*
 * Command wrappers
 */
static int amr_query_controller(struct amr_softs *softs);
static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
			uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
static int amr_flush(struct amr_softs *softs);

/*
 * Command processing.
 */
static void amr_rw_command(struct amr_softs *softs,
			struct scsi_pkt *pkt, int lun);
static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
			unsigned int capacity);
static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
static void amr_enquiry_unmapcmd(struct amr_command *ac);
static int amr_mapcmd(struct amr_command *ac);
static void amr_unmapcmd(struct amr_command *ac);

/*
 * Status monitoring
 */
static void amr_periodic(void *data);

/*
 * Interface-specific shims
 */
static int amr_poll_command(struct amr_command *ac);
static void amr_start_waiting_queue(void *softp);
static void amr_call_pkt_comp(struct amr_command *head);

/*
 * SCSI interface
 */
static int amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int amr_tran_reset(struct scsi_address *ap, int level);
static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);

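/*
 * Two DMA attribute sets are used below: buffer_dma_attr describes user
 * data buffers and allows scatter/gather lists of up to AMR_NSEG segments
 * with AMR_BLKSIZE granularity, while addr_dma_attr describes the driver's
 * own control structures (mailbox, s/g tables) and insists on a single
 * physically contiguous segment.
 */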
static ddi_dma_attr_t buffer_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x00ffffffull,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		AMR_NSEG,	/* maximum number of segments */
		AMR_BLKSIZE,	/* granularity */
		0,		/* flags (reserved) */
};

static ddi_dma_attr_t addr_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x7fffffff,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		1,		/* maximum number of segments */
		1,		/* granularity */
		0,		/* flags (reserved) */
};


static struct dev_ops   amr_ops = {
	DEVO_REV,	/* devo_rev, */
	0,		/* refcnt  */
	amr_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	amr_attach,	/* attach */
	amr_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	0		/* power */
};


extern struct mod_ops mod_driverops;
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. driver here */
	"AMR Driver V%I%",	/* Name of the module. */
	&amr_ops,		/* Driver ops vector */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/* DMA access attributes */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static struct amr_softs  *amr_softstatep;

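/*
 * Module load/unload plumbing.  _init() registers the per-instance soft
 * state allocator and the SCSA HBA framework before installing the module;
 * each step is unwound if a later one fails, and _fini() releases them in
 * the reverse order.
 */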
int
_init(void)
{
	int		error;

	error = ddi_soft_state_init((void *)&amr_softstatep,
			sizeof (struct amr_softs), 0);

	if (error != 0)
		goto error_out;

	if ((error = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini((void*)&amr_softstatep);
		goto error_out;
	}

	error = mod_install(&modlinkage);
	if (error != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini((void*)&amr_softstatep);
		goto error_out;
	}

	return (error);

error_out:
	cmn_err(CE_NOTE, "_init failed");
	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int	error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini((void*)&amr_softstatep);
	return (error);
}


static int
amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			error;
	uint32_t		command, i;
	int			instance;
	caddr_t			cfgaddr;

	instance = ddi_get_instance(dev);

	switch (cmd) {
		case DDI_ATTACH:
			break;

		case DDI_RESUME:
			return (DDI_FAILURE);

		default:
			return (DDI_FAILURE);
	}

	/*
	 * Initialize softs.
	 */
	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	softs = ddi_get_soft_state(amr_softstatep, instance);
	softs->state |= AMR_STATE_SOFT_STATE_SETUP;

	softs->dev_info_p = dev;

	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
		(void *)softs, (void *)&(softs->amr_busyslots)));

	if (pci_config_setup(dev, &(softs->pciconfig_handle))
		!= DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;

	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
		&accattr, &(softs->regsmap_handle));
	if (error != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_MEM_MAPPED;

	/*
	 * Determine board type.
	 */
	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);

	/*
	 * Make sure we are going to be able to talk to this board.
	 */
	if ((command & PCI_COMM_MAE) == 0) {
		AMRDB_PRINT((CE_NOTE,  "memory window not available"));
		goto error_out;
	}

	/* force the busmaster enable bit on */
	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(softs->pciconfig_handle,
				PCI_CONF_COMM, command);
		command = pci_config_get16(softs->pciconfig_handle,
				PCI_CONF_COMM);
		if (!(command & PCI_COMM_ME))
			goto error_out;
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if (ddi_intr_hilevel(dev, 0) != 0) {
	    AMRDB_PRINT((CE_NOTE,  "High level interrupt is not supported!"));
	    goto error_out;
	}

	if (ddi_get_iblock_cookie(dev, 0,  &softs->iblock_cookiep)
		!= DDI_SUCCESS) {
		goto error_out;
	}

	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
		softs->iblock_cookiep); /* should be used in interrupt */
	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* should be used in interrupt */
	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* should be used in interrupt */
	/* synchronize waits for the busy slots via this cv */
	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
	softs->state |= AMR_STATE_KMUTEX_INITED;

	/*
	 * Do bus-independent initialisation, bring controller online.
	 */
	if (amr_setup_mbox(softs) != DDI_SUCCESS)
		goto error_out;
	softs->state |= AMR_STATE_MAILBOX_SETUP;

	if (amr_setup_sg(softs) != DDI_SUCCESS)
		goto error_out;

	softs->state |= AMR_STATE_SG_TABLES_SETUP;

	if (amr_query_controller(softs) != DDI_SUCCESS)
		goto error_out;

	/*
	 * A taskq is created for dispatching the waiting-queue processing
	 * threads. The number of threads equals the number of logical
	 * drives, or 1 if no logical drive is configured for this instance.
	 */
	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
		MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
		goto error_out;
	}
	softs->state |= AMR_STATE_TASKQ_SETUP;

	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
		amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_INTR_SETUP;

	/* set up the tran interface */
	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
		goto error_out;
	}
	softs->state |= AMR_STATE_TRAN_SETUP;

	/* schedule a thread for periodic check */
	mutex_enter(&softs->periodic_mutex);
	softs->timeout_t = timeout(amr_periodic, (void *)softs,
				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
	mutex_exit(&softs->periodic_mutex);

	/* print firmware information in verbose mode */
	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
		softs->amr_product_info.pi_product_name,
		softs->amr_product_info.pi_firmware_ver);

	/* clear any interrupts */
	AMR_QCLEAR_INTR(softs);
	return (DDI_SUCCESS);

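	/*
	 * Each AMR_STATE_* bit recorded above marks one completed setup
	 * step; the error path below tests those bits and unwinds only
	 * the steps that actually succeeded, in reverse order.
	 */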
error_out:
	if (softs->state & AMR_STATE_INTR_SETUP) {
		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
	}
	if (softs->state & AMR_STATE_TASKQ_SETUP) {
		ddi_taskq_destroy(softs->amr_taskq);
	}
	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
		for (i = 0; i < softs->sg_max_count; i++) {
			(void) ddi_dma_unbind_handle(
				softs->sg_items[i].sg_handle);
			(void) ddi_dma_mem_free(
				&((softs->sg_items[i]).sg_acc_handle));
			(void) ddi_dma_free_handle(
				&(softs->sg_items[i].sg_handle));
		}
	}
	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
	}
	if (softs->state & AMR_STATE_KMUTEX_INITED) {
		mutex_destroy(&softs->queue_mutex);
		mutex_destroy(&softs->cmd_mutex);
		mutex_destroy(&softs->periodic_mutex);
		cv_destroy(&softs->cmd_cv);
	}
	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->regsmap_handle);
	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
		pci_config_teardown(&softs->pciconfig_handle);
	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
		ddi_soft_state_free(amr_softstatep, instance);
	return (DDI_FAILURE);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called during detach and system shutdown.
 *
 * Note that we can assume that the bufq on the controller is empty, as we
 * won't allow shutdown if any device is open.
 */
/*ARGSUSED*/
static int amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			instance;
	uint32_t		i, done_flag;

	instance = ddi_get_instance(dev);
	softs = ddi_get_soft_state(amr_softstatep, instance);

	/* flush the controller */
	if (amr_flush(softs) != 0) {
		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
		return (EIO);
	}

	/* release the amr timer */
	mutex_enter(&softs->periodic_mutex);
	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
	if (softs->timeout_t) {
		(void) untimeout(softs->timeout_t);
		softs->timeout_t = 0;
	}
	mutex_exit(&softs->periodic_mutex);

	for (i = 0; i < softs->sg_max_count; i++) {
		(void) ddi_dma_unbind_handle(
			softs->sg_items[i].sg_handle);
		(void) ddi_dma_mem_free(
			&((softs->sg_items[i]).sg_acc_handle));
		(void) ddi_dma_free_handle(
			&(softs->sg_items[i].sg_handle));
	}

	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);

	/* disconnect the interrupt handler */
	ddi_remove_intr(softs->dev_info_p,  0, softs->iblock_cookiep);

	/* wait for any in-progress interrupt handlers to complete */
	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
	if (!done_flag) {
		cmn_err(CE_WARN, "Suspicious interrupts in-progress.");
	}

	ddi_taskq_destroy(softs->amr_taskq);

	(void) scsi_hba_detach(dev);
	scsi_hba_tran_free(softs->hba_tran);
	ddi_regs_map_free(&softs->regsmap_handle);
	pci_config_teardown(&softs->pciconfig_handle);

	mutex_destroy(&softs->queue_mutex);
	mutex_destroy(&softs->cmd_mutex);
	mutex_destroy(&softs->periodic_mutex);
	cv_destroy(&softs->cmd_cv);

	/* print firmware information in verbose mode */
	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
		softs->amr_product_info.pi_product_name,
		softs->amr_product_info.pi_firmware_ver);

	ddi_soft_state_free(amr_softstatep, instance);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result)
{
	struct amr_softs	*softs;
	int			instance;

	instance = ddi_get_instance(dip);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softs = ddi_get_soft_state(amr_softstatep, instance);
			if (softs != NULL) {
				*result = softs->dev_info_p;
				return (DDI_SUCCESS);
			} else {
				*result = NULL;
				return (DDI_FAILURE);
			}
		case DDI_INFO_DEVT2INSTANCE:
			*(int *)result = instance;
			break;
		default:
			break;
	}
	return (DDI_SUCCESS);
}

/*
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
 */
static uint_t
amr_intr(caddr_t arg)
{
	struct amr_softs *softs = (struct amr_softs *)arg;

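	/*
	 * amr_interrupts_counter tracks handlers currently executing so
	 * that amr_detach() can wait (via AMR_DELAY above) for in-flight
	 * interrupts to drain before tearing the instance down.
	 */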
609 
610 	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
611 		softs->amr_interrupts_counter--;
612 		return (DDI_INTR_UNCLAIMED);
613 	}
614 
615 	/* collect finished commands, queue anything waiting */
616 	amr_done(softs);
617 
618 	softs->amr_interrupts_counter--;
619 
620 	return (DDI_INTR_CLAIMED);
621 
622 }
623 
624 /*
625  * Setup the amr mailbox
626  */
627 static int
628 amr_setup_mbox(struct amr_softs *softs)
629 {
630 	uint32_t	move;
631 	size_t		mbox_len;
632 
633 	if (ddi_dma_alloc_handle(
634 		softs->dev_info_p,
635 		&addr_dma_attr,
636 		DDI_DMA_SLEEP,
637 		NULL,
638 		&softs->mbox_dma_handle) != DDI_SUCCESS) {
639 
640 		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
641 		goto error_out;
642 	}
643 
644 	if (ddi_dma_mem_alloc(
645 		softs->mbox_dma_handle,
646 		sizeof (struct amr_mailbox) + 16,
647 		&accattr,
648 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
649 		DDI_DMA_SLEEP,
650 		NULL,
651 		(caddr_t *)(&softs->mbox),
652 		&mbox_len,
653 		&softs->mbox_acc_handle) !=
654 		DDI_SUCCESS) {
655 
656 		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
657 		goto error_out;
658 	}
659 
660 	if (ddi_dma_addr_bind_handle(
661 		softs->mbox_dma_handle,
662 		NULL,
663 		(caddr_t)softs->mbox,
664 		sizeof (struct amr_mailbox) + 16,
665 		DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
666 		DDI_DMA_SLEEP,
667 		NULL,
668 		&softs->mbox_dma_cookie,
669 		&softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {
670 
671 		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
672 		goto error_out;
673 	}
674 
675 	if (softs->mbox_dma_cookien != 1)
676 		goto error_out;
677 
	/*
	 * The physical address of the mailbox must be aligned on a
	 * 16-byte boundary.
	 */
	move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address)&0xf);
	softs->mbox_phyaddr =
		(softs->mbox_dma_cookie.dmac_address + move);

	softs->mailbox =
		(struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);
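
	/*
	 * "move" is always in the range 1..16, which is why 16 extra bytes
	 * were allocated above.  For example, a cookie address ending in
	 * 0x...4 yields move = 12, while an already aligned address yields
	 * move = 16; either way the adjusted address is 16-byte aligned
	 * and still falls inside the allocation.
	 */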

	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
		softs->mbox_phyaddr, (void *)softs->mailbox,
		softs->mbox, move));

	return (DDI_SUCCESS);

error_out:
	if (softs->mbox_dma_cookien)
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	if (softs->mbox_acc_handle) {
		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
		softs->mbox_acc_handle = NULL;
	}
	if (softs->mbox_dma_handle) {
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
		softs->mbox_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Perform a periodic check of the controller status
 */
static void
amr_periodic(void *data)
{
	uint32_t		i;
	struct amr_softs	*softs = (struct amr_softs *)data;
	struct scsi_pkt 	*pkt;
	register struct amr_command	*ac;

	for (i = 0; i < softs->sg_max_count; i++) {
		if (softs->busycmd[i] == NULL)
			continue;

		mutex_enter(&softs->cmd_mutex);

		if (softs->busycmd[i] == NULL) {
			mutex_exit(&softs->cmd_mutex);
			continue;
		}

		pkt = softs->busycmd[i]->pkt;

		if ((pkt->pkt_time != 0) &&
			(ddi_get_time() -
			softs->busycmd[i]->ac_timestamp >
			pkt->pkt_time)) {

			cmn_err(CE_WARN,
				"timed out packet detected, "
				"sc = %p, pkt = %p, index = %d, ac = %p",
				(void *)softs,
				(void *)pkt,
				i,
				(void *)softs->busycmd[i]);

			ac = softs->busycmd[i];
			ac->ac_next = NULL;

			/* pull command from the busy index */
			softs->busycmd[i] = NULL;
			if (softs->amr_busyslots > 0)
				softs->amr_busyslots--;
			if (softs->amr_busyslots == 0)
				cv_broadcast(&softs->cmd_cv);

			mutex_exit(&softs->cmd_mutex);

			pkt = ac->pkt;
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_reason = CMD_TIMEOUT;
			if (!(pkt->pkt_flags &
			FLAG_NOINTR) && pkt->pkt_comp) {
				/* call pkt callback */
				(*pkt->pkt_comp)(pkt);
			}

		} else {
			mutex_exit(&softs->cmd_mutex);
		}
	}

	/* restart the amr timer */
	mutex_enter(&softs->periodic_mutex);
	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
		softs->timeout_t = timeout(amr_periodic, (void *)softs,
				drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	mutex_exit(&softs->periodic_mutex);
}

/*
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softs *softs)
{
	struct amr_enquiry3	*aex;
	struct amr_prodinfo	*ap;
	struct amr_enquiry	*ae;
	uint32_t		ldrv;
	int			instance;

	/*
	 * If we haven't found the real limit yet, let us have a couple of
	 * commands in order to be able to probe.
	 */
	if (softs->maxio == 0)
		softs->maxio = 2;

	instance = ddi_get_instance(softs->dev_info_p);

	/*
	 * Try to issue an ENQUIRY3 command
	 */
	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
		AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {

		AMRDB_PRINT((CE_NOTE, "First enquiry"));

		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
		    softs->logic_drive[ldrv].al_size =
						aex->ae_drivesize[ldrv];
		    softs->logic_drive[ldrv].al_state =
						aex->ae_drivestate[ldrv];
		    softs->logic_drive[ldrv].al_properties =
						aex->ae_driveprop[ldrv];
		    AMRDB_PRINT((CE_NOTE,
			"  drive %d: size: %d state %x properties %x\n",
			ldrv,
			softs->logic_drive[ldrv].al_size,
			softs->logic_drive[ldrv].al_state,
			softs->logic_drive[ldrv].al_properties));

		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
				instance, ldrv);
		    else
			softs->amr_nlogdrives++;
		}
		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);

		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
			AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
			AMRDB_PRINT((CE_NOTE,
				"Cannot obtain product data from controller"));
			return (EIO);
		}

		softs->maxdrives = AMR_40LD_MAXDRIVES;
		softs->maxchan = ap->ap_nschan;
		softs->maxio = ap->ap_maxio;

		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
			AMR_FIRMWARE_VER_SIZE);
		softs->amr_product_info.
			pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;

		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
			AMR_PRODUCT_INFO_SIZE);
		softs->amr_product_info.
			pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;

		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
	} else {

		AMRDB_PRINT((CE_NOTE,
			"First enquiry failed, so try another way"));

		/* failed, try the 8LD ENQUIRY commands */
		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
			AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
			== NULL) {

			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
				AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
				== NULL) {
				AMRDB_PRINT((CE_NOTE,
					"Cannot obtain configuration data"));
				return (EIO);
			}
			ae->ae_signature = 0;
		}

		/*
		 * Fetch current state of logical drives.
		 */
		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
		    softs->logic_drive[ldrv].al_size =
						ae->ae_ldrv.al_size[ldrv];
		    softs->logic_drive[ldrv].al_state =
						ae->ae_ldrv.al_state[ldrv];
		    softs->logic_drive[ldrv].al_properties =
						ae->ae_ldrv.al_properties[ldrv];
		    AMRDB_PRINT((CE_NOTE,
			" ********* drive %d: %d state %x properties %x",
			ldrv,
			softs->logic_drive[ldrv].al_size,
			softs->logic_drive[ldrv].al_state,
			softs->logic_drive[ldrv].al_properties));

		    if (softs->logic_drive[ldrv].al_state == AMR_LDRV_OFFLINE)
			cmn_err(CE_NOTE, "!instance %d log-drive %d is offline",
				instance, ldrv);
		    else
			softs->amr_nlogdrives++;
		}

		softs->maxdrives = AMR_8LD_MAXDRIVES;
		softs->maxchan = ae->ae_adapter.aa_channels;
		softs->maxio = ae->ae_adapter.aa_maxio;
		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
	}

	/*
	 * Mark remaining drives as unused.
	 */
	for (; ldrv < AMR_MAXLD; ldrv++)
		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;

	/*
	 * Cap the maximum number of outstanding I/Os.  AMI's driver
	 * doesn't trust the controller's reported value, and lockups have
	 * been seen when we do.
	 */
	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);

	return (DDI_SUCCESS);
}

/*
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
				uint8_t cmdsub, uint8_t cmdqual)
{
	struct amr_command	ac;
	void			*result;

	result = NULL;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	/* set command flags */
	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command	= cmd;
	ac.mailbox.mb_cmdsub	= cmdsub;
	ac.mailbox.mb_cmdqual	= cmdqual;

	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
		return (NULL);

	if (amr_poll_command(&ac) || ac.ac_status != 0) {
946 		AMRDB_PRINT((CE_NOTE, "can not poll command, goto out"));
		amr_enquiry_unmapcmd(&ac);
		return (NULL);
	}

	/* allocate the response structure */
	result = kmem_zalloc(bufsize, KM_SLEEP);

	bcopy(ac.ac_data, result, bufsize);

	amr_enquiry_unmapcmd(&ac);
	return (result);
}

/*
 * Flush the controller's internal cache, return status.
 */
static int
amr_flush(struct amr_softs *softs)
{
	struct amr_command	ac;
	int			error = 0;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = AMR_CMD_FLUSH;

	/* have to poll, as the system may be going down or otherwise damaged */
	if ((error = amr_poll_command(&ac)) != 0) {
979 		AMRDB_PRINT((CE_NOTE, "can not poll this cmd"));
		return (error);
	}

	return (error);
}

/*
 * Take a command, submit it to the controller and wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_poll_command(struct amr_command *ac)
{
	struct amr_softs	*softs = ac->ac_softs;
	volatile uint32_t	done_flag;

	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
			(void *)&ac->mailbox,
			(void *)softs->mailbox,
			(uint32_t)AMR_MBOX_CMDSIZE));

	mutex_enter(&softs->cmd_mutex);

	while (softs->amr_busyslots != 0)
		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);

	/*
	 * For read/write commands, the scatter/gather table should be
	 * filled, and the last entry in scatter/gather table will be used.
	 */
	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
		bcopy(ac->sgtable,
			softs->sg_items[softs->sg_max_count - 1].sg_table,
			sizeof (struct amr_sgentry) * AMR_NSEG);

		(void) ddi_dma_sync(
			softs->sg_items[softs->sg_max_count - 1].sg_handle,
			0, 0, DDI_DMA_SYNC_FORDEV);

		ac->mailbox.mb_physaddr =
			softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
	}

	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the poll/ack fields in the mailbox */
	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = 0;
	softs->mailbox->mb_busy = 1;
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
			1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	ac->ac_status = softs->mailbox->mb_status;

	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = AMR_POLL_ACK;

	/* acknowledge that we have the commands */
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	mutex_exit(&softs->cmd_mutex);
	return (ac->ac_status != AMR_STATUS_SUCCESS);
}

/*
 * setup the scatter/gather table
 */
static int
amr_setup_sg(struct amr_softs *softs)
{
	uint32_t		i;
	size_t			len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookien;

	softs->sg_max_count = 0;

	for (i = 0; i < AMR_MAXCMD; i++) {

		/* reset the cookien */
		cookien = 0;

		(softs->sg_items[i]).sg_handle = NULL;
		if (ddi_dma_alloc_handle(
			softs->dev_info_p,
			&addr_dma_attr,
			DDI_DMA_SLEEP,
			NULL,
			&((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			"Cannot alloc dma handle for s/g table"));
			goto error_out;
		}

		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
			sizeof (struct amr_sgentry) * AMR_NSEG,
			&accattr,
			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
			DDI_DMA_SLEEP, NULL,
			(caddr_t *)(&(softs->sg_items[i]).sg_table),
			&len,
			&(softs->sg_items[i]).sg_acc_handle)
			!= DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			"Cannot allocate DMA memory"));
			goto error_out;
		}

		if (ddi_dma_addr_bind_handle(
			(softs->sg_items[i]).sg_handle,
			NULL,
			(caddr_t)((softs->sg_items[i]).sg_table),
			sizeof (struct amr_sgentry) * AMR_NSEG,
			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
			DDI_DMA_SLEEP,
			NULL,
			&cookie,
			&cookien) != DDI_DMA_MAPPED) {

			AMRDB_PRINT((CE_WARN,
			"Cannot bind communication area for s/g table"));
			goto error_out;
		}

		if (cookien != 1)
			goto error_out;

		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
		softs->sg_max_count++;
	}

	return (DDI_SUCCESS);

error_out:
	/*
	 * Couldn't allocate/initialize all of the sg table entries.
	 * Clean up the partially-initialized entry before returning.
	 */
	if (cookien) {
		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
	}
	if ((softs->sg_items[i]).sg_acc_handle) {
		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
		(softs->sg_items[i]).sg_acc_handle = NULL;
	}
	if ((softs->sg_items[i]).sg_handle) {
		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
		(softs->sg_items[i]).sg_handle = NULL;
	}

	/*
	 * At least two sg table entries are needed. One is for regular data
	 * I/O commands, the other is for poll I/O commands.
	 */
	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
		int nsegments)
{
	struct amr_sgentry	*sg;
	uint32_t		i, size;

	sg = ac->sgtable;

	size = 0;

	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
	for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = buffer_dma_cookiep->dmac_address;
		sg->sg_count = buffer_dma_cookiep->dmac_size;
		size += sg->sg_count;

		/*
		 * If the end of the current window has been reached, there
		 * is no next cookie; otherwise fetch the next one.
		 */
		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
			ddi_dma_nextcookie(ac->buffer_dma_handle,
				buffer_dma_cookiep);
	}

	ac->transfer_size = size;
	ac->data_transfered += size;
}


/*
 * map the amr command for enquiry, allocate the DMA resource
 */
static int
amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
{
	struct amr_softs	*softs = ac->ac_softs;
	size_t			len;
	uint_t			dma_flags;

	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
			(void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}
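
	/*
	 * Note the direction convention: AMR_CMD_DATAOUT means data flows
	 * out of the controller into host memory (the host is reading), so
	 * it maps to DDI_DMA_READ; amr_tran_init_pkt() sets the flag from
	 * B_READ accordingly.
	 */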
	dma_flags |= DDI_DMA_CONSISTENT;

	/* process the DMA by address bind mode */
	if (ddi_dma_alloc_handle(softs->dev_info_p,
		&addr_dma_attr, DDI_DMA_SLEEP, NULL,
		&ac->buffer_dma_handle) !=
		DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		"Cannot allocate addr DMA tag"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
		data_size,
		&accattr,
		dma_flags,
		DDI_DMA_SLEEP,
		NULL,
		(caddr_t *)&ac->ac_data,
		&len,
		&ac->buffer_acc_handle) !=
		DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		"Cannot allocate DMA memory"));
		goto error_out;
	}

	if ((ddi_dma_addr_bind_handle(
		ac->buffer_dma_handle,
		NULL, ac->ac_data, data_size, dma_flags,
		DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
		&ac->num_of_cookie)) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_WARN,
			"Cannot bind addr for dma"));
		goto error_out;
	}

	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;

	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
	ac->mailbox.mb_nsgelem = 0;
	ac->mailbox.mb_physaddr = ac->ac_dataphys;

	ac->ac_flags |= AMR_CMD_MAPPED;

	return (DDI_SUCCESS);

error_out:
	if (ac->num_of_cookie)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
	if (ac->buffer_acc_handle) {
		ddi_dma_mem_free(&ac->buffer_acc_handle);
		ac->buffer_acc_handle = NULL;
	}
	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * unmap the amr command for enquiry, free the DMA resource
 */
static void
amr_enquiry_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
			(void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
		if (ac->buffer_dma_handle)
			(void) ddi_dma_unbind_handle(
				ac->buffer_dma_handle);
		if (ac->buffer_acc_handle) {
			ddi_dma_mem_free(&ac->buffer_acc_handle);
			ac->buffer_acc_handle = NULL;
		}
		if (ac->buffer_dma_handle) {
			(void) ddi_dma_free_handle(
				&ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
		}
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

/*
 * map the amr command, allocate the DMA resource
 */
static int
amr_mapcmd(struct amr_command *ac)
{
	uint_t	dma_flags;
	off_t	off;
	size_t	len;
	int	error;

	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
			(void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
		ac->ac_flags |= AMR_CMD_MAPPED;
		return (DDI_SUCCESS);
	}

	/* if the command involves data at all, and hasn't been mapped */
	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
		/* process the DMA by buffer bind mode */
		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
			ac->ac_buf,
			dma_flags,
			DDI_DMA_SLEEP,
			NULL,
			&ac->buffer_dma_cookie,
			&ac->num_of_cookie);
		switch (error) {
		case DDI_DMA_PARTIAL_MAP:
			if (ddi_dma_numwin(ac->buffer_dma_handle,
				&ac->num_of_win) == DDI_FAILURE) {

				AMRDB_PRINT((CE_WARN,
					"Cannot get dma num win"));
				(void) ddi_dma_unbind_handle(
					ac->buffer_dma_handle);
				(void) ddi_dma_free_handle(
					&ac->buffer_dma_handle);
				ac->buffer_dma_handle = NULL;
				return (DDI_FAILURE);
			}
			ac->current_win = 0;
			break;

		case DDI_DMA_MAPPED:
			ac->num_of_win = 1;
			ac->current_win = 0;
			break;

		default:
			AMRDB_PRINT((CE_WARN,
				"Cannot bind buf for dma"));

			(void) ddi_dma_free_handle(
				&ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		ac->current_cookie = 0;

		ac->ac_flags |= AMR_CMD_MAPPED;
	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
		/* get the next window */
		ac->current_win++;
		(void) ddi_dma_getwin(ac->buffer_dma_handle,
			ac->current_win, &off, &len,
			&ac->buffer_dma_cookie,
			&ac->num_of_cookie);
		ac->current_cookie = 0;
	}

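	/*
	 * Map at most AMR_NSEG cookies into the command's s/g table per
	 * call.  If cookies remain, current_cookie records how far we got
	 * so a later call continues from there; once the window's cookies
	 * are exhausted, AMR_LAST_COOKIE_TAG makes the next call advance
	 * to the following DMA window above.
	 */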
	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
		ac->current_cookie += AMR_NSEG;
	} else {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
		ac->num_of_cookie - ac->current_cookie);
		ac->current_cookie = AMR_LAST_COOKIE_TAG;
	}

	return (DDI_SUCCESS);
}

/*
 * unmap the amr command, free the DMA resource
 */
static void
amr_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
			(void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
		ac->ac_buf && ac->buffer_dma_handle)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

static int
amr_setup_tran(dev_info_t  *dip, struct amr_softs *softp)
{
	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/*
	 * hba_private always points to the amr_softs struct
	 */
	softp->hba_tran->tran_hba_private	= softp;
	softp->hba_tran->tran_tgt_init		= amr_tran_tgt_init;
	softp->hba_tran->tran_tgt_probe		= scsi_hba_probe;
	softp->hba_tran->tran_start		= amr_tran_start;
	softp->hba_tran->tran_reset		= amr_tran_reset;
	softp->hba_tran->tran_getcap		= amr_tran_getcap;
	softp->hba_tran->tran_setcap		= amr_tran_setcap;
	softp->hba_tran->tran_init_pkt		= amr_tran_init_pkt;
	softp->hba_tran->tran_destroy_pkt	= amr_tran_destroy_pkt;
	softp->hba_tran->tran_dmafree		= amr_tran_dmafree;
	softp->hba_tran->tran_sync_pkt		= amr_tran_sync_pkt;
	softp->hba_tran->tran_abort		= NULL;
	softp->hba_tran->tran_tgt_free		= NULL;
	softp->hba_tran->tran_quiesce		= NULL;
	softp->hba_tran->tran_unquiesce		= NULL;
	softp->hba_tran->tran_sd		= NULL;

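	/*
	 * SCSI_HBA_TRAN_CLONE gives each target its own copy of the
	 * scsi_hba_tran_t, so sd_address.a_hba_tran in the entry points
	 * below refers to a per-target clone; buffer_dma_attr advertises
	 * the data-buffer DMA constraints declared at the top of the file.
	 */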
	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
		SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		scsi_hba_tran_free(softp->hba_tran);
		softp->hba_tran = NULL;
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*ARGSUSED*/
static int
amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct amr_softs	*softs;
	ushort_t		target = sd->sd_address.a_target;
	uchar_t			lun = sd->sd_address.a_lun;

	softs = (struct amr_softs *)
		(sd->sd_address.a_hba_tran->tran_hba_private);

	if ((lun == 0) && (target < AMR_MAXLD))
		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
			return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_softs	*softs;
	struct buf		*bp = NULL;
	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	int			ret;
	uint32_t		capacity;
	struct amr_command	*ac;

	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
		cdbp->scc_cmd, ap->a_target, ap->a_lun));

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
		(softs->logic_drive[ap->a_target].al_state ==
			AMR_LDRV_OFFLINE)) {
1504 		cmn_err(CE_WARN, "target or lun is not correct!");
		ret = TRAN_BADPKT;
		return (ret);
	}

	ac = (struct amr_command *)pkt->pkt_ha_private;
	bp = ac->ac_buf;

	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));

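	/*
	 * Only reads and writes are forwarded to the controller.  The
	 * logical drives are not real SCSI devices, so INQUIRY, READ
	 * CAPACITY, MODE SENSE and the other commands below are emulated
	 * here in the driver and completed immediately.
	 */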
	switch (cdbp->scc_cmd) {
	case SCMD_READ:		/* read		*/
	case SCMD_READ_G1:	/* read	g1	*/
	case SCMD_READ_BUFFER:	/* read buffer	*/
	case SCMD_WRITE:	/* write	*/
	case SCMD_WRITE_G1:	/* write g1	*/
	case SCMD_WRITE_BUFFER:	/* write buffer	*/
		amr_rw_command(softs, pkt, ap->a_target);

		if (pkt->pkt_flags & FLAG_NOINTR) {
			(void) amr_poll_command(ac);
			pkt->pkt_state |= (STATE_GOT_BUS
					| STATE_GOT_TARGET
					| STATE_SENT_CMD
					| STATE_XFERRED_DATA);
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_SYNC;
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			mutex_enter(&softs->queue_mutex);
			if (softs->waiting_q_head == NULL) {
				ac->ac_prev = NULL;
				ac->ac_next = NULL;
				softs->waiting_q_head = ac;
				softs->waiting_q_tail = ac;
			} else {
				ac->ac_next = NULL;
				ac->ac_prev = softs->waiting_q_tail;
				softs->waiting_q_tail->ac_next = ac;
				softs->waiting_q_tail = ac;
			}
			mutex_exit(&softs->queue_mutex);
			amr_start_waiting_queue((void *)softs);
		}
		ret = TRAN_ACCEPT;
		break;

	case SCMD_INQUIRY: /* inquiry */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_inquiry inqp;
			uint8_t *sinq_p = (uint8_t *)&inqp;

			bzero(&inqp, sizeof (struct scsi_inquiry));

			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
				/*
				 * EVPD and page code are not supported
				 */
				sinq_p[1] = 0xFF;
				sinq_p[2] = 0x0;
			} else {
				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
				inqp.inq_ansi = AMR_INQ_ANSI_VER;
				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
				bcopy("MegaRaid", inqp.inq_vid,
					sizeof (inqp.inq_vid));
				bcopy(softs->amr_product_info.pi_product_name,
					inqp.inq_pid,
					AMR_PRODUCT_INFO_SIZE);
				bcopy(softs->amr_product_info.pi_firmware_ver,
					inqp.inq_revision,
					AMR_FIRMWARE_VER_SIZE);
			}

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&inqp, bp->b_un.b_addr,
				sizeof (struct scsi_inquiry));

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
				| STATE_GOT_TARGET
				| STATE_SENT_CMD);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_READ_CAPACITY: /* read capacity */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_capacity cp;

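			/*
			 * Per SCSI-2, READ CAPACITY returns the address of
			 * the last logical block (hence al_size - 1 below)
			 * and the block length, both big-endian; the block
			 * size is hard-wired to 512 bytes here.
			 */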
1603 			cp.capacity = BE_32(capacity);
1604 			cp.lbasize = BE_32(512);
1605 
1606 			amr_unmapcmd(ac);
1607 
1608 			if (bp->b_flags & (B_PHYS | B_PAGEIO))
1609 				bp_mapin(bp);
1610 			bcopy(&cp, bp->b_un.b_addr, 8);
1611 		}
1612 		pkt->pkt_reason = CMD_CMPLT;
1613 		pkt->pkt_state |= (STATE_GOT_BUS
1614 				| STATE_GOT_TARGET
1615 				| STATE_SENT_CMD
1616 				| STATE_XFERRED_DATA);
1617 		*pkt->pkt_scbp = 0;
1618 		ret = TRAN_ACCEPT;
1619 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1620 			(*pkt->pkt_comp)(pkt);
1621 		break;
1622 
1623 	case SCMD_MODE_SENSE:		/* mode sense */
1624 	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
1625 		amr_unmapcmd(ac);
1626 
1627 		capacity = softs->logic_drive[ap->a_target].al_size - 1;
1628 		amr_mode_sense(cdbp, bp, capacity);
1629 
1630 		pkt->pkt_reason = CMD_CMPLT;
1631 		pkt->pkt_state |= (STATE_GOT_BUS
1632 				| STATE_GOT_TARGET
1633 				| STATE_SENT_CMD
1634 				| STATE_XFERRED_DATA);
1635 		*pkt->pkt_scbp = 0;
1636 		ret = TRAN_ACCEPT;
1637 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1638 			(*pkt->pkt_comp)(pkt);
1639 		break;
1640 
1641 	case SCMD_TEST_UNIT_READY:	/* test unit ready */
1642 	case SCMD_REQUEST_SENSE:	/* request sense */
1643 	case SCMD_FORMAT:		/* format */
1644 	case SCMD_START_STOP:		/* start stop */
1645 	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
1646 		if (bp && bp->b_un.b_addr && bp->b_bcount) {
1647 			amr_unmapcmd(ac);
1648 
1649 			if (bp->b_flags & (B_PHYS | B_PAGEIO))
1650 				bp_mapin(bp);
1651 			bzero(bp->b_un.b_addr, bp->b_bcount);
1652 
1653 			pkt->pkt_state |= STATE_XFERRED_DATA;
1654 		}
1655 		pkt->pkt_reason = CMD_CMPLT;
1656 		pkt->pkt_state |= (STATE_GOT_BUS
1657 				| STATE_GOT_TARGET
1658 				| STATE_SENT_CMD);
1659 		ret = TRAN_ACCEPT;
1660 		*pkt->pkt_scbp = 0;
1661 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1662 			(*pkt->pkt_comp)(pkt);
1663 		break;
1664 
1665 	default: /* any other commands */
1666 		amr_unmapcmd(ac);
1667 		pkt->pkt_reason = CMD_INCOMPLETE;
1668 		pkt->pkt_state = (STATE_GOT_BUS
1669 				| STATE_GOT_TARGET
1670 				| STATE_SENT_CMD
1671 				| STATE_GOT_STATUS
1672 				| STATE_ARQ_DONE);
1673 		ret = TRAN_ACCEPT;
1674 		*pkt->pkt_scbp = 0;
1675 		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
1676 		if (!(pkt->pkt_flags & FLAG_NOINTR))
1677 			(*pkt->pkt_comp)(pkt);
1678 		break;
1679 	}
1680 
1681 	return (ret);
1682 }
1683 
/*
 * tran_reset() is supposed to reset the bus/target/adapter for fault
 * recovery, according to the "level" argument. However, LSI has confirmed
 * that these HBA cards do not support any commands to reset the
 * bus/target/adapter/channel.
 *
 * If tran_reset() returns FAILURE to sd, the system will not continue to
 * dump core. But a core dump is a crucial means of analyzing a panic, so
 * we adopt a workaround: return a fake SUCCESS to sd during panic, which
 * forces the system to continue dumping core, even though the dump may be
 * flawed in some situations because in-flight commands may keep DMAing
 * data into memory. In addition, this workaround may not succeed if the
 * panic was caused by the HBA itself. So this is not a good model for
 * implementing tran_reset(); the proper approach would be to send a reset
 * command to the adapter.
 */
/*ARGSUSED*/
static int
amr_tran_reset(struct scsi_address *ap, int level)
{
	struct amr_softs	*softs;
	volatile uint32_t	done_flag;

	if (ddi_in_panic()) {
		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

		/* acknowledge the card while any commands remain outstanding */
		while (softs->amr_busyslots > 0) {
			AMR_DELAY((softs->mailbox->mb_busy == 0),
					AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * command not completed; report the
				 * problem and give up
				 */
				cmn_err(CE_WARN,
					"AMR command is not completed");
				return (0);
			}

			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

			/* wait for the acknowledge from hardware */
			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
					AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * no acknowledgement from the hardware;
				 * report the problem and give up
				 */
				cmn_err(CE_WARN, "No answer from the hardware");

				return (0);
			}

			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
		}

		/* flush the controller */
		(void) amr_flush(softs);

		/*
		 * If the system is in panic, tran_reset() returns a fake
		 * SUCCESS to sd, and the system then continues dumping
		 * core via polled commands. This is a workaround for
		 * dumping core in panic.
		 *
		 * Note: in-flight commands may keep DMAing data into
		 *	 memory while the core is being dumped, which may
		 *	 introduce some flaws into the dumped core file, so
		 *	 the cmn_err() below warns users. In most cases,
		 *	 however, the core file will be fine.
		 */
1759 		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
1760 				"that doesn't support software reset. This "
1761 				"means that memory being used by the HBA for "
1762 				"DMA based reads could have been updated after "
1763 				"we panic'd.");
1764 		return (1);
1765 	} else {
1766 		/* return failure to sd */
1767 		return (0);
1768 	}
1769 }
1770 
1771 /*ARGSUSED*/
1772 static int
1773 amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1774 {
1775 	struct amr_softs	*softs;
1776 
1777 	/*
1778 	 * We don't allow inquiring about capabilities for other targets
1779 	 */
1780 	if (cap == NULL || whom == 0)
1781 		return (-1);
1782 
1783 	softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);
1784 
1785 	switch (scsi_hba_lookup_capstr(cap)) {
1786 	case SCSI_CAP_ARQ:
1787 		return (1);
1788 	case SCSI_CAP_GEOMETRY:
1789 		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
1790 	case SCSI_CAP_SECTOR_SIZE:
1791 		return (AMR_DEFAULT_SECTORS);
1792 	case SCSI_CAP_TOTAL_SECTORS:
1793 		/* number of sectors */
1794 		return (softs->logic_drive[ap->a_target].al_size);
1795 	default:
1796 		return (-1);
1797 	}
1798 }
1799 
1800 /*ARGSUSED*/
1801 static int
1802 amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
1803 		int whom)
1804 {
1805 	/*
1806 	 * We don't allow setting capabilities for other targets
1807 	 */
1808 	if (cap == NULL || whom == 0) {
1809 		AMRDB_PRINT((CE_NOTE,
1810 			"Set Cap not supported, string = %s, whom=%d",
1811 			cap, whom));
1812 		return (-1);
1813 	}
1814 
1815 	switch (scsi_hba_lookup_capstr(cap)) {
1816 	case SCSI_CAP_ARQ:
1817 		return (1);
1818 	case SCSI_CAP_TOTAL_SECTORS:
1819 		return (1);
1820 	case SCSI_CAP_SECTOR_SIZE:
1821 		return (1);
1822 	default:
1823 		return (0);
1824 	}
1825 }
1826 
1827 static struct scsi_pkt *
1828 amr_tran_init_pkt(struct scsi_address *ap,
1829     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
1830     int tgtlen, int flags, int (*callback)(), caddr_t arg)
1831 {
1832 	struct amr_softs	*softs;
1833 	struct amr_command	*ac;
1834 	uint32_t		slen;
1835 
1836 	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1837 
1838 	if ((ap->a_lun != 0)||(ap->a_target >= AMR_MAXLD)||
1839 		(softs->logic_drive[ap->a_target].al_state ==
1840 			AMR_LDRV_OFFLINE)) {
1841 		return (NULL);
1842 	}
1843 
1844 	if (pkt == NULL) {
1845 		/* force auto request sense */
1846 		slen = MAX(statuslen, sizeof (struct scsi_arq_status));
1847 
1848 		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
1849 			slen, tgtlen, sizeof (struct amr_command),
1850 			callback, arg);
1851 		if (pkt == NULL) {
1852 			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
1853 			return (NULL);
1854 		}
1855 		pkt->pkt_address	= *ap;
1856 		pkt->pkt_comp		= (void (*)())NULL;
1857 		pkt->pkt_time		= 0;
1858 		pkt->pkt_resid		= 0;
1859 		pkt->pkt_statistics	= 0;
1860 		pkt->pkt_reason		= 0;
1861 
1862 		ac = (struct amr_command *)pkt->pkt_ha_private;
1863 		ac->ac_buf = bp;
1864 		ac->cmdlen = cmdlen;
1865 		ac->ac_softs = softs;
1866 		ac->pkt = pkt;
1867 		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
1868 		ac->ac_flags &= ~AMR_CMD_BUSY;
1869 
1870 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1871 			return (pkt);
1872 		}
1873 
1874 		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
1875 			DDI_DMA_SLEEP, NULL,
1876 			&ac->buffer_dma_handle) != DDI_SUCCESS) {
1877 
1878 			AMRDB_PRINT((CE_WARN,
1879 				"Cannot allocate buffer DMA tag"));
1880 			scsi_hba_pkt_free(ap, pkt);
1881 			return (NULL);
1882 
1883 		}
1884 
1885 	} else {
1886 		if ((bp == NULL) || (bp->b_bcount == 0)) {
1887 			return (pkt);
1888 		}
1889 		ac = (struct amr_command *)pkt->pkt_ha_private;
1890 	}
1891 
1892 	ASSERT(ac != NULL);
1893 
1894 	if (bp->b_flags & B_READ) {
1895 		ac->ac_flags |= AMR_CMD_DATAOUT; /* controller -> host */
1896 	} else {
1897 		ac->ac_flags |= AMR_CMD_DATAIN; /* host -> controller */
1898 	}
1899 
1900 	if (flags & PKT_CONSISTENT) {
1901 		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
1902 	}
1903 
1904 	if (flags & PKT_DMA_PARTIAL) {
1905 		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
1906 	}
1907 
1908 	if (amr_mapcmd(ac) != DDI_SUCCESS) {
1909 		scsi_hba_pkt_free(ap, pkt);
1910 		return (NULL);
1911 	}
1912 
1913 	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;
1914 
1915 	AMRDB_PRINT((CE_NOTE,
1916 		"init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
1917 		(uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
1918 		ac->data_transfered));
1919 
1920 	ASSERT(pkt->pkt_resid >= 0);
1921 
1922 	return (pkt);
1923 }
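/*
 * amr_tran_init_pkt() covers both halves of tran_init_pkt(9E): packet
 * allocation when pkt is NULL, and DMA binding for an existing packet
 * otherwise.  Target drivers reach it through scsi_init_pkt(9F); a
 * minimal sketch of such a call (the argument values are illustrative
 * only):
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
 *	    SLEEP_FUNC, NULL);
 *
 * On the allocation pass the amr_command is carved out of the packet's
 * HBA-private area, and the buffer DMA handle is created only when a
 * data transfer is actually requested (bp != NULL and b_bcount != 0).
 */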
1924 
1925 static void
1926 amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1927 {
1928 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1929 
1930 	amr_unmapcmd(ac);
1931 
1932 	if (ac->buffer_dma_handle) {
1933 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1934 		ac->buffer_dma_handle = NULL;
1935 	}
1936 
1937 	scsi_hba_pkt_free(ap, pkt);
1938 	AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1939 }
1940 
1941 /*ARGSUSED*/
1942 static void
1943 amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1944 {
1945 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1946 
1947 	if (ac->buffer_dma_handle) {
1948 		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
1949 			(ac->ac_flags & AMR_CMD_DATAIN) ?
1950 			DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1951 	}
1952 }
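/*
 * The AMR_CMD_DATAIN/AMR_CMD_DATAOUT flags are named from the
 * controller's point of view: DATAOUT marks a host read (data flows
 * out of the controller) and DATAIN marks a host write.  A DATAIN
 * command therefore syncs DDI_DMA_SYNC_FORDEV, so that CPU stores
 * reach memory before the controller fetches them, and everything
 * else syncs DDI_DMA_SYNC_FORCPU.
 */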
1953 
1954 /*ARGSUSED*/
1955 static void
1956 amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1957 {
1958 	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1959 
1960 	if (ac->ac_flags & AMR_CMD_MAPPED) {
1961 		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1962 		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1963 		ac->buffer_dma_handle = NULL;
1964 		ac->ac_flags &= ~AMR_CMD_MAPPED;
1965 	}
1966 
1967 }
1968 
1969 /*ARGSUSED*/
1970 static void
1971 amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
1972 {
1973 	struct amr_command	*ac = (struct amr_command *)pkt->pkt_ha_private;
1974 	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1975 	uint8_t			cmd;
1976 
1977 	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1978 		cmd = AMR_CMD_LREAD;	/* controller -> host */
1979 	} else {
1980 		cmd = AMR_CMD_LWRITE;	/* host -> controller */
1981 	}
1982 
1983 	ac->mailbox.mb_command = cmd;
1984 	ac->mailbox.mb_blkcount =
1985 		(ac->transfer_size + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1986 	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
1987 				GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
1988 	ac->mailbox.mb_drive = (uint8_t)target;
1989 }
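/*
 * GETG0ADDR() extracts the 21-bit LBA from a 6-byte (Group 0) CDB and
 * GETG1ADDR() the 32-bit LBA from a 10-byte (Group 1) CDB, so the
 * cmdlen test above selects the right decoder.  A worked example for
 * READ(10) with LBA 0x12345 and a transfer_size of 8 * AMR_BLKSIZE
 * bytes (the numbers are illustrative):
 *
 *	mb_lba      = 0x00012345
 *	mb_blkcount = (8 * AMR_BLKSIZE + AMR_BLKSIZE - 1) / AMR_BLKSIZE
 *	            = 8
 */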
1990 
1991 static void
1992 amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
1993 {
1994 	uchar_t			pagecode;
1995 	struct mode_format	*page3p;
1996 	struct mode_geometry	*page4p;
1997 	struct mode_header	*headerp;
1998 	uint32_t		ncyl;
1999 
2000 	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
2001 		return;
2002 
2003 	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2004 		bp_mapin(bp);
2005 
2006 	pagecode = cdbp->cdb_un.sg.scsi[0];
2007 	switch (pagecode) {
2008 	case SD_MODE_SENSE_PAGE3_CODE:
2009 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2010 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2011 
2012 		page3p = (struct mode_format *)((caddr_t)headerp +
2013 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2014 		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
2015 		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
2016 		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
2017 		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);
2018 
2019 		return;
2020 
2021 	case SD_MODE_SENSE_PAGE4_CODE:
2022 		headerp = (struct mode_header *)(bp->b_un.b_addr);
2023 		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2024 
2025 		page4p = (struct mode_geometry *)((caddr_t)headerp +
2026 			MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2027 		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
2028 		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
2029 		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
2030 		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);
2031 
2032 		ncyl = capacity / (AMR_DEFAULT_HEADS * AMR_DEFAULT_CYLINDERS);
2033 		page4p->cyl_lb = BE_8(ncyl & 0xff);
2034 		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
2035 		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);
2036 
2037 		return;
2038 	default:
2039 		bzero(bp->b_un.b_addr, bp->b_bcount);
2040 		return;
2041 	}
2042 }
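/*
 * The cylinder count in mode page 4 is a 24-bit quantity delivered in
 * three one-byte fields.  For example, ncyl == 0x012345 would be
 * stored as:
 *
 *	page4p->cyl_ub = 0x01;	(bits 23..16)
 *	page4p->cyl_mb = 0x23;	(bits 15..8)
 *	page4p->cyl_lb = 0x45;	(bits 7..0)
 *
 * Page codes other than 3 and 4 fall through to the default case,
 * which returns zeroed data rather than an error.
 */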
2043 
2044 static void
2045 amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
2046 {
2047 	struct scsi_arq_status *arqstat;
2048 
2049 	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
2050 	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
2051 	arqstat->sts_rqpkt_reason = CMD_CMPLT;
2052 	arqstat->sts_rqpkt_resid = 0;
2053 	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2054 				STATE_SENT_CMD | STATE_XFERRED_DATA;
2055 	arqstat->sts_rqpkt_statistics = 0;
2056 	arqstat->sts_sensedata.es_valid = 1;
2057 	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2058 	arqstat->sts_sensedata.es_key = key;
2059 }
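/*
 * amr_set_arq_data() fabricates a complete auto-request-sense response
 * in the packet's status area, so a failed command looks to the target
 * driver as if the HBA had run REQUEST SENSE itself.  Roughly what the
 * consumer sees (an illustrative sketch, not code from sd):
 *
 *	struct scsi_arq_status *arq =
 *	    (struct scsi_arq_status *)pkt->pkt_scbp;
 *	if (arq->sts_status.sts_chk &&
 *	    arq->sts_sensedata.es_key == KEY_HARDWARE_ERROR) {
 *		(retry or fail the command)
 *	}
 */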
2060 
2061 static void
2062 amr_start_waiting_queue(void *softp)
2063 {
2064 	uint32_t		slot;
2065 	struct amr_command	*ac;
2066 	volatile uint32_t	done_flag;
2067 	struct amr_softs	*softs = (struct amr_softs *)softp;
2068 
2069 	/* only one thread may submit commands at a time */
2070 	mutex_enter(&softs->queue_mutex);
2071 	mutex_enter(&softs->cmd_mutex);
2072 
2073 	while ((ac = softs->waiting_q_head) != NULL) {
2074 		/*
2075 		 * Find an available slot; the last slot is
2076 		 * reserved for polled I/O commands.
2077 		 */
2078 		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
2079 			if (softs->busycmd[slot] == NULL) {
2080 				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
2081 					/*
2082 					 * a submission is still pending;
2083 					 * only one is allowed at a time
2084 					 */
2085 					mutex_exit(&softs->cmd_mutex);
2086 					mutex_exit(&softs->queue_mutex);
2087 					return;
2088 				}
2089 
2090 				ac->ac_timestamp = ddi_get_time();
2091 
2092 				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {
2093 
2094 					softs->busycmd[slot] = ac;
2095 					ac->ac_slot = slot;
2096 					softs->amr_busyslots++;
2097 
2098 					bcopy(ac->sgtable,
2099 					softs->sg_items[slot].sg_table,
2100 					sizeof (struct amr_sgentry) * AMR_NSEG);
2101 
2102 					(void) ddi_dma_sync(
2103 					softs->sg_items[slot].sg_handle,
2104 					0, 0, DDI_DMA_SYNC_FORDEV);
2105 
2106 					ac->mailbox.mb_physaddr =
2107 					softs->sg_items[slot].sg_phyaddr;
2108 				}
2109 
2110 				/* take the cmd from the queue */
2111 				softs->waiting_q_head = ac->ac_next;
2112 
2113 				ac->mailbox.mb_ident = ac->ac_slot + 1;
2114 				ac->mailbox.mb_busy = 1;
2115 				ac->ac_next = NULL;
2116 				ac->ac_prev = NULL;
2117 				ac->ac_flags |= AMR_CMD_GOT_SLOT;
2118 
2119 				/* clear the poll/ack fields in the mailbox */
2120 				softs->mailbox->mb_poll = 0;
2121 				softs->mailbox->mb_ack = 0;
2122 
2123 				AMR_DELAY((softs->mailbox->mb_busy == 0),
2124 					AMR_RETRYCOUNT, done_flag);
2125 				if (!done_flag) {
2126 					/*
2127 					 * the mailbox never went idle; report
2128 					 * it and move on to the next command
2129 					 */
2130 					cmn_err(CE_WARN,
2131 						"AMR command did not complete");
2132 					break;
2133 				}
2134 
2135 				bcopy(&ac->mailbox, (void *)softs->mailbox,
2136 					AMR_MBOX_CMDSIZE);
2137 				ac->ac_flags |= AMR_CMD_BUSY;
2138 
2139 				(void) ddi_dma_sync(softs->mbox_dma_handle,
2140 					0, 0, DDI_DMA_SYNC_FORDEV);
2141 
2142 				AMR_QPUT_IDB(softs,
2143 					softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
2144 
2145 				/*
2146 				 * the current ac has been submitted;
2147 				 * quit the for loop and get the next ac
2148 				 */
2149 				break;
2150 			}
2151 		}
2152 
2153 		/* no free slot was found; we are done */
2154 		if (slot == (softs->sg_max_count - 1))
2155 			break;
2156 	}
2157 
2158 	/* done submitting; release the locks */
2159 	mutex_exit(&softs->cmd_mutex);
2160 	mutex_exit(&softs->queue_mutex);
2161 }
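/*
 * Submission protocol used above, in outline: find a free slot, copy
 * the scatter/gather table and the mailbox into DMA-visible memory,
 * wait for mb_busy to clear, then ring the doorbell:
 *
 *	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
 *
 * There is only one hardware mailbox, so queue_mutex and cmd_mutex
 * serialize all submitters; mb_ident (slot + 1) is what allows
 * amr_done() to match each completion back to its amr_command.
 */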
2162 
2163 static void
2164 amr_done(struct amr_softs *softs)
2165 {
2166 
2167 	uint32_t		i, idx;
2168 	volatile uint32_t	done_flag;
2169 	struct amr_mailbox	*mbox, mbsave;
2170 	struct amr_command	*ac, *head, *tail;
2171 
2172 	head = tail = NULL;
2173 
2174 	AMR_QPUT_ODB(softs, AMR_QODB_READY);
2175 
2176 	/* acknowledge interrupt */
2177 	(void) AMR_QGET_ODB(softs);
2178 
2179 	mutex_enter(&softs->cmd_mutex);
2180 
2181 	if (softs->mailbox->mb_nstatus != 0) {
2182 		(void) ddi_dma_sync(softs->mbox_dma_handle,
2183 			0, 0, DDI_DMA_SYNC_FORCPU);
2184 
2185 		/* save mailbox, which contains a list of completed commands */
2186 		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
2187 				&mbsave, sizeof (mbsave));
2188 
2189 		mbox = &mbsave;
2190 
2191 		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
2192 
2193 		/* wait for the acknowledge from hardware */
2194 		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
2195 				AMR_RETRYCOUNT, done_flag);
2196 		if (!done_flag) {
2197 			/*
2198 			 * command is not completed, return from the current
2199 			 * interrupt and wait for the next one
2200 			 */
2201 			cmn_err(CE_WARN, "No answer from the hardware");
2202 
2203 			mutex_exit(&softs->cmd_mutex);
2204 			return;
2205 		}
2206 
2207 		for (i = 0; i < mbox->mb_nstatus; i++) {
2208 			idx = mbox->mb_completed[i] - 1;
2209 			ac = softs->busycmd[idx];
2210 
2211 			if (ac != NULL) {
2212 				/* pull the command from the busy index */
2213 				softs->busycmd[idx] = NULL;
2214 				if (softs->amr_busyslots > 0)
2215 					softs->amr_busyslots--;
2216 				if (softs->amr_busyslots == 0)
2217 					cv_broadcast(&softs->cmd_cv);
2218 
2219 				ac->ac_flags &= ~AMR_CMD_BUSY;
2220 				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
2221 				ac->ac_status = mbox->mb_status;
2222 
2223 				/* enqueue onto the local completion list */
2224 				if (head) {
2225 					tail->ac_next = ac;
2226 					tail = ac;
2227 					tail->ac_next = NULL;
2228 				} else {
2229 					tail = head = ac;
2230 					ac->ac_next = NULL;
2231 				}
2232 			} else {
2233 				AMRDB_PRINT((CE_WARN,
2234 					"ac in mailbox is NULL!"));
2235 			}
2236 		}
2237 	} else {
2238 		AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
2239 	}
2240 
2241 	mutex_exit(&softs->cmd_mutex);
2242 
2243 	if (head != NULL) {
2244 		amr_call_pkt_comp(head);
2245 	}
2246 
2247 	/* dispatch a thread to process the pending I/O if there is any */
2248 	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
2249 		(void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
2250 		cmn_err(CE_WARN, "No memory available to dispatch taskq");
2251 	}
2252 }
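/*
 * Completion bookkeeping: amr_start_waiting_queue() stored
 * mb_ident = slot + 1, so each entry of mb_completed[] is decoded as
 * idx = mb_completed[i] - 1 to recover the busycmd[] slot (a
 * completion value of 1 refers to slot 0).  Completed commands are
 * chained onto a local list, and their pkt_comp callbacks run in
 * amr_call_pkt_comp() only after cmd_mutex has been dropped, which
 * avoids calling back into the target driver with HBA locks held.
 */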
2253 
2254 static void
2255 amr_call_pkt_comp(register struct amr_command *head)
2256 {
2257 	register struct scsi_pkt	*pkt;
2258 	register struct amr_command	*ac, *localhead;
2259 
2260 	localhead = head;
2261 
2262 	while (localhead) {
2263 		ac = localhead;
2264 		localhead = ac->ac_next;
2265 		ac->ac_next = NULL;
2266 
2267 		pkt = ac->pkt;
2268 		*pkt->pkt_scbp = 0;
2269 
2270 		if (ac->ac_status == AMR_STATUS_SUCCESS) {
2271 			pkt->pkt_state |= (STATE_GOT_BUS
2272 					| STATE_GOT_TARGET
2273 					| STATE_SENT_CMD
2274 					| STATE_XFERRED_DATA);
2275 			pkt->pkt_reason = CMD_CMPLT;
2276 		} else {
2277 			pkt->pkt_state |= STATE_GOT_BUS
2278 					| STATE_ARQ_DONE;
2279 			pkt->pkt_reason = CMD_INCOMPLETE;
2280 			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
2281 		}
2282 
2283 		if (!(pkt->pkt_flags & FLAG_NOINTR) &&
2284 			pkt->pkt_comp) {
2285 			(*pkt->pkt_comp)(pkt);
2286 		}
2287 	}
2288 }
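/*
 * pkt_comp is deliberately skipped for FLAG_NOINTR packets: such
 * packets are completed synchronously by their submitter (for
 * instance, the polled I/O path used while dumping core), so invoking
 * the callback here would be redundant.
 */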
2289