xref: /illumos-gate/usr/src/uts/intel/io/dktp/controller/ata/atapi.c (revision 6e6545bfaed3bab9ce836ee82d1abd8f2edba89a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
26  */
27 
28 
29 #include <sys/types.h>
30 
31 #include "ata_common.h"
32 #include "atapi.h"
33 
34 /* SCSA entry points */
35 
36 static int atapi_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
37     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
38 static int atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void));
39 static void atapi_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
40     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
41 static int atapi_tran_abort(struct scsi_address *ap, struct scsi_pkt *spktp);
42 static int atapi_tran_reset(struct scsi_address *ap, int level);
43 static int atapi_tran_getcap(struct scsi_address *ap, char *capstr, int whom);
44 static int atapi_tran_setcap(struct scsi_address *ap, char *capstr,
45     int value, int whom);
46 static struct scsi_pkt	*atapi_tran_init_pkt(struct scsi_address *ap,
47     struct scsi_pkt *spktp, struct buf *bp, int cmdlen, int statuslen,
48     int tgtlen, int flags, int (*callback)(caddr_t), caddr_t arg);
49 static void atapi_tran_destroy_pkt(struct scsi_address *ap,
50     struct scsi_pkt *spktp);
51 static void atapi_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *spktp);
52 static void atapi_tran_sync_pkt(struct scsi_address *ap,
53     struct scsi_pkt *spktp);
54 static int atapi_tran_start(struct scsi_address *ap, struct scsi_pkt *spktp);
55 
56 /*
57  * packet callbacks
58  */
59 static void atapi_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
60     int do_callback);
61 static int atapi_id_update(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
62     ata_pkt_t *ata_pktp);
63 
64 /*
65  * Local static data
66  */
67 
68 #if 0
69 static ddi_dma_lim_t atapi_dma_limits = {
70 	0,		/* address low				*/
71 	0xffffffffU,	/* address high				*/
72 	0,		/* counter max				*/
73 	1,		/* burstsize				*/
74 	DMA_UNIT_8,	/* minimum xfer				*/
75 	0,		/* dma speed				*/
76 	(uint_t)DMALIM_VER0,	/* version			*/
77 	0xffffffffU,	/* address register			*/
78 	0xffffffffU,	/* counter register			*/
79 	1,		/* granular				*/
80 	1,		/* scatter/gather list length		*/
81 	0xffffffffU	/* request size				*/
82 };
83 #endif
84 
85 static	int	atapi_use_static_geometry = TRUE;
86 static	int	atapi_arq_enable = TRUE;
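/*
 * The two variables above are simple boolean tunables (presumably
 * adjustable via /etc/system): the first selects the fixed default
 * geometry reported for SCSI_CAP_GEOMETRY, the second globally
 * enables the ARQ (auto request sense) handling set up in
 * atapi_tran_init_pkt().
 */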
87 
88 
89 /*
90  *
91  * Call SCSA init to initialize the ATAPI half of the driver
92  *
93  */
94 
95 int
96 atapi_attach(ata_ctl_t *ata_ctlp)
97 {
98 	dev_info_t	*dip = ata_ctlp->ac_dip;
99 	scsi_hba_tran_t *tran;
100 
101 	ADBG_TRACE(("atapi_attach entered\n"));
102 
103 	/* allocate transport structure */
104 
105 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
106 
107 	if (tran == NULL) {
108 		ADBG_WARN(("atapi_attach: scsi_hba_tran_alloc failed\n"));
109 		goto errout;
110 	}
111 
112 	ata_ctlp->ac_atapi_tran = tran;
113 	ata_ctlp->ac_flags |= AC_SCSI_HBA_TRAN_ALLOC;
114 
115 	/* initialize transport structure */
116 
117 	tran->tran_hba_private = ata_ctlp;
118 	tran->tran_tgt_private = NULL;
119 
120 	tran->tran_tgt_init = atapi_tran_tgt_init;
121 	tran->tran_tgt_probe = atapi_tran_tgt_probe;
122 	tran->tran_tgt_free = atapi_tran_tgt_free;
123 	tran->tran_start = atapi_tran_start;
124 	tran->tran_reset = atapi_tran_reset;
125 	tran->tran_abort = atapi_tran_abort;
126 	tran->tran_getcap = atapi_tran_getcap;
127 	tran->tran_setcap = atapi_tran_setcap;
128 	tran->tran_init_pkt = atapi_tran_init_pkt;
129 	tran->tran_destroy_pkt = atapi_tran_destroy_pkt;
130 	tran->tran_dmafree = atapi_tran_dmafree;
131 	tran->tran_sync_pkt = atapi_tran_sync_pkt;
132 
133 	if (scsi_hba_attach_setup(ata_ctlp->ac_dip, &ata_pciide_dma_attr, tran,
134 	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
135 		ADBG_WARN(("atapi_attach: scsi_hba_attach_setup failed\n"));
136 		goto errout;
137 	}
138 
139 	ata_ctlp->ac_flags |= AC_SCSI_HBA_ATTACH;
140 
141 	return (TRUE);
142 
143 errout:
144 	atapi_detach(ata_ctlp);
145 	return (FALSE);
146 }
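/*
 * A note on the attach above (my reading, not stated in this file):
 * passing SCSI_HBA_TRAN_CLONE to scsi_hba_attach_setup() makes the
 * framework hand each target a private copy of the scsi_hba_tran_t,
 * which is presumably why atapi_tran_tgt_init() can store a
 * per-target gtgt_t pointer in tran_tgt_private without the two
 * drives on the channel interfering with each other.
 */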
147 
148 
149 /*
150  *
151  * destroy the atapi sub-system
152  *
153  */
154 
155 void
156 atapi_detach(
157 	ata_ctl_t *ata_ctlp)
158 {
159 	ADBG_TRACE(("atapi_detach entered\n"));
160 
161 	if (ata_ctlp->ac_flags & AC_SCSI_HBA_ATTACH)
162 		(void) scsi_hba_detach(ata_ctlp->ac_dip);
163 
164 	if (ata_ctlp->ac_flags & AC_SCSI_HBA_TRAN_ALLOC)
165 		scsi_hba_tran_free(ata_ctlp->ac_atapi_tran);
166 }
167 
168 
169 
170 /*
171  *
172  * initialize the ATAPI drive's soft-state based on the
173  * response to IDENTIFY PACKET DEVICE command
174  *
175  */
176 
177 int
178 atapi_init_drive(
179 	ata_drv_t *ata_drvp)
180 {
181 	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
182 
183 	ADBG_TRACE(("atapi_init_drive entered\n"));
184 
185 	/* Determine ATAPI CDB size */
186 	(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);
187 
188 	switch (ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_PKT_SZ) {
189 
190 	case ATAPI_ID_CFG_PKT_12B:
191 		ata_drvp->ad_cdb_len = 12;
192 		break;
193 	case ATAPI_ID_CFG_PKT_16B:
194 		ata_drvp->ad_cdb_len = 16;
195 		break;
196 	default:
197 		ADBG_WARN(("atapi_init_drive: bad pkt size support\n"));
198 		return (FALSE);
199 	}
200 
201 	/* determine if drive gives an intr when it wants the CDB */
202 
203 	if ((ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_DRQ_TYPE) !=
204 	    ATAPI_ID_CFG_DRQ_INTR)
205 		ata_drvp->ad_flags |= AD_NO_CDB_INTR;
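	/*
	 * The two checks above decode word 0 (ai_config) of the IDENTIFY
	 * PACKET DEVICE data. Per the ATA/ATAPI spec (my summary, not
	 * re-verified against every revision): bits 1:0 encode the packet
	 * size (00b = 12 bytes, 01b = 16 bytes), bits 6:5 encode the DRQ
	 * type (01b = interrupt DRQ), and bits 15:14 = 10b mark the device
	 * as ATAPI, which is presumably what ATAC_ATAPI_TYPE_MASK tests
	 * in atapi_id().
	 */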
206 
207 	/*
208 	 * Some devices may have no DMA mode (UDMA or MWDMA) enabled
209 	 * by default, so check here and enable a DMA mode if none
210 	 * is currently selected.
211 	 */
212 	if (ata_set_dma_mode(ata_ctlp, ata_drvp) == TRUE) {
213 		/* Update the IDENTIFY PACKET DEVICE data */
214 		(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);
215 	}
216 
217 	return (TRUE);
218 }
219 
220 
221 /*
222  *
223  * destroy an atapi drive
224  *
225  */
226 
227 /* ARGSUSED */
228 void
229 atapi_uninit_drive(
230 	ata_drv_t *ata_drvp)
231 {
232 	ADBG_TRACE(("atapi_uninit_drive entered\n"));
233 }
234 
235 /*
236  *
237  * Issue an IDENTIFY PACKET (ATAPI) DEVICE command
238  *
239  */
240 
241 int
242 atapi_id(
243 	ddi_acc_handle_t io_hdl1,
244 	caddr_t		 ioaddr1,
245 	ddi_acc_handle_t io_hdl2,
246 	caddr_t		 ioaddr2,
247 	struct ata_id	*ata_idp)
248 {
249 	int	rc;
250 
251 	ADBG_TRACE(("atapi_id entered\n"));
252 
253 	rc = ata_id_common(ATC_ID_PACKET_DEVICE, FALSE, io_hdl1, ioaddr1,
254 	    io_hdl2, ioaddr2, ata_idp);
255 
256 	if (!rc)
257 		return (FALSE);
258 
259 	if ((ata_idp->ai_config & ATAC_ATAPI_TYPE_MASK) != ATAC_ATAPI_TYPE)
260 		return (FALSE);
261 
262 	return (TRUE);
263 }
264 
265 
266 /*
267  *
268  * Check the device's register block for the ATAPI signature.
269  *
270  * Although the spec says the sector count, sector number and device/head
271  * registers are also part of the signature, for some unknown reason, this
272  * routine only checks the cyl hi and cyl low registers. I'm just
273  * guessing, but it might be because ATA and ATAPI devices return
274  * identical values in those registers and we actually rely on the
275  * IDENTIFY DEVICE and IDENTIFY PACKET DEVICE commands to recognize the
276  * device type.
277  *
278  */
279 
280 int
281 atapi_signature(
282 	ddi_acc_handle_t io_hdl,
283 	caddr_t ioaddr)
284 {
285 	int	rc = FALSE;
286 	ADBG_TRACE(("atapi_signature entered\n"));
287 
288 	if (ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_HCYL) == ATAPI_SIG_HI &&
289 	    ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_LCYL) == ATAPI_SIG_LO)
290 		rc = TRUE;
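	/*
	 * For reference (values assumed from the usual ATAPI signature,
	 * not re-checked against the headers): after reset an ATAPI
	 * device leaves 0x14 in cylinder-low and 0xEB in cylinder-high,
	 * which is what ATAPI_SIG_LO and ATAPI_SIG_HI should correspond
	 * to here.
	 */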
291 
292 	/*
293 	 * The following is a little bit of bullet proofing.
294 	 *
295 	 * When some drives are configured on a master-only bus they
296 	 * "shadow" their registers for the not-present slave drive.
297 	 * This is bogus and if you're not careful it may cause a
298 	 * master-only drive to be mistakenly recognized as both
299 	 * master and slave. By clearing the signature registers here
300 	 * I can make certain that when ata_drive_type() switches from
301 	 * the master to the slave drive, I'll read back non-signature
302 	 * values regardless of whether the master-only drive does
303 	 * the "shadow" register trick. This prevents a bogus
304 	 * IDENTIFY PACKET DEVICE command from being issued which
305 	 * a really bogus master-only drive will return "shadow"
306 	 * data for.
307 	 */
308 	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_HCYL, 0);
309 	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_LCYL, 0);
310 
311 	return (rc);
312 }
313 
314 
315 /*
316  *
317  * SCSA tran_tgt_init entry point
318  *
319  */
320 
321 /* ARGSUSED */
322 static int
323 atapi_tran_tgt_init(
324 	dev_info_t	*hba_dip,
325 	dev_info_t	*tgt_dip,
326 	scsi_hba_tran_t *hba_tran,
327 	struct scsi_device *sd)
328 {
329 	gtgt_t	  *gtgtp;	/* GHD's per-target-instance structure */
330 	ata_ctl_t *ata_ctlp;
331 	ata_tgt_t *ata_tgtp;
332 	ata_drv_t *ata_drvp;
333 	struct scsi_address *ap;
334 	int	rc = DDI_SUCCESS;
335 
336 	ADBG_TRACE(("atapi_tran_tgt_init entered\n"));
337 
338 	/*
339 	 * Qualification of targ, lun, and ATAPI device presence
340 	 * has already been taken care of by ata_bus_ctl
341 	 */
342 
343 	/* store pointer to drive struct in cloned tran struct */
344 
345 	ata_ctlp = TRAN2CTL(hba_tran);
346 	ap = &sd->sd_address;
347 
348 	ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
349 
350 	/*
351 	 * Create the "atapi" property so the target driver knows
352 	 * to use the correct set of SCSI commands
353 	 */
354 	if (!ata_prop_create(tgt_dip, ata_drvp, "atapi")) {
355 		return (DDI_FAILURE);
356 	}
357 
358 	gtgtp = ghd_target_init(hba_dip, tgt_dip, &ata_ctlp->ac_ccc,
359 	    sizeof (ata_tgt_t), ata_ctlp,
360 	    ap->a_target, ap->a_lun);
361 
362 	/* tran_tgt_private points to gtgt_t */
363 	hba_tran->tran_tgt_private = gtgtp;
364 
365 	/* gt_tgt_private points to ata_tgt_t */
366 	ata_tgtp = GTGTP2ATATGTP(gtgtp);
367 
368 	/* initialize the per-target-instance data */
369 	ata_tgtp->at_drvp = ata_drvp;
370 	ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
371 	ata_tgtp->at_dma_attr.dma_attr_maxxfer =
372 	    ata_ctlp->ac_max_transfer << SCTRSHFT;
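	/*
	 * dma_attr_maxxfer is in bytes, so the controller's
	 * ac_max_transfer (in sectors) is scaled by SCTRSHFT, presumably
	 * log2 of a 512-byte sector; e.g. a 0x100-sector limit << 9
	 * would become 128KB. (The 512-byte sector assumption is mine,
	 * not taken from this file.)
	 */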
373 
374 	return (rc);
375 }
376 
377 
378 /*
379  *
380  * SCSA tran_tgt_probe entry point
381  *
382  */
383 
384 static int
385 atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void))
386 {
387 	ADBG_TRACE(("atapi_tran_tgt_probe entered\n"));
388 
389 	return (scsi_hba_probe(sd, callback));
390 }
391 
392 
393 /*
394  *
395  * SCSA tran_tgt_free entry point
396  *
397  */
398 
399 /* ARGSUSED */
400 static void
401 atapi_tran_tgt_free(
402 	dev_info_t	*hba_dip,
403 	dev_info_t	*tgt_dip,
404 	scsi_hba_tran_t	*hba_tran,
405 	struct scsi_device *sd)
406 {
407 	ADBG_TRACE(("atapi_tran_tgt_free entered\n"));
408 
409 	ghd_target_free(hba_dip, tgt_dip, &TRAN2ATAP(hba_tran)->ac_ccc,
410 	    TRAN2GTGTP(hba_tran));
411 	hba_tran->tran_tgt_private = NULL;
412 }
413 
414 
415 
416 /*
417  *
418  * SCSA tran_abort entry point
419  *
420  */
421 
422 /* ARGSUSED */
423 static int
424 atapi_tran_abort(
425 	struct scsi_address *ap,
426 	struct scsi_pkt *spktp)
427 {
428 	ADBG_TRACE(("atapi_tran_abort entered\n"));
429 
430 	if (spktp) {
431 		return (ghd_tran_abort(&ADDR2CTL(ap)->ac_ccc, PKTP2GCMDP(spktp),
432 		    ADDR2GTGTP(ap), NULL));
433 	}
434 
435 	return (ghd_tran_abort_lun(&ADDR2CTL(ap)->ac_ccc, ADDR2GTGTP(ap),
436 	    NULL));
437 }
438 
439 
440 /*
441  *
442  * SCSA tran_reset entry point
443  *
444  */
445 
446 /* ARGSUSED */
447 static int
448 atapi_tran_reset(
449 	struct scsi_address *ap,
450 	int level)
451 {
452 	ADBG_TRACE(("atapi_tran_reset entered\n"));
453 
454 	if (level == RESET_TARGET)
455 		return (ghd_tran_reset_target(&ADDR2CTL(ap)->ac_ccc,
456 		    ADDR2GTGTP(ap), NULL));
457 	if (level == RESET_ALL)
458 		return (ghd_tran_reset_bus(&ADDR2CTL(ap)->ac_ccc,
459 		    ADDR2GTGTP(ap), NULL));
460 	return (FALSE);
461 
462 }
463 
464 
465 /*
466  *
467  * SCSA tran_setcap entry point
468  *
469  */
470 
471 static int
472 atapi_tran_setcap(
473 	struct scsi_address *ap,
474 	char *capstr,
475 	int value,
476 	int whom)
477 {
478 	gtgt_t	  *gtgtp = ADDR2GTGTP(ap);
479 	ata_tgt_t *tgtp = GTGTP2ATATGTP(gtgtp);
480 
481 	ADBG_TRACE(("atapi_tran_setcap entered\n"));
482 
483 	switch (scsi_hba_lookup_capstr(capstr)) {
484 		case SCSI_CAP_SECTOR_SIZE:
485 			tgtp->at_dma_attr.dma_attr_granular = (uint_t)value;
486 			return (TRUE);
487 
488 		case SCSI_CAP_ARQ:
489 			if (whom) {
490 				tgtp->at_arq = value;
491 				return (TRUE);
492 			}
493 			break;
494 
495 		case SCSI_CAP_TOTAL_SECTORS:
496 			tgtp->at_total_sectors = value;
497 			return (TRUE);
498 	}
499 	return (FALSE);
500 }
501 
502 
503 /*
504  *
505  * SCSA tran_getcap entry point
506  *
507  */
508 
509 static int
510 atapi_tran_getcap(
511 	struct scsi_address *ap,
512 	char *capstr,
513 	int whom)
514 {
515 	struct ata_id	 ata_id;
516 	struct ata_id	*ata_idp;
517 	ata_ctl_t	*ata_ctlp;
518 	ata_drv_t	*ata_drvp;
519 	gtgt_t		*gtgtp;
520 	int		 rval = -1;
521 
522 	ADBG_TRACE(("atapi_tran_getcap entered\n"));
523 
524 	if (capstr == NULL || whom == 0)
525 		return (-1);
526 
527 	ata_ctlp = ADDR2CTL(ap);
528 
529 	switch (scsi_hba_lookup_capstr(capstr)) {
530 	case SCSI_CAP_ARQ:
531 		rval = TRUE;
532 		break;
533 
534 	case SCSI_CAP_INITIATOR_ID:
535 		rval = 7;
536 		break;
537 
538 	case SCSI_CAP_DMA_MAX:
539 		/* XXX - what should the real limit be?? */
540 		/* limit to 64K ??? */
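		/*
		 * (Background from the bus-master IDE spec, not from this
		 * file: each PRD entry can actually describe up to 64KB,
		 * so counting only 4KB, one page, per entry below looks
		 * like a deliberately conservative bound.)
		 */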
541 		rval = 4096 * (ATA_DMA_NSEGS - 1);
542 		break;
543 
544 	case SCSI_CAP_GEOMETRY:
545 		/* Default geometry */
546 		if (atapi_use_static_geometry) {
547 			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
548 			break;
549 		}
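		/*
		 * In either branch the geometry capability packs heads into
		 * the upper 16 bits and sectors-per-track into the lower 16;
		 * per the usual SCSA convention (not spelled out in this
		 * file) the target driver derives cylinders from
		 * capacity / (heads * sectors).
		 */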
550 
551 		/* this code is currently not used */
552 
553 		ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
554 		gtgtp = ADDR2GTGTP(ap);
555 
556 		/*
557 		 * retrieve the current IDENTIFY PACKET DEVICE info
558 		 */
559 		if (!ata_queue_cmd(atapi_id_update, &ata_id, ata_ctlp,
560 		    ata_drvp, gtgtp)) {
561 			ADBG_TRACE(("atapi_tran_getcap geometry failed"));
562 			return (0);
563 		}
564 
565 		/*
566 		 * save the new response data
567 		 */
568 		ata_idp = &ata_drvp->ad_id;
569 		*ata_idp = ata_id;
570 
571 		switch ((ata_idp->ai_config >> 8) & 0xf) {
572 		case DTYPE_RODIRECT:
573 			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
574 			break;
575 		case DTYPE_DIRECT:
576 		case DTYPE_OPTICAL:
577 			rval = (ata_idp->ai_curheads << 16) |
578 			    ata_idp->ai_cursectrk;
579 			break;
580 		default:
581 			rval = 0;
582 		}
583 		break;
584 	}
585 
586 	return (rval);
587 }
588 
589 
590 
591 /*
592  *
593  * SCSA tran_init_pkt entry point
594  *
595  */
596 
597 static struct scsi_pkt *
598 atapi_tran_init_pkt(
599 	struct scsi_address *ap,
600 	struct scsi_pkt	*spktp,
601 	struct buf	*bp,
602 	int		 cmdlen,
603 	int		 statuslen,
604 	int		 tgtlen,
605 	int		 flags,
606 	int		(*callback)(caddr_t),
607 	caddr_t		 arg)
608 {
609 	gtgt_t		*gtgtp = ADDR2GTGTP(ap);
610 	ata_tgt_t	*ata_tgtp = GTGTP2ATATGTP(gtgtp);
611 	ata_ctl_t	*ata_ctlp = ADDR2CTL(ap);
612 	ata_pkt_t	*ata_pktp;
613 	struct scsi_pkt	*new_spktp;
614 	ddi_dma_attr_t	*sg_attrp;
615 	int		 bytes;
616 
617 	ADBG_TRACE(("atapi_tran_init_pkt entered\n"));
618 
619 
620 	/*
621 	 * Determine whether to do PCI-IDE DMA setup, start out by
622 	 * assuming we're not.
623 	 */
624 	sg_attrp = NULL;
625 
626 	if (bp == NULL) {
627 		/* no data to transfer */
628 		goto skip_dma_setup;
629 	}
630 
631 	if (bp->b_bcount == 0) {
632 		/* no data to transfer */
633 		goto skip_dma_setup;
634 	}
635 
636 	if ((GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_pciide_dma == ATA_DMA_OFF)) {
637 		goto skip_dma_setup;
638 	}
639 
640 	if (ata_dma_disabled)
641 		goto skip_dma_setup;
642 
643 
644 	/*
645 	 * The PCI-IDE DMA engine is brain-damaged and can't
646 	 * DMA non-aligned buffers.
647 	 */
648 	if (((bp->b_flags & B_PAGEIO) == 0) &&
649 	    ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
650 		/*
651 		 * if the virtual address isn't aligned, then the
652 		 * physical address also isn't aligned.
653 		 */
654 		goto skip_dma_setup;
655 	}
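	/*
	 * (Background, not from this file: a bus-master IDE PRD entry
	 * cannot describe a region starting on an odd byte address, so
	 * PCIIDE_PRDE_ADDR_MASK presumably covers the low-order address
	 * bit(s); page-aligned B_PAGEIO buffers trivially satisfy this.)
	 */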
656 
657 	/*
658 	 * It also insists that the byte count must be even.
659 	 */
660 	if (bp->b_bcount & 1) {
661 		/* something odd here */
662 		goto skip_dma_setup;
663 	}
664 
665 	/*
666 	 * Huzza! We're really going to do it
667 	 */
668 	sg_attrp = &ata_tgtp->at_dma_attr;
669 
670 
671 skip_dma_setup:
672 
673 	/*
674 	 * Call GHD packet init function
675 	 */
676 
677 	new_spktp = ghd_tran_init_pkt_attr(&ata_ctlp->ac_ccc, ap, spktp, bp,
678 	    cmdlen, statuslen, tgtlen, flags,
679 	    callback, arg, sizeof (ata_pkt_t), sg_attrp);
680 
681 	if (new_spktp == NULL)
682 		return (NULL);
683 
684 	ata_pktp = SPKT2APKT(new_spktp);
685 	ata_pktp->ap_cdbp = new_spktp->pkt_cdbp;
686 	if (statuslen > 255) {
687 		statuslen = sizeof (struct scsi_arq_status);
688 	}
689 	ata_pktp->ap_statuslen = (uchar_t)statuslen;
690 
691 	/* reset data direction flags */
692 	if (spktp)
693 		ata_pktp->ap_flags &= ~(AP_READ | AP_WRITE);
694 
695 	/*
696 	 * check for ARQ mode
697 	 */
698 	if (atapi_arq_enable == TRUE &&
699 	    ata_tgtp->at_arq == TRUE &&
700 	    ata_pktp->ap_statuslen >= sizeof (struct scsi_arq_status)) {
701 		ADBG_TRACE(("atapi_tran_init_pkt ARQ\n"));
702 		ata_pktp->ap_scbp =
703 		    (struct scsi_arq_status *)new_spktp->pkt_scbp;
704 		ata_pktp->ap_flags |= AP_ARQ_ON_ERROR;
705 	}
706 
707 	/*
708 	 * fill these with zeros for ATA/ATAPI-4 compatibility
709 	 */
710 	ata_pktp->ap_sec = 0;
711 	ata_pktp->ap_count = 0;
712 
713 	if (ata_pktp->ap_sg_cnt) {
714 		ASSERT(bp != NULL);
715 		/* determine direction to program the DMA engine later */
716 		if (bp->b_flags & B_READ) {
717 			ata_pktp->ap_flags |= AP_READ;
718 		} else {
719 			ata_pktp->ap_flags |= AP_WRITE;
720 		}
721 		ata_pktp->ap_pciide_dma = TRUE;
722 		ata_pktp->ap_hicyl = 0;
723 		ata_pktp->ap_lwcyl = 0;
724 		return (new_spktp);
725 	}
726 
727 	/*
728 	 * Since we're not using DMA, we need to map the buffer into
729 	 * kernel address space
730 	 */
731 
732 	ata_pktp->ap_pciide_dma = FALSE;
733 	if (bp && bp->b_bcount) {
734 		/*
735 		 * If this is a fresh request map the buffer and
736 		 * reset the ap_baddr pointer and the current offset
737 		 * and byte count.
738 		 *
739 		 * The ap_boffset is used to set the ap_v_addr ptr at
740 		 * the start of each I/O request.
741 		 *
742 		 * The ap_bcount is used to update ap_boffset when the
743 		 * target driver requests the next segment.
744 		 *
745 		 */
746 		if (cmdlen) {
747 			bp_mapin(bp);
748 			ata_pktp->ap_baddr = bp->b_un.b_addr;
749 			ata_pktp->ap_bcount = 0;
750 			ata_pktp->ap_boffset = 0;
751 		}
752 		ASSERT(ata_pktp->ap_baddr != NULL);
753 
754 		/* determine direction for the PIO FSM */
755 		if (bp->b_flags & B_READ) {
756 			ata_pktp->ap_flags |= AP_READ;
757 		} else {
758 			ata_pktp->ap_flags |= AP_WRITE;
759 		}
760 
761 		/*
762 		 * If the drive has the Single Sector bug, limit
763 		 * the transfer to a single sector. This assumes
764 		 * ATAPI CD drives always use 2k sectors.
765 		 */
766 		if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_flags & AD_1SECTOR) {
767 			size_t resid;
768 			size_t tmp;
769 
770 			/* adjust offset based on prior request */
771 			ata_pktp->ap_boffset += ata_pktp->ap_bcount;
772 
773 			/* compute number of bytes left to transfer */
774 			resid = bp->b_bcount - ata_pktp->ap_boffset;
775 
776 			/* limit the transfer to 2k */
777 			tmp = MIN(2048, resid);
778 			ata_pktp->ap_bcount = tmp;
779 
780 			/* tell target driver how much is left for next time */
781 			new_spktp->pkt_resid = resid - tmp;
782 		} else {
783 			/* do the whole request in one swell foop */
784 			ata_pktp->ap_bcount = bp->b_bcount;
785 			new_spktp->pkt_resid = 0;
786 		}
787 
788 	} else {
789 		ata_pktp->ap_baddr = NULL;
790 		ata_pktp->ap_bcount = 0;
791 		ata_pktp->ap_boffset = 0;
792 	}
793 
794 	/*
795 	 * determine the size of each partial data transfer
796 	 * to/from the drive
797 	 */
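	/*
	 * For ATAPI PIO the host advertises a per-DRQ "byte count limit"
	 * in the cylinder low/high registers, which is why the limit
	 * computed here is split into ap_lwcyl/ap_hicyl; the drive then
	 * interrupts for another chunk until the request is exhausted.
	 * (Register usage asserted from the ATAPI spec, not from this
	 * file.)
	 */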
798 	bytes = min(ata_pktp->ap_bcount, ATAPI_MAX_BYTES_PER_DRQ);
799 	ata_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
800 	ata_pktp->ap_lwcyl = (uchar_t)bytes;
801 	return (new_spktp);
802 }
803 
804 
805 /*
806  * GHD ccballoc callback
807  *
808  *	Initialize the ata_pkt and return the ptr to the gcmd_t to GHD.
809  *
810  */
811 
812 /* ARGSUSED */
813 int
814 atapi_ccballoc(
815 	gtgt_t	*gtgtp,
816 	gcmd_t	*gcmdp,
817 	int	 cmdlen,
818 	int	 statuslen,
819 	int	 tgtlen,
820 	int	 ccblen)
821 
822 {
823 	ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
824 	ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);
825 
826 	ADBG_TRACE(("atapi_ccballoc entered\n"));
827 
828 	/* set the back ptr from the ata_pkt to the gcmd_t */
829 	ata_pktp->ap_gcmdp = gcmdp;
830 
831 	/* check length of SCSI CDB is not larger than drive expects */
832 
833 	if (cmdlen > ata_drvp->ad_cdb_len) {
834 		ADBG_WARN(("atapi_ccballoc: SCSI CDB too large!\n"));
835 		return (FALSE);
836 	}
837 
838 	/*
839 	 * save the length of the SCSI CDB and calculate the CDB padding;
840 	 * note that, for convenience, the padding is expressed in shorts.
841 	 */
842 
843 	ata_pktp->ap_cdb_len = (uchar_t)cmdlen;
844 	ata_pktp->ap_cdb_pad =
845 	    ((unsigned)(ata_drvp->ad_cdb_len - cmdlen)) >> 1;
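	/*
	 * Worked example (values hypothetical): a 10-byte CDB sent to a
	 * drive with a 12-byte packet size gives
	 * ap_cdb_pad = (12 - 10) >> 1 = 1, i.e. one pad word which the
	 * packet-transfer code presumably writes as zero so the drive
	 * sees the full packet length it expects.
	 */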
846 
847 	/* set up callback functions */
848 
849 	ata_pktp->ap_start = atapi_fsm_start;
850 	ata_pktp->ap_intr = atapi_fsm_intr;
851 	ata_pktp->ap_complete = atapi_complete;
852 
853 	/* set-up for start */
854 
855 	ata_pktp->ap_flags = AP_ATAPI;
856 	ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
857 	ata_pktp->ap_cmd = ATC_PACKET;
858 
859 	return (TRUE);
860 }
861 
862 
863 
864 /*
865  *
866  * SCSA tran_destroy_pkt entry point
867  *
868  */
869 
870 static void
871 atapi_tran_destroy_pkt(
872 	struct scsi_address *ap,
873 	struct scsi_pkt *spktp)
874 {
875 	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);
876 
877 	ADBG_TRACE(("atapi_tran_destroy_pkt entered\n"));
878 
879 	if (gcmdp->cmd_dma_handle != NULL) {
880 		ghd_dmafree_attr(gcmdp);
881 	}
882 
883 	ghd_pktfree(&ADDR2CTL(ap)->ac_ccc, ap, spktp);
884 }
885 
886 
887 
888 /*
889  *
890  * GHD ccbfree callback function
891  *
892  */
893 
894 /* ARGSUSED */
895 void
896 atapi_ccbfree(
897 	gcmd_t *gcmdp)
898 {
899 	ADBG_TRACE(("atapi_ccbfree entered\n"));
900 
901 	/* nothing to do */
902 }
903 
904 
905 /*
906  *
907  * SCSA tran_dmafree entry point
908  *
909  */
910 
911 /*ARGSUSED*/
912 static void
913 atapi_tran_dmafree(
914 	struct scsi_address *ap,
915 	struct scsi_pkt *spktp)
916 {
917 	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);
918 
919 	ADBG_TRACE(("atapi_tran_dmafree entered\n"));
920 
921 	if (gcmdp->cmd_dma_handle != NULL) {
922 		ghd_dmafree_attr(gcmdp);
923 	}
924 }
925 
926 
927 
928 /*
929  *
930  * SCSA tran_sync_pkt entry point
931  *
932  */
933 
934 /*ARGSUSED*/
935 static void
936 atapi_tran_sync_pkt(
937 	struct scsi_address *ap,
938 	struct scsi_pkt *spktp)
939 {
940 
941 	ADBG_TRACE(("atapi_tran_sync_pkt entered\n"));
942 
943 	if (PKTP2GCMDP(spktp)->cmd_dma_handle != NULL) {
944 		ghd_tran_sync_pkt(ap, spktp);
945 	}
946 }
947 
948 
949 
950 /*
951  *
952  * SCSA tran_start entry point
953  *
954  */
955 
956 /* ARGSUSED */
957 static int
958 atapi_tran_start(
959 	struct scsi_address *ap,
960 	struct scsi_pkt *spktp)
961 {
962 	ata_pkt_t *ata_pktp = SPKT2APKT(spktp);
963 	ata_drv_t *ata_drvp = APKT2DRV(ata_pktp);
964 	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
965 	gcmd_t	  *gcmdp = APKT2GCMD(ata_pktp);
966 	int	   polled = FALSE;
967 	int	   rc;
968 
969 	ADBG_TRACE(("atapi_tran_start entered\n"));
970 
971 	/*
972 	 * Basic initialization performed each and every time a
973 	 * scsi_pkt is submitted. A single scsi_pkt may be submitted
974 	 * multiple times so this routine has to be idempotent. One
975 	 * time initializations don't belong here.
976 	 */
977 
978 	/*
979 	 * The ap_v_addr pointer is incremented by the PIO data
980 	 * transfer routine as each word is transferred. Therefore, ap_v_addr
981 	 * needs to be reset here (rather than in atapi_tran_init_pkt())
982 	 * in case the target resubmits the same pkt multiple times
983 	 * (which is permitted by SCSA).
984 	 */
985 	ata_pktp->ap_v_addr = ata_pktp->ap_baddr + ata_pktp->ap_boffset;
986 
987 	/* ap_resid is decremented as the data transfer progresses */
988 	ata_pktp->ap_resid = ata_pktp->ap_bcount;
989 
990 	/* clear error flags */
991 	ata_pktp->ap_flags &= (AP_ATAPI | AP_READ | AP_WRITE | AP_ARQ_ON_ERROR);
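	/*
	 * Note that the mask above deliberately preserves only the flags
	 * set at ccballoc/init_pkt time (AP_ATAPI plus the direction and
	 * ARQ bits) and clears any error/status flags left over from a
	 * previous run of this same pkt.
	 */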
992 	spktp->pkt_reason = 0;
993 	spktp->pkt_state = 0;
994 	spktp->pkt_statistics = 0;
995 
996 	/*
997 	 * check for polling pkt
998 	 */
999 	if (spktp->pkt_flags & FLAG_NOINTR) {
1000 		polled = TRUE;
1001 	}
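	/*
	 * FLAG_NOINTR requires the command to be complete before this
	 * call returns, so ghd_transport() is told to poll rather than
	 * rely on interrupts; crash dump and panic-time I/O are expected
	 * to come through this path.
	 */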
1002 
1003 #ifdef ___just_ignore_unsupported_flags___
1004 	/* driver cannot accept tagged commands */
1005 
1006 	if (spktp->pkt_flags & (FLAG_HTAG|FLAG_OTAG|FLAG_STAG)) {
1007 		spktp->pkt_reason = CMD_TRAN_ERR;
1008 		return (TRAN_BADPKT);
1009 	}
1010 #endif
1011 
1012 	/* call common transport routine */
1013 
1014 	rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
1015 	    spktp->pkt_time, polled, NULL);
1016 
1017 	/* see if pkt was not accepted */
1018 
1019 	if (rc != TRAN_ACCEPT)
1020 		return (rc);
1021 
1022 	return (rc);
1023 }
1024 
1025 
1026 /*
1027  *
1028  * GHD packet complete callback
1029  *
1030  */
1031 /* ARGSUSED */
1032 static void
1033 atapi_complete(
1034 	ata_drv_t *ata_drvp,
1035 	ata_pkt_t *ata_pktp,
1036 	int do_callback)
1037 {
1038 	struct scsi_pkt *spktp = APKT2SPKT(ata_pktp);
1039 	struct scsi_status *scsi_stat = (struct scsi_status *)spktp->pkt_scbp;
1040 
1041 	ADBG_TRACE(("atapi_complete entered\n"));
1042 	ADBG_TRANSPORT(("atapi_complete: pkt = 0x%p\n", ata_pktp));
1043 
1044 	/* update resid */
1045 
1046 	spktp->pkt_resid = ata_pktp->ap_resid;
1047 
1048 	if (ata_pktp->ap_flags & AP_SENT_CMD) {
1049 		spktp->pkt_state |=
1050 		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
1051 	}
1052 	if (ata_pktp->ap_flags & AP_XFERRED_DATA) {
1053 		spktp->pkt_state |= STATE_XFERRED_DATA;
1054 	}
1055 
1056 	if (ata_pktp->ap_flags & AP_GOT_STATUS) {
1057 		spktp->pkt_state |= STATE_GOT_STATUS;
1058 	}
1059 
1060 	/* check for fatal errors */
1061 
1062 	if (ata_pktp->ap_flags & AP_TRAN_ERROR) {
1063 		spktp->pkt_reason = CMD_TRAN_ERR;
1064 	} else if (ata_pktp->ap_flags & AP_BUS_RESET) {
1065 		spktp->pkt_reason = CMD_RESET;
1066 		spktp->pkt_statistics |= STAT_BUS_RESET;
1067 	} else if (ata_pktp->ap_flags & AP_DEV_RESET) {
1068 		spktp->pkt_reason = CMD_RESET;
1069 		spktp->pkt_statistics |= STAT_DEV_RESET;
1070 	} else if (ata_pktp->ap_flags & AP_ABORT) {
1071 		spktp->pkt_reason = CMD_ABORTED;
1072 		spktp->pkt_statistics |= STAT_ABORTED;
1073 	} else if (ata_pktp->ap_flags & AP_TIMEOUT) {
1074 		spktp->pkt_reason = CMD_TIMEOUT;
1075 		spktp->pkt_statistics |= STAT_TIMEOUT;
1076 	} else {
1077 		spktp->pkt_reason = CMD_CMPLT;
1078 	}
1079 
1080 	/* non-fatal errors */
1081 
1082 	if (ata_pktp->ap_flags & AP_ERROR)
1083 		scsi_stat->sts_chk = 1;
1084 	else
1085 		scsi_stat->sts_chk = 0;
1086 
1087 	if (ata_pktp->ap_flags & AP_ARQ_ERROR) {
1088 		ADBG_ARQ(("atapi_complete ARQ error 0x%p\n", ata_pktp));
1089 		spktp->pkt_reason = CMD_TRAN_ERR;
1090 
1091 	} else if (ata_pktp->ap_flags & AP_ARQ_OKAY) {
1092 		static struct scsi_status zero_scsi_status = { 0 };
1093 		struct scsi_arq_status *arqp;
1094 
1095 		ADBG_ARQ(("atapi_complete ARQ okay 0x%p\n", ata_pktp));
1096 		spktp->pkt_state |= STATE_ARQ_DONE;
1097 		arqp = ata_pktp->ap_scbp;
1098 		arqp->sts_rqpkt_reason = CMD_CMPLT;
1099 		arqp->sts_rqpkt_state = STATE_XFERRED_DATA;
1100 		arqp->sts_rqpkt_status = zero_scsi_status;
1101 		arqp->sts_rqpkt_resid = 0;
1102 		arqp->sts_rqpkt_statistics = 0;
1103 
1104 	}
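	/*
	 * (Assumption from the surrounding code: the sense bytes
	 * themselves are deposited under ap_scbp by the REQUEST SENSE
	 * packet that the ATAPI FSM runs on error; only the synthetic
	 * request-sense pkt bookkeeping fields are filled in here.)
	 */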
1105 
1106 	ADBG_TRANSPORT(("atapi_complete: reason = 0x%x stats = 0x%x "
1107 	    "sts_chk = %d\n", spktp->pkt_reason, spktp->pkt_statistics,
1108 	    scsi_stat->sts_chk));
1109 
1110 	if (do_callback && (spktp->pkt_comp))
1111 		(*spktp->pkt_comp)(spktp);
1112 }
1113 
1114 
1115 
1116 /*
1117  * Update the IDENTIFY PACKET DEVICE info
1118  */
1119 
1120 static int
1121 atapi_id_update(
1122 	ata_ctl_t	*ata_ctlp,
1123 	ata_drv_t	*ata_drvp,
1124 	ata_pkt_t	*ata_pktp)
1125 {
1126 	ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
1127 	caddr_t		 ioaddr1 = ata_ctlp->ac_ioaddr1;
1128 	ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
1129 	caddr_t		 ioaddr2 = ata_ctlp->ac_ioaddr2;
1130 	struct ata_id	*aidp;
1131 	int	rc;
1132 
1133 	/*
1134 	 * select the appropriate drive and LUN
1135 	 */
1136 	ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD,
1137 	    ata_drvp->ad_drive_bits);
1138 	ata_nsecwait(400);
1139 
1140 	/*
1141 	 * make certain the drive is selected, and wait for not busy
1142 	 */
1143 	if (!ata_wait(io_hdl2, ioaddr2, ATS_DRDY, ATS_BSY, 5 * 1000000)) {
1144 		ADBG_ERROR(("atapi_id_update: select failed\n"));
1145 		if (ata_pktp != NULL)
1146 			ata_pktp->ap_flags |= AP_ERROR;
1147 		return (ATA_FSM_RC_FINI);
1148 	}
1149 
1150 	if (ata_pktp != NULL)
1151 		aidp = (struct ata_id *)ata_pktp->ap_v_addr;
1152 	else
1153 		aidp = &ata_drvp->ad_id;
1154 
1155 	rc = atapi_id(ata_ctlp->ac_iohandle1, ata_ctlp->ac_ioaddr1,
1156 	    ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2, aidp);
1157 	if (rc) {
1158 		swab(aidp->ai_drvser, aidp->ai_drvser,
1159 		    sizeof (aidp->ai_drvser));
1160 		swab(aidp->ai_fw, aidp->ai_fw,
1161 		    sizeof (aidp->ai_fw));
1162 		swab(aidp->ai_model, aidp->ai_model,
1163 		    sizeof (aidp->ai_model));
1164 	}
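	/*
	 * The IDENTIFY data carries the serial, firmware and model
	 * strings as two ASCII characters per 16-bit word with the first
	 * character in the upper byte, so swab() is used to put them
	 * into readable byte order on x86.
	 */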
1165 
1166 	if (ata_pktp == NULL)
1167 		return (ATA_FSM_RC_FINI);
1168 
1169 	if (!rc) {
1170 		ata_pktp->ap_flags |= AP_ERROR;
1171 	} else {
1172 		ata_pktp->ap_flags |= AP_XFERRED_DATA;
1173 	}
1174 	return (ATA_FSM_RC_FINI);
1175 }
1176 
1177 
1178 
1179 /*
1180  * Both drives on the controller share a common pkt to do
1181  * ARQ processing. Therefore the pkt is only partially
1182  * initialized here. The rest of initialization occurs
1183  * just before starting the ARQ pkt when an error is
1184  * detected.
1185  */
1186 
1187 void
1188 atapi_init_arq(
1189 	ata_ctl_t *ata_ctlp)
1190 {
1191 	ata_pkt_t *arq_pktp = ata_ctlp->ac_arq_pktp;
1192 
1193 	arq_pktp->ap_cdbp = ata_ctlp->ac_arq_cdb;
1194 	arq_pktp->ap_cdb_len = sizeof (ata_ctlp->ac_arq_cdb);
1195 	arq_pktp->ap_start = atapi_fsm_start;
1196 	arq_pktp->ap_intr = atapi_fsm_intr;
1197 	arq_pktp->ap_complete = atapi_complete;
1198 	arq_pktp->ap_flags = AP_ATAPI;
1199 	arq_pktp->ap_cmd = ATC_PACKET;
1200 
1201 	ata_ctlp->ac_arq_cdb[0] = SCMD_REQUEST_SENSE;
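	/*
	 * Only byte 0 of the REQUEST SENSE CDB is initialized here; per
	 * the block comment above, the rest (allocation length, transfer
	 * particulars) is filled in just before the ARQ pkt is started
	 * when an error is detected.
	 */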
1202 }
1203 
1204 void
1205 atapi_reset_dma_mode(ata_drv_t *ata_drvp, int need_wait)
1206 {
1207 	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
1208 
1209 	/*
1210 	 * Some very old CD-ROM drives need a 500ms delay before
1211 	 * their DMA mode can be reset. So after resetting the DMA
1212 	 * mode on resume, check whether the mode actually got
1213 	 * enabled on the device; if not, delay 500ms and reset it
1214 	 * again. Normal DVD/CD-ROM drives therefore see no extra
1215 	 * delay on resume.
1216 	 */
1217 	if (need_wait == TRUE)
1218 		drv_usecwait(5 * 100000);
1219 	ata_reset_dma_mode(ata_drvp);
1220 	(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);
1221 }
1222