/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/types.h>

#include "ata_common.h"
#include "atapi.h"

/* SCSA entry points */

static int atapi_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void));
static void atapi_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int atapi_tran_abort(struct scsi_address *ap, struct scsi_pkt *spktp);
static int atapi_tran_reset(struct scsi_address *ap, int level);
static int atapi_tran_getcap(struct scsi_address *ap, char *capstr, int whom);
static int atapi_tran_setcap(struct scsi_address *ap, char *capstr,
    int value, int whom);
static struct scsi_pkt	*atapi_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(caddr_t), caddr_t arg);
static void atapi_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp);
static void atapi_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *spktp);
static void atapi_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp);
static int atapi_tran_start(struct scsi_address *ap, struct scsi_pkt *spktp);

/*
 * packet callbacks
 */
static void atapi_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
    int do_callback);
static int atapi_id_update(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
    ata_pkt_t *ata_pktp);


/* external dependencies */

char _depends_on[] = "misc/scsi";

/*
 * Local static data
 */

#if 0
static ddi_dma_lim_t atapi_dma_limits = {
	0,		/* address low				*/
	0xffffffffU,	/* address high				*/
	0,		/* counter max				*/
	1,		/* burstsize				*/
	DMA_UNIT_8,	/* minimum xfer				*/
	0,		/* dma speed				*/
	(uint_t)DMALIM_VER0,	/* version			*/
	0xffffffffU,	/* address register			*/
	0xffffffffU,	/* counter register			*/
	1,		/* granular				*/
	1,		/* scatter/gather list length		*/
	0xffffffffU	/* request size				*/
};
#endif

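/*
 * Patchable tunables: atapi_use_static_geometry selects the canned
 * ATAPI_HEADS / ATAPI_SECTORS_PER_TRK geometry in atapi_tran_getcap(),
 * and atapi_arq_enable globally enables automatic request sense (ARQ)
 * handling for ATAPI targets.
 */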
static	int	atapi_use_static_geometry = TRUE;
static	int	atapi_arq_enable = TRUE;


/*
 *
 * Call SCSA init to initialize the ATAPI half of the driver
 *
 */

int
atapi_attach(ata_ctl_t *ata_ctlp)
{
	dev_info_t	*dip = ata_ctlp->ac_dip;
	scsi_hba_tran_t *tran;

	ADBG_TRACE(("atapi_init entered\n"));

	/* allocate transport structure */

	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	if (tran == NULL) {
		ADBG_WARN(("atapi_init: scsi_hba_tran_alloc failed\n"));
		goto errout;
	}

	ata_ctlp->ac_atapi_tran = tran;
	ata_ctlp->ac_flags |= AC_SCSI_HBA_TRAN_ALLOC;

	/* initialize transport structure */

	tran->tran_hba_private = ata_ctlp;
	tran->tran_tgt_private = NULL;

	tran->tran_tgt_init = atapi_tran_tgt_init;
	tran->tran_tgt_probe = atapi_tran_tgt_probe;
	tran->tran_tgt_free = atapi_tran_tgt_free;
	tran->tran_start = atapi_tran_start;
	tran->tran_reset = atapi_tran_reset;
	tran->tran_abort = atapi_tran_abort;
	tran->tran_getcap = atapi_tran_getcap;
	tran->tran_setcap = atapi_tran_setcap;
	tran->tran_init_pkt = atapi_tran_init_pkt;
	tran->tran_destroy_pkt = atapi_tran_destroy_pkt;
	tran->tran_dmafree = atapi_tran_dmafree;
	tran->tran_sync_pkt = atapi_tran_sync_pkt;

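	/*
	 * SCSI_HBA_TRAN_CLONE makes the framework hand each target its
	 * own copy of this scsi_hba_tran_t, which is why
	 * atapi_tran_tgt_init() can safely stash per-target state in
	 * tran_tgt_private.
	 */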
	if (scsi_hba_attach_setup(ata_ctlp->ac_dip, &ata_pciide_dma_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		ADBG_WARN(("atapi_init: scsi_hba_attach_setup failed\n"));
		goto errout;
	}

	ata_ctlp->ac_flags |= AC_SCSI_HBA_ATTACH;

	return (TRUE);

errout:
	atapi_detach(ata_ctlp);
	return (FALSE);
}


/*
 *
 * destroy the atapi sub-system
 *
 */

void
atapi_detach(
	ata_ctl_t *ata_ctlp)
{
	ADBG_TRACE(("atapi_detach entered\n"));

	if (ata_ctlp->ac_flags & AC_SCSI_HBA_ATTACH)
		(void) scsi_hba_detach(ata_ctlp->ac_dip);

	if (ata_ctlp->ac_flags & AC_SCSI_HBA_TRAN_ALLOC)
		scsi_hba_tran_free(ata_ctlp->ac_atapi_tran);
}



/*
 *
 * initialize the ATAPI drive's soft-state based on the
 * response to IDENTIFY PACKET DEVICE command
 *
 */

int
atapi_init_drive(
	ata_drv_t *ata_drvp)
{
	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;

	ADBG_TRACE(("atapi_init_drive entered\n"));

	/* Determine ATAPI CDB size */
	(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);

	switch (ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_PKT_SZ) {

	case ATAPI_ID_CFG_PKT_12B:
		ata_drvp->ad_cdb_len = 12;
		break;
	case ATAPI_ID_CFG_PKT_16B:
		ata_drvp->ad_cdb_len = 16;
		break;
	default:
		ADBG_WARN(("atapi_init_drive: bad pkt size support\n"));
		return (FALSE);
	}

	/* determine if drive gives an intr when it wants the CDB */

	if ((ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_DRQ_TYPE) !=
	    ATAPI_ID_CFG_DRQ_INTR)
		ata_drvp->ad_flags |= AD_NO_CDB_INTR;

	/*
	 * Some devices come up with no DMA mode (UDMA or MWDMA)
	 * enabled by default, so check here and enable a DMA mode
	 * if none is currently selected.
	 */
	if (ata_set_dma_mode(ata_ctlp, ata_drvp) == TRUE) {
		/* Update the IDENTIFY PACKET DEVICE data */
		(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);
	}

	return (TRUE);
}


/*
 *
 * destroy an atapi drive
 *
 */

/* ARGSUSED */
void
atapi_uninit_drive(
	ata_drv_t *ata_drvp)
{
	ADBG_TRACE(("atapi_uninit_drive entered\n"));
}

/*
 *
 * Issue an IDENTIFY PACKET (ATAPI) DEVICE command
 *
 */

int
atapi_id(
	ddi_acc_handle_t io_hdl1,
	caddr_t		 ioaddr1,
	ddi_acc_handle_t io_hdl2,
	caddr_t		 ioaddr2,
	struct ata_id	*ata_idp)
{
	int	rc;

	ADBG_TRACE(("atapi_id entered\n"));

	rc = ata_id_common(ATC_ID_PACKET_DEVICE, FALSE, io_hdl1, ioaddr1,
	    io_hdl2, ioaddr2, ata_idp);

	if (!rc)
		return (FALSE);

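	/*
	 * The general configuration word (ai_config) of the IDENTIFY
	 * PACKET DEVICE data encodes the device class; reject anything
	 * that does not identify itself as an ATAPI device.
	 */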
	if ((ata_idp->ai_config & ATAC_ATAPI_TYPE_MASK) != ATAC_ATAPI_TYPE)
		return (FALSE);

	return (TRUE);
}


/*
 *
 * Check the device's register block for the ATAPI signature.
 *
 * Although the spec says the sector count, sector number and device/head
 * registers are also part of the signature, for some unknown reason, this
 * routine only checks the cyl hi and cyl low registers. I'm just
 * guessing, but it might be because ATA and ATAPI devices return
 * identical values in those registers and we actually rely on the
 * IDENTIFY DEVICE and IDENTIFY PACKET DEVICE commands to recognize the
 * device type.
 *
 */

int
atapi_signature(
	ddi_acc_handle_t io_hdl,
	caddr_t ioaddr)
{
	int	rc = FALSE;
	ADBG_TRACE(("atapi_signature entered\n"));

	if (ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_HCYL) == ATAPI_SIG_HI &&
	    ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_LCYL) == ATAPI_SIG_LO)
		rc = TRUE;

	/*
	 * The following is a little bit of bullet proofing.
	 *
	 * When some drives are configured on a master-only bus they
	 * "shadow" their registers for the not-present slave drive.
	 * This is bogus and if you're not careful it may cause a
	 * master-only drive to be mistakenly recognized as both
	 * master and slave. By clearing the signature registers here
	 * I can make certain that when ata_drive_type() switches from
	 * the master to slave drive that I'll read back non-signature
	 * values regardless of whether the master-only drive does
	 * the "shadow" register trick. This prevents a bogus
	 * IDENTIFY PACKET DEVICE command from being issued which
	 * a really bogus master-only drive will return "shadow"
	 * data for.
	 */
	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_HCYL, 0);
	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_LCYL, 0);

	return (rc);
}


/*
 *
 * SCSA tran_tgt_init entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_tgt_init(
	dev_info_t	*hba_dip,
	dev_info_t	*tgt_dip,
	scsi_hba_tran_t *hba_tran,
	struct scsi_device *sd)
{
	gtgt_t	  *gtgtp;	/* GHD's per-target-instance structure */
	ata_ctl_t *ata_ctlp;
	ata_tgt_t *ata_tgtp;
	ata_drv_t *ata_drvp;
	struct scsi_address *ap;
	int	rc = DDI_SUCCESS;

	ADBG_TRACE(("atapi_tran_tgt_init entered\n"));

	/*
	 * Qualification of targ, lun, and ATAPI device presence
	 * has already been taken care of by ata_bus_ctl
	 */

	/* store pointer to drive struct in cloned tran struct */

	ata_ctlp = TRAN2CTL(hba_tran);
	ap = &sd->sd_address;

	ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);

	/*
	 * Create the "atapi" property so the target driver knows
	 * to use the correct set of SCSI commands
	 */
	if (!ata_prop_create(tgt_dip, ata_drvp, "atapi")) {
		return (DDI_FAILURE);
	}

	gtgtp = ghd_target_init(hba_dip, tgt_dip, &ata_ctlp->ac_ccc,
	    sizeof (ata_tgt_t), ata_ctlp,
	    ap->a_target, ap->a_lun);

	/* tran_tgt_private points to gtgt_t */
	hba_tran->tran_tgt_private = gtgtp;

	/* gt_tgt_private points to ata_tgt_t */
	ata_tgtp = GTGTP2ATATGTP(gtgtp);

	/* initialize the per-target-instance data */
	ata_tgtp->at_drvp = ata_drvp;
	ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
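	/*
	 * ac_max_transfer is in sectors; shift by SCTRSHFT (512-byte
	 * sectors) to express the per-target DMA limit in bytes.
	 */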
	ata_tgtp->at_dma_attr.dma_attr_maxxfer =
	    ata_ctlp->ac_max_transfer << SCTRSHFT;

	return (rc);
}


/*
 *
 * SCSA tran_tgt_probe entry point
 *
 */

static int
atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void))
{
	ADBG_TRACE(("atapi_tran_tgt_probe entered\n"));

	return (scsi_hba_probe(sd, callback));
}


/*
 *
 * SCSA tran_tgt_free entry point
 *
 */

/* ARGSUSED */
static void
atapi_tran_tgt_free(
	dev_info_t	*hba_dip,
	dev_info_t	*tgt_dip,
	scsi_hba_tran_t	*hba_tran,
	struct scsi_device *sd)
{
	ADBG_TRACE(("atapi_tran_tgt_free entered\n"));

	ghd_target_free(hba_dip, tgt_dip, &TRAN2ATAP(hba_tran)->ac_ccc,
	    TRAN2GTGTP(hba_tran));
	hba_tran->tran_tgt_private = NULL;
}




/*
 *
 * SCSA tran_abort entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_abort(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	ADBG_TRACE(("atapi_tran_abort entered\n"));

	if (spktp) {
		return (ghd_tran_abort(&ADDR2CTL(ap)->ac_ccc, PKTP2GCMDP(spktp),
		    ADDR2GTGTP(ap), NULL));
	}

	return (ghd_tran_abort_lun(&ADDR2CTL(ap)->ac_ccc, ADDR2GTGTP(ap),
	    NULL));
}


/*
 *
 * SCSA tran_reset entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_reset(
	struct scsi_address *ap,
	int level)
{
	ADBG_TRACE(("atapi_tran_reset entered\n"));

	if (level == RESET_TARGET)
		return (ghd_tran_reset_target(&ADDR2CTL(ap)->ac_ccc,
		    ADDR2GTGTP(ap), NULL));
	if (level == RESET_ALL)
		return (ghd_tran_reset_bus(&ADDR2CTL(ap)->ac_ccc,
		    ADDR2GTGTP(ap), NULL));
	return (FALSE);

}


/*
 *
 * SCSA tran_setcap entry point
 *
 */

static int
atapi_tran_setcap(
	struct scsi_address *ap,
	char *capstr,
	int value,
	int whom)
{
	gtgt_t	  *gtgtp = ADDR2GTGTP(ap);
	ata_tgt_t *tgtp = GTGTP2ATATGTP(gtgtp);

	ADBG_TRACE(("atapi_tran_setcap entered\n"));

	switch (scsi_hba_lookup_capstr(capstr)) {
		case SCSI_CAP_SECTOR_SIZE:
			tgtp->at_dma_attr.dma_attr_granular = (uint_t)value;
			return (TRUE);

		case SCSI_CAP_ARQ:
			if (whom) {
				tgtp->at_arq = value;
				return (TRUE);
			}
			break;

		case SCSI_CAP_TOTAL_SECTORS:
			tgtp->at_total_sectors = value;
			return (TRUE);
	}
	return (FALSE);
}


/*
 *
 * SCSA tran_getcap entry point
 *
 */

static int
atapi_tran_getcap(
	struct scsi_address *ap,
	char *capstr,
	int whom)
{
	struct ata_id	 ata_id;
	struct ata_id	*ata_idp;
	ata_ctl_t	*ata_ctlp;
	ata_drv_t	*ata_drvp;
	gtgt_t		*gtgtp;
	int		 rval = -1;

	ADBG_TRACE(("atapi_tran_getcap entered\n"));

	if (capstr == NULL || whom == 0)
		return (-1);

	ata_ctlp = ADDR2CTL(ap);

	switch (scsi_hba_lookup_capstr(capstr)) {
	case SCSI_CAP_ARQ:
		rval = TRUE;
		break;

	case SCSI_CAP_INITIATOR_ID:
		rval = 7;
		break;

	case SCSI_CAP_DMA_MAX:
		/* XXX - what should the real limit be?? */
		/* limit to 64K ??? */
		rval = 4096 * (ATA_DMA_NSEGS - 1);
		break;

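	/*
	 * Geometry is returned encoded as (heads << 16) | sectors-per-track;
	 * the target driver is left to derive cylinders from the capacity.
	 */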
	case SCSI_CAP_GEOMETRY:
		/* Default geometry */
		if (atapi_use_static_geometry) {
			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
			break;
		}

		/* this code is currently not used */

		ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
		gtgtp = ADDR2GTGTP(ap);

		/*
		 * retrieve the current IDENTIFY PACKET DEVICE info
		 */
		if (!ata_queue_cmd(atapi_id_update, &ata_id, ata_ctlp,
		    ata_drvp, gtgtp)) {
			ADBG_TRACE(("atapi_tran_getcap geometry failed"));
			return (0);
		}

		/*
		 * save the new response data
		 */
		ata_idp = &ata_drvp->ad_id;
		*ata_idp = ata_id;

		switch ((ata_idp->ai_config >> 8) & 0xf) {
		case DTYPE_RODIRECT:
			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
			break;
		case DTYPE_DIRECT:
		case DTYPE_OPTICAL:
			rval = (ata_idp->ai_curheads << 16) |
			    ata_idp->ai_cursectrk;
			break;
		default:
			rval = 0;
		}
		break;
	}

	return (rval);
}



/*
 *
 * SCSA tran_init_pkt entry point
 *
 */

static struct scsi_pkt *
atapi_tran_init_pkt(
	struct scsi_address *ap,
	struct scsi_pkt	*spktp,
	struct buf	*bp,
	int		 cmdlen,
	int		 statuslen,
	int		 tgtlen,
	int		 flags,
	int		(*callback)(caddr_t),
	caddr_t		 arg)
{
	gtgt_t		*gtgtp = ADDR2GTGTP(ap);
	ata_tgt_t	*ata_tgtp = GTGTP2ATATGTP(gtgtp);
	ata_ctl_t	*ata_ctlp = ADDR2CTL(ap);
	ata_pkt_t	*ata_pktp;
	struct scsi_pkt	*new_spktp;
	ddi_dma_attr_t	*sg_attrp;
	int		 bytes;

	ADBG_TRACE(("atapi_tran_init_pkt entered\n"));


	/*
	 * Determine whether to do PCI-IDE DMA setup; start out by
	 * assuming we're not.
	 */
	sg_attrp = NULL;
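	/*
	 * Leaving sg_attrp NULL makes the GHD packet-init call below
	 * skip DMA resource setup, so the request is handled by the
	 * PIO path further down instead.
	 */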

	if (bp == NULL) {
		/* no data to transfer */
		goto skip_dma_setup;
	}

	if (bp->b_bcount == 0) {
		/* no data to transfer */
		goto skip_dma_setup;
	}

	if ((GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_pciide_dma == ATA_DMA_OFF)) {
		goto skip_dma_setup;
	}

	if (ata_dma_disabled)
		goto skip_dma_setup;


	/*
	 * The PCI-IDE DMA engine is brain-damaged and can't
	 * DMA non-aligned buffers.
	 */
	if (((bp->b_flags & B_PAGEIO) == 0) &&
	    ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
		/*
		 * if the virtual address isn't aligned, then the
		 * physical address also isn't aligned.
		 */
		goto skip_dma_setup;
	}

	/*
	 * It also insists that the byte count must be even.
	 */
	if (bp->b_bcount & 1) {
		/* something odd here */
		goto skip_dma_setup;
	}

	/*
	 * Huzza! We're really going to do it
	 */
	sg_attrp = &ata_tgtp->at_dma_attr;


skip_dma_setup:

	/*
	 * Call GHD packet init function
	 */

	new_spktp = ghd_tran_init_pkt_attr(&ata_ctlp->ac_ccc, ap, spktp, bp,
	    cmdlen, statuslen, tgtlen, flags,
	    callback, arg, sizeof (ata_pkt_t), sg_attrp);

	if (new_spktp == NULL)
		return (NULL);

	ata_pktp = SPKT2APKT(new_spktp);
	ata_pktp->ap_cdbp = new_spktp->pkt_cdbp;
	if (statuslen > 255) {
		statuslen = sizeof (struct scsi_arq_status);
	}
	ata_pktp->ap_statuslen = (uchar_t)statuslen;

	/* reset data direction flags */
	if (spktp)
		ata_pktp->ap_flags &= ~(AP_READ | AP_WRITE);

	/*
	 * check for ARQ mode
	 */
	if (atapi_arq_enable == TRUE &&
	    ata_tgtp->at_arq == TRUE &&
	    ata_pktp->ap_statuslen >= sizeof (struct scsi_arq_status)) {
		ADBG_TRACE(("atapi_tran_init_pkt ARQ\n"));
		ata_pktp->ap_scbp =
		    (struct scsi_arq_status *)new_spktp->pkt_scbp;
		ata_pktp->ap_flags |= AP_ARQ_ON_ERROR;
	}
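	/*
	 * With AP_ARQ_ON_ERROR set, a CHECK CONDITION on this pkt is
	 * expected to be handled by starting the controller's shared
	 * ARQ pkt (see atapi_init_arq()) so sense data is fetched
	 * automatically; atapi_complete() then reports STATE_ARQ_DONE.
	 */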

	/*
	 * fill these with zeros for ATA/ATAPI-4 compatibility
	 */
	ata_pktp->ap_sec = 0;
	ata_pktp->ap_count = 0;

	if (ata_pktp->ap_sg_cnt) {
		ASSERT(bp != NULL);
		/* determine direction to program the DMA engine later */
		if (bp->b_flags & B_READ) {
			ata_pktp->ap_flags |= AP_READ;
		} else {
			ata_pktp->ap_flags |= AP_WRITE;
		}
		ata_pktp->ap_pciide_dma = TRUE;
		ata_pktp->ap_hicyl = 0;
		ata_pktp->ap_lwcyl = 0;
		return (new_spktp);
	}

	/*
	 * Since we're not using DMA, we need to map the buffer into
	 * kernel address space
	 */

	ata_pktp->ap_pciide_dma = FALSE;
	if (bp && bp->b_bcount) {
		/*
		 * If this is a fresh request map the buffer and
		 * reset the ap_baddr pointer and the current offset
		 * and byte count.
		 *
		 * The ap_boffset is used to set the ap_v_addr ptr at
		 * the start of each I/O request.
		 *
		 * The ap_bcount is used to update ap_boffset when the
		 * target driver requests the next segment.
		 *
		 */
		if (cmdlen) {
			bp_mapin(bp);
			ata_pktp->ap_baddr = bp->b_un.b_addr;
			ata_pktp->ap_bcount = 0;
			ata_pktp->ap_boffset = 0;
		}
		ASSERT(ata_pktp->ap_baddr != NULL);

		/* determine direction for the PIO FSM */
		if (bp->b_flags & B_READ) {
			ata_pktp->ap_flags |= AP_READ;
		} else {
			ata_pktp->ap_flags |= AP_WRITE;
		}

		/*
		 * If the drive has the Single Sector bug, limit
		 * the transfer to a single sector. This assumes
		 * ATAPI CD drives always use 2k sectors.
		 */
		if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_flags & AD_1SECTOR) {
			size_t resid;
			size_t tmp;

			/* adjust offset based on prior request */
			ata_pktp->ap_boffset += ata_pktp->ap_bcount;

			/* compute number of bytes left to transfer */
			resid = bp->b_bcount - ata_pktp->ap_boffset;

			/* limit the transfer to 2k */
			tmp = MIN(2048, resid);
			ata_pktp->ap_bcount = tmp;

			/* tell target driver how much is left for next time */
			new_spktp->pkt_resid = resid - tmp;
		} else {
			/* do the whole request in one swell foop */
			ata_pktp->ap_bcount = bp->b_bcount;
			new_spktp->pkt_resid = 0;
		}

	} else {
		ata_pktp->ap_baddr = NULL;
		ata_pktp->ap_bcount = 0;
		ata_pktp->ap_boffset = 0;
	}

	/*
	 * determine the size of each partial data transfer
	 * to/from the drive
	 */
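	/*
	 * For ATAPI PIO the per-DRQ byte-count limit is passed to the
	 * device in the cylinder low/high taskfile registers; the device
	 * may transfer less than this per DRQ, but never more.
	 */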
	bytes = min(ata_pktp->ap_bcount, ATAPI_MAX_BYTES_PER_DRQ);
	ata_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
	ata_pktp->ap_lwcyl = (uchar_t)bytes;
	return (new_spktp);
}


/*
 * GHD ccballoc callback
 *
 *	Initialize the ata_pkt and return the ptr to the gcmd_t to GHD.
 *
 */

/* ARGSUSED */
int
atapi_ccballoc(
	gtgt_t	*gtgtp,
	gcmd_t	*gcmdp,
	int	 cmdlen,
	int	 statuslen,
	int	 tgtlen,
	int	 ccblen)

{
	ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
	ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);

	ADBG_TRACE(("atapi_ccballoc entered\n"));

	/* set the back ptr from the ata_pkt to the gcmd_t */
	ata_pktp->ap_gcmdp = gcmdp;

	/* check length of SCSI CDB is not larger than drive expects */

	if (cmdlen > ata_drvp->ad_cdb_len) {
		ADBG_WARN(("atapi_ccballoc: SCSI CDB too large!\n"));
		return (FALSE);
	}

	/*
	 * Save the length of the SCSI CDB and calculate the CDB padding.
	 * Note that for convenience, padding is expressed in shorts.
	 */

	ata_pktp->ap_cdb_len = (uchar_t)cmdlen;
	ata_pktp->ap_cdb_pad =
	    ((unsigned)(ata_drvp->ad_cdb_len - cmdlen)) >> 1;
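	/*
	 * For example, a 12-byte SCSI CDB sent to a drive that expects
	 * 16-byte packets yields ap_cdb_pad == 2, i.e. two zero-filled
	 * words are expected to follow the CDB on the wire.
	 */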

	/* set up callback functions */

	ata_pktp->ap_start = atapi_fsm_start;
	ata_pktp->ap_intr = atapi_fsm_intr;
	ata_pktp->ap_complete = atapi_complete;

	/* set-up for start */

	ata_pktp->ap_flags = AP_ATAPI;
	ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
	ata_pktp->ap_cmd = ATC_PACKET;

	return (TRUE);
}



/*
 *
 * SCSA tran_destroy_pkt entry point
 *
 */

static void
atapi_tran_destroy_pkt(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);

	ADBG_TRACE(("atapi_tran_destroy_pkt entered\n"));

	if (gcmdp->cmd_dma_handle != NULL) {
		ghd_dmafree_attr(gcmdp);
	}

	ghd_pktfree(&ADDR2CTL(ap)->ac_ccc, ap, spktp);
}



/*
 *
 * GHD ccbfree callback function
 *
 */

/* ARGSUSED */
void
atapi_ccbfree(
	gcmd_t *gcmdp)
{
	ADBG_TRACE(("atapi_ccbfree entered\n"));

	/* nothing to do */
}


/*
 *
 * SCSA tran_dmafree entry point
 *
 */

/*ARGSUSED*/
static void
atapi_tran_dmafree(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);

	ADBG_TRACE(("atapi_tran_dmafree entered\n"));

	if (gcmdp->cmd_dma_handle != NULL) {
		ghd_dmafree_attr(gcmdp);
	}
}



/*
 *
 * SCSA tran_sync_pkt entry point
 *
 */

/*ARGSUSED*/
static void
atapi_tran_sync_pkt(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{

	ADBG_TRACE(("atapi_tran_sync_pkt entered\n"));

	if (PKTP2GCMDP(spktp)->cmd_dma_handle != NULL) {
		ghd_tran_sync_pkt(ap, spktp);
	}
}



/*
 *
 * SCSA tran_start entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_start(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	ata_pkt_t *ata_pktp = SPKT2APKT(spktp);
	ata_drv_t *ata_drvp = APKT2DRV(ata_pktp);
	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
	gcmd_t	  *gcmdp = APKT2GCMD(ata_pktp);
	int	   polled = FALSE;
	int	   rc;

	ADBG_TRACE(("atapi_tran_start entered\n"));

	/*
	 * Basic initialization performed each and every time a
	 * scsi_pkt is submitted. A single scsi_pkt may be submitted
	 * multiple times so this routine has to be idempotent. One
	 * time initializations don't belong here.
	 */

	/*
	 * The ap_v_addr pointer is incremented by the PIO data
	 * transfer routine as each word is transferred. Therefore, we
	 * need to reset ap_v_addr here (rather than in
	 * atapi_tran_init_pkt()) in case the target resubmits the same
	 * pkt multiple times (which is permitted by SCSA).
	 */
	ata_pktp->ap_v_addr = ata_pktp->ap_baddr + ata_pktp->ap_boffset;

	/* ap_resid is decremented as the data transfer progresses */
	ata_pktp->ap_resid = ata_pktp->ap_bcount;

	/* clear error flags */
	ata_pktp->ap_flags &= (AP_ATAPI | AP_READ | AP_WRITE | AP_ARQ_ON_ERROR);
	spktp->pkt_reason = 0;
	spktp->pkt_state = 0;
	spktp->pkt_statistics = 0;

	/*
	 * check for polling pkt
	 */
	if (spktp->pkt_flags & FLAG_NOINTR) {
		polled = TRUE;
	}
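	/*
	 * FLAG_NOINTR pkts (e.g. crash dump I/O) must complete without
	 * relying on interrupts, so ghd_transport() below is asked to
	 * poll for completion instead.
	 */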

#ifdef ___just_ignore_unsupported_flags___
	/* driver cannot accept tagged commands */

	if (spktp->pkt_flags & (FLAG_HTAG|FLAG_OTAG|FLAG_STAG)) {
		spktp->pkt_reason = CMD_TRAN_ERR;
		return (TRAN_BADPKT);
	}
#endif

	/* call common transport routine */

	rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
	    spktp->pkt_time, polled, NULL);

	/* see if pkt was not accepted */

	if (rc != TRAN_ACCEPT)
		return (rc);

	return (rc);
}


/*
 *
 * GHD packet complete callback
 *
 */
/* ARGSUSED */
static void
atapi_complete(
	ata_drv_t *ata_drvp,
	ata_pkt_t *ata_pktp,
	int do_callback)
{
	struct scsi_pkt *spktp = APKT2SPKT(ata_pktp);
	struct scsi_status *scsi_stat = (struct scsi_status *)spktp->pkt_scbp;

	ADBG_TRACE(("atapi_complete entered\n"));
	ADBG_TRANSPORT(("atapi_complete: pkt = 0x%p\n", ata_pktp));

	/* update resid */

	spktp->pkt_resid = ata_pktp->ap_resid;

	if (ata_pktp->ap_flags & AP_SENT_CMD) {
		spktp->pkt_state |=
		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
	}
	if (ata_pktp->ap_flags & AP_XFERRED_DATA) {
		spktp->pkt_state |= STATE_XFERRED_DATA;
	}

	if (ata_pktp->ap_flags & AP_GOT_STATUS) {
		spktp->pkt_state |= STATE_GOT_STATUS;
	}

	/* check for fatal errors */

	if (ata_pktp->ap_flags & AP_TRAN_ERROR) {
		spktp->pkt_reason = CMD_TRAN_ERR;
	} else if (ata_pktp->ap_flags & AP_BUS_RESET) {
		spktp->pkt_reason = CMD_RESET;
		spktp->pkt_statistics |= STAT_BUS_RESET;
	} else if (ata_pktp->ap_flags & AP_DEV_RESET) {
		spktp->pkt_reason = CMD_RESET;
		spktp->pkt_statistics |= STAT_DEV_RESET;
	} else if (ata_pktp->ap_flags & AP_ABORT) {
		spktp->pkt_reason = CMD_ABORTED;
		spktp->pkt_statistics |= STAT_ABORTED;
	} else if (ata_pktp->ap_flags & AP_TIMEOUT) {
		spktp->pkt_reason = CMD_TIMEOUT;
		spktp->pkt_statistics |= STAT_TIMEOUT;
	} else {
		spktp->pkt_reason = CMD_CMPLT;
	}

	/* non-fatal errors */

	if (ata_pktp->ap_flags & AP_ERROR)
		scsi_stat->sts_chk = 1;
	else
		scsi_stat->sts_chk = 0;

	if (ata_pktp->ap_flags & AP_ARQ_ERROR) {
		ADBG_ARQ(("atapi_complete ARQ error 0x%p\n", ata_pktp));
		spktp->pkt_reason = CMD_TRAN_ERR;

	} else if (ata_pktp->ap_flags & AP_ARQ_OKAY) {
		static struct scsi_status zero_scsi_status = { 0 };
		struct scsi_arq_status *arqp;

		ADBG_ARQ(("atapi_complete ARQ okay 0x%p\n", ata_pktp));
		spktp->pkt_state |= STATE_ARQ_DONE;
		arqp = ata_pktp->ap_scbp;
		arqp->sts_rqpkt_reason = CMD_CMPLT;
		arqp->sts_rqpkt_state = STATE_XFERRED_DATA;
		arqp->sts_rqpkt_status = zero_scsi_status;
		arqp->sts_rqpkt_resid = 0;
		arqp->sts_rqpkt_statistics = 0;

	}

	ADBG_TRANSPORT(("atapi_complete: reason = 0x%x stats = 0x%x "
	    "sts_chk = %d\n", spktp->pkt_reason, spktp->pkt_statistics,
	    scsi_stat->sts_chk));

	if (do_callback && (spktp->pkt_comp))
		(*spktp->pkt_comp)(spktp);
}



/*
 * Update the IDENTIFY PACKET DEVICE info
 */

static int
atapi_id_update(
	ata_ctl_t	*ata_ctlp,
	ata_drv_t	*ata_drvp,
	ata_pkt_t	*ata_pktp)
{
	ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
	caddr_t		 ioaddr1 = ata_ctlp->ac_ioaddr1;
	ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
	caddr_t		 ioaddr2 = ata_ctlp->ac_ioaddr2;
	struct ata_id	*aidp;
	int	rc;

	/*
	 * select the appropriate drive and LUN
	 */
	ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD,
	    ata_drvp->ad_drive_bits);
	ata_nsecwait(400);

	/*
	 * make certain the drive is selected, and wait for not busy
	 */
	if (!ata_wait(io_hdl2, ioaddr2, ATS_DRDY, ATS_BSY, 5 * 1000000)) {
		ADBG_ERROR(("atapi_id_update: select failed\n"));
		if (ata_pktp != NULL)
			ata_pktp->ap_flags |= AP_ERROR;
		return (ATA_FSM_RC_FINI);
	}

	if (ata_pktp != NULL)
		aidp = (struct ata_id *)ata_pktp->ap_v_addr;
	else
		aidp = &ata_drvp->ad_id;

	rc = atapi_id(ata_ctlp->ac_iohandle1, ata_ctlp->ac_ioaddr1,
	    ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2, aidp);
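	/*
	 * The ATA string fields (serial number, firmware revision and
	 * model) come back as 16-bit words with their characters
	 * swapped; swab() them into readable byte order.
	 */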
	if (rc) {
		swab(aidp->ai_drvser, aidp->ai_drvser,
		    sizeof (aidp->ai_drvser));
		swab(aidp->ai_fw, aidp->ai_fw,
		    sizeof (aidp->ai_fw));
		swab(aidp->ai_model, aidp->ai_model,
		    sizeof (aidp->ai_model));
	}

	if (ata_pktp == NULL)
		return (ATA_FSM_RC_FINI);

	if (!rc) {
		ata_pktp->ap_flags |= AP_ERROR;
	} else {
		ata_pktp->ap_flags |= AP_XFERRED_DATA;
	}
	return (ATA_FSM_RC_FINI);
}



/*
 * Both drives on the controller share a common pkt to do
 * ARQ processing. Therefore the pkt is only partially
 * initialized here. The rest of initialization occurs
 * just before starting the ARQ pkt when an error is
 * detected.
 */

void
atapi_init_arq(
	ata_ctl_t *ata_ctlp)
{
	ata_pkt_t *arq_pktp = ata_ctlp->ac_arq_pktp;

	arq_pktp->ap_cdbp = ata_ctlp->ac_arq_cdb;
	arq_pktp->ap_cdb_len = sizeof (ata_ctlp->ac_arq_cdb);
	arq_pktp->ap_start = atapi_fsm_start;
	arq_pktp->ap_intr = atapi_fsm_intr;
	arq_pktp->ap_complete = atapi_complete;
	arq_pktp->ap_flags = AP_ATAPI;
	arq_pktp->ap_cmd = ATC_PACKET;

	ata_ctlp->ac_arq_cdb[0] = SCMD_REQUEST_SENSE;
}

void
atapi_reset_dma_mode(ata_drv_t *ata_drvp, int need_wait)
{
	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;

	/*
	 * Some very old CD-ROM drives need a 500ms delay before the
	 * DMA mode can be reset. So after resetting the DMA mode on
	 * resume, check whether it was actually enabled on the device;
	 * if not, delay 500ms and reset it again. Normal DVD/CD-ROM
	 * drives thus see no delay on resume.
	 */
	if (need_wait == TRUE)
		drv_usecwait(5 * 100000);
	ata_reset_dma_mode(ata_drvp);
	(void) atapi_id_update(ata_ctlp, ata_drvp, NULL);
}