xref: /freebsd/sys/dev/ata/ata-all.c (revision bb15ca603fa442c72dde3f3cb8b46db6970e3950)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/ata.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/bio.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/sema.h>
44 #include <sys/taskqueue.h>
45 #include <vm/uma.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/pci/pcivar.h>
52 #include <ata_if.h>
53 
54 #ifdef ATA_CAM
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #endif
61 
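/*
 * When ATA_CAM is defined each channel is registered as a cam(4) SIM and
 * requests arrive as CCBs via ataaction(); the legacy /dev/ata control
 * device and the newbus child handling below are compiled out
 * (#ifndef ATA_CAM) in that configuration.
 */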
62 #ifndef ATA_CAM
63 /* device structure */
64 static  d_ioctl_t       ata_ioctl;
65 static struct cdevsw ata_cdevsw = {
66 	.d_version =    D_VERSION,
67 	.d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
68 	.d_ioctl =      ata_ioctl,
69 	.d_name =       "ata",
70 };
71 #endif
72 
73 /* prototypes */
74 #ifndef ATA_CAM
75 static void ata_boot_attach(void);
76 static device_t ata_add_child(device_t, struct ata_device *, int);
77 #else
78 static void ataaction(struct cam_sim *sim, union ccb *ccb);
79 static void atapoll(struct cam_sim *sim);
80 #endif
81 static void ata_conn_event(void *, int);
82 static void bswap(int8_t *, int);
83 static void btrim(int8_t *, int);
84 static void bpack(int8_t *, int8_t *, int);
85 static void ata_interrupt_locked(void *data);
86 #ifdef ATA_CAM
87 static void ata_periodic_poll(void *data);
88 #endif
89 
90 /* global vars */
91 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
92 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
93 struct intr_config_hook *ata_delayed_attach = NULL;
94 devclass_t ata_devclass;
95 uma_zone_t ata_request_zone;
96 uma_zone_t ata_composite_zone;
97 int ata_wc = 1;
98 int ata_setmax = 0;
99 int ata_dma_check_80pin = 1;
100 
101 /* local vars */
102 static int ata_dma = 1;
103 static int atapi_dma = 1;
104 
105 /* sysctl vars */
106 static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
107 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
108 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
109 	   "ATA disk DMA mode control");
110 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
111 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
112 	   CTLFLAG_RW, &ata_dma_check_80pin, 1,
113 	   "Check for 80pin cable before setting ATA DMA mode");
114 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
115 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
116 	   "ATAPI device DMA mode control");
117 TUNABLE_INT("hw.ata.wc", &ata_wc);
118 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
119 	   "ATA disk write caching");
120 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
121 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
122 	   "ATA disk set max native address");
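/*
 * The tunables above can be set from loader.conf, e.g. "hw.ata.wc=0" to
 * disable disk write caching or "hw.ata.ata_dma=0" to force PIO; only
 * hw.ata.ata_dma_check_80pin is also changeable at runtime via sysctl
 * (CTLFLAG_RW), the rest are read-only tunables (CTLFLAG_RDTUN).
 */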
123 #ifdef ATA_CAM
124 FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");
125 #endif
126 
127 /*
128  * newbus device interface related functions
129  */
130 int
131 ata_probe(device_t dev)
132 {
133     return 0;
134 }
135 
136 int
137 ata_attach(device_t dev)
138 {
139     struct ata_channel *ch = device_get_softc(dev);
140     int error, rid;
141 #ifdef ATA_CAM
142     struct cam_devq *devq;
143     const char *res;
144     char buf[64];
145     int i, mode;
146 #endif
147 
148     /* check that we have a virgin channel to attach */
149     if (ch->r_irq)
150 	return EEXIST;
151 
152     /* initialize the softc basics */
153     ch->dev = dev;
154     ch->state = ATA_IDLE;
155     bzero(&ch->state_mtx, sizeof(struct mtx));
156     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
157     bzero(&ch->queue_mtx, sizeof(struct mtx));
158     mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
159     TAILQ_INIT(&ch->ata_queue);
160     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
161 #ifdef ATA_CAM
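	/*
	 * Initialize per-target transfer settings: honor "devN.mode" or
	 * channel-wide "mode" device hints, default the per-transfer byte
	 * count (8192 for SATA, MAXPHYS otherwise) and advertise host/device
	 * power-management capabilities according to pm_level.
	 */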
162 	for (i = 0; i < 16; i++) {
163 		ch->user[i].mode = 0;
164 		snprintf(buf, sizeof(buf), "dev%d.mode", i);
165 		if (resource_string_value(device_get_name(dev),
166 		    device_get_unit(dev), buf, &res) == 0)
167 			mode = ata_str2mode(res);
168 		else if (resource_string_value(device_get_name(dev),
169 		    device_get_unit(dev), "mode", &res) == 0)
170 			mode = ata_str2mode(res);
171 		else
172 			mode = -1;
173 		if (mode >= 0)
174 			ch->user[i].mode = mode;
175 		if (ch->flags & ATA_SATA)
176 			ch->user[i].bytecount = 8192;
177 		else
178 			ch->user[i].bytecount = MAXPHYS;
179 		ch->user[i].caps = 0;
180 		ch->curr[i] = ch->user[i];
181 		if (ch->pm_level > 0)
182 			ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
183 		if (ch->pm_level > 1)
184 			ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
185 	}
186 	callout_init(&ch->poll_callout, 1);
187 #endif
188 
189     /* reset the controller HW, the channel and device(s) */
190     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
191 	pause("ataatch", 1);
192 #ifndef ATA_CAM
193     ATA_RESET(dev);
194 #endif
195     ATA_LOCKING(dev, ATA_LF_UNLOCK);
196 
197     /* allocate DMA resources if DMA HW is present */
198     if (ch->dma.alloc)
199 	ch->dma.alloc(dev);
200 
201     /* setup interrupt delivery */
202     rid = ATA_IRQ_RID;
203     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
204 				       RF_SHAREABLE | RF_ACTIVE);
205     if (!ch->r_irq) {
206 	device_printf(dev, "unable to allocate interrupt\n");
207 	return ENXIO;
208     }
209     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
210 				ata_interrupt, ch, &ch->ih))) {
211 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
212 	device_printf(dev, "unable to setup interrupt\n");
213 	return error;
214     }
215 
216 #ifndef ATA_CAM
217     /* probe and attach devices on this channel unless we are in early boot */
218     if (!ata_delayed_attach)
219 	ata_identify(dev);
220     return (0);
221 #else
222 	if (ch->flags & ATA_PERIODIC_POLL)
223 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
224 	mtx_lock(&ch->state_mtx);
225 	/* Create the device queue for our SIM. */
226 	devq = cam_simq_alloc(1);
227 	if (devq == NULL) {
228 		device_printf(dev, "Unable to allocate simq\n");
229 		error = ENOMEM;
230 		goto err1;
231 	}
232 	/* Construct SIM entry */
233 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
234 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
235 	if (ch->sim == NULL) {
236 		device_printf(dev, "unable to allocate sim\n");
237 		cam_simq_free(devq);
238 		error = ENOMEM;
239 		goto err1;
240 	}
241 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
242 		device_printf(dev, "unable to register xpt bus\n");
243 		error = ENXIO;
244 		goto err2;
245 	}
246 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
247 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
248 		device_printf(dev, "unable to create path\n");
249 		error = ENXIO;
250 		goto err3;
251 	}
252 	mtx_unlock(&ch->state_mtx);
253 	return (0);
254 
255 err3:
256 	xpt_bus_deregister(cam_sim_path(ch->sim));
257 err2:
258 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
259 	ch->sim = NULL;
260 err1:
261 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
262 	mtx_unlock(&ch->state_mtx);
263 	if (ch->flags & ATA_PERIODIC_POLL)
264 		callout_drain(&ch->poll_callout);
265 	return (error);
266 #endif
267 }
268 
269 int
270 ata_detach(device_t dev)
271 {
272     struct ata_channel *ch = device_get_softc(dev);
273 #ifndef ATA_CAM
274     device_t *children;
275     int nchildren, i;
276 #endif
277 
278     /* check that we have a valid channel to detach */
279     if (!ch->r_irq)
280 	return ENXIO;
281 
282     /* grab the channel lock so no new requests get launched */
283     mtx_lock(&ch->state_mtx);
284     ch->state |= ATA_STALL_QUEUE;
285     mtx_unlock(&ch->state_mtx);
286 #ifdef ATA_CAM
287     if (ch->flags & ATA_PERIODIC_POLL)
288 	callout_drain(&ch->poll_callout);
289 #endif
290 
291 #ifndef ATA_CAM
292     /* detach & delete all children */
293     if (!device_get_children(dev, &children, &nchildren)) {
294 	for (i = 0; i < nchildren; i++)
295 	    if (children[i])
296 		device_delete_child(dev, children[i]);
297 	free(children, M_TEMP);
298     }
299 #endif
300     taskqueue_drain(taskqueue_thread, &ch->conntask);
301 
302 #ifdef ATA_CAM
303 	mtx_lock(&ch->state_mtx);
304 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
305 	xpt_free_path(ch->path);
306 	xpt_bus_deregister(cam_sim_path(ch->sim));
307 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
308 	ch->sim = NULL;
309 	mtx_unlock(&ch->state_mtx);
310 #endif
311 
312     /* release resources */
313     bus_teardown_intr(dev, ch->r_irq, ch->ih);
314     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
315     ch->r_irq = NULL;
316 
317     /* free DMA resources if DMA HW is present */
318     if (ch->dma.free)
319 	ch->dma.free(dev);
320 
321     mtx_destroy(&ch->state_mtx);
322     mtx_destroy(&ch->queue_mtx);
323     return 0;
324 }
325 
326 static void
327 ata_conn_event(void *context, int dummy)
328 {
329 	device_t dev = (device_t)context;
330 #ifdef ATA_CAM
331 	struct ata_channel *ch = device_get_softc(dev);
332 	union ccb *ccb;
333 
334 	mtx_lock(&ch->state_mtx);
335 	if (ch->sim == NULL) {
336 		mtx_unlock(&ch->state_mtx);
337 		return;
338 	}
339 	ata_reinit(dev);
340 	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
341 		return;
342 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
343 	    cam_sim_path(ch->sim),
344 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
345 		xpt_free_ccb(ccb);
346 		return;
347 	}
348 	xpt_rescan(ccb);
349 	mtx_unlock(&ch->state_mtx);
350 #else
351 	ata_reinit(dev);
352 #endif
353 }
354 
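/*
 * Reset the channel and recover any request that was in flight.  Without
 * ATA_CAM the children are reinited and a still-valid request is requeued;
 * with ATA_CAM the SIM queue is frozen, the running request is completed
 * with ERESTART and the XPT is notified of the bus reset.
 */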
355 int
356 ata_reinit(device_t dev)
357 {
358     struct ata_channel *ch = device_get_softc(dev);
359     struct ata_request *request;
360 #ifndef ATA_CAM
361     device_t *children;
362     int nchildren, i;
363 
364     /* check that we have a valid channel to reinit */
365     if (!ch || !ch->r_irq)
366 	return ENXIO;
367 
368     if (bootverbose)
369 	device_printf(dev, "reiniting channel ..\n");
370 
371     /* poll for locking the channel */
372     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
373 	pause("atarini", 1);
374 
375     /* catch any request currently in ch->running */
376     mtx_lock(&ch->state_mtx);
377     if (ch->state & ATA_STALL_QUEUE) {
378 	/* Recursive reinits and reinits during detach are prohibited. */
379 	mtx_unlock(&ch->state_mtx);
380 	return (ENXIO);
381     }
382     if ((request = ch->running))
383 	callout_stop(&request->callout);
384     ch->running = NULL;
385 
386     /* unconditionally grab the channel lock */
387     ch->state |= ATA_STALL_QUEUE;
388     mtx_unlock(&ch->state_mtx);
389 
390     /* reset the controller HW, the channel and device(s) */
391     ATA_RESET(dev);
392 
393     /* reinit the children and delete any that fail */
394     if (!device_get_children(dev, &children, &nchildren)) {
395 	mtx_lock(&Giant);       /* newbus suckage it needs Giant */
396 	for (i = 0; i < nchildren; i++) {
397 	    /* did any children go missing ? */
398 	    if (children[i] && device_is_attached(children[i]) &&
399 		ATA_REINIT(children[i])) {
400 		/*
401 		 * if we had a running request and its device matches
402 		 * this child we need to inform the request that the
403 		 * device is gone.
404 		 */
405 		if (request && request->dev == children[i]) {
406 		    request->result = ENXIO;
407 		    device_printf(request->dev, "FAILURE - device detached\n");
408 
409 		    /* if it did not time out, finish the request here */
410 		    if (!(request->flags & ATA_R_TIMEOUT))
411 			    ata_finish(request);
412 		    request = NULL;
413 		}
414 		device_delete_child(dev, children[i]);
415 	    }
416 	}
417 	free(children, M_TEMP);
418 	mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
419     }
420 
421     /* if we still have a good request, put it on the queue again */
422     if (request && !(request->flags & ATA_R_TIMEOUT)) {
423 	device_printf(request->dev,
424 		      "WARNING - %s requeued due to channel reset",
425 		      ata_cmd2str(request));
426 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
427 	    printf(" LBA=%ju", request->u.ata.lba);
428 	printf("\n");
429 	request->flags |= ATA_R_REQUEUE;
430 	ata_queue_request(request);
431     }
432 
433     /* we're done, release the channel for new work */
434     mtx_lock(&ch->state_mtx);
435     ch->state = ATA_IDLE;
436     mtx_unlock(&ch->state_mtx);
437     ATA_LOCKING(dev, ATA_LF_UNLOCK);
438 
439     /* Add new children. */
440 /*    ata_identify(dev); */
441 
442     if (bootverbose)
443 	device_printf(dev, "reinit done ..\n");
444 
445     /* kick off requests on the queue */
446     ata_start(dev);
447 #else
448 	xpt_freeze_simq(ch->sim, 1);
449 	if ((request = ch->running)) {
450 		ch->running = NULL;
451 		if (ch->state == ATA_ACTIVE)
452 		    ch->state = ATA_IDLE;
453 		callout_stop(&request->callout);
454 		if (ch->dma.unload)
455 		    ch->dma.unload(request);
456 		request->result = ERESTART;
457 		ata_cam_end_transaction(dev, request);
458 	}
459 	/* reset the controller HW, the channel and device(s) */
460 	ATA_RESET(dev);
461 	/* Tell the XPT about the event */
462 	xpt_async(AC_BUS_RESET, ch->path, NULL);
463 	xpt_release_simq(ch->sim, TRUE);
464 #endif
465 	return(0);
466 }
467 
468 int
469 ata_suspend(device_t dev)
470 {
471     struct ata_channel *ch;
472 
473     /* check for valid device */
474     if (!dev || !(ch = device_get_softc(dev)))
475 	return ENXIO;
476 
477 #ifdef ATA_CAM
478 	if (ch->flags & ATA_PERIODIC_POLL)
479 		callout_drain(&ch->poll_callout);
480 	mtx_lock(&ch->state_mtx);
481 	xpt_freeze_simq(ch->sim, 1);
482 	while (ch->state != ATA_IDLE)
483 		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
484 	mtx_unlock(&ch->state_mtx);
485 #else
486     /* wait for the channel to be IDLE or detached before suspending */
487     while (ch->r_irq) {
488 	mtx_lock(&ch->state_mtx);
489 	if (ch->state == ATA_IDLE) {
490 	    ch->state = ATA_ACTIVE;
491 	    mtx_unlock(&ch->state_mtx);
492 	    break;
493 	}
494 	mtx_unlock(&ch->state_mtx);
495 	tsleep(ch, PRIBIO, "atasusp", hz/10);
496     }
497     ATA_LOCKING(dev, ATA_LF_UNLOCK);
498 #endif
499     return(0);
500 }
501 
502 int
503 ata_resume(device_t dev)
504 {
505     struct ata_channel *ch;
506     int error;
507 
508     /* check for valid device */
509     if (!dev || !(ch = device_get_softc(dev)))
510 	return ENXIO;
511 
512 #ifdef ATA_CAM
513 	mtx_lock(&ch->state_mtx);
514 	error = ata_reinit(dev);
515 	xpt_release_simq(ch->sim, TRUE);
516 	mtx_unlock(&ch->state_mtx);
517 	if (ch->flags & ATA_PERIODIC_POLL)
518 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
519 #else
520     /* reinit the devices, we don't know what mode/state they are in */
521     error = ata_reinit(dev);
522     /* kick off requests on the queue */
523     ata_start(dev);
524 #endif
525     return error;
526 }
527 
528 void
529 ata_interrupt(void *data)
530 {
531 #ifdef ATA_CAM
532     struct ata_channel *ch = (struct ata_channel *)data;
533 
534     mtx_lock(&ch->state_mtx);
535 #endif
536     ata_interrupt_locked(data);
537 #ifdef ATA_CAM
538     mtx_unlock(&ch->state_mtx);
539 #endif
540 }
541 
542 static void
543 ata_interrupt_locked(void *data)
544 {
545     struct ata_channel *ch = (struct ata_channel *)data;
546     struct ata_request *request;
547 
548 #ifndef ATA_CAM
549     mtx_lock(&ch->state_mtx);
550 #endif
551     do {
552 	/* ignore the interrupt if it's not for us */
553 	if (ch->hw.status && !ch->hw.status(ch->dev))
554 	    break;
555 
556 	/* do we have a running request */
557 	if (!(request = ch->running))
558 	    break;
559 
560 	ATA_DEBUG_RQ(request, "interrupt");
561 
562 	/* safety check for the right state */
563 	if (ch->state == ATA_IDLE) {
564 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
565 	    break;
566 	}
567 
568 	/*
569 	 * we have the HW locks, so end the transaction for this request
570 	 * if it finishes immediately, otherwise wait for the next interrupt
571 	 */
572 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
573 	    ch->running = NULL;
574 	    if (ch->state == ATA_ACTIVE)
575 		ch->state = ATA_IDLE;
576 #ifdef ATA_CAM
577 	    ata_cam_end_transaction(ch->dev, request);
578 #else
579 	    mtx_unlock(&ch->state_mtx);
580 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
581 	    ata_finish(request);
582 #endif
583 	    return;
584 	}
585     } while (0);
586 #ifndef ATA_CAM
587     mtx_unlock(&ch->state_mtx);
588 #endif
589 }
590 
591 #ifdef ATA_CAM
592 static void
593 ata_periodic_poll(void *data)
594 {
595     struct ata_channel *ch = (struct ata_channel *)data;
596 
597     callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
598     ata_interrupt(ch);
599 }
600 #endif
601 
602 void
603 ata_print_cable(device_t dev, u_int8_t *who)
604 {
605     device_printf(dev,
606                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
607 }
608 
609 int
610 ata_check_80pin(device_t dev, int mode)
611 {
612     struct ata_device *atadev = device_get_softc(dev);
613 
614     if (!ata_dma_check_80pin) {
615         if (bootverbose)
616             device_printf(dev, "Skipping 80pin cable check\n");
617         return mode;
618     }
619 
620     if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
621         ata_print_cable(dev, "device");
622         mode = ATA_UDMA2;
623     }
624     return mode;
625 }
626 
627 void
628 ata_setmode(device_t dev)
629 {
630 	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
631 	struct ata_device *atadev = device_get_softc(dev);
632 	int error, mode, pmode;
633 
634 	mode = atadev->mode;
635 	do {
636 		pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
637 		mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
638 		if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
639 			mode = ata_check_80pin(dev, mode);
640 	} while (pmode != mode); /* Iterate until successful negotiation. */
641 	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
642 	if (bootverbose)
643 	        device_printf(dev, "%ssetting %s\n",
644 		    (error) ? "FAILURE " : "", ata_mode2str(mode));
645 	atadev->mode = mode;
646 }
647 
648 /*
649  * device related interfaces
650  */
651 #ifndef ATA_CAM
652 static int
653 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
654 	  int32_t flag, struct thread *td)
655 {
656     device_t device, *children;
657     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
658     int *value = (int *)data;
659     int i, nchildren, error = ENOTTY;
660 
661     switch (cmd) {
662     case IOCATAGMAXCHANNEL:
663 	/* In case we have channel 0..n this will return n+1. */
664 	*value = devclass_get_maxunit(ata_devclass);
665 	error = 0;
666 	break;
667 
668     case IOCATAREINIT:
669 	if (*value >= devclass_get_maxunit(ata_devclass) ||
670 	    !(device = devclass_get_device(ata_devclass, *value)) ||
671 	    !device_is_attached(device))
672 	    return ENXIO;
673 	error = ata_reinit(device);
674 	break;
675 
676     case IOCATAATTACH:
677 	if (*value >= devclass_get_maxunit(ata_devclass) ||
678 	    !(device = devclass_get_device(ata_devclass, *value)) ||
679 	    !device_is_attached(device))
680 	    return ENXIO;
681 	error = DEVICE_ATTACH(device);
682 	break;
683 
684     case IOCATADETACH:
685 	if (*value >= devclass_get_maxunit(ata_devclass) ||
686 	    !(device = devclass_get_device(ata_devclass, *value)) ||
687 	    !device_is_attached(device))
688 	    return ENXIO;
689 	error = DEVICE_DETACH(device);
690 	break;
691 
692     case IOCATADEVICES:
693 	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
694 	    !(device = devclass_get_device(ata_devclass, devices->channel)) ||
695 	    !device_is_attached(device))
696 	    return ENXIO;
697 	bzero(devices->name[0], 32);
698 	bzero(&devices->params[0], sizeof(struct ata_params));
699 	bzero(devices->name[1], 32);
700 	bzero(&devices->params[1], sizeof(struct ata_params));
701 	if (!device_get_children(device, &children, &nchildren)) {
702 	    for (i = 0; i < nchildren; i++) {
703 		if (children[i] && device_is_attached(children[i])) {
704 		    struct ata_device *atadev = device_get_softc(children[i]);
705 
706 		    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
707 			strncpy(devices->name[0],
708 				device_get_nameunit(children[i]), 32);
709 			bcopy(&atadev->param, &devices->params[0],
710 			      sizeof(struct ata_params));
711 		    }
712 		    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
713 			strncpy(devices->name[1],
714 				device_get_nameunit(children[i]), 32);
715 			bcopy(&atadev->param, &devices->params[1],
716 			      sizeof(struct ata_params));
717 		    }
718 		}
719 	    }
720 	    free(children, M_TEMP);
721 	    error = 0;
722 	}
723 	else
724 	    error = ENODEV;
725 	break;
726 
727     default:
728 	if (ata_raid_ioctl_func)
729 	    error = ata_raid_ioctl_func(cmd, data);
730     }
731     return error;
732 }
733 #endif
734 
735 int
736 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
737 {
738     struct ata_device *atadev = device_get_softc(dev);
739     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
740     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
741     struct ata_params *params = (struct ata_params *)data;
742     int *mode = (int *)data;
743     struct ata_request *request;
744     caddr_t buf;
745     int error;
746 
747     switch (cmd) {
748     case IOCATAREQUEST:
749 	if (ioc_request->count >
750 	    (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
751 		return (EFBIG);
752 	}
753 	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
754 	    return ENOMEM;
755 	}
756 	if (!(request = ata_alloc_request())) {
757 	    free(buf, M_ATA);
758 	    return  ENOMEM;
759 	}
760 	request->dev = atadev->dev;
761 	if (ioc_request->flags & ATA_CMD_WRITE) {
762 	    error = copyin(ioc_request->data, buf, ioc_request->count);
763 	    if (error) {
764 		free(buf, M_ATA);
765 		ata_free_request(request);
766 		return error;
767 	    }
768 	}
769 	if (ioc_request->flags & ATA_CMD_ATAPI) {
770 	    request->flags = ATA_R_ATAPI;
771 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
772 	}
773 	else {
774 	    request->u.ata.command = ioc_request->u.ata.command;
775 	    request->u.ata.feature = ioc_request->u.ata.feature;
776 	    request->u.ata.lba = ioc_request->u.ata.lba;
777 	    request->u.ata.count = ioc_request->u.ata.count;
778 	}
779 	request->timeout = ioc_request->timeout;
780 	request->data = buf;
781 	request->bytecount = ioc_request->count;
782 	request->transfersize = request->bytecount;
783 	if (ioc_request->flags & ATA_CMD_CONTROL)
784 	    request->flags |= ATA_R_CONTROL;
785 	if (ioc_request->flags & ATA_CMD_READ)
786 	    request->flags |= ATA_R_READ;
787 	if (ioc_request->flags & ATA_CMD_WRITE)
788 	    request->flags |= ATA_R_WRITE;
789 	ata_queue_request(request);
790 	if (request->flags & ATA_R_ATAPI) {
791 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
792 		  sizeof(struct atapi_sense));
793 	}
794 	else {
795 	    ioc_request->u.ata.command = request->u.ata.command;
796 	    ioc_request->u.ata.feature = request->u.ata.feature;
797 	    ioc_request->u.ata.lba = request->u.ata.lba;
798 	    ioc_request->u.ata.count = request->u.ata.count;
799 	}
800 	ioc_request->error = request->result;
801 	if (ioc_request->flags & ATA_CMD_READ)
802 	    error = copyout(buf, ioc_request->data, ioc_request->count);
803 	else
804 	    error = 0;
805 	free(buf, M_ATA);
806 	ata_free_request(request);
807 	return error;
808 
809     case IOCATAGPARM:
810 	ata_getparam(atadev, 0);
811 	bcopy(&atadev->param, params, sizeof(struct ata_params));
812 	return 0;
813 
814     case IOCATASMODE:
815 	atadev->mode = *mode;
816 	ata_setmode(dev);
817 	return 0;
818 
819     case IOCATAGMODE:
820 	*mode = atadev->mode |
821 	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
822 	return 0;
823     case IOCATASSPINDOWN:
824 	atadev->spindown = *mode;
825 	return 0;
826     case IOCATAGSPINDOWN:
827 	*mode = atadev->spindown;
828 	return 0;
829     default:
830 	return ENOTTY;
831     }
832 }
833 
834 #ifndef ATA_CAM
835 static void
836 ata_boot_attach(void)
837 {
838     struct ata_channel *ch;
839     int ctlr;
840 
841     mtx_lock(&Giant);       /* newbus suckage it needs Giant */
842 
843     /* kick off probe and attach on all channels */
844     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
845 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
846 	    ata_identify(ch->dev);
847 	}
848     }
849 
850     /* release the hook that got us here, we are only needed once during boot */
851     if (ata_delayed_attach) {
852 	config_intrhook_disestablish(ata_delayed_attach);
853 	free(ata_delayed_attach, M_TEMP);
854 	ata_delayed_attach = NULL;
855     }
856 
857     mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
858 }
859 #endif
860 
861 /*
862  * misc support functions
863  */
864 #ifndef ATA_CAM
865 static device_t
866 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
867 {
868     device_t child;
869 
870     if ((child = device_add_child(parent, NULL, unit))) {
871 	device_set_softc(child, atadev);
872 	device_quiet(child);
873 	atadev->dev = child;
874 	atadev->max_iosize = DEV_BSIZE;
875 	atadev->mode = ATA_PIO_MAX;
876     }
877     return child;
878 }
879 #endif
880 
881 int
882 ata_getparam(struct ata_device *atadev, int init)
883 {
884     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
885     struct ata_request *request;
886     const char *res;
887     char buf[64];
888     u_int8_t command = 0;
889     int error = ENOMEM, retries = 2, mode = -1;
890 
891     if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
892 	command = ATA_ATA_IDENTIFY;
893     if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
894 	command = ATA_ATAPI_IDENTIFY;
895     if (!command)
896 	return ENXIO;
897 
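    /*
     * Issue the ATA/ATAPI IDENTIFY selected above directly at the head of
     * the queue with a 1 second timeout, retrying once on failure; the raw
     * parameter page is read straight into atadev->param.
     */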
898     while (retries-- > 0 && error) {
899 	if (!(request = ata_alloc_request()))
900 	    break;
901 	request->dev = atadev->dev;
902 	request->timeout = 1;
903 	request->retries = 0;
904 	request->u.ata.command = command;
905 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
906 	if (!bootverbose)
907 	    request->flags |= ATA_R_QUIET;
908 	request->data = (void *)&atadev->param;
909 	request->bytecount = sizeof(struct ata_params);
910 	request->donecount = 0;
911 	request->transfersize = DEV_BSIZE;
912 	ata_queue_request(request);
913 	error = request->result;
914 	ata_free_request(request);
915     }
916 
917     if (!error && (isprint(atadev->param.model[0]) ||
918 		   isprint(atadev->param.model[1]))) {
919 	struct ata_params *atacap = &atadev->param;
920 	int16_t *ptr;
921 
922 	for (ptr = (int16_t *)atacap;
923 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
924 	    *ptr = le16toh(*ptr);
925 	}
926 	if (!(!strncmp(atacap->model, "FX", 2) ||
927 	      !strncmp(atacap->model, "NEC", 3) ||
928 	      !strncmp(atacap->model, "Pioneer", 7) ||
929 	      !strncmp(atacap->model, "SHARP", 5))) {
930 	    bswap(atacap->model, sizeof(atacap->model));
931 	    bswap(atacap->revision, sizeof(atacap->revision));
932 	    bswap(atacap->serial, sizeof(atacap->serial));
933 	}
934 	btrim(atacap->model, sizeof(atacap->model));
935 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
936 	btrim(atacap->revision, sizeof(atacap->revision));
937 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
938 	btrim(atacap->serial, sizeof(atacap->serial));
939 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
940 
941 	if (bootverbose)
942 	    printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
943 		   device_get_unit(ch->dev),
944 		   ata_unit2str(atadev),
945 		   ata_mode2str(ata_pmode(atacap)),
946 		   ata_mode2str(ata_wmode(atacap)),
947 		   ata_mode2str(ata_umode(atacap)),
948 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
949 
950 	if (init) {
951 	    char buffer[64];
952 
953 	    sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
954 	    device_set_desc_copy(atadev->dev, buffer);
955 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
956 		(atadev->param.config != ATA_CFA_MAGIC1) &&
957 		(atadev->param.config != ATA_CFA_MAGIC2)) {
958 		if (atapi_dma &&
959 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
960 		    ata_umode(&atadev->param) >= ATA_UDMA2)
961 		    atadev->mode = ATA_DMA_MAX;
962 	    }
963 	    else {
964 		if (ata_dma &&
965 		    (ata_umode(&atadev->param) > 0 ||
966 		     ata_wmode(&atadev->param) > 0))
967 		    atadev->mode = ATA_DMA_MAX;
968 	    }
969 	    snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
970 	    if (resource_string_value(device_get_name(ch->dev),
971 	        device_get_unit(ch->dev), buf, &res) == 0)
972 		    mode = ata_str2mode(res);
973 	    else if (resource_string_value(device_get_name(ch->dev),
974 		device_get_unit(ch->dev), "mode", &res) == 0)
975 		    mode = ata_str2mode(res);
976 	    if (mode >= 0)
977 		    atadev->mode = mode;
978 	}
979     }
980     else {
981 	if (!error)
982 	    error = ENXIO;
983     }
984     return error;
985 }
986 
987 #ifndef ATA_CAM
988 int
989 ata_identify(device_t dev)
990 {
991     struct ata_channel *ch = device_get_softc(dev);
992     struct ata_device *atadev;
993     device_t *children;
994     device_t child, master = NULL;
995     int nchildren, i, n = ch->devices;
996 
997     if (bootverbose)
998 	device_printf(dev, "Identifying devices: %08x\n", ch->devices);
999 
1000     mtx_lock(&Giant);
1001     /* Skip existing devices. */
1002     if (!device_get_children(dev, &children, &nchildren)) {
1003 	for (i = 0; i < nchildren; i++) {
1004 	    if (children[i] && (atadev = device_get_softc(children[i])))
1005 		n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
1006 	}
1007 	free(children, M_TEMP);
1008     }
1009     /* Create new devices. */
1010     if (bootverbose)
1011 	device_printf(dev, "New devices: %08x\n", n);
1012     if (n == 0) {
1013 	mtx_unlock(&Giant);
1014 	return (0);
1015     }
1016     for (i = 0; i < ATA_PM; ++i) {
1017 	if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
1018 	    int unit = -1;
1019 
1020 	    if (!(atadev = malloc(sizeof(struct ata_device),
1021 				  M_ATA, M_NOWAIT | M_ZERO))) {
1022 		device_printf(dev, "out of memory\n");
		mtx_unlock(&Giant);	/* don't leak Giant taken above */
1023 		return ENOMEM;
1024 	    }
1025 	    atadev->unit = i;
1026 #ifdef ATA_STATIC_ID
1027 	    if (n & (ATA_ATA_MASTER << i))
1028 		unit = (device_get_unit(dev) << 1) + i;
1029 #endif
1030 	    if ((child = ata_add_child(dev, atadev, unit))) {
1031 		/*
1032 		 * The PATA slave should be identified first so that cable
1033 		 * detection on the master works properly.
1034 		 */
1035 		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1036 			(n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
1037 		    master = child;
1038 		    continue;
1039 		}
1040 		if (ata_getparam(atadev, 1)) {
1041 		    device_delete_child(dev, child);
1042 		    free(atadev, M_ATA);
1043 		}
1044 	    }
1045 	    else
1046 		free(atadev, M_ATA);
1047 	}
1048     }
1049     if (master) {
1050 	atadev = device_get_softc(master);
1051 	if (ata_getparam(atadev, 1)) {
1052 	    device_delete_child(dev, master);
1053 	    free(atadev, M_ATA);
1054 	}
1055     }
1056     bus_generic_probe(dev);
1057     bus_generic_attach(dev);
1058     mtx_unlock(&Giant);
1059     return 0;
1060 }
1061 #endif
1062 
1063 void
1064 ata_default_registers(device_t dev)
1065 {
1066     struct ata_channel *ch = device_get_softc(dev);
1067 
1068     /* fill in the defaults from what's set up already */
1069     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1070     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1071     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1072     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1073     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1074     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1075     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1076     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
1077 }
1078 
1079 void
1080 ata_modify_if_48bit(struct ata_request *request)
1081 {
1082     struct ata_channel *ch = device_get_softc(request->parent);
1083     struct ata_device *atadev = device_get_softc(request->dev);
1084 
1085     request->flags &= ~ATA_R_48BIT;
1086 
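    /*
     * A 48-bit command is required when the transfer extends past the
     * 28-bit LBA limit or moves more than 256 sectors, and is only used
     * when the device advertises ATA_SUPPORT_ADDRESS48.
     */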
1087     if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1088 	 request->u.ata.count > 256) &&
1089 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1090 
1091 	/* translate command into 48bit version */
1092 	switch (request->u.ata.command) {
1093 	case ATA_READ:
1094 	    request->u.ata.command = ATA_READ48;
1095 	    break;
1096 	case ATA_READ_MUL:
1097 	    request->u.ata.command = ATA_READ_MUL48;
1098 	    break;
1099 	case ATA_READ_DMA:
1100 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1101 		if (request->transfersize > DEV_BSIZE)
1102 		    request->u.ata.command = ATA_READ_MUL48;
1103 		else
1104 		    request->u.ata.command = ATA_READ48;
1105 		request->flags &= ~ATA_R_DMA;
1106 	    }
1107 	    else
1108 		request->u.ata.command = ATA_READ_DMA48;
1109 	    break;
1110 	case ATA_READ_DMA_QUEUED:
1111 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1112 		if (request->transfersize > DEV_BSIZE)
1113 		    request->u.ata.command = ATA_READ_MUL48;
1114 		else
1115 		    request->u.ata.command = ATA_READ48;
1116 		request->flags &= ~ATA_R_DMA;
1117 	    }
1118 	    else
1119 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
1120 	    break;
1121 	case ATA_WRITE:
1122 	    request->u.ata.command = ATA_WRITE48;
1123 	    break;
1124 	case ATA_WRITE_MUL:
1125 	    request->u.ata.command = ATA_WRITE_MUL48;
1126 	    break;
1127 	case ATA_WRITE_DMA:
1128 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1129 		if (request->transfersize > DEV_BSIZE)
1130 		    request->u.ata.command = ATA_WRITE_MUL48;
1131 		else
1132 		    request->u.ata.command = ATA_WRITE48;
1133 		request->flags &= ~ATA_R_DMA;
1134 	    }
1135 	    else
1136 		request->u.ata.command = ATA_WRITE_DMA48;
1137 	    break;
1138 	case ATA_WRITE_DMA_QUEUED:
1139 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1140 		if (request->transfersize > DEV_BSIZE)
1141 		    request->u.ata.command = ATA_WRITE_MUL48;
1142 		else
1143 		    request->u.ata.command = ATA_WRITE48;
1145 		request->flags &= ~ATA_R_DMA;
1146 	    }
1147 	    else
1148 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1149 	    break;
1150 	case ATA_FLUSHCACHE:
1151 	    request->u.ata.command = ATA_FLUSHCACHE48;
1152 	    break;
1153 	case ATA_SET_MAX_ADDRESS:
1154 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1155 	    break;
1156 	default:
1157 	    return;
1158 	}
1159 	request->flags |= ATA_R_48BIT;
1160     }
1161     else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1162 
1163 	/* translate command into 48bit version */
1164 	switch (request->u.ata.command) {
1165 	case ATA_FLUSHCACHE:
1166 	    request->u.ata.command = ATA_FLUSHCACHE48;
1167 	    break;
1168 	case ATA_READ_NATIVE_MAX_ADDRESS:
1169 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1170 	    break;
1171 	case ATA_SET_MAX_ADDRESS:
1172 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1173 	    break;
1174 	default:
1175 	    return;
1176 	}
1177 	request->flags |= ATA_R_48BIT;
1178     }
1179 }
1180 
1181 void
1182 ata_udelay(int interval)
1183 {
1184     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1185     if (1 || interval < (1000000/hz) || ata_delayed_attach)
1186 	DELAY(interval);
1187     else
1188 	pause("ataslp", interval/(1000000/hz));
1189 }
1190 
1191 char *
1192 ata_unit2str(struct ata_device *atadev)
1193 {
1194     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1195     static char str[8];
1196 
1197     if (ch->devices & ATA_PORTMULTIPLIER)
1198 	sprintf(str, "port%d", atadev->unit);
1199     else
1200 	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1201     return str;
1202 }
1203 
1204 const char *
1205 ata_mode2str(int mode)
1206 {
1207     switch (mode) {
1208     case -1: return "UNSUPPORTED";
1209     case ATA_PIO0: return "PIO0";
1210     case ATA_PIO1: return "PIO1";
1211     case ATA_PIO2: return "PIO2";
1212     case ATA_PIO3: return "PIO3";
1213     case ATA_PIO4: return "PIO4";
1214     case ATA_WDMA0: return "WDMA0";
1215     case ATA_WDMA1: return "WDMA1";
1216     case ATA_WDMA2: return "WDMA2";
1217     case ATA_UDMA0: return "UDMA16";
1218     case ATA_UDMA1: return "UDMA25";
1219     case ATA_UDMA2: return "UDMA33";
1220     case ATA_UDMA3: return "UDMA40";
1221     case ATA_UDMA4: return "UDMA66";
1222     case ATA_UDMA5: return "UDMA100";
1223     case ATA_UDMA6: return "UDMA133";
1224     case ATA_SA150: return "SATA150";
1225     case ATA_SA300: return "SATA300";
1226     default:
1227 	if (mode & ATA_DMA_MASK)
1228 	    return "BIOSDMA";
1229 	else
1230 	    return "BIOSPIO";
1231     }
1232 }
1233 
1234 int
1235 ata_str2mode(const char *str)
1236 {
1237 
1238 	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1239 	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1240 	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1241 	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1242 	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1243 	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1244 	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1245 	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1246 	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1247 	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1248 	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1249 	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1250 	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1251 	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1252 	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1253 	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1254 	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1255 	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1256 	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1257 	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1258 	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1259 	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
1260 	return (-1);
1261 }
1262 
1263 const char *
1264 ata_satarev2str(int rev)
1265 {
1266 	switch (rev) {
1267 	case 0: return "";
1268 	case 1: return "SATA 1.5Gb/s";
1269 	case 2: return "SATA 3Gb/s";
1270 	case 3: return "SATA 6Gb/s";
1271 	case 0xff: return "SATA";
1272 	default: return "???";
1273 	}
1274 }
1275 
1276 int
1277 ata_atapi(device_t dev, int target)
1278 {
1279     struct ata_channel *ch = device_get_softc(dev);
1280 
1281     return (ch->devices & (ATA_ATAPI_MASTER << target));
1282 }
1283 
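/*
 * ata_pmode(), ata_wmode() and ata_umode() below derive the best PIO, WDMA
 * and UDMA modes a device claims in its IDENTIFY data; ata_wmode() and
 * ata_umode() return -1 when that transfer class is not supported at all.
 */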
1284 int
1285 ata_pmode(struct ata_params *ap)
1286 {
1287     if (ap->atavalid & ATA_FLAG_64_70) {
1288 	if (ap->apiomodes & 0x02)
1289 	    return ATA_PIO4;
1290 	if (ap->apiomodes & 0x01)
1291 	    return ATA_PIO3;
1292     }
1293     if (ap->mwdmamodes & 0x04)
1294 	return ATA_PIO4;
1295     if (ap->mwdmamodes & 0x02)
1296 	return ATA_PIO3;
1297     if (ap->mwdmamodes & 0x01)
1298 	return ATA_PIO2;
1299     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1300 	return ATA_PIO2;
1301     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1302 	return ATA_PIO1;
1303     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1304 	return ATA_PIO0;
1305     return ATA_PIO0;
1306 }
1307 
1308 int
1309 ata_wmode(struct ata_params *ap)
1310 {
1311     if (ap->mwdmamodes & 0x04)
1312 	return ATA_WDMA2;
1313     if (ap->mwdmamodes & 0x02)
1314 	return ATA_WDMA1;
1315     if (ap->mwdmamodes & 0x01)
1316 	return ATA_WDMA0;
1317     return -1;
1318 }
1319 
1320 int
1321 ata_umode(struct ata_params *ap)
1322 {
1323     if (ap->atavalid & ATA_FLAG_88) {
1324 	if (ap->udmamodes & 0x40)
1325 	    return ATA_UDMA6;
1326 	if (ap->udmamodes & 0x20)
1327 	    return ATA_UDMA5;
1328 	if (ap->udmamodes & 0x10)
1329 	    return ATA_UDMA4;
1330 	if (ap->udmamodes & 0x08)
1331 	    return ATA_UDMA3;
1332 	if (ap->udmamodes & 0x04)
1333 	    return ATA_UDMA2;
1334 	if (ap->udmamodes & 0x02)
1335 	    return ATA_UDMA1;
1336 	if (ap->udmamodes & 0x01)
1337 	    return ATA_UDMA0;
1338     }
1339     return -1;
1340 }
1341 
1342 int
1343 ata_limit_mode(device_t dev, int mode, int maxmode)
1344 {
1345     struct ata_device *atadev = device_get_softc(dev);
1346 
1347     if (maxmode && mode > maxmode)
1348 	mode = maxmode;
1349 
1350     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1351 	return min(mode, ata_umode(&atadev->param));
1352 
1353     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1354 	return min(mode, ata_wmode(&atadev->param));
1355 
1356     if (mode > ata_pmode(&atadev->param))
1357 	return min(mode, ata_pmode(&atadev->param));
1358 
1359     return mode;
1360 }
1361 
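/*
 * Helpers for cleaning up the ASCII strings in the IDENTIFY data: bswap()
 * byte-swaps each 16-bit word, btrim() maps NULs and underscores to spaces
 * and strips trailing blanks, and bpack() collapses runs of spaces.
 */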
1362 static void
1363 bswap(int8_t *buf, int len)
1364 {
1365     u_int16_t *ptr = (u_int16_t*)(buf + len);
1366 
1367     while (--ptr >= (u_int16_t*)buf)
1368 	*ptr = ntohs(*ptr);
1369 }
1370 
1371 static void
1372 btrim(int8_t *buf, int len)
1373 {
1374     int8_t *ptr;
1375 
1376     for (ptr = buf; ptr < buf+len; ++ptr)
1377 	if (!*ptr || *ptr == '_')
1378 	    *ptr = ' ';
1379     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1380 	*ptr = 0;
1381 }
1382 
1383 static void
1384 bpack(int8_t *src, int8_t *dst, int len)
1385 {
1386     int i, j, blank;
1387 
1388     for (i = j = blank = 0 ; i < len; i++) {
1389 	if (blank && src[i] == ' ') continue;
1390 	if (blank && src[i] != ' ') {
1391 	    dst[j++] = src[i];
1392 	    blank = 0;
1393 	    continue;
1394 	}
1395 	if (src[i] == ' ') {
1396 	    blank = 1;
1397 	    if (i == 0)
1398 		continue;
1399 	}
1400 	dst[j++] = src[i];
1401     }
1402     if (j < len)
1403 	dst[j] = 0x00;
1404 }
1405 
1406 #ifdef ATA_CAM
1407 void
1408 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1409 {
1410 	struct ata_channel *ch = device_get_softc(dev);
1411 	struct ata_request *request;
1412 
1413 	if (!(request = ata_alloc_request())) {
1414 		device_printf(dev, "FAILURE - out of memory in start\n");
1415 		ccb->ccb_h.status = CAM_REQ_INVALID;
1416 		xpt_done(ccb);
1417 		return;
1418 	}
1419 	bzero(request, sizeof(*request));
1420 
1421 	/* setup request */
1422 	request->dev = NULL;
1423 	request->parent = dev;
1424 	request->unit = ccb->ccb_h.target_id;
1425 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1426 		request->data = ccb->ataio.data_ptr;
1427 		request->bytecount = ccb->ataio.dxfer_len;
1428 		request->u.ata.command = ccb->ataio.cmd.command;
1429 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1430 					  (uint16_t)ccb->ataio.cmd.features;
1431 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1432 					(uint16_t)ccb->ataio.cmd.sector_count;
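		/*
		 * Assemble the LBA from the CCB taskfile: 48-bit commands
		 * take bits 24-47 from the "exp" registers, otherwise bits
		 * 24-27 come from the low nibble of the device register.
		 */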
1433 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1434 			request->flags |= ATA_R_48BIT;
1435 			request->u.ata.lba =
1436 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1437 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1438 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1439 		} else {
1440 			request->u.ata.lba =
1441 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1442 		}
1443 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1444 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1445 				       (uint64_t)ccb->ataio.cmd.lba_low;
1446 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1447 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1448 			request->flags |= ATA_R_DMA;
1449 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1450 			request->flags |= ATA_R_READ;
1451 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1452 			request->flags |= ATA_R_WRITE;
1453 	} else {
1454 		request->data = ccb->csio.data_ptr;
1455 		request->bytecount = ccb->csio.dxfer_len;
1456 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1457 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1458 		    request->u.atapi.ccb, ccb->csio.cdb_len);
1459 		request->flags |= ATA_R_ATAPI;
1460 		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1461 			request->flags |= ATA_R_ATAPI16;
1462 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1463 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1464 			request->flags |= ATA_R_DMA;
1465 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1466 			request->flags |= ATA_R_READ;
1467 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1468 			request->flags |= ATA_R_WRITE;
1469 	}
1470 	request->transfersize = min(request->bytecount,
1471 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1472 	request->retries = 0;
1473 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1474 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1475 	request->ccb = ccb;
1476 
1477 	ch->running = request;
1478 	ch->state = ATA_ACTIVE;
1479 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1480 	    ch->running = NULL;
1481 	    ch->state = ATA_IDLE;
1482 	    ata_cam_end_transaction(dev, request);
1483 	    return;
1484 	}
1485 }
1486 
1487 static void
1488 ata_cam_request_sense(device_t dev, struct ata_request *request)
1489 {
1490 	struct ata_channel *ch = device_get_softc(dev);
1491 	union ccb *ccb = request->ccb;
1492 
1493 	ch->requestsense = 1;
1494 
1495 	bzero(request, sizeof(*request));
1496 	request->dev = NULL;
1497 	request->parent = dev;
1498 	request->unit = ccb->ccb_h.target_id;
1499 	request->data = (void *)&ccb->csio.sense_data;
1500 	request->bytecount = ccb->csio.sense_len;
1501 	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
1502 	request->u.atapi.ccb[4] = ccb->csio.sense_len;
1503 	request->flags |= ATA_R_ATAPI;
1504 	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1505 		request->flags |= ATA_R_ATAPI16;
1506 	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1507 		request->flags |= ATA_R_DMA;
1508 	request->flags |= ATA_R_READ;
1509 	request->transfersize = min(request->bytecount,
1510 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1511 	request->retries = 0;
1512 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1513 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1514 	request->ccb = ccb;
1515 
1516 	ch->running = request;
1517 	ch->state = ATA_ACTIVE;
1518 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1519 		ch->running = NULL;
1520 		ch->state = ATA_IDLE;
1521 		ata_cam_end_transaction(dev, request);
1522 		return;
1523 	}
1524 }
1525 
1526 static void
1527 ata_cam_process_sense(device_t dev, struct ata_request *request)
1528 {
1529 	struct ata_channel *ch = device_get_softc(dev);
1530 	union ccb *ccb = request->ccb;
1531 	int fatalerr = 0;
1532 
1533 	ch->requestsense = 0;
1534 
1535 	if (request->flags & ATA_R_TIMEOUT)
1536 		fatalerr = 1;
1537 	if ((request->flags & ATA_R_TIMEOUT) == 0 &&
1538 	    (request->status & ATA_S_ERROR) == 0 &&
1539 	    request->result == 0) {
1540 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1541 	} else {
1542 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1543 		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1544 	}
1545 
1546 	ata_free_request(request);
1547 	xpt_done(ccb);
1548 	/* Do error recovery if needed. */
1549 	if (fatalerr)
1550 		ata_reinit(dev);
1551 }
1552 
1553 void
1554 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1555 {
1556 	struct ata_channel *ch = device_get_softc(dev);
1557 	union ccb *ccb = request->ccb;
1558 	int fatalerr = 0;
1559 
1560 	if (ch->requestsense) {
1561 		ata_cam_process_sense(dev, request);
1562 		return;
1563 	}
1564 
1565 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1566 	if (request->flags & ATA_R_TIMEOUT) {
1567 		xpt_freeze_simq(ch->sim, 1);
1568 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1569 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1570 		fatalerr = 1;
1571 	} else if (request->status & ATA_S_ERROR) {
1572 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1573 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1574 		} else {
1575 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1576 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1577 		}
1578 	} else if (request->result == ERESTART)
1579 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1580 	else if (request->result != 0)
1581 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1582 	else
1583 		ccb->ccb_h.status |= CAM_REQ_CMP;
1584 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1585 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1586 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1587 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1588 	}
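	/*
	 * Copy the result taskfile back into the CCB when the command
	 * failed or explicitly asked for the result registers.
	 */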
1589 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1590 	    ((request->status & ATA_S_ERROR) ||
1591 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1592 		struct ata_res *res = &ccb->ataio.res;
1593 		res->status = request->status;
1594 		res->error = request->error;
1595 		res->lba_low = request->u.ata.lba;
1596 		res->lba_mid = request->u.ata.lba >> 8;
1597 		res->lba_high = request->u.ata.lba >> 16;
1598 		res->device = request->u.ata.lba >> 24;
1599 		res->lba_low_exp = request->u.ata.lba >> 24;
1600 		res->lba_mid_exp = request->u.ata.lba >> 32;
1601 		res->lba_high_exp = request->u.ata.lba >> 40;
1602 		res->sector_count = request->u.ata.count;
1603 		res->sector_count_exp = request->u.ata.count >> 8;
1604 	}
1605 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1606 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1607 			ccb->ataio.resid =
1608 			    ccb->ataio.dxfer_len - request->donecount;
1609 		} else {
1610 			ccb->csio.resid =
1611 			    ccb->csio.dxfer_len - request->donecount;
1612 		}
1613 	}
1614 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
1615 	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1616 		ata_cam_request_sense(dev, request);
1617 	else {
1618 		ata_free_request(request);
1619 		xpt_done(ccb);
1620 	}
1621 	/* Do error recovery if needed. */
1622 	if (fatalerr)
1623 		ata_reinit(dev);
1624 }
1625 
1626 static int
1627 ata_check_ids(device_t dev, union ccb *ccb)
1628 {
1629 	struct ata_channel *ch = device_get_softc(dev);
1630 
1631 	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1632 		ccb->ccb_h.status = CAM_TID_INVALID;
1633 		xpt_done(ccb);
1634 		return (-1);
1635 	}
1636 	if (ccb->ccb_h.target_lun != 0) {
1637 		ccb->ccb_h.status = CAM_LUN_INVALID;
1638 		xpt_done(ccb);
1639 		return (-1);
1640 	}
1641 	return (0);
1642 }
1643 
1644 static void
1645 ataaction(struct cam_sim *sim, union ccb *ccb)
1646 {
1647 	device_t dev, parent;
1648 	struct ata_channel *ch;
1649 
1650 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1651 	    ccb->ccb_h.func_code));
1652 
1653 	ch = (struct ata_channel *)cam_sim_softc(sim);
1654 	dev = ch->dev;
1655 	switch (ccb->ccb_h.func_code) {
1656 	/* Common cases first */
1657 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
1658 	case XPT_SCSI_IO:
1659 		if (ata_check_ids(dev, ccb))
1660 			return;
1661 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1662 		    << ccb->ccb_h.target_id)) == 0) {
1663 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1664 			break;
1665 		}
1666 		if (ch->running)
1667 			device_printf(dev, "already running!\n");
1668 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1669 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1670 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
1671 			struct ata_res *res = &ccb->ataio.res;
1672 
1673 			bzero(res, sizeof(*res));
1674 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1675 				res->lba_high = 0;
1676 				res->lba_mid = 0;
1677 			} else {
1678 				res->lba_high = 0xeb;
1679 				res->lba_mid = 0x14;
1680 			}
1681 			ccb->ccb_h.status = CAM_REQ_CMP;
1682 			break;
1683 		}
1684 		ata_cam_begin_transaction(dev, ccb);
1685 		return;
1686 	case XPT_EN_LUN:		/* Enable LUN as a target */
1687 	case XPT_TARGET_IO:		/* Execute target I/O request */
1688 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
1689 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
1690 	case XPT_ABORT:			/* Abort the specified CCB */
1691 		/* XXX Implement */
1692 		ccb->ccb_h.status = CAM_REQ_INVALID;
1693 		break;
1694 	case XPT_SET_TRAN_SETTINGS:
1695 	{
1696 		struct	ccb_trans_settings *cts = &ccb->cts;
1697 		struct	ata_cam_device *d;
1698 
1699 		if (ata_check_ids(dev, ccb))
1700 			return;
1701 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1702 			d = &ch->curr[ccb->ccb_h.target_id];
1703 		else
1704 			d = &ch->user[ccb->ccb_h.target_id];
1705 		if (ch->flags & ATA_SATA) {
1706 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1707 				d->revision = cts->xport_specific.sata.revision;
1708 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
1709 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1710 					d->mode = ATA_SETMODE(ch->dev,
1711 					    ccb->ccb_h.target_id,
1712 					    cts->xport_specific.sata.mode);
1713 				} else
1714 					d->mode = cts->xport_specific.sata.mode;
1715 			}
1716 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1717 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1718 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1719 				d->atapi = cts->xport_specific.sata.atapi;
1720 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
1721 				d->caps = cts->xport_specific.sata.caps;
1722 		} else {
1723 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1724 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1725 					d->mode = ATA_SETMODE(ch->dev,
1726 					    ccb->ccb_h.target_id,
1727 					    cts->xport_specific.ata.mode);
1728 				} else
1729 					d->mode = cts->xport_specific.ata.mode;
1730 			}
1731 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1732 				d->bytecount = cts->xport_specific.ata.bytecount;
1733 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1734 				d->atapi = cts->xport_specific.ata.atapi;
1735 		}
1736 		ccb->ccb_h.status = CAM_REQ_CMP;
1737 		break;
1738 	}
1739 	case XPT_GET_TRAN_SETTINGS:
1740 	{
1741 		struct	ccb_trans_settings *cts = &ccb->cts;
1742 		struct  ata_cam_device *d;
1743 
1744 		if (ata_check_ids(dev, ccb))
1745 			return;
1746 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1747 			d = &ch->curr[ccb->ccb_h.target_id];
1748 		else
1749 			d = &ch->user[ccb->ccb_h.target_id];
1750 		cts->protocol = PROTO_ATA;
1751 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1752 		if (ch->flags & ATA_SATA) {
1753 			cts->transport = XPORT_SATA;
1754 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1755 			cts->xport_specific.sata.valid = 0;
1756 			cts->xport_specific.sata.mode = d->mode;
1757 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1758 			cts->xport_specific.sata.bytecount = d->bytecount;
1759 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1760 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1761 				cts->xport_specific.sata.revision =
1762 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1763 				if (cts->xport_specific.sata.revision != 0xff) {
1764 					cts->xport_specific.sata.valid |=
1765 					    CTS_SATA_VALID_REVISION;
1766 				}
1767 				cts->xport_specific.sata.caps =
1768 				    d->caps & CTS_SATA_CAPS_D;
1769 				if (ch->pm_level) {
1770 					cts->xport_specific.sata.caps |=
1771 					    CTS_SATA_CAPS_H_PMREQ;
1772 				}
1773 				cts->xport_specific.sata.caps &=
1774 				    ch->user[ccb->ccb_h.target_id].caps;
1775 				cts->xport_specific.sata.valid |=
1776 				    CTS_SATA_VALID_CAPS;
1777 			} else {
1778 				cts->xport_specific.sata.revision = d->revision;
1779 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1780 				cts->xport_specific.sata.caps = d->caps;
1781 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
1782 			}
1783 			cts->xport_specific.sata.atapi = d->atapi;
1784 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1785 		} else {
1786 			cts->transport = XPORT_ATA;
1787 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1788 			cts->xport_specific.ata.valid = 0;
1789 			cts->xport_specific.ata.mode = d->mode;
1790 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1791 			cts->xport_specific.ata.bytecount = d->bytecount;
1792 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1793 			cts->xport_specific.ata.atapi = d->atapi;
1794 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1795 		}
1796 		ccb->ccb_h.status = CAM_REQ_CMP;
1797 		break;
1798 	}
1799 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1800 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1801 		ata_reinit(dev);
1802 		ccb->ccb_h.status = CAM_REQ_CMP;
1803 		break;
1804 	case XPT_TERM_IO:		/* Terminate the I/O process */
1805 		/* XXX Implement */
1806 		ccb->ccb_h.status = CAM_REQ_INVALID;
1807 		break;
1808 	case XPT_PATH_INQ:		/* Path routing inquiry */
1809 	{
1810 		struct ccb_pathinq *cpi = &ccb->cpi;
1811 
1812 		parent = device_get_parent(dev);
1813 		cpi->version_num = 1; /* XXX??? */
1814 		cpi->hba_inquiry = PI_SDTR_ABLE;
1815 		cpi->target_sprt = 0;
1816 		cpi->hba_misc = PIM_SEQSCAN;
1817 		cpi->hba_eng_cnt = 0;
1818 		if (ch->flags & ATA_NO_SLAVE)
1819 			cpi->max_target = 0;
1820 		else
1821 			cpi->max_target = 1;
1822 		cpi->max_lun = 0;
1823 		cpi->initiator_id = 0;
1824 		cpi->bus_id = cam_sim_bus(sim);
1825 		if (ch->flags & ATA_SATA)
1826 			cpi->base_transfer_speed = 150000;
1827 		else
1828 			cpi->base_transfer_speed = 3300;
1829 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1830 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1831 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1832 		cpi->unit_number = cam_sim_unit(sim);
1833 		if (ch->flags & ATA_SATA)
1834 			cpi->transport = XPORT_SATA;
1835 		else
1836 			cpi->transport = XPORT_ATA;
1837 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1838 		cpi->protocol = PROTO_ATA;
1839 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1840 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1841 		if (device_get_devclass(device_get_parent(parent)) ==
1842 		    devclass_find("pci")) {
1843 			cpi->hba_vendor = pci_get_vendor(parent);
1844 			cpi->hba_device = pci_get_device(parent);
1845 			cpi->hba_subvendor = pci_get_subvendor(parent);
1846 			cpi->hba_subdevice = pci_get_subdevice(parent);
1847 		}
1848 		cpi->ccb_h.status = CAM_REQ_CMP;
1849 		break;
1850 	}
1851 	default:
1852 		ccb->ccb_h.status = CAM_REQ_INVALID;
1853 		break;
1854 	}
1855 	xpt_done(ccb);
1856 }
1857 
1858 static void
1859 atapoll(struct cam_sim *sim)
1860 {
1861 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1862 
1863 	ata_interrupt_locked(ch);
1864 }
1865 #endif
1866 
1867 /*
1868  * module handling
1869  */
1870 static int
1871 ata_module_event_handler(module_t mod, int what, void *arg)
1872 {
1873 #ifndef ATA_CAM
1874     static struct cdev *atacdev;
1875 #endif
1876 
1877     switch (what) {
1878     case MOD_LOAD:
1879 #ifndef ATA_CAM
1880 	/* register controlling device */
1881 	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1882 
1883 	if (cold) {
1884 	    /* register boot attach to be run when interrupts are enabled */
1885 	    if (!(ata_delayed_attach = (struct intr_config_hook *)
1886 				       malloc(sizeof(struct intr_config_hook),
1887 					      M_TEMP, M_NOWAIT | M_ZERO))) {
1888 		printf("ata: malloc of delayed attach hook failed\n");
1889 		return EIO;
1890 	    }
1891 	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1892 	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
1893 		printf("ata: config_intrhook_establish failed\n");
1894 		free(ata_delayed_attach, M_TEMP);
1895 	    }
1896 	}
1897 #endif
1898 	return 0;
1899 
1900     case MOD_UNLOAD:
1901 #ifndef ATA_CAM
1902 	/* deregister controlling device */
1903 	destroy_dev(atacdev);
1904 #endif
1905 	return 0;
1906 
1907     default:
1908 	return EOPNOTSUPP;
1909     }
1910 }
1911 
1912 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1913 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1914 MODULE_VERSION(ata, 1);
1915 #ifdef ATA_CAM
1916 MODULE_DEPEND(ata, cam, 1, 1, 1);
1917 #endif
1918 
1919 static void
1920 ata_init(void)
1921 {
1922     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1923 				   NULL, NULL, NULL, NULL, 0, 0);
1924     ata_composite_zone = uma_zcreate("ata_composite",
1925 				     sizeof(struct ata_composite),
1926 				     NULL, NULL, NULL, NULL, 0, 0);
1927 }
1928 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1929 
1930 static void
1931 ata_uninit(void)
1932 {
1933     uma_zdestroy(ata_composite_zone);
1934     uma_zdestroy(ata_request_zone);
1935 }
1936 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1937