xref: /freebsd/sys/dev/ata/ata-all.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/ata.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/bio.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/sema.h>
44 #include <sys/taskqueue.h>
45 #include <vm/uma.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/pci/pcivar.h>
52 #include <ata_if.h>
53 
54 #ifdef ATA_CAM
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #endif
61 
62 #ifndef ATA_CAM
63 /* device structure */
64 static  d_ioctl_t       ata_ioctl;
65 static struct cdevsw ata_cdevsw = {
66 	.d_version =    D_VERSION,
67 	.d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
68 	.d_ioctl =      ata_ioctl,
69 	.d_name =       "ata",
70 };
71 #endif
72 
73 /* prototypes */
74 #ifndef ATA_CAM
75 static void ata_boot_attach(void);
76 static device_t ata_add_child(device_t, struct ata_device *, int);
77 #else
78 static void ataaction(struct cam_sim *sim, union ccb *ccb);
79 static void atapoll(struct cam_sim *sim);
80 #endif
81 static void ata_conn_event(void *, int);
82 static void bswap(int8_t *, int);
83 static void btrim(int8_t *, int);
84 static void bpack(int8_t *, int8_t *, int);
85 static void ata_interrupt_locked(void *data);
86 #ifdef ATA_CAM
87 static void ata_periodic_poll(void *data);
88 #endif
89 
90 /* global vars */
91 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
92 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
93 struct intr_config_hook *ata_delayed_attach = NULL;
94 devclass_t ata_devclass;
95 uma_zone_t ata_request_zone;
96 uma_zone_t ata_composite_zone;
97 int ata_wc = 1;
98 int ata_setmax = 0;
99 int ata_dma_check_80pin = 1;
100 
101 /* local vars */
102 static int ata_dma = 1;
103 static int atapi_dma = 1;
104 
105 /* sysctl vars */
106 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
107 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
108 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
109 	   "ATA disk DMA mode control");
110 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
111 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
112 	   CTLFLAG_RW, &ata_dma_check_80pin, 1,
113 	   "Check for 80pin cable before setting ATA DMA mode");
114 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
115 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
116 	   "ATAPI device DMA mode control");
117 TUNABLE_INT("hw.ata.wc", &ata_wc);
118 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
119 	   "ATA disk write caching");
120 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
121 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
122 	   "ATA disk set max native address");
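
/*
 * All of the above are loader tunables; some are also writable sysctls.
 * Example loader.conf(5) entries (values illustrative):
 *	hw.ata.ata_dma="0"		# force PIO for ATA disks
 *	hw.ata.wc="0"			# disable disk write caching
 *	hw.ata.ata_dma_check_80pin="0"	# skip the 80-pin cable check
 */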
123 
124 /*
125  * newbus device interface related functions
126  */
127 int
128 ata_probe(device_t dev)
129 {
130     return 0;
131 }
132 
133 int
134 ata_attach(device_t dev)
135 {
136     struct ata_channel *ch = device_get_softc(dev);
137     int error, rid;
138 #ifdef ATA_CAM
139     struct cam_devq *devq;
140     const char *res;
141     char buf[64];
142     int i, mode;
143 #endif
144 
145     /* check that we have a virgin channel to attach */
146     if (ch->r_irq)
147 	return EEXIST;
148 
149     /* initialize the softc basics */
150     ch->dev = dev;
151     ch->state = ATA_IDLE;
152     bzero(&ch->state_mtx, sizeof(struct mtx));
153     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
154     bzero(&ch->queue_mtx, sizeof(struct mtx));
155     mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
156     TAILQ_INIT(&ch->ata_queue);
157     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
158 #ifdef ATA_CAM
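	/*
	 * Seed per-device transfer modes from kernel hints, e.g.
	 * hint.ata.0.dev0.mode="UDMA100" for a single device or
	 * hint.ata.0.mode="UDMA100" for the whole channel
	 * (values illustrative, see device.hints(5)).
	 */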
159 	for (i = 0; i < 16; i++) {
160 		ch->user[i].mode = 0;
161 		snprintf(buf, sizeof(buf), "dev%d.mode", i);
162 		if (resource_string_value(device_get_name(dev),
163 		    device_get_unit(dev), buf, &res) == 0)
164 			mode = ata_str2mode(res);
165 		else if (resource_string_value(device_get_name(dev),
166 		    device_get_unit(dev), "mode", &res) == 0)
167 			mode = ata_str2mode(res);
168 		else
169 			mode = -1;
170 		if (mode >= 0)
171 			ch->user[i].mode = mode;
172 		if (ch->flags & ATA_SATA)
173 			ch->user[i].bytecount = 8192;
174 		else
175 			ch->user[i].bytecount = MAXPHYS;
176 		ch->user[i].caps = 0;
177 		ch->curr[i] = ch->user[i];
178 		if (ch->pm_level > 0)
179 			ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
180 		if (ch->pm_level > 1)
181 			ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
182 	}
183 	callout_init(&ch->poll_callout, 1);
184 #endif
185 
186     /* reset the controller HW, the channel and device(s) */
187     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
188 	pause("ataatch", 1);
189 #ifndef ATA_CAM
190     ATA_RESET(dev);
191 #endif
192     ATA_LOCKING(dev, ATA_LF_UNLOCK);
193 
194     /* allocate DMA resources if DMA HW present */
195     if (ch->dma.alloc)
196 	ch->dma.alloc(dev);
197 
198     /* setup interrupt delivery */
199     rid = ATA_IRQ_RID;
200     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
201 				       RF_SHAREABLE | RF_ACTIVE);
202     if (!ch->r_irq) {
203 	device_printf(dev, "unable to allocate interrupt\n");
204 	return ENXIO;
205     }
206     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
207 				ata_interrupt, ch, &ch->ih))) {
208 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
209 	device_printf(dev, "unable to setup interrupt\n");
210 	return error;
211     }
212 
213 #ifndef ATA_CAM
214     /* probe and attach devices on this channel unless we are in early boot */
215     if (!ata_delayed_attach)
216 	ata_identify(dev);
217     return (0);
218 #else
219 	if (ch->flags & ATA_PERIODIC_POLL)
220 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
221 	mtx_lock(&ch->state_mtx);
222 	/* Create the device queue for our SIM. */
223 	devq = cam_simq_alloc(1);
224 	if (devq == NULL) {
225 		device_printf(dev, "Unable to allocate simq\n");
226 		error = ENOMEM;
227 		goto err1;
228 	}
229 	/* Construct SIM entry */
230 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
231 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
232 	if (ch->sim == NULL) {
233 		device_printf(dev, "unable to allocate sim\n");
234 		cam_simq_free(devq);
235 		error = ENOMEM;
236 		goto err1;
237 	}
238 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
239 		device_printf(dev, "unable to register xpt bus\n");
240 		error = ENXIO;
241 		goto err2;
242 	}
243 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
244 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
245 		device_printf(dev, "unable to create path\n");
246 		error = ENXIO;
247 		goto err3;
248 	}
249 	mtx_unlock(&ch->state_mtx);
250 	return (0);
251 
252 err3:
253 	xpt_bus_deregister(cam_sim_path(ch->sim));
254 err2:
255 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
256 	ch->sim = NULL;
257 err1:
258 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
259 	mtx_unlock(&ch->state_mtx);
260 	if (ch->flags & ATA_PERIODIC_POLL)
261 		callout_drain(&ch->poll_callout);
262 	return (error);
263 #endif
264 }
265 
266 int
267 ata_detach(device_t dev)
268 {
269     struct ata_channel *ch = device_get_softc(dev);
270 #ifndef ATA_CAM
271     device_t *children;
272     int nchildren, i;
273 #endif
274 
275     /* check that we have a valid channel to detach */
276     if (!ch->r_irq)
277 	return ENXIO;
278 
279     /* grab the channel lock so no new requests get launched */
280     mtx_lock(&ch->state_mtx);
281     ch->state |= ATA_STALL_QUEUE;
282     mtx_unlock(&ch->state_mtx);
283 #ifdef ATA_CAM
284     if (ch->flags & ATA_PERIODIC_POLL)
285 	callout_drain(&ch->poll_callout);
286 #endif
287 
288 #ifndef ATA_CAM
289     /* detach & delete all children */
290     if (!device_get_children(dev, &children, &nchildren)) {
291 	for (i = 0; i < nchildren; i++)
292 	    if (children[i])
293 		device_delete_child(dev, children[i]);
294 	free(children, M_TEMP);
295     }
296 #endif
297     taskqueue_drain(taskqueue_thread, &ch->conntask);
298 
299 #ifdef ATA_CAM
300 	mtx_lock(&ch->state_mtx);
301 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
302 	xpt_free_path(ch->path);
303 	xpt_bus_deregister(cam_sim_path(ch->sim));
304 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
305 	ch->sim = NULL;
306 	mtx_unlock(&ch->state_mtx);
307 #endif
308 
309     /* release resources */
310     bus_teardown_intr(dev, ch->r_irq, ch->ih);
311     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
312     ch->r_irq = NULL;
313 
314     /* free DMA resources if DMA HW present */
315     if (ch->dma.free)
316 	ch->dma.free(dev);
317 
318     mtx_destroy(&ch->state_mtx);
319     mtx_destroy(&ch->queue_mtx);
320     return 0;
321 }
322 
323 static void
324 ata_conn_event(void *context, int dummy)
325 {
326 	device_t dev = (device_t)context;
327 #ifdef ATA_CAM
328 	struct ata_channel *ch = device_get_softc(dev);
329 	union ccb *ccb;
330 
331 	mtx_lock(&ch->state_mtx);
332 	if (ch->sim == NULL) {
333 		mtx_unlock(&ch->state_mtx);
334 		return;
335 	}
336 	ata_reinit(dev);
337 	if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
		mtx_unlock(&ch->state_mtx);
338 		return;
	}
339 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
340 	    cam_sim_path(ch->sim),
341 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
342 		xpt_free_ccb(ccb);
		mtx_unlock(&ch->state_mtx);
343 		return;
344 	}
345 	xpt_rescan(ccb);
346 	mtx_unlock(&ch->state_mtx);
347 #else
348 	ata_reinit(dev);
349 #endif
350 }
351 
352 int
353 ata_reinit(device_t dev)
354 {
355     struct ata_channel *ch = device_get_softc(dev);
356     struct ata_request *request;
357 #ifndef ATA_CAM
358     device_t *children;
359     int nchildren, i;
360 
361     /* check that we have a valid channel to reinit */
362     if (!ch || !ch->r_irq)
363 	return ENXIO;
364 
365     if (bootverbose)
366 	device_printf(dev, "reiniting channel ..\n");
367 
368     /* poll for locking the channel */
369     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
370 	pause("atarini", 1);
371 
372     /* catch any request currently in ch->running */
373     mtx_lock(&ch->state_mtx);
374     if (ch->state & ATA_STALL_QUEUE) {
375 	/* Recursive reinits and reinits during detach prohibited. */
376 	mtx_unlock(&ch->state_mtx);
377 	return (ENXIO);
378     }
379     if ((request = ch->running))
380 	callout_stop(&request->callout);
381     ch->running = NULL;
382 
383     /* unconditionally grab the channel lock */
384     ch->state |= ATA_STALL_QUEUE;
385     mtx_unlock(&ch->state_mtx);
386 
387     /* reset the controller HW, the channel and device(s) */
388     ATA_RESET(dev);
389 
390     /* reinit the children and delete any that fail */
391     if (!device_get_children(dev, &children, &nchildren)) {
392 	mtx_lock(&Giant);       /* newbus suckage it needs Giant */
393 	for (i = 0; i < nchildren; i++) {
394 	    /* did any children go missing ? */
395 	    if (children[i] && device_is_attached(children[i]) &&
396 		ATA_REINIT(children[i])) {
397 		/*
398 		 * if we had a running request and its device matches
399 		 * this child we need to inform the request that the
400 		 * device is gone.
401 		 */
402 		if (request && request->dev == children[i]) {
403 		    request->result = ENXIO;
404 		    device_printf(request->dev, "FAILURE - device detached\n");
405 
406 		    /* if not timeout finish request here */
407 		    if (!(request->flags & ATA_R_TIMEOUT))
408 			    ata_finish(request);
409 		    request = NULL;
410 		}
411 		device_delete_child(dev, children[i]);
412 	    }
413 	}
414 	free(children, M_TEMP);
415 	mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
416     }
417 
418     /* if we still have a good request put it on the queue again */
419     if (request && !(request->flags & ATA_R_TIMEOUT)) {
420 	device_printf(request->dev,
421 		      "WARNING - %s requeued due to channel reset",
422 		      ata_cmd2str(request));
423 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
424 	    printf(" LBA=%ju", request->u.ata.lba);
425 	printf("\n");
426 	request->flags |= ATA_R_REQUEUE;
427 	ata_queue_request(request);
428     }
429 
430     /* we're done, release the channel for new work */
431     mtx_lock(&ch->state_mtx);
432     ch->state = ATA_IDLE;
433     mtx_unlock(&ch->state_mtx);
434     ATA_LOCKING(dev, ATA_LF_UNLOCK);
435 
436     /* Add new children. */
437 /*    ata_identify(dev); */
438 
439     if (bootverbose)
440 	device_printf(dev, "reinit done ..\n");
441 
442     /* kick off requests on the queue */
443     ata_start(dev);
444 #else
445 	xpt_freeze_simq(ch->sim, 1);
446 	if ((request = ch->running)) {
447 		ch->running = NULL;
448 		if (ch->state == ATA_ACTIVE)
449 		    ch->state = ATA_IDLE;
450 		callout_stop(&request->callout);
451 		if (ch->dma.unload)
452 		    ch->dma.unload(request);
453 		request->result = ERESTART;
454 		ata_cam_end_transaction(dev, request);
455 	}
456 	/* reset the controller HW, the channel and device(s) */
457 	ATA_RESET(dev);
458 	/* Tell the XPT about the event */
459 	xpt_async(AC_BUS_RESET, ch->path, NULL);
460 	xpt_release_simq(ch->sim, TRUE);
461 #endif
462 	return(0);
463 }
464 
465 int
466 ata_suspend(device_t dev)
467 {
468     struct ata_channel *ch;
469 
470     /* check for valid device */
471     if (!dev || !(ch = device_get_softc(dev)))
472 	return ENXIO;
473 
474 #ifdef ATA_CAM
475 	if (ch->flags & ATA_PERIODIC_POLL)
476 		callout_drain(&ch->poll_callout);
477 	mtx_lock(&ch->state_mtx);
478 	xpt_freeze_simq(ch->sim, 1);
479 	while (ch->state != ATA_IDLE)
480 		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
481 	mtx_unlock(&ch->state_mtx);
482 #else
483     /* wait for the channel to be IDLE or detached before suspending */
484     while (ch->r_irq) {
485 	mtx_lock(&ch->state_mtx);
486 	if (ch->state == ATA_IDLE) {
487 	    ch->state = ATA_ACTIVE;
488 	    mtx_unlock(&ch->state_mtx);
489 	    break;
490 	}
491 	mtx_unlock(&ch->state_mtx);
492 	tsleep(ch, PRIBIO, "atasusp", hz/10);
493     }
494     ATA_LOCKING(dev, ATA_LF_UNLOCK);
495 #endif
496     return(0);
497 }
498 
499 int
500 ata_resume(device_t dev)
501 {
502     struct ata_channel *ch;
503     int error;
504 
505     /* check for valid device */
506     if (!dev || !(ch = device_get_softc(dev)))
507 	return ENXIO;
508 
509 #ifdef ATA_CAM
510 	mtx_lock(&ch->state_mtx);
511 	error = ata_reinit(dev);
512 	xpt_release_simq(ch->sim, TRUE);
513 	mtx_unlock(&ch->state_mtx);
514 	if (ch->flags & ATA_PERIODIC_POLL)
515 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
516 #else
517     /* reinit the devices, we don't know what mode/state they are in */
518     error = ata_reinit(dev);
519     /* kick off requests on the queue */
520     ata_start(dev);
521 #endif
522     return error;
523 }
524 
525 void
526 ata_interrupt(void *data)
527 {
528 #ifdef ATA_CAM
529     struct ata_channel *ch = (struct ata_channel *)data;
530 
531     mtx_lock(&ch->state_mtx);
532 #endif
533     ata_interrupt_locked(data);
534 #ifdef ATA_CAM
535     mtx_unlock(&ch->state_mtx);
536 #endif
537 }
538 
539 static void
540 ata_interrupt_locked(void *data)
541 {
542     struct ata_channel *ch = (struct ata_channel *)data;
543     struct ata_request *request;
544 
545 #ifndef ATA_CAM
546     mtx_lock(&ch->state_mtx);
547 #endif
548     do {
549 	/* ignore interrupt if it's not for us */
550 	if (ch->hw.status && !ch->hw.status(ch->dev))
551 	    break;
552 
553 	/* do we have a running request */
554 	if (!(request = ch->running))
555 	    break;
556 
557 	ATA_DEBUG_RQ(request, "interrupt");
558 
559 	/* safety check for the right state */
560 	if (ch->state == ATA_IDLE) {
561 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
562 	    break;
563 	}
564 
565 	/*
566 	 * we have the HW locks, so end the transaction for this request
567 	 * if it finishes immediately, otherwise wait for the next interrupt
568 	 */
569 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
570 	    ch->running = NULL;
571 	    if (ch->state == ATA_ACTIVE)
572 		ch->state = ATA_IDLE;
573 #ifdef ATA_CAM
574 	    ata_cam_end_transaction(ch->dev, request);
575 #else
576 	    mtx_unlock(&ch->state_mtx);
577 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
578 	    ata_finish(request);
579 #endif
580 	    return;
581 	}
582     } while (0);
583 #ifndef ATA_CAM
584     mtx_unlock(&ch->state_mtx);
585 #endif
586 }
587 
588 #ifdef ATA_CAM
589 static void
590 ata_periodic_poll(void *data)
591 {
592     struct ata_channel *ch = (struct ata_channel *)data;
593 
594     callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
595     ata_interrupt(ch);
596 }
597 #endif
598 
599 void
600 ata_print_cable(device_t dev, u_int8_t *who)
601 {
602     device_printf(dev,
603                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
604 }
605 
606 int
607 ata_check_80pin(device_t dev, int mode)
608 {
609     struct ata_device *atadev = device_get_softc(dev);
610 
611     if (!ata_dma_check_80pin) {
612         if (bootverbose)
613             device_printf(dev, "Skipping 80pin cable check\n");
614         return mode;
615     }
616 
617     if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
618         ata_print_cable(dev, "device");
619         mode = ATA_UDMA2;
620     }
621     return mode;
622 }
623 
624 void
625 ata_setmode(device_t dev)
626 {
627 	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
628 	struct ata_device *atadev = device_get_softc(dev);
629 	int error, mode, pmode;
630 
631 	mode = atadev->mode;
632 	do {
633 		pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
634 		mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
635 		if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
636 			mode = ata_check_80pin(dev, mode);
637 	} while (pmode != mode); /* Iterate until successful negotiation. */
638 	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
639 	if (bootverbose)
640 	        device_printf(dev, "%ssetting %s\n",
641 		    (error) ? "FAILURE " : "", ata_mode2str(mode));
642 	atadev->mode = mode;
643 }
644 
645 /*
646  * device related interfaces
647  */
648 #ifndef ATA_CAM
649 static int
650 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
651 	  int32_t flag, struct thread *td)
652 {
653     device_t device, *children;
654     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
655     int *value = (int *)data;
656     int i, nchildren, error = ENOTTY;
657 
658     switch (cmd) {
659     case IOCATAGMAXCHANNEL:
660 	/* In case we have channel 0..n this will return n+1. */
661 	*value = devclass_get_maxunit(ata_devclass);
662 	error = 0;
663 	break;
664 
665     case IOCATAREINIT:
666 	if (*value >= devclass_get_maxunit(ata_devclass) ||
667 	    !(device = devclass_get_device(ata_devclass, *value)) ||
668 	    !device_is_attached(device))
669 	    return ENXIO;
670 	error = ata_reinit(device);
671 	break;
672 
673     case IOCATAATTACH:
674 	if (*value >= devclass_get_maxunit(ata_devclass) ||
675 	    !(device = devclass_get_device(ata_devclass, *value)) ||
676 	    !device_is_attached(device))
677 	    return ENXIO;
678 	error = DEVICE_ATTACH(device);
679 	break;
680 
681     case IOCATADETACH:
682 	if (*value >= devclass_get_maxunit(ata_devclass) ||
683 	    !(device = devclass_get_device(ata_devclass, *value)) ||
684 	    !device_is_attached(device))
685 	    return ENXIO;
686 	error = DEVICE_DETACH(device);
687 	break;
688 
689     case IOCATADEVICES:
690 	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
691 	    !(device = devclass_get_device(ata_devclass, devices->channel)) ||
692 	    !device_is_attached(device))
693 	    return ENXIO;
694 	bzero(devices->name[0], 32);
695 	bzero(&devices->params[0], sizeof(struct ata_params));
696 	bzero(devices->name[1], 32);
697 	bzero(&devices->params[1], sizeof(struct ata_params));
698 	if (!device_get_children(device, &children, &nchildren)) {
699 	    for (i = 0; i < nchildren; i++) {
700 		if (children[i] && device_is_attached(children[i])) {
701 		    struct ata_device *atadev = device_get_softc(children[i]);
702 
703 		    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
704 			strncpy(devices->name[0],
705 				device_get_nameunit(children[i]), 32);
706 			bcopy(&atadev->param, &devices->params[0],
707 			      sizeof(struct ata_params));
708 		    }
709 		    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
710 			strncpy(devices->name[1],
711 				device_get_nameunit(children[i]), 32);
712 			bcopy(&atadev->param, &devices->params[1],
713 			      sizeof(struct ata_params));
714 		    }
715 		}
716 	    }
717 	    free(children, M_TEMP);
718 	    error = 0;
719 	}
720 	else
721 	    error = ENODEV;
722 	break;
723 
724     default:
725 	if (ata_raid_ioctl_func)
726 	    error = ata_raid_ioctl_func(cmd, data);
727     }
728     return error;
729 }
730 #endif
731 
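/*
 * Per-device ioctl backend: IOCATAREQUEST builds and queues a user
 * supplied ATA/ATAPI command, the remaining ioctls get/set identify
 * data, transfer mode and spindown timeout.
 */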
732 int
733 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
734 {
735     struct ata_device *atadev = device_get_softc(dev);
736     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
737     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
738     struct ata_params *params = (struct ata_params *)data;
739     int *mode = (int *)data;
740     struct ata_request *request;
741     caddr_t buf;
742     int error;
743 
744     switch (cmd) {
745     case IOCATAREQUEST:
746 	if (ioc_request->count >
747 	    (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
748 		return (EFBIG);
749 	}
750 	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
751 	    return ENOMEM;
752 	}
753 	if (!(request = ata_alloc_request())) {
754 	    free(buf, M_ATA);
755 	    return  ENOMEM;
756 	}
757 	request->dev = atadev->dev;
758 	if (ioc_request->flags & ATA_CMD_WRITE) {
759 	    error = copyin(ioc_request->data, buf, ioc_request->count);
760 	    if (error) {
761 		free(buf, M_ATA);
762 		ata_free_request(request);
763 		return error;
764 	    }
765 	}
766 	if (ioc_request->flags & ATA_CMD_ATAPI) {
767 	    request->flags = ATA_R_ATAPI;
768 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
769 	}
770 	else {
771 	    request->u.ata.command = ioc_request->u.ata.command;
772 	    request->u.ata.feature = ioc_request->u.ata.feature;
773 	    request->u.ata.lba = ioc_request->u.ata.lba;
774 	    request->u.ata.count = ioc_request->u.ata.count;
775 	}
776 	request->timeout = ioc_request->timeout;
777 	request->data = buf;
778 	request->bytecount = ioc_request->count;
779 	request->transfersize = request->bytecount;
780 	if (ioc_request->flags & ATA_CMD_CONTROL)
781 	    request->flags |= ATA_R_CONTROL;
782 	if (ioc_request->flags & ATA_CMD_READ)
783 	    request->flags |= ATA_R_READ;
784 	if (ioc_request->flags & ATA_CMD_WRITE)
785 	    request->flags |= ATA_R_WRITE;
786 	ata_queue_request(request);
787 	if (request->flags & ATA_R_ATAPI) {
788 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
789 		  sizeof(struct atapi_sense));
790 	}
791 	else {
792 	    ioc_request->u.ata.command = request->u.ata.command;
793 	    ioc_request->u.ata.feature = request->u.ata.feature;
794 	    ioc_request->u.ata.lba = request->u.ata.lba;
795 	    ioc_request->u.ata.count = request->u.ata.count;
796 	}
797 	ioc_request->error = request->result;
798 	if (ioc_request->flags & ATA_CMD_READ)
799 	    error = copyout(buf, ioc_request->data, ioc_request->count);
800 	else
801 	    error = 0;
802 	free(buf, M_ATA);
803 	ata_free_request(request);
804 	return error;
805 
806     case IOCATAGPARM:
807 	ata_getparam(atadev, 0);
808 	bcopy(&atadev->param, params, sizeof(struct ata_params));
809 	return 0;
810 
811     case IOCATASMODE:
812 	atadev->mode = *mode;
813 	ata_setmode(dev);
814 	return 0;
815 
816     case IOCATAGMODE:
817 	*mode = atadev->mode |
818 	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
819 	return 0;
820     case IOCATASSPINDOWN:
821 	atadev->spindown = *mode;
822 	return 0;
823     case IOCATAGSPINDOWN:
824 	*mode = atadev->spindown;
825 	return 0;
826     default:
827 	return ENOTTY;
828     }
829 }
830 
831 #ifndef ATA_CAM
832 static void
833 ata_boot_attach(void)
834 {
835     struct ata_channel *ch;
836     int ctlr;
837 
838     mtx_lock(&Giant);       /* newbus suckage it needs Giant */
839 
840     /* kick off probe and attach on all channels */
841     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
842 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
843 	    ata_identify(ch->dev);
844 	}
845     }
846 
847     /* release the hook that got us here, we are only needed once during boot */
848     if (ata_delayed_attach) {
849 	config_intrhook_disestablish(ata_delayed_attach);
850 	free(ata_delayed_attach, M_TEMP);
851 	ata_delayed_attach = NULL;
852     }
853 
854     mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
855 }
856 #endif
857 
858 /*
859  * misc support functions
860  */
861 #ifndef ATA_CAM
862 static device_t
863 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
864 {
865     device_t child;
866 
867     if ((child = device_add_child(parent, NULL, unit))) {
868 	device_set_softc(child, atadev);
869 	device_quiet(child);
870 	atadev->dev = child;
871 	atadev->max_iosize = DEV_BSIZE;
872 	atadev->mode = ATA_PIO_MAX;
873     }
874     return child;
875 }
876 #endif
877 
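/*
 * Issue ATA/ATAPI IDENTIFY to the device, byte-swap and clean up the
 * returned strings, and, when init is set, pick an initial transfer
 * mode and honor any "mode" hints.
 */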
878 int
879 ata_getparam(struct ata_device *atadev, int init)
880 {
881     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
882     struct ata_request *request;
883     const char *res;
884     char buf[64];
885     u_int8_t command = 0;
886     int error = ENOMEM, retries = 2, mode = -1;
887 
888     if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
889 	command = ATA_ATA_IDENTIFY;
890     if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
891 	command = ATA_ATAPI_IDENTIFY;
892     if (!command)
893 	return ENXIO;
894 
895     while (retries-- > 0 && error) {
896 	if (!(request = ata_alloc_request()))
897 	    break;
898 	request->dev = atadev->dev;
899 	request->timeout = 1;
900 	request->retries = 0;
901 	request->u.ata.command = command;
902 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
903 	if (!bootverbose)
904 	    request->flags |= ATA_R_QUIET;
905 	request->data = (void *)&atadev->param;
906 	request->bytecount = sizeof(struct ata_params);
907 	request->donecount = 0;
908 	request->transfersize = DEV_BSIZE;
909 	ata_queue_request(request);
910 	error = request->result;
911 	ata_free_request(request);
912     }
913 
914     if (!error && (isprint(atadev->param.model[0]) ||
915 		   isprint(atadev->param.model[1]))) {
916 	struct ata_params *atacap = &atadev->param;
917 	int16_t *ptr;
918 
919 	for (ptr = (int16_t *)atacap;
920 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
921 	    *ptr = le16toh(*ptr);
922 	}
923 	if (!(!strncmp(atacap->model, "FX", 2) ||
924 	      !strncmp(atacap->model, "NEC", 3) ||
925 	      !strncmp(atacap->model, "Pioneer", 7) ||
926 	      !strncmp(atacap->model, "SHARP", 5))) {
927 	    bswap(atacap->model, sizeof(atacap->model));
928 	    bswap(atacap->revision, sizeof(atacap->revision));
929 	    bswap(atacap->serial, sizeof(atacap->serial));
930 	}
931 	btrim(atacap->model, sizeof(atacap->model));
932 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
933 	btrim(atacap->revision, sizeof(atacap->revision));
934 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
935 	btrim(atacap->serial, sizeof(atacap->serial));
936 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
937 
938 	if (bootverbose)
939 	    printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
940 		   device_get_unit(ch->dev),
941 		   ata_unit2str(atadev),
942 		   ata_mode2str(ata_pmode(atacap)),
943 		   ata_mode2str(ata_wmode(atacap)),
944 		   ata_mode2str(ata_umode(atacap)),
945 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
946 
947 	if (init) {
948 	    char buffer[64];
949 
950 	    sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
951 	    device_set_desc_copy(atadev->dev, buffer);
952 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
953 		(atadev->param.config != ATA_CFA_MAGIC1) &&
954 		(atadev->param.config != ATA_CFA_MAGIC2)) {
955 		if (atapi_dma &&
956 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
957 		    ata_umode(&atadev->param) >= ATA_UDMA2)
958 		    atadev->mode = ATA_DMA_MAX;
959 	    }
960 	    else {
961 		if (ata_dma &&
962 		    (ata_umode(&atadev->param) > 0 ||
963 		     ata_wmode(&atadev->param) > 0))
964 		    atadev->mode = ATA_DMA_MAX;
965 	    }
966 	    snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
967 	    if (resource_string_value(device_get_name(ch->dev),
968 	        device_get_unit(ch->dev), buf, &res) == 0)
969 		    mode = ata_str2mode(res);
970 	    else if (resource_string_value(device_get_name(ch->dev),
971 		device_get_unit(ch->dev), "mode", &res) == 0)
972 		    mode = ata_str2mode(res);
973 	    if (mode >= 0)
974 		    atadev->mode = mode;
975 	}
976     }
977     else {
978 	if (!error)
979 	    error = ENXIO;
980     }
981     return error;
982 }
983 
984 #ifndef ATA_CAM
985 int
986 ata_identify(device_t dev)
987 {
988     struct ata_channel *ch = device_get_softc(dev);
989     struct ata_device *atadev;
990     device_t *children;
991     device_t child, master = NULL;
992     int nchildren, i, n = ch->devices;
993 
994     if (bootverbose)
995 	device_printf(dev, "Identifying devices: %08x\n", ch->devices);
996 
997     mtx_lock(&Giant);
998     /* Skip existing devices. */
999     if (!device_get_children(dev, &children, &nchildren)) {
1000 	for (i = 0; i < nchildren; i++) {
1001 	    if (children[i] && (atadev = device_get_softc(children[i])))
1002 		n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
1003 	}
1004 	free(children, M_TEMP);
1005     }
1006     /* Create new devices. */
1007     if (bootverbose)
1008 	device_printf(dev, "New devices: %08x\n", n);
1009     if (n == 0) {
1010 	mtx_unlock(&Giant);
1011 	return (0);
1012     }
1013     for (i = 0; i < ATA_PM; ++i) {
1014 	if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
1015 	    int unit = -1;
1016 
1017 	    if (!(atadev = malloc(sizeof(struct ata_device),
1018 				  M_ATA, M_NOWAIT | M_ZERO))) {
1019 		device_printf(dev, "out of memory\n");
		mtx_unlock(&Giant);
1020 		return ENOMEM;
1021 	    }
1022 	    atadev->unit = i;
1023 #ifdef ATA_STATIC_ID
1024 	    if (n & (ATA_ATA_MASTER << i))
1025 		unit = (device_get_unit(dev) << 1) + i;
1026 #endif
1027 	    if ((child = ata_add_child(dev, atadev, unit))) {
1028 		/*
1029 		 * PATA slave should be identified first, to allow
1030 		 * device cable detection on master to work properly.
1031 		 */
1032 		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1033 			(n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
1034 		    master = child;
1035 		    continue;
1036 		}
1037 		if (ata_getparam(atadev, 1)) {
1038 		    device_delete_child(dev, child);
1039 		    free(atadev, M_ATA);
1040 		}
1041 	    }
1042 	    else
1043 		free(atadev, M_ATA);
1044 	}
1045     }
1046     if (master) {
1047 	atadev = device_get_softc(master);
1048 	if (ata_getparam(atadev, 1)) {
1049 	    device_delete_child(dev, master);
1050 	    free(atadev, M_ATA);
1051 	}
1052     }
1053     bus_generic_probe(dev);
1054     bus_generic_attach(dev);
1055     mtx_unlock(&Giant);
1056     return 0;
1057 }
1058 #endif
1059 
1060 void
1061 ata_default_registers(device_t dev)
1062 {
1063     struct ata_channel *ch = device_get_softc(dev);
1064 
1065     /* fill in the defaults from what's set up already */
1066     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1067     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1068     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1069     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1070     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1071     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1072     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1073     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
1074 }
1075 
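/*
 * Promote a request to the 48-bit command set when the transfer does not
 * fit in 28-bit LBA (or exceeds 256 sectors) and the device supports it;
 * on controllers flagged ATA_NO_48BIT_DMA the DMA opcodes are degraded
 * to their PIO counterparts.
 */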
1076 void
1077 ata_modify_if_48bit(struct ata_request *request)
1078 {
1079     struct ata_channel *ch = device_get_softc(request->parent);
1080     struct ata_device *atadev = device_get_softc(request->dev);
1081 
1082     request->flags &= ~ATA_R_48BIT;
1083 
1084     if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1085 	 request->u.ata.count > 256) &&
1086 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1087 
1088 	/* translate command into 48bit version */
1089 	switch (request->u.ata.command) {
1090 	case ATA_READ:
1091 	    request->u.ata.command = ATA_READ48;
1092 	    break;
1093 	case ATA_READ_MUL:
1094 	    request->u.ata.command = ATA_READ_MUL48;
1095 	    break;
1096 	case ATA_READ_DMA:
1097 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1098 		if (request->transfersize > DEV_BSIZE)
1099 		    request->u.ata.command = ATA_READ_MUL48;
1100 		else
1101 		    request->u.ata.command = ATA_READ48;
1102 		request->flags &= ~ATA_R_DMA;
1103 	    }
1104 	    else
1105 		request->u.ata.command = ATA_READ_DMA48;
1106 	    break;
1107 	case ATA_READ_DMA_QUEUED:
1108 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1109 		if (request->transfersize > DEV_BSIZE)
1110 		    request->u.ata.command = ATA_READ_MUL48;
1111 		else
1112 		    request->u.ata.command = ATA_READ48;
1113 		request->flags &= ~ATA_R_DMA;
1114 	    }
1115 	    else
1116 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
1117 	    break;
1118 	case ATA_WRITE:
1119 	    request->u.ata.command = ATA_WRITE48;
1120 	    break;
1121 	case ATA_WRITE_MUL:
1122 	    request->u.ata.command = ATA_WRITE_MUL48;
1123 	    break;
1124 	case ATA_WRITE_DMA:
1125 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1126 		if (request->transfersize > DEV_BSIZE)
1127 		    request->u.ata.command = ATA_WRITE_MUL48;
1128 		else
1129 		    request->u.ata.command = ATA_WRITE48;
1130 		request->flags &= ~ATA_R_DMA;
1131 	    }
1132 	    else
1133 		request->u.ata.command = ATA_WRITE_DMA48;
1134 	    break;
1135 	case ATA_WRITE_DMA_QUEUED:
1136 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1137 		if (request->transfersize > DEV_BSIZE)
1138 		    request->u.ata.command = ATA_WRITE_MUL48;
1139 		else
1140 		    request->u.ata.command = ATA_WRITE48;
1142 		request->flags &= ~ATA_R_DMA;
1143 	    }
1144 	    else
1145 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1146 	    break;
1147 	case ATA_FLUSHCACHE:
1148 	    request->u.ata.command = ATA_FLUSHCACHE48;
1149 	    break;
1150 	case ATA_SET_MAX_ADDRESS:
1151 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1152 	    break;
1153 	default:
1154 	    return;
1155 	}
1156 	request->flags |= ATA_R_48BIT;
1157     }
1158     else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1159 
1160 	/* translate command into 48bit version */
1161 	switch (request->u.ata.command) {
1162 	case ATA_FLUSHCACHE:
1163 	    request->u.ata.command = ATA_FLUSHCACHE48;
1164 	    break;
1165 	case ATA_READ_NATIVE_MAX_ADDRESS:
1166 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1167 	    break;
1168 	case ATA_SET_MAX_ADDRESS:
1169 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1170 	    break;
1171 	default:
1172 	    return;
1173 	}
1174 	request->flags |= ATA_R_48BIT;
1175     }
1176 }
1177 
1178 void
1179 ata_udelay(int interval)
1180 {
1181     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1182     if (1 || interval < (1000000/hz) || ata_delayed_attach)
1183 	DELAY(interval);
1184     else
1185 	pause("ataslp", interval/(1000000/hz));
1186 }
1187 
1188 char *
1189 ata_unit2str(struct ata_device *atadev)
1190 {
1191     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1192     static char str[8];
1193 
1194     if (ch->devices & ATA_PORTMULTIPLIER)
1195 	sprintf(str, "port%d", atadev->unit);
1196     else
1197 	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1198     return str;
1199 }
1200 
1201 const char *
1202 ata_mode2str(int mode)
1203 {
1204     switch (mode) {
1205     case -1: return "UNSUPPORTED";
1206     case ATA_PIO0: return "PIO0";
1207     case ATA_PIO1: return "PIO1";
1208     case ATA_PIO2: return "PIO2";
1209     case ATA_PIO3: return "PIO3";
1210     case ATA_PIO4: return "PIO4";
1211     case ATA_WDMA0: return "WDMA0";
1212     case ATA_WDMA1: return "WDMA1";
1213     case ATA_WDMA2: return "WDMA2";
1214     case ATA_UDMA0: return "UDMA16";
1215     case ATA_UDMA1: return "UDMA25";
1216     case ATA_UDMA2: return "UDMA33";
1217     case ATA_UDMA3: return "UDMA40";
1218     case ATA_UDMA4: return "UDMA66";
1219     case ATA_UDMA5: return "UDMA100";
1220     case ATA_UDMA6: return "UDMA133";
1221     case ATA_SA150: return "SATA150";
1222     case ATA_SA300: return "SATA300";
1223     default:
1224 	if (mode & ATA_DMA_MASK)
1225 	    return "BIOSDMA";
1226 	else
1227 	    return "BIOSPIO";
1228     }
1229 }
1230 
1231 int
1232 ata_str2mode(const char *str)
1233 {
1234 
1235 	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1236 	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1237 	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1238 	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1239 	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1240 	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1241 	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1242 	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1243 	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1244 	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1245 	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1246 	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1247 	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1248 	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1249 	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1250 	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1251 	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1252 	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1253 	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1254 	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1255 	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1256 	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
1257 	return (-1);
1258 }
1259 
1260 const char *
1261 ata_satarev2str(int rev)
1262 {
1263 	switch (rev) {
1264 	case 0: return "";
1265 	case 1: return "SATA 1.5Gb/s";
1266 	case 2: return "SATA 3Gb/s";
1267 	case 3: return "SATA 6Gb/s";
1268 	case 0xff: return "SATA";
1269 	default: return "???";
1270 	}
1271 }
1272 
1273 int
1274 ata_atapi(device_t dev, int target)
1275 {
1276     struct ata_channel *ch = device_get_softc(dev);
1277 
1278     return (ch->devices & (ATA_ATAPI_MASTER << target));
1279 }
1280 
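/*
 * Decode the best PIO/WDMA/UDMA transfer modes a device advertises in
 * its IDENTIFY data; ata_wmode() and ata_umode() return -1 when that
 * mode class is not supported.
 */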
1281 int
1282 ata_pmode(struct ata_params *ap)
1283 {
1284     if (ap->atavalid & ATA_FLAG_64_70) {
1285 	if (ap->apiomodes & 0x02)
1286 	    return ATA_PIO4;
1287 	if (ap->apiomodes & 0x01)
1288 	    return ATA_PIO3;
1289     }
1290     if (ap->mwdmamodes & 0x04)
1291 	return ATA_PIO4;
1292     if (ap->mwdmamodes & 0x02)
1293 	return ATA_PIO3;
1294     if (ap->mwdmamodes & 0x01)
1295 	return ATA_PIO2;
1296     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1297 	return ATA_PIO2;
1298     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1299 	return ATA_PIO1;
1300     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1301 	return ATA_PIO0;
1302     return ATA_PIO0;
1303 }
1304 
1305 int
1306 ata_wmode(struct ata_params *ap)
1307 {
1308     if (ap->mwdmamodes & 0x04)
1309 	return ATA_WDMA2;
1310     if (ap->mwdmamodes & 0x02)
1311 	return ATA_WDMA1;
1312     if (ap->mwdmamodes & 0x01)
1313 	return ATA_WDMA0;
1314     return -1;
1315 }
1316 
1317 int
1318 ata_umode(struct ata_params *ap)
1319 {
1320     if (ap->atavalid & ATA_FLAG_88) {
1321 	if (ap->udmamodes & 0x40)
1322 	    return ATA_UDMA6;
1323 	if (ap->udmamodes & 0x20)
1324 	    return ATA_UDMA5;
1325 	if (ap->udmamodes & 0x10)
1326 	    return ATA_UDMA4;
1327 	if (ap->udmamodes & 0x08)
1328 	    return ATA_UDMA3;
1329 	if (ap->udmamodes & 0x04)
1330 	    return ATA_UDMA2;
1331 	if (ap->udmamodes & 0x02)
1332 	    return ATA_UDMA1;
1333 	if (ap->udmamodes & 0x01)
1334 	    return ATA_UDMA0;
1335     }
1336     return -1;
1337 }
1338 
1339 int
1340 ata_limit_mode(device_t dev, int mode, int maxmode)
1341 {
1342     struct ata_device *atadev = device_get_softc(dev);
1343 
1344     if (maxmode && mode > maxmode)
1345 	mode = maxmode;
1346 
1347     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1348 	return min(mode, ata_umode(&atadev->param));
1349 
1350     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1351 	return min(mode, ata_wmode(&atadev->param));
1352 
1353     if (mode > ata_pmode(&atadev->param))
1354 	return min(mode, ata_pmode(&atadev->param));
1355 
1356     return mode;
1357 }
1358 
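/*
 * Helpers for tidying IDENTIFY strings: bswap() byte-swaps 16-bit words,
 * btrim() turns NULs and underscores into spaces and strips trailing
 * blanks, bpack() collapses runs of spaces.
 */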
1359 static void
1360 bswap(int8_t *buf, int len)
1361 {
1362     u_int16_t *ptr = (u_int16_t*)(buf + len);
1363 
1364     while (--ptr >= (u_int16_t*)buf)
1365 	*ptr = ntohs(*ptr);
1366 }
1367 
1368 static void
1369 btrim(int8_t *buf, int len)
1370 {
1371     int8_t *ptr;
1372 
1373     for (ptr = buf; ptr < buf+len; ++ptr)
1374 	if (!*ptr || *ptr == '_')
1375 	    *ptr = ' ';
1376     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1377 	*ptr = 0;
1378 }
1379 
1380 static void
1381 bpack(int8_t *src, int8_t *dst, int len)
1382 {
1383     int i, j, blank;
1384 
1385     for (i = j = blank = 0 ; i < len; i++) {
1386 	if (blank && src[i] == ' ') continue;
1387 	if (blank && src[i] != ' ') {
1388 	    dst[j++] = src[i];
1389 	    blank = 0;
1390 	    continue;
1391 	}
1392 	if (src[i] == ' ') {
1393 	    blank = 1;
1394 	    if (i == 0)
1395 		continue;
1396 	}
1397 	dst[j++] = src[i];
1398     }
1399     if (j < len)
1400 	dst[j] = 0x00;
1401 }
1402 
1403 #ifdef ATA_CAM
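/*
 * Translate an incoming CAM CCB (XPT_ATA_IO or XPT_SCSI_IO) into an
 * ata_request and hand it to the controller's begin_transaction hook.
 */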
1404 void
1405 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1406 {
1407 	struct ata_channel *ch = device_get_softc(dev);
1408 	struct ata_request *request;
1409 
1410 	if (!(request = ata_alloc_request())) {
1411 		device_printf(dev, "FAILURE - out of memory in start\n");
1412 		ccb->ccb_h.status = CAM_REQ_INVALID;
1413 		xpt_done(ccb);
1414 		return;
1415 	}
1416 	bzero(request, sizeof(*request));
1417 
1418 	/* setup request */
1419 	request->dev = NULL;
1420 	request->parent = dev;
1421 	request->unit = ccb->ccb_h.target_id;
1422 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1423 		request->data = ccb->ataio.data_ptr;
1424 		request->bytecount = ccb->ataio.dxfer_len;
1425 		request->u.ata.command = ccb->ataio.cmd.command;
1426 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1427 					  (uint16_t)ccb->ataio.cmd.features;
1428 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1429 					(uint16_t)ccb->ataio.cmd.sector_count;
1430 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1431 			request->flags |= ATA_R_48BIT;
1432 			request->u.ata.lba =
1433 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1434 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1435 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1436 		} else {
1437 			request->u.ata.lba =
1438 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1439 		}
1440 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1441 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1442 				       (uint64_t)ccb->ataio.cmd.lba_low;
1443 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1444 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1445 			request->flags |= ATA_R_DMA;
1446 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1447 			request->flags |= ATA_R_READ;
1448 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1449 			request->flags |= ATA_R_WRITE;
1450 	} else {
1451 		request->data = ccb->csio.data_ptr;
1452 		request->bytecount = ccb->csio.dxfer_len;
1453 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1454 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1455 		    request->u.atapi.ccb, ccb->csio.cdb_len);
1456 		request->flags |= ATA_R_ATAPI;
1457 		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1458 			request->flags |= ATA_R_ATAPI16;
1459 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1460 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1461 			request->flags |= ATA_R_DMA;
1462 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1463 			request->flags |= ATA_R_READ;
1464 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1465 			request->flags |= ATA_R_WRITE;
1466 	}
1467 	request->transfersize = min(request->bytecount,
1468 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1469 	request->retries = 0;
1470 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1471 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1472 	request->ccb = ccb;
1473 
1474 	ch->running = request;
1475 	ch->state = ATA_ACTIVE;
1476 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1477 	    ch->running = NULL;
1478 	    ch->state = ATA_IDLE;
1479 	    ata_cam_end_transaction(dev, request);
1480 	    return;
1481 	}
1482 }
1483 
1484 static void
1485 ata_cam_request_sense(device_t dev, struct ata_request *request)
1486 {
1487 	struct ata_channel *ch = device_get_softc(dev);
1488 	union ccb *ccb = request->ccb;
1489 
1490 	ch->requestsense = 1;
1491 
1492 	bzero(request, sizeof(*request));
1493 	request->dev = NULL;
1494 	request->parent = dev;
1495 	request->unit = ccb->ccb_h.target_id;
1496 	request->data = (void *)&ccb->csio.sense_data;
1497 	request->bytecount = ccb->csio.sense_len;
1498 	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
1499 	request->u.atapi.ccb[4] = ccb->csio.sense_len;
1500 	request->flags |= ATA_R_ATAPI;
1501 	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1502 		request->flags |= ATA_R_ATAPI16;
1503 	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1504 		request->flags |= ATA_R_DMA;
1505 	request->flags |= ATA_R_READ;
1506 	request->transfersize = min(request->bytecount,
1507 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1508 	request->retries = 0;
1509 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1510 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1511 	request->ccb = ccb;
1512 
1513 	ch->running = request;
1514 	ch->state = ATA_ACTIVE;
1515 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1516 		ch->running = NULL;
1517 		ch->state = ATA_IDLE;
1518 		ata_cam_end_transaction(dev, request);
1519 		return;
1520 	}
1521 }
1522 
1523 static void
1524 ata_cam_process_sense(device_t dev, struct ata_request *request)
1525 {
1526 	struct ata_channel *ch = device_get_softc(dev);
1527 	union ccb *ccb = request->ccb;
1528 	int fatalerr = 0;
1529 
1530 	ch->requestsense = 0;
1531 
1532 	if (request->flags & ATA_R_TIMEOUT)
1533 		fatalerr = 1;
1534 	if ((request->flags & ATA_R_TIMEOUT) == 0 &&
1535 	    (request->status & ATA_S_ERROR) == 0 &&
1536 	    request->result == 0) {
1537 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1538 	} else {
1539 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1540 		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1541 	}
1542 
1543 	ata_free_request(request);
1544 	xpt_done(ccb);
1545 	/* Do error recovery if needed. */
1546 	if (fatalerr)
1547 		ata_reinit(dev);
1548 }
1549 
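/*
 * Complete a request back to CAM: map the ATA result into CCB status,
 * copy back result registers and residuals, kick off ATAPI autosense
 * when needed, and trigger a channel reinit on fatal errors.
 */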
1550 void
1551 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1552 {
1553 	struct ata_channel *ch = device_get_softc(dev);
1554 	union ccb *ccb = request->ccb;
1555 	int fatalerr = 0;
1556 
1557 	if (ch->requestsense) {
1558 		ata_cam_process_sense(dev, request);
1559 		return;
1560 	}
1561 
1562 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1563 	if (request->flags & ATA_R_TIMEOUT) {
1564 		xpt_freeze_simq(ch->sim, 1);
1565 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1566 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1567 		fatalerr = 1;
1568 	} else if (request->status & ATA_S_ERROR) {
1569 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1570 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1571 		} else {
1572 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1573 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1574 		}
1575 	} else if (request->result == ERESTART)
1576 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1577 	else if (request->result != 0)
1578 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1579 	else
1580 		ccb->ccb_h.status |= CAM_REQ_CMP;
1581 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1582 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1583 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1584 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1585 	}
1586 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1587 	    ((request->status & ATA_S_ERROR) ||
1588 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1589 		struct ata_res *res = &ccb->ataio.res;
1590 		res->status = request->status;
1591 		res->error = request->error;
1592 		res->lba_low = request->u.ata.lba;
1593 		res->lba_mid = request->u.ata.lba >> 8;
1594 		res->lba_high = request->u.ata.lba >> 16;
1595 		res->device = request->u.ata.lba >> 24;
1596 		res->lba_low_exp = request->u.ata.lba >> 24;
1597 		res->lba_mid_exp = request->u.ata.lba >> 32;
1598 		res->lba_high_exp = request->u.ata.lba >> 40;
1599 		res->sector_count = request->u.ata.count;
1600 		res->sector_count_exp = request->u.ata.count >> 8;
1601 	}
1602 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1603 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1604 			ccb->ataio.resid =
1605 			    ccb->ataio.dxfer_len - request->donecount;
1606 		} else {
1607 			ccb->csio.resid =
1608 			    ccb->csio.dxfer_len - request->donecount;
1609 		}
1610 	}
1611 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
1612 	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1613 		ata_cam_request_sense(dev, request);
1614 	else {
1615 		ata_free_request(request);
1616 		xpt_done(ccb);
1617 	}
1618 	/* Do error recovery if needed. */
1619 	if (fatalerr)
1620 		ata_reinit(dev);
1621 }
1622 
1623 static int
1624 ata_check_ids(device_t dev, union ccb *ccb)
1625 {
1626 	struct ata_channel *ch = device_get_softc(dev);
1627 
1628 	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1629 		ccb->ccb_h.status = CAM_TID_INVALID;
1630 		xpt_done(ccb);
1631 		return (-1);
1632 	}
1633 	if (ccb->ccb_h.target_lun != 0) {
1634 		ccb->ccb_h.status = CAM_LUN_INVALID;
1635 		xpt_done(ccb);
1636 		return (-1);
1637 	}
1638 	return (0);
1639 }
1640 
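/*
 * CAM SIM action routine: dispatches I/O CCBs to the channel and
 * implements the transport settings and path inquiry requests.
 */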
1641 static void
1642 ataaction(struct cam_sim *sim, union ccb *ccb)
1643 {
1644 	device_t dev, parent;
1645 	struct ata_channel *ch;
1646 
1647 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1648 	    ccb->ccb_h.func_code));
1649 
1650 	ch = (struct ata_channel *)cam_sim_softc(sim);
1651 	dev = ch->dev;
1652 	switch (ccb->ccb_h.func_code) {
1653 	/* Common cases first */
1654 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
1655 	case XPT_SCSI_IO:
1656 		if (ata_check_ids(dev, ccb))
1657 			return;
1658 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1659 		    << ccb->ccb_h.target_id)) == 0) {
1660 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1661 			break;
1662 		}
1663 		if (ch->running)
1664 			device_printf(dev, "already running!\n");
1665 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1666 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1667 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
1668 			struct ata_res *res = &ccb->ataio.res;
1669 
1670 			bzero(res, sizeof(*res));
1671 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1672 				res->lba_high = 0;
1673 				res->lba_mid = 0;
1674 			} else {
1675 				res->lba_high = 0xeb;
1676 				res->lba_mid = 0x14;
1677 			}
1678 			ccb->ccb_h.status = CAM_REQ_CMP;
1679 			break;
1680 		}
1681 		ata_cam_begin_transaction(dev, ccb);
1682 		return;
1683 	case XPT_EN_LUN:		/* Enable LUN as a target */
1684 	case XPT_TARGET_IO:		/* Execute target I/O request */
1685 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
1686 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
1687 	case XPT_ABORT:			/* Abort the specified CCB */
1688 		/* XXX Implement */
1689 		ccb->ccb_h.status = CAM_REQ_INVALID;
1690 		break;
1691 	case XPT_SET_TRAN_SETTINGS:
1692 	{
1693 		struct	ccb_trans_settings *cts = &ccb->cts;
1694 		struct	ata_cam_device *d;
1695 
1696 		if (ata_check_ids(dev, ccb))
1697 			return;
1698 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1699 			d = &ch->curr[ccb->ccb_h.target_id];
1700 		else
1701 			d = &ch->user[ccb->ccb_h.target_id];
1702 		if (ch->flags & ATA_SATA) {
1703 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1704 				d->revision = cts->xport_specific.sata.revision;
1705 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
1706 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1707 					d->mode = ATA_SETMODE(ch->dev,
1708 					    ccb->ccb_h.target_id,
1709 					    cts->xport_specific.sata.mode);
1710 				} else
1711 					d->mode = cts->xport_specific.sata.mode;
1712 			}
1713 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1714 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1715 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1716 				d->atapi = cts->xport_specific.sata.atapi;
1717 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
1718 				d->caps = cts->xport_specific.sata.caps;
1719 		} else {
1720 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1721 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1722 					d->mode = ATA_SETMODE(ch->dev,
1723 					    ccb->ccb_h.target_id,
1724 					    cts->xport_specific.ata.mode);
1725 				} else
1726 					d->mode = cts->xport_specific.ata.mode;
1727 			}
1728 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1729 				d->bytecount = cts->xport_specific.ata.bytecount;
1730 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1731 				d->atapi = cts->xport_specific.ata.atapi;
1732 		}
1733 		ccb->ccb_h.status = CAM_REQ_CMP;
1734 		break;
1735 	}
1736 	case XPT_GET_TRAN_SETTINGS:
1737 	{
1738 		struct	ccb_trans_settings *cts = &ccb->cts;
1739 		struct  ata_cam_device *d;
1740 
1741 		if (ata_check_ids(dev, ccb))
1742 			return;
1743 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1744 			d = &ch->curr[ccb->ccb_h.target_id];
1745 		else
1746 			d = &ch->user[ccb->ccb_h.target_id];
1747 		cts->protocol = PROTO_ATA;
1748 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1749 		if (ch->flags & ATA_SATA) {
1750 			cts->transport = XPORT_SATA;
1751 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1752 			cts->xport_specific.sata.valid = 0;
1753 			cts->xport_specific.sata.mode = d->mode;
1754 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1755 			cts->xport_specific.sata.bytecount = d->bytecount;
1756 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1757 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1758 				cts->xport_specific.sata.revision =
1759 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1760 				if (cts->xport_specific.sata.revision != 0xff) {
1761 					cts->xport_specific.sata.valid |=
1762 					    CTS_SATA_VALID_REVISION;
1763 				}
1764 				cts->xport_specific.sata.caps =
1765 				    d->caps & CTS_SATA_CAPS_D;
1766 				if (ch->pm_level) {
1767 					cts->xport_specific.sata.caps |=
1768 					    CTS_SATA_CAPS_H_PMREQ;
1769 				}
1770 				cts->xport_specific.sata.caps &=
1771 				    ch->user[ccb->ccb_h.target_id].caps;
1772 				cts->xport_specific.sata.valid |=
1773 				    CTS_SATA_VALID_CAPS;
1774 			} else {
1775 				cts->xport_specific.sata.revision = d->revision;
1776 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1777 				cts->xport_specific.sata.caps = d->caps;
1778 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
1779 			}
1780 			cts->xport_specific.sata.atapi = d->atapi;
1781 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1782 		} else {
1783 			cts->transport = XPORT_ATA;
1784 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1785 			cts->xport_specific.ata.valid = 0;
1786 			cts->xport_specific.ata.mode = d->mode;
1787 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1788 			cts->xport_specific.ata.bytecount = d->bytecount;
1789 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1790 			cts->xport_specific.ata.atapi = d->atapi;
1791 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1792 		}
1793 		ccb->ccb_h.status = CAM_REQ_CMP;
1794 		break;
1795 	}
1796 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1797 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1798 		ata_reinit(dev);
1799 		ccb->ccb_h.status = CAM_REQ_CMP;
1800 		break;
1801 	case XPT_TERM_IO:		/* Terminate the I/O process */
1802 		/* XXX Implement */
1803 		ccb->ccb_h.status = CAM_REQ_INVALID;
1804 		break;
1805 	case XPT_PATH_INQ:		/* Path routing inquiry */
1806 	{
1807 		struct ccb_pathinq *cpi = &ccb->cpi;
1808 
1809 		parent = device_get_parent(dev);
1810 		cpi->version_num = 1; /* XXX??? */
1811 		cpi->hba_inquiry = PI_SDTR_ABLE;
1812 		cpi->target_sprt = 0;
1813 		cpi->hba_misc = PIM_SEQSCAN;
1814 		cpi->hba_eng_cnt = 0;
1815 		if (ch->flags & ATA_NO_SLAVE)
1816 			cpi->max_target = 0;
1817 		else
1818 			cpi->max_target = 1;
1819 		cpi->max_lun = 0;
1820 		cpi->initiator_id = 0;
1821 		cpi->bus_id = cam_sim_bus(sim);
1822 		if (ch->flags & ATA_SATA)
1823 			cpi->base_transfer_speed = 150000;
1824 		else
1825 			cpi->base_transfer_speed = 3300;
1826 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1827 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1828 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1829 		cpi->unit_number = cam_sim_unit(sim);
1830 		if (ch->flags & ATA_SATA)
1831 			cpi->transport = XPORT_SATA;
1832 		else
1833 			cpi->transport = XPORT_ATA;
1834 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1835 		cpi->protocol = PROTO_ATA;
1836 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1837 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1838 		if (device_get_devclass(device_get_parent(parent)) ==
1839 		    devclass_find("pci")) {
1840 			cpi->hba_vendor = pci_get_vendor(parent);
1841 			cpi->hba_device = pci_get_device(parent);
1842 			cpi->hba_subvendor = pci_get_subvendor(parent);
1843 			cpi->hba_subdevice = pci_get_subdevice(parent);
1844 		}
1845 		cpi->ccb_h.status = CAM_REQ_CMP;
1846 		break;
1847 	}
1848 	default:
1849 		ccb->ccb_h.status = CAM_REQ_INVALID;
1850 		break;
1851 	}
1852 	xpt_done(ccb);
1853 }
1854 
1855 static void
1856 atapoll(struct cam_sim *sim)
1857 {
1858 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1859 
1860 	ata_interrupt_locked(ch);
1861 }
1862 #endif
1863 
1864 /*
1865  * module handling
1866  */
1867 static int
1868 ata_module_event_handler(module_t mod, int what, void *arg)
1869 {
1870 #ifndef ATA_CAM
1871     static struct cdev *atacdev;
1872 #endif
1873 
1874     switch (what) {
1875     case MOD_LOAD:
1876 #ifndef ATA_CAM
1877 	/* register controlling device */
1878 	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1879 
1880 	if (cold) {
1881 	    /* register boot attach to be run when interrupts are enabled */
1882 	    if (!(ata_delayed_attach = (struct intr_config_hook *)
1883 				       malloc(sizeof(struct intr_config_hook),
1884 					      M_TEMP, M_NOWAIT | M_ZERO))) {
1885 		printf("ata: malloc of delayed attach hook failed\n");
1886 		return EIO;
1887 	    }
1888 	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1889 	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
1890 		printf("ata: config_intrhook_establish failed\n");
1891 		free(ata_delayed_attach, M_TEMP);
1892 	    }
1893 	}
1894 #endif
1895 	return 0;
1896 
1897     case MOD_UNLOAD:
1898 #ifndef ATA_CAM
1899 	/* deregister controlling device */
1900 	destroy_dev(atacdev);
1901 #endif
1902 	return 0;
1903 
1904     default:
1905 	return EOPNOTSUPP;
1906     }
1907 }
1908 
1909 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1910 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1911 MODULE_VERSION(ata, 1);
1912 #ifdef ATA_CAM
1913 MODULE_DEPEND(ata, cam, 1, 1, 1);
1914 #endif
1915 
1916 static void
1917 ata_init(void)
1918 {
1919     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1920 				   NULL, NULL, NULL, NULL, 0, 0);
1921     ata_composite_zone = uma_zcreate("ata_composite",
1922 				     sizeof(struct ata_composite),
1923 				     NULL, NULL, NULL, NULL, 0, 0);
1924 }
1925 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1926 
1927 static void
1928 ata_uninit(void)
1929 {
1930     uma_zdestroy(ata_composite_zone);
1931     uma_zdestroy(ata_request_zone);
1932 }
1933 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1934