xref: /freebsd/sys/dev/ata/ata-all.c (revision 9fd69f37d28cfd7438cac3eeb45fe9dd46b4d7dd)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/ata.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/bio.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/sema.h>
44 #include <sys/taskqueue.h>
45 #include <vm/uma.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <dev/ata/ata-all.h>
51 #include <ata_if.h>
52 
53 #ifdef ATA_CAM
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_debug.h>
59 #endif
60 
61 #ifndef ATA_CAM
62 /* device structure */
63 static  d_ioctl_t       ata_ioctl;
64 static struct cdevsw ata_cdevsw = {
65 	.d_version =    D_VERSION,
66 	.d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
67 	.d_ioctl =      ata_ioctl,
68 	.d_name =       "ata",
69 };
70 #endif
71 
72 /* prototypes */
73 #ifndef ATA_CAM
74 static void ata_boot_attach(void);
75 static device_t ata_add_child(device_t, struct ata_device *, int);
76 #else
77 static void ataaction(struct cam_sim *sim, union ccb *ccb);
78 static void atapoll(struct cam_sim *sim);
79 #endif
80 static void ata_conn_event(void *, int);
81 static void bswap(int8_t *, int);
82 static void btrim(int8_t *, int);
83 static void bpack(int8_t *, int8_t *, int);
84 static void ata_interrupt_locked(void *data);
85 
86 /* global vars */
87 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
88 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
89 struct intr_config_hook *ata_delayed_attach = NULL;
90 devclass_t ata_devclass;
91 uma_zone_t ata_request_zone;
92 uma_zone_t ata_composite_zone;
93 int ata_wc = 1;
94 int ata_setmax = 0;
95 int ata_dma_check_80pin = 1;
96 
97 /* local vars */
98 static int ata_dma = 1;
99 static int atapi_dma = 1;
100 
101 /* sysctl vars */
102 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
103 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
104 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
105 	   "ATA disk DMA mode control");
106 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
107 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
108 	   CTLFLAG_RDTUN, &ata_dma_check_80pin, 1,
109 	   "Check for 80pin cable before setting ATA DMA mode");
110 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
111 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
112 	   "ATAPI device DMA mode control");
113 TUNABLE_INT("hw.ata.wc", &ata_wc);
114 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
115 	   "ATA disk write caching");
116 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
117 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
118 	   "ATA disk set max native address");
119 
120 /*
121  * newbus device interface related functions
122  */
123 int
124 ata_probe(device_t dev)
125 {
126     return 0;
127 }
128 
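/*
 * Attach an ATA channel: initialize the softc, locks and request queue,
 * allocate DMA and interrupt resources and, when built with ATA_CAM,
 * register a CAM SIM and bus for the channel.
 */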
129 int
130 ata_attach(device_t dev)
131 {
132     struct ata_channel *ch = device_get_softc(dev);
133     int error, rid;
134 #ifdef ATA_CAM
135     struct cam_devq *devq;
136     int i;
137 #endif
138 
139     /* check that we have a virgin channel to attach */
140     if (ch->r_irq)
141 	return EEXIST;
142 
143     /* initialize the softc basics */
144     ch->dev = dev;
145     ch->state = ATA_IDLE;
146     bzero(&ch->state_mtx, sizeof(struct mtx));
147     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
148     bzero(&ch->queue_mtx, sizeof(struct mtx));
149     mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
150     TAILQ_INIT(&ch->ata_queue);
151     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
152 #ifdef ATA_CAM
153 	for (i = 0; i < 16; i++) {
154 		ch->user[i].mode = 0;
155 		if (ch->flags & ATA_SATA)
156 			ch->user[i].bytecount = 8192;
157 		else
158 			ch->user[i].bytecount = MAXPHYS;
159 		ch->curr[i] = ch->user[i];
160 	}
161 #endif
162 
163     /* reset the controller HW, the channel and device(s) */
164     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
165 	pause("ataatch", 1);
166 #ifndef ATA_CAM
167     ATA_RESET(dev);
168 #endif
169     ATA_LOCKING(dev, ATA_LF_UNLOCK);
170 
171     /* allocate DMA resources if DMA HW present */
172     if (ch->dma.alloc)
173 	ch->dma.alloc(dev);
174 
175     /* setup interrupt delivery */
176     rid = ATA_IRQ_RID;
177     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
178 				       RF_SHAREABLE | RF_ACTIVE);
179     if (!ch->r_irq) {
180 	device_printf(dev, "unable to allocate interrupt\n");
181 	return ENXIO;
182     }
183     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
184 				ata_interrupt, ch, &ch->ih))) {
185 	device_printf(dev, "unable to setup interrupt\n");
186 	return error;
187     }
188 
189 #ifndef ATA_CAM
190     /* probe and attach devices on this channel unless we are in early boot */
191     if (!ata_delayed_attach)
192 	ata_identify(dev);
193     return (0);
194 #else
195 	mtx_lock(&ch->state_mtx);
196 	/* Create the device queue for our SIM. */
197 	devq = cam_simq_alloc(1);
198 	if (devq == NULL) {
199 		device_printf(dev, "Unable to allocate simq\n");
200 		error = ENOMEM;
201 		goto err1;
202 	}
203 	/* Construct SIM entry */
204 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
205 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
206 	if (ch->sim == NULL) {
207 		device_printf(dev, "unable to allocate sim\n");
208 		error = ENOMEM;
209 		goto err2;
210 	}
211 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
212 		device_printf(dev, "unable to register xpt bus\n");
213 		error = ENXIO;
214 		goto err2;
215 	}
216 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
217 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
218 		device_printf(dev, "unable to create path\n");
219 		error = ENXIO;
220 		goto err3;
221 	}
222 	mtx_unlock(&ch->state_mtx);
223 	return (0);
224 
225 err3:
226 	xpt_bus_deregister(cam_sim_path(ch->sim));
227 err2:
228 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
229 err1:
230 	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
231 	mtx_unlock(&ch->state_mtx);
232 	return (error);
233 #endif
234 }
235 
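/*
 * Detach an ATA channel: stall the request queue, delete the children
 * (non-CAM) or tear down the CAM SIM and path (CAM), then release the
 * interrupt and DMA resources and destroy the channel locks.
 */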
236 int
237 ata_detach(device_t dev)
238 {
239     struct ata_channel *ch = device_get_softc(dev);
240 #ifndef ATA_CAM
241     device_t *children;
242     int nchildren, i;
243 #endif
244 
245     /* check that we have a valid channel to detach */
246     if (!ch->r_irq)
247 	return ENXIO;
248 
249     /* grab the channel lock so no new requests get launched */
250     mtx_lock(&ch->state_mtx);
251     ch->state |= ATA_STALL_QUEUE;
252     mtx_unlock(&ch->state_mtx);
253 
254 #ifndef ATA_CAM
255     /* detach & delete all children */
256     if (!device_get_children(dev, &children, &nchildren)) {
257 	for (i = 0; i < nchildren; i++)
258 	    if (children[i])
259 		device_delete_child(dev, children[i]);
260 	free(children, M_TEMP);
261     }
262 #endif
263     taskqueue_drain(taskqueue_thread, &ch->conntask);
264 
265 #ifdef ATA_CAM
266 	mtx_lock(&ch->state_mtx);
267 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
268 	xpt_free_path(ch->path);
269 	xpt_bus_deregister(cam_sim_path(ch->sim));
270 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
271 	mtx_unlock(&ch->state_mtx);
272 #endif
273 
274     /* release resources */
275     bus_teardown_intr(dev, ch->r_irq, ch->ih);
276     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
277     ch->r_irq = NULL;
278 
279     /* free DMA resources if DMA HW present */
280     if (ch->dma.free)
281 	ch->dma.free(dev);
282 
283     mtx_destroy(&ch->state_mtx);
284     mtx_destroy(&ch->queue_mtx);
285     return 0;
286 }
287 
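/*
 * Taskqueue handler for connection change events: reinitialize the
 * channel and, with ATA_CAM, request a bus rescan.
 */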
288 static void
289 ata_conn_event(void *context, int dummy)
290 {
291 	device_t dev = (device_t)context;
292 #ifdef ATA_CAM
293 	struct ata_channel *ch = device_get_softc(dev);
294 	union ccb *ccb;
295 
296 	mtx_lock(&ch->state_mtx);
297 	ata_reinit(dev);
298 	mtx_unlock(&ch->state_mtx);
299 	if ((ccb = xpt_alloc_ccb()) == NULL)
300 		return;
301 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
302 	    cam_sim_path(ch->sim),
303 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
304 		xpt_free_ccb(ccb);
305 		return;
306 	}
307 	xpt_rescan(ccb);
308 #else
309 	ata_reinit(dev);
310 #endif
311 }
312 
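/*
 * Reinitialize a channel after an error or a connection change.  The
 * non-CAM path aborts the running request, resets the hardware, reinits
 * or deletes the children and requeues the interrupted request; the CAM
 * path freezes the SIM queue, fails the running request with ERESTART,
 * resets the hardware and posts an AC_BUS_RESET async event.
 */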
313 int
314 ata_reinit(device_t dev)
315 {
316     struct ata_channel *ch = device_get_softc(dev);
317     struct ata_request *request;
318 #ifndef ATA_CAM
319     device_t *children;
320     int nchildren, i;
321 
322     /* check that we have a valid channel to reinit */
323     if (!ch || !ch->r_irq)
324 	return ENXIO;
325 
326     if (bootverbose)
327 	device_printf(dev, "reiniting channel ..\n");
328 
329     /* poll for locking the channel */
330     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
331 	pause("atarini", 1);
332 
333     /* catch any request currently in ch->running */
334     mtx_lock(&ch->state_mtx);
335     if (ch->state & ATA_STALL_QUEUE) {
336 	/* Recursive reinits and reinits during detach are prohibited. */
337 	mtx_unlock(&ch->state_mtx);
338 	return (ENXIO);
339     }
340     if ((request = ch->running))
341 	callout_stop(&request->callout);
342     ch->running = NULL;
343 
344     /* unconditionally grab the channel lock */
345     ch->state |= ATA_STALL_QUEUE;
346     mtx_unlock(&ch->state_mtx);
347 
348     /* reset the controller HW, the channel and device(s) */
349     ATA_RESET(dev);
350 
351     /* reinit the children and delete any that fail */
352     if (!device_get_children(dev, &children, &nchildren)) {
353 	mtx_lock(&Giant);       /* newbus suckage it needs Giant */
354 	for (i = 0; i < nchildren; i++) {
355 	    /* did any children go missing ? */
356 	    if (children[i] && device_is_attached(children[i]) &&
357 		ATA_REINIT(children[i])) {
358 		/*
359 		 * if we had a running request and its device matches
360 		 * this child we need to inform the request that the
361 		 * device is gone.
362 		 */
363 		if (request && request->dev == children[i]) {
364 		    request->result = ENXIO;
365 		    device_printf(request->dev, "FAILURE - device detached\n");
366 
367 		    /* if not timeout finish request here */
368 		    if (!(request->flags & ATA_R_TIMEOUT))
369 			    ata_finish(request);
370 		    request = NULL;
371 		}
372 		device_delete_child(dev, children[i]);
373 	    }
374 	}
375 	free(children, M_TEMP);
376 	mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
377     }
378 
379     /* if we still have a good request put it on the queue again */
380     if (request && !(request->flags & ATA_R_TIMEOUT)) {
381 	device_printf(request->dev,
382 		      "WARNING - %s requeued due to channel reset",
383 		      ata_cmd2str(request));
384 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
385 	    printf(" LBA=%ju", request->u.ata.lba);
386 	printf("\n");
387 	request->flags |= ATA_R_REQUEUE;
388 	ata_queue_request(request);
389     }
390 
391     /* we're done, release the channel for new work */
392     mtx_lock(&ch->state_mtx);
393     ch->state = ATA_IDLE;
394     mtx_unlock(&ch->state_mtx);
395     ATA_LOCKING(dev, ATA_LF_UNLOCK);
396 
397     /* Add new children. */
398 /*    ata_identify(dev); */
399 
400     if (bootverbose)
401 	device_printf(dev, "reinit done ..\n");
402 
403     /* kick off requests on the queue */
404     ata_start(dev);
405 #else
406 	xpt_freeze_simq(ch->sim, 1);
407 	if ((request = ch->running)) {
408 		ch->running = NULL;
409 		if (ch->state == ATA_ACTIVE)
410 		    ch->state = ATA_IDLE;
411 		callout_stop(&request->callout);
412 		if (ch->dma.unload)
413 		    ch->dma.unload(request);
414 		request->result = ERESTART;
415 		ata_cam_end_transaction(dev, request);
416 	}
417 	/* reset the controller HW, the channel and device(s) */
418 	ATA_RESET(dev);
419 	/* Tell the XPT about the event */
420 	xpt_async(AC_BUS_RESET, ch->path, NULL);
421 	xpt_release_simq(ch->sim, TRUE);
422 #endif
423 	return(0);
424 }
425 
426 int
427 ata_suspend(device_t dev)
428 {
429     struct ata_channel *ch;
430 
431     /* check for valid device */
432     if (!dev || !(ch = device_get_softc(dev)))
433 	return ENXIO;
434 
435 #ifndef ATA_CAM
436     /* wait for the channel to be IDLE or detached before suspending */
437     while (ch->r_irq) {
438 	mtx_lock(&ch->state_mtx);
439 	if (ch->state == ATA_IDLE) {
440 	    ch->state = ATA_ACTIVE;
441 	    mtx_unlock(&ch->state_mtx);
442 	    break;
443 	}
444 	mtx_unlock(&ch->state_mtx);
445 	tsleep(ch, PRIBIO, "atasusp", hz/10);
446     }
447     ATA_LOCKING(dev, ATA_LF_UNLOCK);
448 #endif
449     return(0);
450 }
451 
452 int
453 ata_resume(device_t dev)
454 {
455     int error;
456 
457     /* check for valid device */
458     if (!dev || !device_get_softc(dev))
459 	return ENXIO;
460 
461     /* reinit the devices, we don't know what mode/state they are in */
462     error = ata_reinit(dev);
463 
464 #ifndef ATA_CAM
465     /* kick off requests on the queue */
466     ata_start(dev);
467 #endif
468     return error;
469 }
470 
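/*
 * Interrupt handler; with ATA_CAM the channel state lock is taken here,
 * otherwise ata_interrupt_locked() acquires it itself.
 */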
471 void
472 ata_interrupt(void *data)
473 {
474 #ifdef ATA_CAM
475     struct ata_channel *ch = (struct ata_channel *)data;
476 
477     mtx_lock(&ch->state_mtx);
478 #endif
479     ata_interrupt_locked(data);
480 #ifdef ATA_CAM
481     mtx_unlock(&ch->state_mtx);
482 #endif
483 }
484 
485 static void
486 ata_interrupt_locked(void *data)
487 {
488     struct ata_channel *ch = (struct ata_channel *)data;
489     struct ata_request *request;
490 
491 #ifndef ATA_CAM
492     mtx_lock(&ch->state_mtx);
493 #endif
494     do {
495 	/* ignore interrupt if it's not for us */
496 	if (ch->hw.status && !ch->hw.status(ch->dev))
497 	    break;
498 
499 	/* do we have a running request */
500 	if (!(request = ch->running))
501 	    break;
502 
503 	ATA_DEBUG_RQ(request, "interrupt");
504 
505 	/* safety check for the right state */
506 	if (ch->state == ATA_IDLE) {
507 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
508 	    break;
509 	}
510 
511 	/*
512 	 * we have the HW locks, so end the transaction for this request
513 	 * if it finishes immediately, otherwise wait for the next interrupt
514 	 */
515 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
516 	    ch->running = NULL;
517 	    if (ch->state == ATA_ACTIVE)
518 		ch->state = ATA_IDLE;
519 #ifdef ATA_CAM
520 	    ata_cam_end_transaction(ch->dev, request);
521 #else
522 	    mtx_unlock(&ch->state_mtx);
523 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
524 	    ata_finish(request);
525 #endif
526 	    return;
527 	}
528     } while (0);
529 #ifndef ATA_CAM
530     mtx_unlock(&ch->state_mtx);
531 #endif
532 }
533 
534 void
535 ata_print_cable(device_t dev, u_int8_t *who)
536 {
537     device_printf(dev,
538                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
539 }
540 
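/*
 * Limit UDMA modes above UDMA33 to UDMA2 when the device does not report
 * an 80-conductor cable, unless the hw.ata.ata_dma_check_80pin tunable
 * disabled the check.
 */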
541 int
542 ata_check_80pin(device_t dev, int mode)
543 {
544     struct ata_device *atadev = device_get_softc(dev);
545 
546     if (!ata_dma_check_80pin) {
547         if (bootverbose)
548             device_printf(dev, "Skipping 80pin cable check\n");
549         return mode;
550     }
551 
552     if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
553         ata_print_cable(dev, "device");
554         mode = ATA_UDMA2;
555     }
556     return mode;
557 }
558 
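/*
 * Negotiate a transfer mode with the controller, re-applying the cable
 * limit each round until the mode no longer changes, then program the
 * device via SET FEATURES / SET TRANSFER MODE.
 */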
559 void
560 ata_setmode(device_t dev)
561 {
562 	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
563 	struct ata_device *atadev = device_get_softc(dev);
564 	int error, mode, pmode;
565 
566 	mode = atadev->mode;
567 	do {
568 		pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
569 		mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
570 		if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
571 			mode = ata_check_80pin(dev, mode);
572 	} while (pmode != mode); /* Iterate until successful negotiation. */
573 	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
574 	if (bootverbose)
575 	        device_printf(dev, "%ssetting %s\n",
576 		    (error) ? "FAILURE " : "", ata_mode2str(mode));
577 	atadev->mode = mode;
578 }
579 
580 /*
581  * device related interfaces
582  */
583 #ifndef ATA_CAM
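/*
 * Control device ioctl: report the number of channels, reinit/attach/
 * detach a channel, list its devices, or pass unknown requests to the
 * ATA RAID ioctl hook.
 */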
584 static int
585 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
586 	  int32_t flag, struct thread *td)
587 {
588     device_t device, *children;
589     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
590     int *value = (int *)data;
591     int i, nchildren, error = ENOTTY;
592 
593     switch (cmd) {
594     case IOCATAGMAXCHANNEL:
595 	/* In case we have channel 0..n this will return n+1. */
596 	*value = devclass_get_maxunit(ata_devclass);
597 	error = 0;
598 	break;
599 
600     case IOCATAREINIT:
601 	if (*value >= devclass_get_maxunit(ata_devclass) ||
602 	    !(device = devclass_get_device(ata_devclass, *value)) ||
603 	    !device_is_attached(device))
604 	    return ENXIO;
605 	error = ata_reinit(device);
606 	break;
607 
608     case IOCATAATTACH:
609 	if (*value >= devclass_get_maxunit(ata_devclass) ||
610 	    !(device = devclass_get_device(ata_devclass, *value)) ||
611 	    !device_is_attached(device))
612 	    return ENXIO;
613 	error = DEVICE_ATTACH(device);
614 	break;
615 
616     case IOCATADETACH:
617 	if (*value >= devclass_get_maxunit(ata_devclass) ||
618 	    !(device = devclass_get_device(ata_devclass, *value)) ||
619 	    !device_is_attached(device))
620 	    return ENXIO;
621 	error = DEVICE_DETACH(device);
622 	break;
623 
624     case IOCATADEVICES:
625 	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
626 	    !(device = devclass_get_device(ata_devclass, devices->channel)) ||
627 	    !device_is_attached(device))
628 	    return ENXIO;
629 	bzero(devices->name[0], 32);
630 	bzero(&devices->params[0], sizeof(struct ata_params));
631 	bzero(devices->name[1], 32);
632 	bzero(&devices->params[1], sizeof(struct ata_params));
633 	if (!device_get_children(device, &children, &nchildren)) {
634 	    for (i = 0; i < nchildren; i++) {
635 		if (children[i] && device_is_attached(children[i])) {
636 		    struct ata_device *atadev = device_get_softc(children[i]);
637 
638 		    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
639 			strncpy(devices->name[0],
640 				device_get_nameunit(children[i]), 32);
641 			bcopy(&atadev->param, &devices->params[0],
642 			      sizeof(struct ata_params));
643 		    }
644 		    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
645 			strncpy(devices->name[1],
646 				device_get_nameunit(children[i]), 32);
647 			bcopy(&atadev->param, &devices->params[1],
648 			      sizeof(struct ata_params));
649 		    }
650 		}
651 	    }
652 	    free(children, M_TEMP);
653 	    error = 0;
654 	}
655 	else
656 	    error = ENODEV;
657 	break;
658 
659     default:
660 	if (ata_raid_ioctl_func)
661 	    error = ata_raid_ioctl_func(cmd, data);
662     }
663     return error;
664 }
665 #endif
666 
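/*
 * Per-device ioctl backend: pass user-built ATA/ATAPI requests through,
 * return IDENTIFY data and get/set transfer mode and spindown settings.
 */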
667 int
668 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
669 {
670     struct ata_device *atadev = device_get_softc(dev);
671     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
672     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
673     struct ata_params *params = (struct ata_params *)data;
674     int *mode = (int *)data;
675     struct ata_request *request;
676     caddr_t buf;
677     int error;
678 
679     switch (cmd) {
680     case IOCATAREQUEST:
681 	if (ioc_request->count >
682 	    (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
683 		return (EFBIG);
684 	}
685 	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
686 	    return ENOMEM;
687 	}
688 	if (!(request = ata_alloc_request())) {
689 	    free(buf, M_ATA);
690 	    return  ENOMEM;
691 	}
692 	request->dev = atadev->dev;
693 	if (ioc_request->flags & ATA_CMD_WRITE) {
694 	    error = copyin(ioc_request->data, buf, ioc_request->count);
695 	    if (error) {
696 		free(buf, M_ATA);
697 		ata_free_request(request);
698 		return error;
699 	    }
700 	}
701 	if (ioc_request->flags & ATA_CMD_ATAPI) {
702 	    request->flags = ATA_R_ATAPI;
703 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
704 	}
705 	else {
706 	    request->u.ata.command = ioc_request->u.ata.command;
707 	    request->u.ata.feature = ioc_request->u.ata.feature;
708 	    request->u.ata.lba = ioc_request->u.ata.lba;
709 	    request->u.ata.count = ioc_request->u.ata.count;
710 	}
711 	request->timeout = ioc_request->timeout;
712 	request->data = buf;
713 	request->bytecount = ioc_request->count;
714 	request->transfersize = request->bytecount;
715 	if (ioc_request->flags & ATA_CMD_CONTROL)
716 	    request->flags |= ATA_R_CONTROL;
717 	if (ioc_request->flags & ATA_CMD_READ)
718 	    request->flags |= ATA_R_READ;
719 	if (ioc_request->flags & ATA_CMD_WRITE)
720 	    request->flags |= ATA_R_WRITE;
721 	ata_queue_request(request);
722 	if (request->flags & ATA_R_ATAPI) {
723 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
724 		  sizeof(struct atapi_sense));
725 	}
726 	else {
727 	    ioc_request->u.ata.command = request->u.ata.command;
728 	    ioc_request->u.ata.feature = request->u.ata.feature;
729 	    ioc_request->u.ata.lba = request->u.ata.lba;
730 	    ioc_request->u.ata.count = request->u.ata.count;
731 	}
732 	ioc_request->error = request->result;
733 	if (ioc_request->flags & ATA_CMD_READ)
734 	    error = copyout(buf, ioc_request->data, ioc_request->count);
735 	else
736 	    error = 0;
737 	free(buf, M_ATA);
738 	ata_free_request(request);
739 	return error;
740 
741     case IOCATAGPARM:
742 	ata_getparam(atadev, 0);
743 	bcopy(&atadev->param, params, sizeof(struct ata_params));
744 	return 0;
745 
746     case IOCATASMODE:
747 	atadev->mode = *mode;
748 	ata_setmode(dev);
749 	return 0;
750 
751     case IOCATAGMODE:
752 	*mode = atadev->mode |
753 	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
754 	return 0;
755     case IOCATASSPINDOWN:
756 	atadev->spindown = *mode;
757 	return 0;
758     case IOCATAGSPINDOWN:
759 	*mode = atadev->spindown;
760 	return 0;
761     default:
762 	return ENOTTY;
763     }
764 }
765 
766 #ifndef ATA_CAM
767 static void
768 ata_boot_attach(void)
769 {
770     struct ata_channel *ch;
771     int ctlr;
772 
773     mtx_lock(&Giant);       /* newbus suckage it needs Giant */
774 
775     /* kick off probe and attach on all channels */
776     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
777 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
778 	    ata_identify(ch->dev);
779 	}
780     }
781 
782     /* release the hook that got us here, we are only needed once during boot */
783     if (ata_delayed_attach) {
784 	config_intrhook_disestablish(ata_delayed_attach);
785 	free(ata_delayed_attach, M_TEMP);
786 	ata_delayed_attach = NULL;
787     }
788 
789     mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
790 }
791 #endif
792 
793 /*
794  * misc support functions
795  */
796 #ifndef ATA_CAM
797 static device_t
798 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
799 {
800     device_t child;
801 
802     if ((child = device_add_child(parent, NULL, unit))) {
803 	device_set_softc(child, atadev);
804 	device_quiet(child);
805 	atadev->dev = child;
806 	atadev->max_iosize = DEV_BSIZE;
807 	atadev->mode = ATA_PIO_MAX;
808     }
809     return child;
810 }
811 #endif
812 
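/*
 * Issue ATA or ATAPI IDENTIFY to a device and normalize the result:
 * convert the parameter words to host endianness, byte-swap the ASCII
 * strings for drives that need it, trim and pack the model, revision
 * and serial strings, and pick an initial transfer mode.
 */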
813 int
814 ata_getparam(struct ata_device *atadev, int init)
815 {
816     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
817     struct ata_request *request;
818     u_int8_t command = 0;
819     int error = ENOMEM, retries = 2;
820 
821     if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
822 	command = ATA_ATA_IDENTIFY;
823     if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
824 	command = ATA_ATAPI_IDENTIFY;
825     if (!command)
826 	return ENXIO;
827 
828     while (retries-- > 0 && error) {
829 	if (!(request = ata_alloc_request()))
830 	    break;
831 	request->dev = atadev->dev;
832 	request->timeout = 1;
833 	request->retries = 0;
834 	request->u.ata.command = command;
835 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
836 	if (!bootverbose)
837 	    request->flags |= ATA_R_QUIET;
838 	request->data = (void *)&atadev->param;
839 	request->bytecount = sizeof(struct ata_params);
840 	request->donecount = 0;
841 	request->transfersize = DEV_BSIZE;
842 	ata_queue_request(request);
843 	error = request->result;
844 	ata_free_request(request);
845     }
846 
847     if (!error && (isprint(atadev->param.model[0]) ||
848 		   isprint(atadev->param.model[1]))) {
849 	struct ata_params *atacap = &atadev->param;
850 	int16_t *ptr;
851 
852 	for (ptr = (int16_t *)atacap;
853 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
854 	    *ptr = le16toh(*ptr);
855 	}
856 	if (!(!strncmp(atacap->model, "FX", 2) ||
857 	      !strncmp(atacap->model, "NEC", 3) ||
858 	      !strncmp(atacap->model, "Pioneer", 7) ||
859 	      !strncmp(atacap->model, "SHARP", 5))) {
860 	    bswap(atacap->model, sizeof(atacap->model));
861 	    bswap(atacap->revision, sizeof(atacap->revision));
862 	    bswap(atacap->serial, sizeof(atacap->serial));
863 	}
864 	btrim(atacap->model, sizeof(atacap->model));
865 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
866 	btrim(atacap->revision, sizeof(atacap->revision));
867 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
868 	btrim(atacap->serial, sizeof(atacap->serial));
869 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
870 
871 	if (bootverbose)
872 	    printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
873 		   device_get_unit(ch->dev),
874 		   ata_unit2str(atadev),
875 		   ata_mode2str(ata_pmode(atacap)),
876 		   ata_mode2str(ata_wmode(atacap)),
877 		   ata_mode2str(ata_umode(atacap)),
878 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
879 
880 	if (init) {
881 	    char buffer[64];
882 
883 	    sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
884 	    device_set_desc_copy(atadev->dev, buffer);
885 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
886 		(atadev->param.config != ATA_CFA_MAGIC1) &&
887 		(atadev->param.config != ATA_CFA_MAGIC2)) {
888 		if (atapi_dma &&
889 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
890 		    ata_umode(&atadev->param) >= ATA_UDMA2)
891 		    atadev->mode = ATA_DMA_MAX;
892 	    }
893 	    else {
894 		if (ata_dma &&
895 		    (ata_umode(&atadev->param) > 0 ||
896 		     ata_wmode(&atadev->param) > 0))
897 		    atadev->mode = ATA_DMA_MAX;
898 	    }
899 	}
900     }
901     else {
902 	if (!error)
903 	    error = ENXIO;
904     }
905     return error;
906 }
907 
908 #ifndef ATA_CAM
909 int
910 ata_identify(device_t dev)
911 {
912     struct ata_channel *ch = device_get_softc(dev);
913     struct ata_device *atadev;
914     device_t *children;
915     device_t child, master = NULL;
916     int nchildren, i, n = ch->devices;
917 
918     if (bootverbose)
919 	device_printf(dev, "Identifying devices: %08x\n", ch->devices);
920 
921     mtx_lock(&Giant);
922     /* Skip existing devices. */
923     if (!device_get_children(dev, &children, &nchildren)) {
924 	for (i = 0; i < nchildren; i++) {
925 	    if (children[i] && (atadev = device_get_softc(children[i])))
926 		n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
927 	}
928 	free(children, M_TEMP);
929     }
930     /* Create new devices. */
931     if (bootverbose)
932 	device_printf(dev, "New devices: %08x\n", n);
933     if (n == 0) {
934 	mtx_unlock(&Giant);
935 	return (0);
936     }
937     for (i = 0; i < ATA_PM; ++i) {
938 	if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
939 	    int unit = -1;
940 
941 	    if (!(atadev = malloc(sizeof(struct ata_device),
942 				  M_ATA, M_NOWAIT | M_ZERO))) {
943 		device_printf(dev, "out of memory\n");
944 		return ENOMEM;
945 	    }
946 	    atadev->unit = i;
947 #ifdef ATA_STATIC_ID
948 	    if (n & (ATA_ATA_MASTER << i))
949 		unit = (device_get_unit(dev) << 1) + i;
950 #endif
951 	    if ((child = ata_add_child(dev, atadev, unit))) {
952 		/*
953 		 * PATA slave should be identified first, to allow
954 		 * device cable detection on master to work properly.
955 		 */
956 		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
957 			(n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
958 		    master = child;
959 		    continue;
960 		}
961 		if (ata_getparam(atadev, 1)) {
962 		    device_delete_child(dev, child);
963 		    free(atadev, M_ATA);
964 		}
965 	    }
966 	    else
967 		free(atadev, M_ATA);
968 	}
969     }
970     if (master) {
971 	atadev = device_get_softc(master);
972 	if (ata_getparam(atadev, 1)) {
973 	    device_delete_child(dev, master);
974 	    free(atadev, M_ATA);
975 	}
976     }
977     bus_generic_probe(dev);
978     bus_generic_attach(dev);
979     mtx_unlock(&Giant);
980     return 0;
981 }
982 #endif
983 
984 void
985 ata_default_registers(device_t dev)
986 {
987     struct ata_channel *ch = device_get_softc(dev);
988 
989     /* fill in the defaults from what's set up already */
990     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
991     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
992     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
993     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
994     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
995     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
996     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
997     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
998 }
999 
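/*
 * Promote a request to the 48-bit command set when the transfer crosses
 * the 28-bit LBA boundary (or exceeds 256 sectors) and the device
 * supports it; controllers flagged ATA_NO_48BIT_DMA get the PIO variants
 * instead of 48-bit DMA commands.
 */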
1000 void
1001 ata_modify_if_48bit(struct ata_request *request)
1002 {
1003     struct ata_channel *ch = device_get_softc(request->parent);
1004     struct ata_device *atadev = device_get_softc(request->dev);
1005 
1006     request->flags &= ~ATA_R_48BIT;
1007 
1008     if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1009 	 request->u.ata.count > 256) &&
1010 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1011 
1012 	/* translate command into 48bit version */
1013 	switch (request->u.ata.command) {
1014 	case ATA_READ:
1015 	    request->u.ata.command = ATA_READ48;
1016 	    break;
1017 	case ATA_READ_MUL:
1018 	    request->u.ata.command = ATA_READ_MUL48;
1019 	    break;
1020 	case ATA_READ_DMA:
1021 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1022 		if (request->transfersize > DEV_BSIZE)
1023 		    request->u.ata.command = ATA_READ_MUL48;
1024 		else
1025 		    request->u.ata.command = ATA_READ48;
1026 		request->flags &= ~ATA_R_DMA;
1027 	    }
1028 	    else
1029 		request->u.ata.command = ATA_READ_DMA48;
1030 	    break;
1031 	case ATA_READ_DMA_QUEUED:
1032 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1033 		if (request->transfersize > DEV_BSIZE)
1034 		    request->u.ata.command = ATA_READ_MUL48;
1035 		else
1036 		    request->u.ata.command = ATA_READ48;
1037 		request->flags &= ~ATA_R_DMA;
1038 	    }
1039 	    else
1040 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
1041 	    break;
1042 	case ATA_WRITE:
1043 	    request->u.ata.command = ATA_WRITE48;
1044 	    break;
1045 	case ATA_WRITE_MUL:
1046 	    request->u.ata.command = ATA_WRITE_MUL48;
1047 	    break;
1048 	case ATA_WRITE_DMA:
1049 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1050 		if (request->transfersize > DEV_BSIZE)
1051 		    request->u.ata.command = ATA_WRITE_MUL48;
1052 		else
1053 		    request->u.ata.command = ATA_WRITE48;
1054 		request->flags &= ~ATA_R_DMA;
1055 	    }
1056 	    else
1057 		request->u.ata.command = ATA_WRITE_DMA48;
1058 	    break;
1059 	case ATA_WRITE_DMA_QUEUED:
1060 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1061 		if (request->transfersize > DEV_BSIZE)
1062 		    request->u.ata.command = ATA_WRITE_MUL48;
1063 		else
1064 		    request->u.ata.command = ATA_WRITE48;
1066 		request->flags &= ~ATA_R_DMA;
1067 	    }
1068 	    else
1069 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1070 	    break;
1071 	case ATA_FLUSHCACHE:
1072 	    request->u.ata.command = ATA_FLUSHCACHE48;
1073 	    break;
1074 	case ATA_SET_MAX_ADDRESS:
1075 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1076 	    break;
1077 	default:
1078 	    return;
1079 	}
1080 	request->flags |= ATA_R_48BIT;
1081     }
1082     else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1083 
1084 	/* translate command into 48bit version */
1085 	switch (request->u.ata.command) {
1086 	case ATA_FLUSHCACHE:
1087 	    request->u.ata.command = ATA_FLUSHCACHE48;
1088 	    break;
1089 	case ATA_READ_NATIVE_MAX_ADDRESS:
1090 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1091 	    break;
1092 	case ATA_SET_MAX_ADDRESS:
1093 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1094 	    break;
1095 	default:
1096 	    return;
1097 	}
1098 	request->flags |= ATA_R_48BIT;
1099     }
1100 }
1101 
1102 void
1103 ata_udelay(int interval)
1104 {
1105     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1106     if (1 || interval < (1000000/hz) || ata_delayed_attach)
1107 	DELAY(interval);
1108     else
1109 	pause("ataslp", interval/(1000000/hz));
1110 }
1111 
1112 char *
1113 ata_unit2str(struct ata_device *atadev)
1114 {
1115     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1116     static char str[8];
1117 
1118     if (ch->devices & ATA_PORTMULTIPLIER)
1119 	sprintf(str, "port%d", atadev->unit);
1120     else
1121 	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1122     return str;
1123 }
1124 
1125 const char *
1126 ata_mode2str(int mode)
1127 {
1128     switch (mode) {
1129     case -1: return "UNSUPPORTED";
1130     case ATA_PIO0: return "PIO0";
1131     case ATA_PIO1: return "PIO1";
1132     case ATA_PIO2: return "PIO2";
1133     case ATA_PIO3: return "PIO3";
1134     case ATA_PIO4: return "PIO4";
1135     case ATA_WDMA0: return "WDMA0";
1136     case ATA_WDMA1: return "WDMA1";
1137     case ATA_WDMA2: return "WDMA2";
1138     case ATA_UDMA0: return "UDMA16";
1139     case ATA_UDMA1: return "UDMA25";
1140     case ATA_UDMA2: return "UDMA33";
1141     case ATA_UDMA3: return "UDMA40";
1142     case ATA_UDMA4: return "UDMA66";
1143     case ATA_UDMA5: return "UDMA100";
1144     case ATA_UDMA6: return "UDMA133";
1145     case ATA_SA150: return "SATA150";
1146     case ATA_SA300: return "SATA300";
1147     default:
1148 	if (mode & ATA_DMA_MASK)
1149 	    return "BIOSDMA";
1150 	else
1151 	    return "BIOSPIO";
1152     }
1153 }
1154 
1155 const char *
1156 ata_satarev2str(int rev)
1157 {
1158 	switch (rev) {
1159 	case 0: return "";
1160 	case 1: return "SATA 1.5Gb/s";
1161 	case 2: return "SATA 3Gb/s";
1162 	case 3: return "SATA 6Gb/s";
1163 	case 0xff: return "SATA";
1164 	default: return "???";
1165 	}
1166 }
1167 
1168 int
1169 ata_atapi(device_t dev, int target)
1170 {
1171     struct ata_channel *ch = device_get_softc(dev);
1172 
1173     return (ch->devices & (ATA_ATAPI_MASTER << target));
1174 }
1175 
1176 int
1177 ata_pmode(struct ata_params *ap)
1178 {
1179     if (ap->atavalid & ATA_FLAG_64_70) {
1180 	if (ap->apiomodes & 0x02)
1181 	    return ATA_PIO4;
1182 	if (ap->apiomodes & 0x01)
1183 	    return ATA_PIO3;
1184     }
1185     if (ap->mwdmamodes & 0x04)
1186 	return ATA_PIO4;
1187     if (ap->mwdmamodes & 0x02)
1188 	return ATA_PIO3;
1189     if (ap->mwdmamodes & 0x01)
1190 	return ATA_PIO2;
1191     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1192 	return ATA_PIO2;
1193     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1194 	return ATA_PIO1;
1195     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1196 	return ATA_PIO0;
1197     return ATA_PIO0;
1198 }
1199 
1200 int
1201 ata_wmode(struct ata_params *ap)
1202 {
1203     if (ap->mwdmamodes & 0x04)
1204 	return ATA_WDMA2;
1205     if (ap->mwdmamodes & 0x02)
1206 	return ATA_WDMA1;
1207     if (ap->mwdmamodes & 0x01)
1208 	return ATA_WDMA0;
1209     return -1;
1210 }
1211 
1212 int
1213 ata_umode(struct ata_params *ap)
1214 {
1215     if (ap->atavalid & ATA_FLAG_88) {
1216 	if (ap->udmamodes & 0x40)
1217 	    return ATA_UDMA6;
1218 	if (ap->udmamodes & 0x20)
1219 	    return ATA_UDMA5;
1220 	if (ap->udmamodes & 0x10)
1221 	    return ATA_UDMA4;
1222 	if (ap->udmamodes & 0x08)
1223 	    return ATA_UDMA3;
1224 	if (ap->udmamodes & 0x04)
1225 	    return ATA_UDMA2;
1226 	if (ap->udmamodes & 0x02)
1227 	    return ATA_UDMA1;
1228 	if (ap->udmamodes & 0x01)
1229 	    return ATA_UDMA0;
1230     }
1231     return -1;
1232 }
1233 
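/*
 * Clamp a requested transfer mode to what the device advertises in its
 * IDENTIFY data and to the caller-supplied maximum.
 */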
1234 int
1235 ata_limit_mode(device_t dev, int mode, int maxmode)
1236 {
1237     struct ata_device *atadev = device_get_softc(dev);
1238 
1239     if (maxmode && mode > maxmode)
1240 	mode = maxmode;
1241 
1242     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1243 	return min(mode, ata_umode(&atadev->param));
1244 
1245     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1246 	return min(mode, ata_wmode(&atadev->param));
1247 
1248     if (mode > ata_pmode(&atadev->param))
1249 	return min(mode, ata_pmode(&atadev->param));
1250 
1251     return mode;
1252 }
1253 
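/*
 * Helpers for cleaning up IDENTIFY strings: bswap() byte-swaps each
 * 16-bit word, btrim() turns NULs and underscores into spaces and strips
 * trailing blanks, bpack() collapses runs of spaces.
 */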
1254 static void
1255 bswap(int8_t *buf, int len)
1256 {
1257     u_int16_t *ptr = (u_int16_t*)(buf + len);
1258 
1259     while (--ptr >= (u_int16_t*)buf)
1260 	*ptr = ntohs(*ptr);
1261 }
1262 
1263 static void
1264 btrim(int8_t *buf, int len)
1265 {
1266     int8_t *ptr;
1267 
1268     for (ptr = buf; ptr < buf+len; ++ptr)
1269 	if (!*ptr || *ptr == '_')
1270 	    *ptr = ' ';
1271     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1272 	*ptr = 0;
1273 }
1274 
1275 static void
1276 bpack(int8_t *src, int8_t *dst, int len)
1277 {
1278     int i, j, blank;
1279 
1280     for (i = j = blank = 0 ; i < len; i++) {
1281 	if (blank && src[i] == ' ') continue;
1282 	if (blank && src[i] != ' ') {
1283 	    dst[j++] = src[i];
1284 	    blank = 0;
1285 	    continue;
1286 	}
1287 	if (src[i] == ' ') {
1288 	    blank = 1;
1289 	    if (i == 0)
1290 		continue;
1291 	}
1292 	dst[j++] = src[i];
1293     }
1294     if (j < len)
1295 	dst[j] = 0x00;
1296 }
1297 
1298 #ifdef ATA_CAM
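/*
 * Translate a CAM CCB (XPT_ATA_IO or XPT_SCSI_IO) into an ata_request
 * and hand it to the controller's begin_transaction method.
 */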
1299 void
1300 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1301 {
1302 	struct ata_channel *ch = device_get_softc(dev);
1303 	struct ata_request *request;
1304 
1305 	if (!(request = ata_alloc_request())) {
1306 		device_printf(dev, "FAILURE - out of memory in start\n");
1307 		ccb->ccb_h.status = CAM_REQ_INVALID;
1308 		xpt_done(ccb);
1309 		return;
1310 	}
1311 	bzero(request, sizeof(*request));
1312 
1313 	/* setup request */
1314 	request->dev = NULL;
1315 	request->parent = dev;
1316 	request->unit = ccb->ccb_h.target_id;
1317 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1318 		request->data = ccb->ataio.data_ptr;
1319 		request->bytecount = ccb->ataio.dxfer_len;
1320 		request->u.ata.command = ccb->ataio.cmd.command;
1321 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1322 					  (uint16_t)ccb->ataio.cmd.features;
1323 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1324 					(uint16_t)ccb->ataio.cmd.sector_count;
1325 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1326 			request->flags |= ATA_R_48BIT;
1327 			request->u.ata.lba =
1328 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1329 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1330 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1331 		} else {
1332 			request->u.ata.lba =
1333 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1334 		}
1335 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1336 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1337 				       (uint64_t)ccb->ataio.cmd.lba_low;
1338 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1339 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1340 			request->flags |= ATA_R_DMA;
1341 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1342 			request->flags |= ATA_R_READ;
1343 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1344 			request->flags |= ATA_R_WRITE;
1345 	} else {
1346 		request->data = ccb->csio.data_ptr;
1347 		request->bytecount = ccb->csio.dxfer_len;
1348 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1349 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1350 		    request->u.atapi.ccb, ccb->csio.cdb_len);
1351 		request->flags |= ATA_R_ATAPI;
1352 		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1353 			request->flags |= ATA_R_ATAPI16;
1354 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1355 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1356 			request->flags |= ATA_R_DMA;
1357 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1358 			request->flags |= ATA_R_READ;
1359 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1360 			request->flags |= ATA_R_WRITE;
1361 	}
1362 	request->transfersize = min(request->bytecount,
1363 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1364 	request->retries = 0;
1365 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1366 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1367 	request->ccb = ccb;
1368 
1369 	ch->running = request;
1370 	ch->state = ATA_ACTIVE;
1371 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1372 	    ch->running = NULL;
1373 	    ch->state = ATA_IDLE;
1374 	    ata_cam_end_transaction(dev, request);
1375 	    return;
1376 	}
1377 }
1378 
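/*
 * Complete a request on behalf of CAM: map the result to a CCB status,
 * freeze the device queue on errors, copy back the ATA result registers
 * when requested and kick off error recovery after fatal errors.
 */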
1379 void
1380 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1381 {
1382 	struct ata_channel *ch = device_get_softc(dev);
1383 	union ccb *ccb = request->ccb;
1384 	int fatalerr = 0;
1385 
1386 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1387 	if (request->flags & ATA_R_TIMEOUT) {
1388 		xpt_freeze_simq(ch->sim, 1);
1389 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1390 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1391 		fatalerr = 1;
1392 	} else if (request->status & ATA_S_ERROR) {
1393 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1394 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1395 		} else {
1396 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1397 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1398 		}
1399 	} else if (request->result == ERESTART)
1400 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1401 	else if (request->result != 0)
1402 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1403 	else
1404 		ccb->ccb_h.status |= CAM_REQ_CMP;
1405 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1406 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1407 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1408 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1409 	}
1410 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1411 	    ((request->status & ATA_S_ERROR) ||
1412 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1413 		struct ata_res *res = &ccb->ataio.res;
1414 		res->status = request->status;
1415 		res->error = request->error;
1416 		res->lba_low = request->u.ata.lba;
1417 		res->lba_mid = request->u.ata.lba >> 8;
1418 		res->lba_high = request->u.ata.lba >> 16;
1419 		res->device = request->u.ata.lba >> 24;
1420 		res->lba_low_exp = request->u.ata.lba >> 24;
1421 		res->lba_mid_exp = request->u.ata.lba >> 32;
1422 		res->lba_high_exp = request->u.ata.lba >> 40;
1423 		res->sector_count = request->u.ata.count;
1424 		res->sector_count_exp = request->u.ata.count >> 8;
1425 	}
1426 	ata_free_request(request);
1427 	xpt_done(ccb);
1428 	/* Do error recovery if needed. */
1429 	if (fatalerr)
1430 		ata_reinit(dev);
1431 }
1432 
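/*
 * CAM SIM action routine: dispatch I/O CCBs to ata_cam_begin_transaction()
 * and handle transport settings, bus reset and path inquiry requests.
 */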
1433 static void
1434 ataaction(struct cam_sim *sim, union ccb *ccb)
1435 {
1436 	device_t dev;
1437 	struct ata_channel *ch;
1438 
1439 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1440 	    ccb->ccb_h.func_code));
1441 
1442 	ch = (struct ata_channel *)cam_sim_softc(sim);
1443 	dev = ch->dev;
1444 	switch (ccb->ccb_h.func_code) {
1445 	/* Common cases first */
1446 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
1447 	case XPT_SCSI_IO:
1448 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1449 		    << ccb->ccb_h.target_id)) == 0) {
1450 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1451 			xpt_done(ccb);
1452 			break;
1453 		}
1454 		if (ch->running)
1455 			device_printf(dev, "already running!\n");
1456 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1457 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1458 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
1459 			struct ata_res *res = &ccb->ataio.res;
1460 
1461 			bzero(res, sizeof(*res));
1462 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1463 				res->lba_high = 0;
1464 				res->lba_mid = 0;
1465 			} else {
1466 				res->lba_high = 0xeb;
1467 				res->lba_mid = 0x14;
1468 			}
1469 			ccb->ccb_h.status = CAM_REQ_CMP;
1470 			xpt_done(ccb);
1471 			break;
1472 		}
1473 		ata_cam_begin_transaction(dev, ccb);
1474 		break;
1475 	case XPT_EN_LUN:		/* Enable LUN as a target */
1476 	case XPT_TARGET_IO:		/* Execute target I/O request */
1477 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
1478 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
1479 	case XPT_ABORT:			/* Abort the specified CCB */
1480 		/* XXX Implement */
1481 		ccb->ccb_h.status = CAM_REQ_INVALID;
1482 		xpt_done(ccb);
1483 		break;
1484 	case XPT_SET_TRAN_SETTINGS:
1485 	{
1486 		struct	ccb_trans_settings *cts = &ccb->cts;
1487 		struct	ata_cam_device *d;
1488 
1489 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1490 			d = &ch->curr[ccb->ccb_h.target_id];
1491 		else
1492 			d = &ch->user[ccb->ccb_h.target_id];
1493 		if (ch->flags & ATA_SATA) {
1494 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1495 				d->revision = cts->xport_specific.sata.revision;
1496 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
1497 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1498 					d->mode = ATA_SETMODE(ch->dev,
1499 					    ccb->ccb_h.target_id,
1500 					    cts->xport_specific.sata.mode);
1501 				} else
1502 					d->mode = cts->xport_specific.sata.mode;
1503 			}
1504 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1505 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1506 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1507 				d->atapi = cts->xport_specific.sata.atapi;
1508 		} else {
1509 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1510 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1511 					d->mode = ATA_SETMODE(ch->dev,
1512 					    ccb->ccb_h.target_id,
1513 					    cts->xport_specific.ata.mode);
1514 				} else
1515 					d->mode = cts->xport_specific.ata.mode;
1516 			}
1517 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1518 				d->bytecount = cts->xport_specific.ata.bytecount;
1519 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1520 				d->atapi = cts->xport_specific.ata.atapi;
1521 		}
1522 		ccb->ccb_h.status = CAM_REQ_CMP;
1523 		xpt_done(ccb);
1524 		break;
1525 	}
1526 	case XPT_GET_TRAN_SETTINGS:
1527 	{
1528 		struct	ccb_trans_settings *cts = &ccb->cts;
1529 		struct  ata_cam_device *d;
1530 
1531 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1532 			d = &ch->curr[ccb->ccb_h.target_id];
1533 		else
1534 			d = &ch->user[ccb->ccb_h.target_id];
1535 		cts->protocol = PROTO_ATA;
1536 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1537 		if (ch->flags & ATA_SATA) {
1538 			cts->transport = XPORT_SATA;
1539 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1540 			cts->xport_specific.sata.valid = 0;
1541 			cts->xport_specific.sata.mode = d->mode;
1542 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1543 			cts->xport_specific.sata.bytecount = d->bytecount;
1544 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1545 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1546 				cts->xport_specific.sata.revision =
1547 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1548 				if (cts->xport_specific.sata.revision != 0xff) {
1549 					cts->xport_specific.sata.valid |=
1550 					    CTS_SATA_VALID_REVISION;
1551 				}
1552 			} else {
1553 				cts->xport_specific.sata.revision = d->revision;
1554 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1555 			}
1556 			cts->xport_specific.sata.atapi = d->atapi;
1557 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1558 		} else {
1559 			cts->transport = XPORT_ATA;
1560 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1561 			cts->xport_specific.ata.valid = 0;
1562 			cts->xport_specific.ata.mode = d->mode;
1563 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1564 			cts->xport_specific.ata.bytecount = d->bytecount;
1565 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1566 			cts->xport_specific.ata.atapi = d->atapi;
1567 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1568 		}
1569 		ccb->ccb_h.status = CAM_REQ_CMP;
1570 		xpt_done(ccb);
1571 		break;
1572 	}
1573 #if 0
1574 	case XPT_CALC_GEOMETRY:
1575 	{
1576 		struct	  ccb_calc_geometry *ccg;
1577 		uint32_t size_mb;
1578 		uint32_t secs_per_cylinder;
1579 
1580 		ccg = &ccb->ccg;
1581 		size_mb = ccg->volume_size
1582 			/ ((1024L * 1024L) / ccg->block_size);
1583 		if (size_mb >= 1024 && (aha->extended_trans != 0)) {
1584 			if (size_mb >= 2048) {
1585 				ccg->heads = 255;
1586 				ccg->secs_per_track = 63;
1587 			} else {
1588 				ccg->heads = 128;
1589 				ccg->secs_per_track = 32;
1590 			}
1591 		} else {
1592 			ccg->heads = 64;
1593 			ccg->secs_per_track = 32;
1594 		}
1595 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1596 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1597 		ccb->ccb_h.status = CAM_REQ_CMP;
1598 		xpt_done(ccb);
1599 		break;
1600 	}
1601 #endif
1602 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1603 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1604 		ata_reinit(dev);
1605 		ccb->ccb_h.status = CAM_REQ_CMP;
1606 		xpt_done(ccb);
1607 		break;
1608 	case XPT_TERM_IO:		/* Terminate the I/O process */
1609 		/* XXX Implement */
1610 		ccb->ccb_h.status = CAM_REQ_INVALID;
1611 		xpt_done(ccb);
1612 		break;
1613 	case XPT_PATH_INQ:		/* Path routing inquiry */
1614 	{
1615 		struct ccb_pathinq *cpi = &ccb->cpi;
1616 
1617 		cpi->version_num = 1; /* XXX??? */
1618 		cpi->hba_inquiry = PI_SDTR_ABLE;
1619 		cpi->target_sprt = 0;
1620 		cpi->hba_misc = PIM_SEQSCAN;
1621 		cpi->hba_eng_cnt = 0;
1622 		if (ch->flags & ATA_NO_SLAVE)
1623 			cpi->max_target = 0;
1624 		else
1625 			cpi->max_target = 1;
1626 		cpi->max_lun = 0;
1627 		cpi->initiator_id = 0;
1628 		cpi->bus_id = cam_sim_bus(sim);
1629 		if (ch->flags & ATA_SATA)
1630 			cpi->base_transfer_speed = 150000;
1631 		else
1632 			cpi->base_transfer_speed = 3300;
1633 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1634 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1635 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1636 		cpi->unit_number = cam_sim_unit(sim);
1637 		if (ch->flags & ATA_SATA)
1638 			cpi->transport = XPORT_SATA;
1639 		else
1640 			cpi->transport = XPORT_ATA;
1641 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1642 		cpi->protocol = PROTO_ATA;
1643 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1644 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1645 		cpi->ccb_h.status = CAM_REQ_CMP;
1646 		xpt_done(ccb);
1647 		break;
1648 	}
1649 	default:
1650 		ccb->ccb_h.status = CAM_REQ_INVALID;
1651 		xpt_done(ccb);
1652 		break;
1653 	}
1654 }
1655 
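/*
 * Polled completion entry point; CAM holds the SIM lock (the channel
 * state mutex) when calling this, so use the locked interrupt handler.
 */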
1656 static void
1657 atapoll(struct cam_sim *sim)
1658 {
1659 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1660 
1661 	ata_interrupt_locked(ch);
1662 }
1663 #endif
1664 
1665 /*
1666  * module handling
1667  */
1668 static int
1669 ata_module_event_handler(module_t mod, int what, void *arg)
1670 {
1671 #ifndef ATA_CAM
1672     static struct cdev *atacdev;
1673 #endif
1674 
1675     switch (what) {
1676     case MOD_LOAD:
1677 #ifndef ATA_CAM
1678 	/* register controlling device */
1679 	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1680 
1681 	if (cold) {
1682 	    /* register boot attach to be run when interrupts are enabled */
1683 	    if (!(ata_delayed_attach = (struct intr_config_hook *)
1684 				       malloc(sizeof(struct intr_config_hook),
1685 					      M_TEMP, M_NOWAIT | M_ZERO))) {
1686 		printf("ata: malloc of delayed attach hook failed\n");
1687 		return EIO;
1688 	    }
1689 	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1690 	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
1691 		printf("ata: config_intrhook_establish failed\n");
1692 		free(ata_delayed_attach, M_TEMP);
1693 	    }
1694 	}
1695 #endif
1696 	return 0;
1697 
1698     case MOD_UNLOAD:
1699 #ifndef ATA_CAM
1700 	/* deregister controlling device */
1701 	destroy_dev(atacdev);
1702 #endif
1703 	return 0;
1704 
1705     default:
1706 	return EOPNOTSUPP;
1707     }
1708 }
1709 
1710 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1711 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1712 MODULE_VERSION(ata, 1);
1713 #ifdef ATA_CAM
1714 MODULE_DEPEND(ata, cam, 1, 1, 1);
1715 #endif
1716 
1717 static void
1718 ata_init(void)
1719 {
1720     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1721 				   NULL, NULL, NULL, NULL, 0, 0);
1722     ata_composite_zone = uma_zcreate("ata_composite",
1723 				     sizeof(struct ata_composite),
1724 				     NULL, NULL, NULL, NULL, 0, 0);
1725 }
1726 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1727 
1728 static void
1729 ata_uninit(void)
1730 {
1731     uma_zdestroy(ata_composite_zone);
1732     uma_zdestroy(ata_request_zone);
1733 }
1734 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1735