xref: /freebsd/sys/dev/ata/ata-all.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/ata.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/bio.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/sema.h>
44 #include <sys/taskqueue.h>
45 #include <vm/uma.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <dev/ata/ata-all.h>
51 #include <ata_if.h>
52 
53 #ifdef ATA_CAM
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_xpt_periph.h>
59 #include <cam/cam_debug.h>
60 #endif
61 
62 #ifndef ATA_CAM
63 /* device structure */
64 static  d_ioctl_t       ata_ioctl;
65 static struct cdevsw ata_cdevsw = {
66 	.d_version =    D_VERSION,
67 	.d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
68 	.d_ioctl =      ata_ioctl,
69 	.d_name =       "ata",
70 };
71 #endif
72 
73 /* prototypes */
74 #ifndef ATA_CAM
75 static void ata_boot_attach(void);
76 static device_t ata_add_child(device_t, struct ata_device *, int);
77 #else
78 static void ataaction(struct cam_sim *sim, union ccb *ccb);
79 static void atapoll(struct cam_sim *sim);
80 #endif
81 static void ata_conn_event(void *, int);
82 static void bswap(int8_t *, int);
83 static void btrim(int8_t *, int);
84 static void bpack(int8_t *, int8_t *, int);
85 static void ata_interrupt_locked(void *data);
86 
87 /* global vars */
88 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
89 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
90 struct intr_config_hook *ata_delayed_attach = NULL;
91 devclass_t ata_devclass;
92 uma_zone_t ata_request_zone;
93 uma_zone_t ata_composite_zone;
94 int ata_wc = 1;
95 int ata_setmax = 0;
96 int ata_dma_check_80pin = 1;
97 
98 /* local vars */
99 static int ata_dma = 1;
100 static int atapi_dma = 1;
101 
102 /* sysctl vars */
103 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
104 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
105 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
106 	   "ATA disk DMA mode control");
107 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
108 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
109 	   CTLFLAG_RDTUN, &ata_dma_check_80pin, 1,
110 	   "Check for 80pin cable before setting ATA DMA mode");
111 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
112 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
113 	   "ATAPI device DMA mode control");
114 TUNABLE_INT("hw.ata.wc", &ata_wc);
115 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
116 	   "ATA disk write caching");
117 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
118 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
119 	   "ATA disk set max native address");
120 
121 /*
122  * newbus device interface related functions
123  */
124 int
125 ata_probe(device_t dev)
126 {
127     return 0;
128 }
129 
130 int
131 ata_attach(device_t dev)
132 {
133     struct ata_channel *ch = device_get_softc(dev);
134     int error, rid;
135 #ifdef ATA_CAM
136     struct cam_devq *devq;
137     int i;
138 #endif
139 
140     /* check that we have a virgin channel to attach */
141     if (ch->r_irq)
142 	return EEXIST;
143 
144     /* initialize the softc basics */
145     ch->dev = dev;
146     ch->state = ATA_IDLE;
147     bzero(&ch->state_mtx, sizeof(struct mtx));
148     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
149     bzero(&ch->queue_mtx, sizeof(struct mtx));
150     mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
151     TAILQ_INIT(&ch->ata_queue);
152     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
153 #ifdef ATA_CAM
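	/*
	 * Seed the per-target user and current transfer settings: mode is
	 * left at 0 and the default DMA byte count is 8KiB on SATA channels
	 * and MAXPHYS on parallel ATA.
	 */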
154 	for (i = 0; i < 16; i++) {
155 		ch->user[i].mode = 0;
156 		if (ch->flags & ATA_SATA)
157 			ch->user[i].bytecount = 8192;
158 		else
159 			ch->user[i].bytecount = MAXPHYS;
160 		ch->curr[i] = ch->user[i];
161 	}
162 #endif
163 
164     /* reset the controller HW, the channel and device(s) */
165     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
166 	pause("ataatch", 1);
167 #ifndef ATA_CAM
168     ATA_RESET(dev);
169 #endif
170     ATA_LOCKING(dev, ATA_LF_UNLOCK);
171 
172     /* allocate DMA resources if DMA HW present */
173     if (ch->dma.alloc)
174 	ch->dma.alloc(dev);
175 
176     /* setup interrupt delivery */
177     rid = ATA_IRQ_RID;
178     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
179 				       RF_SHAREABLE | RF_ACTIVE);
180     if (!ch->r_irq) {
181 	device_printf(dev, "unable to allocate interrupt\n");
182 	return ENXIO;
183     }
184     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
185 				ata_interrupt, ch, &ch->ih))) {
186 	device_printf(dev, "unable to setup interrupt\n");
187 	return error;
188     }
189 
190 #ifndef ATA_CAM
191     /* probe and attach devices on this channel unless we are in early boot */
192     if (!ata_delayed_attach)
193 	ata_identify(dev);
194     return (0);
195 #else
196 	mtx_lock(&ch->state_mtx);
197 	/* Create the device queue for our SIM. */
198 	devq = cam_simq_alloc(1);
199 	if (devq == NULL) {
200 		device_printf(dev, "Unable to allocate simq\n");
201 		error = ENOMEM;
202 		goto err1;
203 	}
204 	/* Construct SIM entry */
205 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
206 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
207 	if (ch->sim == NULL) {
208 		device_printf(dev, "unable to allocate sim\n");
209 		error = ENOMEM;
210 		goto err2;
211 	}
212 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
213 		device_printf(dev, "unable to register xpt bus\n");
214 		error = ENXIO;
215 		goto err2;
216 	}
217 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
218 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
219 		device_printf(dev, "unable to create path\n");
220 		error = ENXIO;
221 		goto err3;
222 	}
223 	mtx_unlock(&ch->state_mtx);
224 	return (0);
225 
226 err3:
227 	xpt_bus_deregister(cam_sim_path(ch->sim));
228 err2:
229 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
230 err1:
231 	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
232 	mtx_unlock(&ch->state_mtx);
233 	return (error);
234 #endif
235 }
236 
237 int
238 ata_detach(device_t dev)
239 {
240     struct ata_channel *ch = device_get_softc(dev);
241 #ifndef ATA_CAM
242     device_t *children;
243     int nchildren, i;
244 #endif
245 
246     /* check that we have a valid channel to detach */
247     if (!ch->r_irq)
248 	return ENXIO;
249 
250     /* grab the channel lock so no new requests get launched */
251     mtx_lock(&ch->state_mtx);
252     ch->state |= ATA_STALL_QUEUE;
253     mtx_unlock(&ch->state_mtx);
254 
255 #ifndef ATA_CAM
256     /* detach & delete all children */
257     if (!device_get_children(dev, &children, &nchildren)) {
258 	for (i = 0; i < nchildren; i++)
259 	    if (children[i])
260 		device_delete_child(dev, children[i]);
261 	free(children, M_TEMP);
262     }
263 #endif
264     taskqueue_drain(taskqueue_thread, &ch->conntask);
265 
266 #ifdef ATA_CAM
267 	mtx_lock(&ch->state_mtx);
268 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
269 	xpt_free_path(ch->path);
270 	xpt_bus_deregister(cam_sim_path(ch->sim));
271 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
272 	mtx_unlock(&ch->state_mtx);
273 #endif
274 
275     /* release resources */
276     bus_teardown_intr(dev, ch->r_irq, ch->ih);
277     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
278     ch->r_irq = NULL;
279 
280     /* free DMA resources if DMA HW present */
281     if (ch->dma.free)
282 	ch->dma.free(dev);
283 
284     mtx_destroy(&ch->state_mtx);
285     mtx_destroy(&ch->queue_mtx);
286     return 0;
287 }
288 
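/*
 * Handler for the channel's conntask: runs a full channel reinit when a
 * connect/disconnect event has been queued by the controller driver.
 */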
289 static void
290 ata_conn_event(void *context, int dummy)
291 {
292     device_t dev = (device_t)context;
#ifdef ATA_CAM
293     struct ata_channel *ch = device_get_softc(dev);
294 
295     mtx_lock(&ch->state_mtx);
296     ata_reinit(dev);
297     mtx_unlock(&ch->state_mtx);
#else
    /* the !ATA_CAM ata_reinit() acquires the state lock itself */
    ata_reinit(dev);
#endif
298 }
299 
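/*
 * Reinitialize a channel after an error or bus reset: stop any running
 * request and reset the controller hardware.  Without ATA_CAM the newbus
 * children are re-probed and dead ones deleted; with ATA_CAM the running
 * request is returned as ERESTART and an AC_BUS_RESET event is posted to
 * CAM (in that case the caller holds the channel state lock).
 */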
300 int
301 ata_reinit(device_t dev)
302 {
303     struct ata_channel *ch = device_get_softc(dev);
304     struct ata_request *request;
305 #ifndef ATA_CAM
306     device_t *children;
307     int nchildren, i;
308 
309     /* check that we have a valid channel to reinit */
310     if (!ch || !ch->r_irq)
311 	return ENXIO;
312 
313     if (bootverbose)
314 	device_printf(dev, "reiniting channel ..\n");
315 
316     /* poll for locking the channel */
317     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
318 	pause("atarini", 1);
319 
320     /* catch any request still sitting in ch->running */
321     mtx_lock(&ch->state_mtx);
322     if (ch->state & ATA_STALL_QUEUE) {
323 	/* Recursive reinits and reinits during detach prohibited. */
324 	mtx_unlock(&ch->state_mtx);
325 	return (ENXIO);
326     }
327     if ((request = ch->running))
328 	callout_stop(&request->callout);
329     ch->running = NULL;
330 
331     /* unconditionally grab the channel lock */
332     ch->state |= ATA_STALL_QUEUE;
333     mtx_unlock(&ch->state_mtx);
334 
335     /* reset the controller HW, the channel and device(s) */
336     ATA_RESET(dev);
337 
338     /* reinit the children and delete any that fail */
339     if (!device_get_children(dev, &children, &nchildren)) {
340 	mtx_lock(&Giant);       /* newbus suckage it needs Giant */
341 	for (i = 0; i < nchildren; i++) {
342 	    /* did any children go missing ? */
343 	    if (children[i] && device_is_attached(children[i]) &&
344 		ATA_REINIT(children[i])) {
345 		/*
346 		 * if we had a running request and its device matches
347 		 * this child we need to inform the request that the
348 		 * device is gone.
349 		 */
350 		if (request && request->dev == children[i]) {
351 		    request->result = ENXIO;
352 		    device_printf(request->dev, "FAILURE - device detached\n");
353 
354 		    /* if the request did not time out, finish it here */
355 		    if (!(request->flags & ATA_R_TIMEOUT))
356 			    ata_finish(request);
357 		    request = NULL;
358 		}
359 		device_delete_child(dev, children[i]);
360 	    }
361 	}
362 	free(children, M_TEMP);
363 	mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
364     }
365 
366     /* if we still have a good request, put it on the queue again */
367     if (request && !(request->flags & ATA_R_TIMEOUT)) {
368 	device_printf(request->dev,
369 		      "WARNING - %s requeued due to channel reset",
370 		      ata_cmd2str(request));
371 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
372 	    printf(" LBA=%ju", request->u.ata.lba);
373 	printf("\n");
374 	request->flags |= ATA_R_REQUEUE;
375 	ata_queue_request(request);
376     }
377 
378     /* we're done, release the channel for new work */
379     mtx_lock(&ch->state_mtx);
380     ch->state = ATA_IDLE;
381     mtx_unlock(&ch->state_mtx);
382     ATA_LOCKING(dev, ATA_LF_UNLOCK);
383 
384     /* Add new children. */
385 /*    ata_identify(dev); */
386 
387     if (bootverbose)
388 	device_printf(dev, "reinit done ..\n");
389 
390     /* kick off requests on the queue */
391     ata_start(dev);
392 #else
393 	if ((request = ch->running)) {
394 		ch->running = NULL;
395 		if (ch->state == ATA_ACTIVE)
396 		    ch->state = ATA_IDLE;
397 		callout_stop(&request->callout);
398 		if (ch->dma.unload)
399 		    ch->dma.unload(request);
400 		request->result = ERESTART;
401 		ata_cam_end_transaction(dev, request);
402 	}
403 	/* reset the controller HW, the channel and device(s) */
404 	ATA_RESET(dev);
405 	/* Tell the XPT about the event */
406 	xpt_async(AC_BUS_RESET, ch->path, NULL);
407 #endif
408 	return(0);
409 }
410 
411 int
412 ata_suspend(device_t dev)
413 {
414     struct ata_channel *ch;
415 
416     /* check for valid device */
417     if (!dev || !(ch = device_get_softc(dev)))
418 	return ENXIO;
419 
420 #ifndef ATA_CAM
421     /* wait for the channel to be IDLE or detached before suspending */
422     while (ch->r_irq) {
423 	mtx_lock(&ch->state_mtx);
424 	if (ch->state == ATA_IDLE) {
425 	    ch->state = ATA_ACTIVE;
426 	    mtx_unlock(&ch->state_mtx);
427 	    break;
428 	}
429 	mtx_unlock(&ch->state_mtx);
430 	tsleep(ch, PRIBIO, "atasusp", hz/10);
431     }
432     ATA_LOCKING(dev, ATA_LF_UNLOCK);
433 #endif
434     return(0);
435 }
436 
437 int
438 ata_resume(device_t dev)
439 {
440     int error;
441 
442     /* check for valid device */
443     if (!dev || !device_get_softc(dev))
444 	return ENXIO;
445 
446     /* reinit the devices, we don't know what mode/state they are in */
447     error = ata_reinit(dev);
448 
449 #ifndef ATA_CAM
450     /* kick off requests on the queue */
451     ata_start(dev);
452 #endif
453     return error;
454 }
455 
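/*
 * Interrupt entry point.  In ATA_CAM builds the channel state lock doubles
 * as the SIM lock and is taken here; otherwise the locked helper below
 * acquires it itself.
 */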
456 void
457 ata_interrupt(void *data)
458 {
459 #ifdef ATA_CAM
460     struct ata_channel *ch = (struct ata_channel *)data;
461 
462     mtx_lock(&ch->state_mtx);
463 #endif
464     ata_interrupt_locked(data);
465 #ifdef ATA_CAM
466     mtx_unlock(&ch->state_mtx);
467 #endif
468 }
469 
470 static void
471 ata_interrupt_locked(void *data)
472 {
473     struct ata_channel *ch = (struct ata_channel *)data;
474     struct ata_request *request;
475 
476 #ifndef ATA_CAM
477     mtx_lock(&ch->state_mtx);
478 #endif
479     do {
480 	/* ignore the interrupt if it's not for us */
481 	if (ch->hw.status && !ch->hw.status(ch->dev))
482 	    break;
483 
484 	/* do we have a running request */
485 	if (!(request = ch->running))
486 	    break;
487 
488 	ATA_DEBUG_RQ(request, "interrupt");
489 
490 	/* safety check for the right state */
491 	if (ch->state == ATA_IDLE) {
492 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
493 	    break;
494 	}
495 
496 	/*
497 	 * we have the HW locks, so end the transaction for this request
498 	 * if it finishes immediately; otherwise wait for the next interrupt
499 	 */
500 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
501 	    ch->running = NULL;
502 	    if (ch->state == ATA_ACTIVE)
503 		ch->state = ATA_IDLE;
504 #ifdef ATA_CAM
505 	    ata_cam_end_transaction(ch->dev, request);
506 #else
507 	    mtx_unlock(&ch->state_mtx);
508 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
509 	    ata_finish(request);
510 #endif
511 	    return;
512 	}
513     } while (0);
514 #ifndef ATA_CAM
515     mtx_unlock(&ch->state_mtx);
516 #endif
517 }
518 
519 void
520 ata_print_cable(device_t dev, u_int8_t *who)
521 {
522     device_printf(dev,
523                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
524 }
525 
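/*
 * Limit UDMA modes above UDMA33 to UDMA33 when the device does not report
 * an 80-conductor cable, unless the check has been disabled through the
 * hw.ata.ata_dma_check_80pin tunable.
 */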
526 int
527 ata_check_80pin(device_t dev, int mode)
528 {
529     struct ata_device *atadev = device_get_softc(dev);
530 
531     if (!ata_dma_check_80pin) {
532         if (bootverbose)
533             device_printf(dev, "Skipping 80pin cable check\n");
534         return mode;
535     }
536 
537     if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
538         ata_print_cable(dev, "device");
539         mode = ATA_UDMA2;
540     }
541     return mode;
542 }
543 
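/*
 * Negotiate the transfer mode with the controller (re-checking the cable
 * where required) and program the result into the device using
 * SET FEATURES / SET TRANSFER MODE.
 */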
544 void
545 ata_setmode(device_t dev)
546 {
547 	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
548 	struct ata_device *atadev = device_get_softc(dev);
549 	int error, mode, pmode;
550 
551 	mode = atadev->mode;
552 	do {
553 		pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
554 		mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
555 		if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
556 			mode = ata_check_80pin(dev, mode);
557 	} while (pmode != mode); /* Iterate until successful negotiation. */
558 	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
559 	if (bootverbose)
560 	        device_printf(dev, "%ssetting %s\n",
561 		    (error) ? "FAILURE " : "", ata_mode2str(mode));
562 	atadev->mode = mode;
563 }
564 
565 /*
566  * device related interfaces
567  */
568 #ifndef ATA_CAM
569 static int
570 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
571 	  int32_t flag, struct thread *td)
572 {
573     device_t device, *children;
574     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
575     int *value = (int *)data;
576     int i, nchildren, error = ENOTTY;
577 
578     switch (cmd) {
579     case IOCATAGMAXCHANNEL:
580 	/* In case we have channel 0..n this will return n+1. */
581 	*value = devclass_get_maxunit(ata_devclass);
582 	error = 0;
583 	break;
584 
585     case IOCATAREINIT:
586 	if (*value >= devclass_get_maxunit(ata_devclass) ||
587 	    !(device = devclass_get_device(ata_devclass, *value)) ||
588 	    !device_is_attached(device))
589 	    return ENXIO;
590 	error = ata_reinit(device);
591 	break;
592 
593     case IOCATAATTACH:
594 	if (*value >= devclass_get_maxunit(ata_devclass) ||
595 	    !(device = devclass_get_device(ata_devclass, *value)) ||
596 	    !device_is_attached(device))
597 	    return ENXIO;
598 	error = DEVICE_ATTACH(device);
599 	break;
600 
601     case IOCATADETACH:
602 	if (*value >= devclass_get_maxunit(ata_devclass) ||
603 	    !(device = devclass_get_device(ata_devclass, *value)) ||
604 	    !device_is_attached(device))
605 	    return ENXIO;
606 	error = DEVICE_DETACH(device);
607 	break;
608 
609     case IOCATADEVICES:
610 	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
611 	    !(device = devclass_get_device(ata_devclass, devices->channel)) ||
612 	    !device_is_attached(device))
613 	    return ENXIO;
614 	bzero(devices->name[0], 32);
615 	bzero(&devices->params[0], sizeof(struct ata_params));
616 	bzero(devices->name[1], 32);
617 	bzero(&devices->params[1], sizeof(struct ata_params));
618 	if (!device_get_children(device, &children, &nchildren)) {
619 	    for (i = 0; i < nchildren; i++) {
620 		if (children[i] && device_is_attached(children[i])) {
621 		    struct ata_device *atadev = device_get_softc(children[i]);
622 
623 		    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
624 			strncpy(devices->name[0],
625 				device_get_nameunit(children[i]), 32);
626 			bcopy(&atadev->param, &devices->params[0],
627 			      sizeof(struct ata_params));
628 		    }
629 		    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
630 			strncpy(devices->name[1],
631 				device_get_nameunit(children[i]), 32);
632 			bcopy(&atadev->param, &devices->params[1],
633 			      sizeof(struct ata_params));
634 		    }
635 		}
636 	    }
637 	    free(children, M_TEMP);
638 	    error = 0;
639 	}
640 	else
641 	    error = ENODEV;
642 	break;
643 
644     default:
645 	if (ata_raid_ioctl_func)
646 	    error = ata_raid_ioctl_func(cmd, data);
647     }
648     return error;
649 }
650 #endif
651 
652 int
653 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
654 {
655     struct ata_device *atadev = device_get_softc(dev);
656     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
657     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
658     struct ata_params *params = (struct ata_params *)data;
659     int *mode = (int *)data;
660     struct ata_request *request;
661     caddr_t buf;
662     int error;
663 
664     switch (cmd) {
665     case IOCATAREQUEST:
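	/*
	 * Pass a user-built ATA/ATAPI command through to the device: bounce
	 * the user buffer through kernel memory, queue the request and, once
	 * it completes, copy status and result data back out.
	 */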
666 	if (ioc_request->count >
667 	    (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
668 		return (EFBIG);
669 	}
670 	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
671 	    return ENOMEM;
672 	}
673 	if (!(request = ata_alloc_request())) {
674 	    free(buf, M_ATA);
675 	    return  ENOMEM;
676 	}
677 	request->dev = atadev->dev;
678 	if (ioc_request->flags & ATA_CMD_WRITE) {
679 	    error = copyin(ioc_request->data, buf, ioc_request->count);
680 	    if (error) {
681 		free(buf, M_ATA);
682 		ata_free_request(request);
683 		return error;
684 	    }
685 	}
686 	if (ioc_request->flags & ATA_CMD_ATAPI) {
687 	    request->flags = ATA_R_ATAPI;
688 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
689 	}
690 	else {
691 	    request->u.ata.command = ioc_request->u.ata.command;
692 	    request->u.ata.feature = ioc_request->u.ata.feature;
693 	    request->u.ata.lba = ioc_request->u.ata.lba;
694 	    request->u.ata.count = ioc_request->u.ata.count;
695 	}
696 	request->timeout = ioc_request->timeout;
697 	request->data = buf;
698 	request->bytecount = ioc_request->count;
699 	request->transfersize = request->bytecount;
700 	if (ioc_request->flags & ATA_CMD_CONTROL)
701 	    request->flags |= ATA_R_CONTROL;
702 	if (ioc_request->flags & ATA_CMD_READ)
703 	    request->flags |= ATA_R_READ;
704 	if (ioc_request->flags & ATA_CMD_WRITE)
705 	    request->flags |= ATA_R_WRITE;
706 	ata_queue_request(request);
707 	if (request->flags & ATA_R_ATAPI) {
708 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
709 		  sizeof(struct atapi_sense));
710 	}
711 	else {
712 	    ioc_request->u.ata.command = request->u.ata.command;
713 	    ioc_request->u.ata.feature = request->u.ata.feature;
714 	    ioc_request->u.ata.lba = request->u.ata.lba;
715 	    ioc_request->u.ata.count = request->u.ata.count;
716 	}
717 	ioc_request->error = request->result;
718 	if (ioc_request->flags & ATA_CMD_READ)
719 	    error = copyout(buf, ioc_request->data, ioc_request->count);
720 	else
721 	    error = 0;
722 	free(buf, M_ATA);
723 	ata_free_request(request);
724 	return error;
725 
726     case IOCATAGPARM:
727 	ata_getparam(atadev, 0);
728 	bcopy(&atadev->param, params, sizeof(struct ata_params));
729 	return 0;
730 
731     case IOCATASMODE:
732 	atadev->mode = *mode;
733 	ata_setmode(dev);
734 	return 0;
735 
736     case IOCATAGMODE:
737 	*mode = atadev->mode |
738 	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
739 	return 0;
740     case IOCATASSPINDOWN:
741 	atadev->spindown = *mode;
742 	return 0;
743     case IOCATAGSPINDOWN:
744 	*mode = atadev->spindown;
745 	return 0;
746     default:
747 	return ENOTTY;
748     }
749 }
750 
751 #ifndef ATA_CAM
752 static void
753 ata_boot_attach(void)
754 {
755     struct ata_channel *ch;
756     int ctlr;
757 
758     mtx_lock(&Giant);       /* newbus suckage it needs Giant */
759 
760     /* kick off probe and attach on all channels */
761     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
762 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
763 	    ata_identify(ch->dev);
764 	}
765     }
766 
767     /* release the hook that got us here, we are only needed once during boot */
768     if (ata_delayed_attach) {
769 	config_intrhook_disestablish(ata_delayed_attach);
770 	free(ata_delayed_attach, M_TEMP);
771 	ata_delayed_attach = NULL;
772     }
773 
774     mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
775 }
776 #endif
777 
778 /*
779  * misc support functions
780  */
781 #ifndef ATA_CAM
782 static device_t
783 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
784 {
785     device_t child;
786 
787     if ((child = device_add_child(parent, NULL, unit))) {
788 	device_set_softc(child, atadev);
789 	device_quiet(child);
790 	atadev->dev = child;
791 	atadev->max_iosize = DEV_BSIZE;
792 	atadev->mode = ATA_PIO_MAX;
793     }
794     return child;
795 }
796 #endif
797 
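/*
 * Fetch and post-process the IDENTIFY (ATA or ATAPI) data for a device:
 * convert it from little-endian, byte-swap the vendor strings (except for
 * a few known-broken models), trim and pack them, and, when 'init' is set,
 * derive the device description and initial DMA/PIO mode from the result.
 */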
798 int
799 ata_getparam(struct ata_device *atadev, int init)
800 {
801     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
802     struct ata_request *request;
803     u_int8_t command = 0;
804     int error = ENOMEM, retries = 2;
805 
806     if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
807 	command = ATA_ATA_IDENTIFY;
808     if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
809 	command = ATA_ATAPI_IDENTIFY;
810     if (!command)
811 	return ENXIO;
812 
813     while (retries-- > 0 && error) {
814 	if (!(request = ata_alloc_request()))
815 	    break;
816 	request->dev = atadev->dev;
817 	request->timeout = 1;
818 	request->retries = 0;
819 	request->u.ata.command = command;
820 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
821 	if (!bootverbose)
822 	    request->flags |= ATA_R_QUIET;
823 	request->data = (void *)&atadev->param;
824 	request->bytecount = sizeof(struct ata_params);
825 	request->donecount = 0;
826 	request->transfersize = DEV_BSIZE;
827 	ata_queue_request(request);
828 	error = request->result;
829 	ata_free_request(request);
830     }
831 
832     if (!error && (isprint(atadev->param.model[0]) ||
833 		   isprint(atadev->param.model[1]))) {
834 	struct ata_params *atacap = &atadev->param;
835 	int16_t *ptr;
836 
837 	for (ptr = (int16_t *)atacap;
838 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
839 	    *ptr = le16toh(*ptr);
840 	}
841 	if (!(!strncmp(atacap->model, "FX", 2) ||
842 	      !strncmp(atacap->model, "NEC", 3) ||
843 	      !strncmp(atacap->model, "Pioneer", 7) ||
844 	      !strncmp(atacap->model, "SHARP", 5))) {
845 	    bswap(atacap->model, sizeof(atacap->model));
846 	    bswap(atacap->revision, sizeof(atacap->revision));
847 	    bswap(atacap->serial, sizeof(atacap->serial));
848 	}
849 	btrim(atacap->model, sizeof(atacap->model));
850 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
851 	btrim(atacap->revision, sizeof(atacap->revision));
852 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
853 	btrim(atacap->serial, sizeof(atacap->serial));
854 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
855 
856 	if (bootverbose)
857 	    printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
858 		   device_get_unit(ch->dev),
859 		   ata_unit2str(atadev),
860 		   ata_mode2str(ata_pmode(atacap)),
861 		   ata_mode2str(ata_wmode(atacap)),
862 		   ata_mode2str(ata_umode(atacap)),
863 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
864 
865 	if (init) {
866 	    char buffer[64];
867 
868 	    sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
869 	    device_set_desc_copy(atadev->dev, buffer);
870 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
871 		(atadev->param.config != ATA_CFA_MAGIC1) &&
872 		(atadev->param.config != ATA_CFA_MAGIC2)) {
873 		if (atapi_dma &&
874 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
875 		    ata_umode(&atadev->param) >= ATA_UDMA2)
876 		    atadev->mode = ATA_DMA_MAX;
877 	    }
878 	    else {
879 		if (ata_dma &&
880 		    (ata_umode(&atadev->param) > 0 ||
881 		     ata_wmode(&atadev->param) > 0))
882 		    atadev->mode = ATA_DMA_MAX;
883 	    }
884 	}
885     }
886     else {
887 	if (!error)
888 	    error = ENXIO;
889     }
890     return error;
891 }
892 
893 #ifndef ATA_CAM
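/*
 * Walk the ch->devices bitmap, create newbus children for devices that do
 * not have one yet, fetch their IDENTIFY data and run the generic bus
 * probe/attach.  On PATA the slave is identified before the master so that
 * cable detection on the master works properly.
 */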
894 int
895 ata_identify(device_t dev)
896 {
897     struct ata_channel *ch = device_get_softc(dev);
898     struct ata_device *atadev;
899     device_t *children;
900     device_t child, master = NULL;
901     int nchildren, i, n = ch->devices;
902 
903     if (bootverbose)
904 	device_printf(dev, "Identifying devices: %08x\n", ch->devices);
905 
906     mtx_lock(&Giant);
907     /* Skip existing devices. */
908     if (!device_get_children(dev, &children, &nchildren)) {
909 	for (i = 0; i < nchildren; i++) {
910 	    if (children[i] && (atadev = device_get_softc(children[i])))
911 		n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
912 	}
913 	free(children, M_TEMP);
914     }
915     /* Create new devices. */
916     if (bootverbose)
917 	device_printf(dev, "New devices: %08x\n", n);
918     if (n == 0) {
919 	mtx_unlock(&Giant);
920 	return (0);
921     }
922     for (i = 0; i < ATA_PM; ++i) {
923 	if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
924 	    int unit = -1;
925 
926 	    if (!(atadev = malloc(sizeof(struct ata_device),
927 				  M_ATA, M_NOWAIT | M_ZERO))) {
928 		device_printf(dev, "out of memory\n");
		mtx_unlock(&Giant);
929 		return ENOMEM;
930 	    }
931 	    atadev->unit = i;
932 #ifdef ATA_STATIC_ID
933 	    if (n & (ATA_ATA_MASTER << i))
934 		unit = (device_get_unit(dev) << 1) + i;
935 #endif
936 	    if ((child = ata_add_child(dev, atadev, unit))) {
937 		/*
938 		 * PATA slave should be identified first, to allow
939 		 * device cable detection on master to work properly.
940 		 */
941 		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
942 			(n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
943 		    master = child;
944 		    continue;
945 		}
946 		if (ata_getparam(atadev, 1)) {
947 		    device_delete_child(dev, child);
948 		    free(atadev, M_ATA);
949 		}
950 	    }
951 	    else
952 		free(atadev, M_ATA);
953 	}
954     }
955     if (master) {
956 	atadev = device_get_softc(master);
957 	if (ata_getparam(atadev, 1)) {
958 	    device_delete_child(dev, master);
959 	    free(atadev, M_ATA);
960 	}
961     }
962     bus_generic_probe(dev);
963     bus_generic_attach(dev);
964     mtx_unlock(&Giant);
965     return 0;
966 }
967 #endif
968 
969 void
970 ata_default_registers(device_t dev)
971 {
972     struct ata_channel *ch = device_get_softc(dev);
973 
974     /* fill in the defaults from what's set up already */
975     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
976     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
977     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
978     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
979     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
980     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
981     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
982     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
983 }
984 
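/*
 * Rewrite a command into its 48-bit LBA variant when the request reaches
 * beyond 28-bit addressing (or transfers more than 256 sectors) and the
 * device supports 48-bit addressing.  Controllers flagged ATA_NO_48BIT_DMA
 * are dropped back to the corresponding PIO commands.
 */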
985 void
986 ata_modify_if_48bit(struct ata_request *request)
987 {
988     struct ata_channel *ch = device_get_softc(request->parent);
989     struct ata_device *atadev = device_get_softc(request->dev);
990 
991     request->flags &= ~ATA_R_48BIT;
992 
993     if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
994 	 request->u.ata.count > 256) &&
995 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
996 
997 	/* translate command into 48bit version */
998 	switch (request->u.ata.command) {
999 	case ATA_READ:
1000 	    request->u.ata.command = ATA_READ48;
1001 	    break;
1002 	case ATA_READ_MUL:
1003 	    request->u.ata.command = ATA_READ_MUL48;
1004 	    break;
1005 	case ATA_READ_DMA:
1006 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1007 		if (request->transfersize > DEV_BSIZE)
1008 		    request->u.ata.command = ATA_READ_MUL48;
1009 		else
1010 		    request->u.ata.command = ATA_READ48;
1011 		request->flags &= ~ATA_R_DMA;
1012 	    }
1013 	    else
1014 		request->u.ata.command = ATA_READ_DMA48;
1015 	    break;
1016 	case ATA_READ_DMA_QUEUED:
1017 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1018 		if (request->transfersize > DEV_BSIZE)
1019 		    request->u.ata.command = ATA_READ_MUL48;
1020 		else
1021 		    request->u.ata.command = ATA_READ48;
1022 		request->flags &= ~ATA_R_DMA;
1023 	    }
1024 	    else
1025 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
1026 	    break;
1027 	case ATA_WRITE:
1028 	    request->u.ata.command = ATA_WRITE48;
1029 	    break;
1030 	case ATA_WRITE_MUL:
1031 	    request->u.ata.command = ATA_WRITE_MUL48;
1032 	    break;
1033 	case ATA_WRITE_DMA:
1034 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1035 		if (request->transfersize > DEV_BSIZE)
1036 		    request->u.ata.command = ATA_WRITE_MUL48;
1037 		else
1038 		    request->u.ata.command = ATA_WRITE48;
1039 		request->flags &= ~ATA_R_DMA;
1040 	    }
1041 	    else
1042 		request->u.ata.command = ATA_WRITE_DMA48;
1043 	    break;
1044 	case ATA_WRITE_DMA_QUEUED:
1045 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1046 		if (request->transfersize > DEV_BSIZE)
1047 		    request->u.ata.command = ATA_WRITE_MUL48;
1048 		else
1049 		    request->u.ata.command = ATA_WRITE48;
1051 		request->flags &= ~ATA_R_DMA;
1052 	    }
1053 	    else
1054 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1055 	    break;
1056 	case ATA_FLUSHCACHE:
1057 	    request->u.ata.command = ATA_FLUSHCACHE48;
1058 	    break;
1059 	case ATA_SET_MAX_ADDRESS:
1060 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1061 	    break;
1062 	default:
1063 	    return;
1064 	}
1065 	request->flags |= ATA_R_48BIT;
1066     }
1067     else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1068 
1069 	/* translate command into 48bit version */
1070 	switch (request->u.ata.command) {
1071 	case ATA_FLUSHCACHE:
1072 	    request->u.ata.command = ATA_FLUSHCACHE48;
1073 	    break;
1074 	case ATA_READ_NATIVE_MAX_ADDRESS:
1075 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1076 	    break;
1077 	case ATA_SET_MAX_ADDRESS:
1078 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1079 	    break;
1080 	default:
1081 	    return;
1082 	}
1083 	request->flags |= ATA_R_48BIT;
1084     }
1085 }
1086 
1087 void
1088 ata_udelay(int interval)
1089 {
1090     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1091     if (1 || interval < (1000000/hz) || ata_delayed_attach)
1092 	DELAY(interval);
1093     else
1094 	pause("ataslp", interval/(1000000/hz));
1095 }
1096 
1097 char *
1098 ata_unit2str(struct ata_device *atadev)
1099 {
1100     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1101     static char str[8];
1102 
1103     if (ch->devices & ATA_PORTMULTIPLIER)
1104 	sprintf(str, "port%d", atadev->unit);
1105     else
1106 	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1107     return str;
1108 }
1109 
1110 const char *
1111 ata_mode2str(int mode)
1112 {
1113     switch (mode) {
1114     case -1: return "UNSUPPORTED";
1115     case ATA_PIO0: return "PIO0";
1116     case ATA_PIO1: return "PIO1";
1117     case ATA_PIO2: return "PIO2";
1118     case ATA_PIO3: return "PIO3";
1119     case ATA_PIO4: return "PIO4";
1120     case ATA_WDMA0: return "WDMA0";
1121     case ATA_WDMA1: return "WDMA1";
1122     case ATA_WDMA2: return "WDMA2";
1123     case ATA_UDMA0: return "UDMA16";
1124     case ATA_UDMA1: return "UDMA25";
1125     case ATA_UDMA2: return "UDMA33";
1126     case ATA_UDMA3: return "UDMA40";
1127     case ATA_UDMA4: return "UDMA66";
1128     case ATA_UDMA5: return "UDMA100";
1129     case ATA_UDMA6: return "UDMA133";
1130     case ATA_SA150: return "SATA150";
1131     case ATA_SA300: return "SATA300";
1132     default:
1133 	if (mode & ATA_DMA_MASK)
1134 	    return "BIOSDMA";
1135 	else
1136 	    return "BIOSPIO";
1137     }
1138 }
1139 
1140 const char *
1141 ata_satarev2str(int rev)
1142 {
1143 	switch (rev) {
1144 	case 0: return "";
1145 	case 1: return "SATA 1.5Gb/s";
1146 	case 2: return "SATA 3Gb/s";
1147 	case 3: return "SATA 6Gb/s";
1148 	default: return "???";
1149 	}
1150 }
1151 
1152 int
1153 ata_atapi(device_t dev, int target)
1154 {
1155     struct ata_channel *ch = device_get_softc(dev);
1156 
1157     return (ch->devices & (ATA_ATAPI_MASTER << target));
1158 }
1159 
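/*
 * ata_pmode()/ata_wmode()/ata_umode() decode the best PIO, multiword DMA
 * and Ultra DMA mode a device advertises in its IDENTIFY data; ata_wmode()
 * and ata_umode() return -1 when the transfer class is unsupported, while
 * ata_pmode() falls back to PIO0.
 */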
1160 int
1161 ata_pmode(struct ata_params *ap)
1162 {
1163     if (ap->atavalid & ATA_FLAG_64_70) {
1164 	if (ap->apiomodes & 0x02)
1165 	    return ATA_PIO4;
1166 	if (ap->apiomodes & 0x01)
1167 	    return ATA_PIO3;
1168     }
1169     if (ap->mwdmamodes & 0x04)
1170 	return ATA_PIO4;
1171     if (ap->mwdmamodes & 0x02)
1172 	return ATA_PIO3;
1173     if (ap->mwdmamodes & 0x01)
1174 	return ATA_PIO2;
1175     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1176 	return ATA_PIO2;
1177     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1178 	return ATA_PIO1;
1179     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1180 	return ATA_PIO0;
1181     return ATA_PIO0;
1182 }
1183 
1184 int
1185 ata_wmode(struct ata_params *ap)
1186 {
1187     if (ap->mwdmamodes & 0x04)
1188 	return ATA_WDMA2;
1189     if (ap->mwdmamodes & 0x02)
1190 	return ATA_WDMA1;
1191     if (ap->mwdmamodes & 0x01)
1192 	return ATA_WDMA0;
1193     return -1;
1194 }
1195 
1196 int
1197 ata_umode(struct ata_params *ap)
1198 {
1199     if (ap->atavalid & ATA_FLAG_88) {
1200 	if (ap->udmamodes & 0x40)
1201 	    return ATA_UDMA6;
1202 	if (ap->udmamodes & 0x20)
1203 	    return ATA_UDMA5;
1204 	if (ap->udmamodes & 0x10)
1205 	    return ATA_UDMA4;
1206 	if (ap->udmamodes & 0x08)
1207 	    return ATA_UDMA3;
1208 	if (ap->udmamodes & 0x04)
1209 	    return ATA_UDMA2;
1210 	if (ap->udmamodes & 0x02)
1211 	    return ATA_UDMA1;
1212 	if (ap->udmamodes & 0x01)
1213 	    return ATA_UDMA0;
1214     }
1215     return -1;
1216 }
1217 
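/*
 * Clamp a requested transfer mode both to the caller-supplied maximum and
 * to the best mode the device itself reports as supported.
 */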
1218 int
1219 ata_limit_mode(device_t dev, int mode, int maxmode)
1220 {
1221     struct ata_device *atadev = device_get_softc(dev);
1222 
1223     if (maxmode && mode > maxmode)
1224 	mode = maxmode;
1225 
1226     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1227 	return min(mode, ata_umode(&atadev->param));
1228 
1229     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1230 	return min(mode, ata_wmode(&atadev->param));
1231 
1232     if (mode > ata_pmode(&atadev->param))
1233 	return min(mode, ata_pmode(&atadev->param));
1234 
1235     return mode;
1236 }
1237 
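/*
 * String helpers for IDENTIFY data: bswap() swaps each 16-bit word from
 * the on-wire byte order, btrim() turns NULs and underscores into spaces
 * and strips trailing blanks, and bpack() collapses runs of spaces.
 */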
1238 static void
1239 bswap(int8_t *buf, int len)
1240 {
1241     u_int16_t *ptr = (u_int16_t*)(buf + len);
1242 
1243     while (--ptr >= (u_int16_t*)buf)
1244 	*ptr = ntohs(*ptr);
1245 }
1246 
1247 static void
1248 btrim(int8_t *buf, int len)
1249 {
1250     int8_t *ptr;
1251 
1252     for (ptr = buf; ptr < buf+len; ++ptr)
1253 	if (!*ptr || *ptr == '_')
1254 	    *ptr = ' ';
1255     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1256 	*ptr = 0;
1257 }
1258 
1259 static void
1260 bpack(int8_t *src, int8_t *dst, int len)
1261 {
1262     int i, j, blank;
1263 
1264     for (i = j = blank = 0 ; i < len; i++) {
1265 	if (blank && src[i] == ' ') continue;
1266 	if (blank && src[i] != ' ') {
1267 	    dst[j++] = src[i];
1268 	    blank = 0;
1269 	    continue;
1270 	}
1271 	if (src[i] == ' ') {
1272 	    blank = 1;
1273 	    if (i == 0)
1274 		continue;
1275 	}
1276 	dst[j++] = src[i];
1277     }
1278     if (j < len)
1279 	dst[j] = 0x00;
1280 }
1281 
1282 #ifdef ATA_CAM
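/*
 * Translate a CAM CCB (XPT_ATA_IO or XPT_SCSI_IO) into an ata_request,
 * mark the channel active and hand the request to the controller's
 * begin_transaction method; requests that finish immediately are completed
 * on the spot.
 */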
1283 void
1284 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1285 {
1286 	struct ata_channel *ch = device_get_softc(dev);
1287 	struct ata_request *request;
1288 
1289 	if (!(request = ata_alloc_request())) {
1290 		device_printf(dev, "FAILURE - out of memory in start\n");
1291 		ccb->ccb_h.status = CAM_REQ_INVALID;
1292 		xpt_done(ccb);
1293 		return;
1294 	}
1295 	bzero(request, sizeof(*request));
1296 
1297 	/* setup request */
1298 	request->dev = NULL;
1299 	request->parent = dev;
1300 	request->unit = ccb->ccb_h.target_id;
1301 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1302 		request->data = ccb->ataio.data_ptr;
1303 		request->bytecount = ccb->ataio.dxfer_len;
1304 		request->u.ata.command = ccb->ataio.cmd.command;
1305 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1306 					  (uint16_t)ccb->ataio.cmd.features;
1307 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1308 					(uint16_t)ccb->ataio.cmd.sector_count;
1309 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1310 			request->flags |= ATA_R_48BIT;
1311 			request->u.ata.lba =
1312 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1313 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1314 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1315 		} else {
1316 			request->u.ata.lba =
1317 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1318 		}
1319 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1320 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1321 				       (uint64_t)ccb->ataio.cmd.lba_low;
1322 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1323 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1324 			request->flags |= ATA_R_DMA;
1325 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1326 			request->flags |= ATA_R_READ;
1327 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1328 			request->flags |= ATA_R_WRITE;
1329 	} else {
1330 		request->data = ccb->csio.data_ptr;
1331 		request->bytecount = ccb->csio.dxfer_len;
1332 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1333 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1334 		    request->u.atapi.ccb, ccb->csio.cdb_len);
1335 		request->flags |= ATA_R_ATAPI;
1336 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1337 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1338 			request->flags |= ATA_R_DMA;
1339 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1340 			request->flags |= ATA_R_READ;
1341 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1342 			request->flags |= ATA_R_WRITE;
1343 	}
1344 	request->transfersize = min(request->bytecount,
1345 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1346 //	request->callback = ad_done;
1347 	request->retries = 0;
1348 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1349 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1350 	request->ccb = ccb;
1351 
1352 	ch->running = request;
1353 	ch->state = ATA_ACTIVE;
1354 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1355 	    ch->running = NULL;
1356 	    ch->state = ATA_IDLE;
1357 	    ata_cam_end_transaction(dev, request);
1358 	    return;
1359 	}
1360 }
1361 
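/*
 * Complete a request back to CAM: map the driver-level result onto a CCB
 * status, freeze the device queue on errors, copy back the result
 * registers for ATA commands that need them, and trigger a channel reinit
 * after a command timeout.
 */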
1362 void
1363 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1364 {
1365 	struct ata_channel *ch = device_get_softc(dev);
1366 	union ccb *ccb = request->ccb;
1367 	int fatalerr = 0;
1368 
1369 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1370 	if (request->flags & ATA_R_TIMEOUT) {
1371 		xpt_freeze_simq(ch->sim, 1);
1372 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1373 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1374 		fatalerr = 1;
1375 	} else if (request->status & ATA_S_ERROR) {
1376 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1377 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1378 		} else {
1379 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1380 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1381 		}
1382 	} else if (request->result == ERESTART)
1383 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1384 	else if (request->result != 0)
1385 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1386 	else
1387 		ccb->ccb_h.status |= CAM_REQ_CMP;
1388 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1389 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1390 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1391 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1392 	}
1393 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1394 	    ((request->status & ATA_S_ERROR) ||
1395 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1396 		struct ata_res *res = &ccb->ataio.res;
1397 		res->status = request->status;
1398 		res->error = request->error;
1399 		res->lba_low = request->u.ata.lba;
1400 		res->lba_mid = request->u.ata.lba >> 8;
1401 		res->lba_high = request->u.ata.lba >> 16;
1402 		res->device = request->u.ata.lba >> 24;
1403 		res->lba_low_exp = request->u.ata.lba >> 24;
1404 		res->lba_mid_exp = request->u.ata.lba >> 32;
1405 		res->lba_high_exp = request->u.ata.lba >> 40;
1406 		res->sector_count = request->u.ata.count;
1407 		res->sector_count_exp = request->u.ata.count >> 8;
1408 	}
1409 	ata_free_request(request);
1410 	xpt_done(ccb);
1411 	/* Do error recovery if needed. */
1412 	if (fatalerr)
1413 		ata_reinit(dev);
1414 }
1415 
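/*
 * CAM SIM action routine: dispatches incoming CCBs (ATA/SCSI I/O,
 * transfer-setting get/set, bus/device reset, path inquiry) to the
 * channel.  Called with the SIM lock (ch->state_mtx) held.
 */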
1416 static void
1417 ataaction(struct cam_sim *sim, union ccb *ccb)
1418 {
1419 	device_t dev;
1420 	struct ata_channel *ch;
1421 
1422 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1423 	    ccb->ccb_h.func_code));
1424 
1425 	ch = (struct ata_channel *)cam_sim_softc(sim);
1426 	dev = ch->dev;
1427 	switch (ccb->ccb_h.func_code) {
1428 	/* Common cases first */
1429 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
1430 	case XPT_SCSI_IO:
1431 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1432 		    << ccb->ccb_h.target_id)) == 0) {
1433 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1434 			xpt_done(ccb);
1435 			break;
1436 		}
1437 		if (ch->running)
1438 			device_printf(dev, "already running!\n");
1439 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1440 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1441 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
1442 			struct ata_res *res = &ccb->ataio.res;
1443 
1444 			bzero(res, sizeof(*res));
1445 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1446 				res->lba_high = 0;
1447 				res->lba_mid = 0;
1448 			} else {
1449 				res->lba_high = 0xeb;
1450 				res->lba_mid = 0x14;
1451 			}
1452 			ccb->ccb_h.status = CAM_REQ_CMP;
1453 			xpt_done(ccb);
1454 			break;
1455 		}
1456 		ata_cam_begin_transaction(dev, ccb);
1457 		break;
1458 	case XPT_EN_LUN:		/* Enable LUN as a target */
1459 	case XPT_TARGET_IO:		/* Execute target I/O request */
1460 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
1461 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
1462 	case XPT_ABORT:			/* Abort the specified CCB */
1463 		/* XXX Implement */
1464 		ccb->ccb_h.status = CAM_REQ_INVALID;
1465 		xpt_done(ccb);
1466 		break;
1467 	case XPT_SET_TRAN_SETTINGS:
1468 	{
1469 		struct	ccb_trans_settings *cts = &ccb->cts;
1470 		struct	ata_cam_device *d;
1471 
1472 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1473 			d = &ch->curr[ccb->ccb_h.target_id];
1474 		else
1475 			d = &ch->user[ccb->ccb_h.target_id];
1476 		if ((ch->flags & ATA_SATA) && (ch->flags & ATA_NO_SLAVE)) {
1477 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1478 				d->revision = cts->xport_specific.sata.revision;
1479 			if (cts->xport_specific.ata.valid & CTS_SATA_VALID_MODE) {
1480 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1481 					d->mode = ATA_SETMODE(ch->dev,
1482 					    ccb->ccb_h.target_id,
1483 					    cts->xport_specific.sata.mode);
1484 				} else
1485 					d->mode = cts->xport_specific.sata.mode;
1486 			}
1487 			if (cts->xport_specific.ata.valid & CTS_SATA_VALID_BYTECOUNT)
1488 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1489 		} else {
1490 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1491 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1492 					d->mode = ATA_SETMODE(ch->dev,
1493 					    ccb->ccb_h.target_id,
1494 					    cts->xport_specific.ata.mode);
1495 				} else
1496 					d->mode = cts->xport_specific.ata.mode;
1497 			}
1498 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1499 				d->bytecount = cts->xport_specific.ata.bytecount;
1500 			if (ch->flags & ATA_SATA)
1501 				d->bytecount = min(8192, d->bytecount);
1502 		}
1503 		ccb->ccb_h.status = CAM_REQ_CMP;
1504 		xpt_done(ccb);
1505 		break;
1506 	}
1507 	case XPT_GET_TRAN_SETTINGS:
1508 	{
1509 		struct	ccb_trans_settings *cts = &ccb->cts;
1510 		struct  ata_cam_device *d;
1511 
1512 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1513 			d = &ch->curr[ccb->ccb_h.target_id];
1514 		else
1515 			d = &ch->user[ccb->ccb_h.target_id];
1516 		cts->protocol = PROTO_ATA;
1517 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1518 		if ((ch->flags & ATA_SATA) && (ch->flags & ATA_NO_SLAVE)) {
1519 			cts->transport = XPORT_SATA;
1520 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1521 			cts->xport_specific.sata.mode = d->mode;
1522 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1523 			cts->xport_specific.sata.bytecount = d->bytecount;
1524 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1525 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1526 				cts->xport_specific.sata.revision =
1527 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1528 			} else
1529 				cts->xport_specific.sata.revision = d->revision;
1530 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1531 		} else {
1532 			cts->transport = XPORT_ATA;
1533 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1534 			cts->xport_specific.ata.mode = d->mode;
1535 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1536 			cts->xport_specific.ata.bytecount = d->bytecount;
1537 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1538 		}
1539 		ccb->ccb_h.status = CAM_REQ_CMP;
1540 		xpt_done(ccb);
1541 		break;
1542 	}
1543 #if 0
1544 	case XPT_CALC_GEOMETRY:
1545 	{
1546 		struct	  ccb_calc_geometry *ccg;
1547 		uint32_t size_mb;
1548 		uint32_t secs_per_cylinder;
1549 
1550 		ccg = &ccb->ccg;
1551 		size_mb = ccg->volume_size
1552 			/ ((1024L * 1024L) / ccg->block_size);
1553 		if (size_mb >= 1024 && (aha->extended_trans != 0)) {
1554 			if (size_mb >= 2048) {
1555 				ccg->heads = 255;
1556 				ccg->secs_per_track = 63;
1557 			} else {
1558 				ccg->heads = 128;
1559 				ccg->secs_per_track = 32;
1560 			}
1561 		} else {
1562 			ccg->heads = 64;
1563 			ccg->secs_per_track = 32;
1564 		}
1565 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1566 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1567 		ccb->ccb_h.status = CAM_REQ_CMP;
1568 		xpt_done(ccb);
1569 		break;
1570 	}
1571 #endif
1572 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1573 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1574 		ata_reinit(dev);
1575 		ccb->ccb_h.status = CAM_REQ_CMP;
1576 		xpt_done(ccb);
1577 		break;
1578 	case XPT_TERM_IO:		/* Terminate the I/O process */
1579 		/* XXX Implement */
1580 		ccb->ccb_h.status = CAM_REQ_INVALID;
1581 		xpt_done(ccb);
1582 		break;
1583 	case XPT_PATH_INQ:		/* Path routing inquiry */
1584 	{
1585 		struct ccb_pathinq *cpi = &ccb->cpi;
1586 
1587 		cpi->version_num = 1; /* XXX??? */
1588 		cpi->hba_inquiry = PI_SDTR_ABLE;
1589 		cpi->target_sprt = 0;
1590 		cpi->hba_misc = PIM_SEQSCAN;
1591 		cpi->hba_eng_cnt = 0;
1592 		if (ch->flags & ATA_NO_SLAVE)
1593 			cpi->max_target = 0;
1594 		else
1595 			cpi->max_target = 1;
1596 		cpi->max_lun = 0;
1597 		cpi->initiator_id = 0;
1598 		cpi->bus_id = cam_sim_bus(sim);
1599 		if (ch->flags & ATA_SATA)
1600 			cpi->base_transfer_speed = 150000;
1601 		else
1602 			cpi->base_transfer_speed = 3300;
1603 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1604 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1605 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1606 		cpi->unit_number = cam_sim_unit(sim);
1607 		if ((ch->flags & ATA_SATA) && (ch->flags & ATA_NO_SLAVE))
1608 			cpi->transport = XPORT_SATA;
1609 		else
1610 			cpi->transport = XPORT_ATA;
1611 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1612 		cpi->protocol = PROTO_ATA;
1613 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1614 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1615 		cpi->ccb_h.status = CAM_REQ_CMP;
1616 		xpt_done(ccb);
1617 		break;
1618 	}
1619 	default:
1620 		ccb->ccb_h.status = CAM_REQ_INVALID;
1621 		xpt_done(ccb);
1622 		break;
1623 	}
1624 }
1625 
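/*
 * Polled completion entry point; CAM already holds the SIM lock here, so
 * call the locked interrupt handler directly.
 */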
1626 static void
1627 atapoll(struct cam_sim *sim)
1628 {
1629 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1630 
1631 	ata_interrupt_locked(ch);
1632 }
1633 #endif
1634 
1635 /*
1636  * module handling
1637  */
1638 static int
1639 ata_module_event_handler(module_t mod, int what, void *arg)
1640 {
1641 #ifndef ATA_CAM
1642     static struct cdev *atacdev;
1643 #endif
1644 
1645     switch (what) {
1646     case MOD_LOAD:
1647 #ifndef ATA_CAM
1648 	/* register controlling device */
1649 	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1650 
1651 	if (cold) {
1652 	    /* register boot attach to be run when interrupts are enabled */
1653 	    if (!(ata_delayed_attach = (struct intr_config_hook *)
1654 				       malloc(sizeof(struct intr_config_hook),
1655 					      M_TEMP, M_NOWAIT | M_ZERO))) {
1656 		printf("ata: malloc of delayed attach hook failed\n");
1657 		return EIO;
1658 	    }
1659 	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1660 	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
1661 		printf("ata: config_intrhook_establish failed\n");
1662 		free(ata_delayed_attach, M_TEMP);
1663 	    }
1664 	}
1665 #endif
1666 	return 0;
1667 
1668     case MOD_UNLOAD:
1669 #ifndef ATA_CAM
1670 	/* deregister controlling device */
1671 	destroy_dev(atacdev);
1672 #endif
1673 	return 0;
1674 
1675     default:
1676 	return EOPNOTSUPP;
1677     }
1678 }
1679 
1680 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1681 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1682 MODULE_VERSION(ata, 1);
1683 #ifdef ATA_CAM
1684 MODULE_DEPEND(ata, cam, 1, 1, 1);
1685 #endif
1686 
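/*
 * Create the UMA zones backing ata_request and ata_composite allocations;
 * ata_uninit() below tears them down again on module unload.
 */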
1687 static void
1688 ata_init(void)
1689 {
1690     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1691 				   NULL, NULL, NULL, NULL, 0, 0);
1692     ata_composite_zone = uma_zcreate("ata_composite",
1693 				     sizeof(struct ata_composite),
1694 				     NULL, NULL, NULL, NULL, 0, 0);
1695 }
1696 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1697 
1698 static void
1699 ata_uninit(void)
1700 {
1701     uma_zdestroy(ata_composite_zone);
1702     uma_zdestroy(ata_request_zone);
1703 }
1704 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1705