xref: /freebsd/sys/dev/ata/ata-all.c (revision 774f94f14c92bf94afc21d8c8d7a1e8f2fdf5a48)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/ata.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/bio.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/sema.h>
44 #include <sys/taskqueue.h>
45 #include <vm/uma.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
49 #include <sys/rman.h>
50 #include <dev/ata/ata-all.h>
51 #include <ata_if.h>
52 
53 #ifdef ATA_CAM
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_debug.h>
59 #endif
60 
61 #ifndef ATA_CAM
62 /* device structure */
63 static  d_ioctl_t       ata_ioctl;
64 static struct cdevsw ata_cdevsw = {
65 	.d_version =    D_VERSION,
66 	.d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
67 	.d_ioctl =      ata_ioctl,
68 	.d_name =       "ata",
69 };
70 #endif
71 
72 /* prototypes */
73 #ifndef ATA_CAM
74 static void ata_boot_attach(void);
75 static device_t ata_add_child(device_t, struct ata_device *, int);
76 #else
77 static void ataaction(struct cam_sim *sim, union ccb *ccb);
78 static void atapoll(struct cam_sim *sim);
79 #endif
80 static void ata_conn_event(void *, int);
81 static void bswap(int8_t *, int);
82 static void btrim(int8_t *, int);
83 static void bpack(int8_t *, int8_t *, int);
84 static void ata_interrupt_locked(void *data);
85 
86 /* global vars */
87 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
88 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
89 struct intr_config_hook *ata_delayed_attach = NULL;
90 devclass_t ata_devclass;
91 uma_zone_t ata_request_zone;
92 uma_zone_t ata_composite_zone;
93 int ata_wc = 1;
94 int ata_setmax = 0;
95 int ata_dma_check_80pin = 1;
96 
97 /* local vars */
98 static int ata_dma = 1;
99 static int atapi_dma = 1;
100 
101 /* sysctl vars */
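/*
 * All of these knobs are also registered as loader tunables via
 * TUNABLE_INT, so they can be preset before the driver attaches; e.g.
 * putting hw.ata.ata_dma="0" in /boot/loader.conf disables ATA disk DMA
 * (illustrative setting, adjust as needed).
 */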
102 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
103 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
104 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
105 	   "ATA disk DMA mode control");
106 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
107 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
108 	   CTLFLAG_RW, &ata_dma_check_80pin, 1,
109 	   "Check for 80pin cable before setting ATA DMA mode");
110 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
111 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
112 	   "ATAPI device DMA mode control");
113 TUNABLE_INT("hw.ata.wc", &ata_wc);
114 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
115 	   "ATA disk write caching");
116 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
117 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
118 	   "ATA disk set max native address");
119 
120 /*
121  * newbus device interface related functions
122  */
123 int
124 ata_probe(device_t dev)
125 {
126     return 0;
127 }
128 
129 int
130 ata_attach(device_t dev)
131 {
132     struct ata_channel *ch = device_get_softc(dev);
133     int error, rid;
134 #ifdef ATA_CAM
135     struct cam_devq *devq;
136     const char *res;
137     char buf[64];
138     int i, mode;
139 #endif
140 
141     /* check that we have a virgin channel to attach */
142     if (ch->r_irq)
143 	return EEXIST;
144 
145     /* initialize the softc basics */
146     ch->dev = dev;
147     ch->state = ATA_IDLE;
148     bzero(&ch->state_mtx, sizeof(struct mtx));
149     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
150     bzero(&ch->queue_mtx, sizeof(struct mtx));
151     mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
152     TAILQ_INIT(&ch->ata_queue);
153     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
154 #ifdef ATA_CAM
155 	for (i = 0; i < 16; i++) {
156 		ch->user[i].mode = 0;
157 		snprintf(buf, sizeof(buf), "dev%d.mode", i);
158 		if (resource_string_value(device_get_name(dev),
159 		    device_get_unit(dev), buf, &res) == 0)
160 			mode = ata_str2mode(res);
161 		else if (resource_string_value(device_get_name(dev),
162 		    device_get_unit(dev), "mode", &res) == 0)
163 			mode = ata_str2mode(res);
164 		else
165 			mode = -1;
166 		if (mode >= 0)
167 			ch->user[i].mode = mode;
168 		if (ch->flags & ATA_SATA)
169 			ch->user[i].bytecount = 8192;
170 		else
171 			ch->user[i].bytecount = MAXPHYS;
172 		ch->curr[i] = ch->user[i];
173 	}
174 #endif
175 
176     /* reset the controller HW, the channel and device(s) */
177     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
178 	pause("ataatch", 1);
179 #ifndef ATA_CAM
180     ATA_RESET(dev);
181 #endif
182     ATA_LOCKING(dev, ATA_LF_UNLOCK);
183 
184     /* allocate DMA resources if DMA HW present */
185     if (ch->dma.alloc)
186 	ch->dma.alloc(dev);
187 
188     /* setup interrupt delivery */
189     rid = ATA_IRQ_RID;
190     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
191 				       RF_SHAREABLE | RF_ACTIVE);
192     if (!ch->r_irq) {
193 	device_printf(dev, "unable to allocate interrupt\n");
194 	return ENXIO;
195     }
196     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
197 				ata_interrupt, ch, &ch->ih))) {
198 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
199 	device_printf(dev, "unable to setup interrupt\n");
200 	return error;
201     }
202 
203 #ifndef ATA_CAM
204     /* probe and attach devices on this channel unless we are in early boot */
205     if (!ata_delayed_attach)
206 	ata_identify(dev);
207     return (0);
208 #else
209 	mtx_lock(&ch->state_mtx);
210 	/* Create the device queue for our SIM. */
211 	devq = cam_simq_alloc(1);
212 	if (devq == NULL) {
213 		device_printf(dev, "Unable to allocate simq\n");
214 		error = ENOMEM;
215 		goto err1;
216 	}
217 	/* Construct SIM entry */
218 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
219 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
220 	if (ch->sim == NULL) {
221 		device_printf(dev, "unable to allocate sim\n");
222 		cam_simq_free(devq);
223 		error = ENOMEM;
224 		goto err1;
225 	}
226 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
227 		device_printf(dev, "unable to register xpt bus\n");
228 		error = ENXIO;
229 		goto err2;
230 	}
231 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
232 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
233 		device_printf(dev, "unable to create path\n");
234 		error = ENXIO;
235 		goto err3;
236 	}
237 	mtx_unlock(&ch->state_mtx);
238 	return (0);
239 
240 err3:
241 	xpt_bus_deregister(cam_sim_path(ch->sim));
242 err2:
243 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
244 	ch->sim = NULL;
245 err1:
246 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
247 	mtx_unlock(&ch->state_mtx);
248 	return (error);
249 #endif
250 }
251 
252 int
253 ata_detach(device_t dev)
254 {
255     struct ata_channel *ch = device_get_softc(dev);
256 #ifndef ATA_CAM
257     device_t *children;
258     int nchildren, i;
259 #endif
260 
261     /* check that we have a valid channel to detach */
262     if (!ch->r_irq)
263 	return ENXIO;
264 
265     /* grab the channel lock so no new requests get launched */
266     mtx_lock(&ch->state_mtx);
267     ch->state |= ATA_STALL_QUEUE;
268     mtx_unlock(&ch->state_mtx);
269 
270 #ifndef ATA_CAM
271     /* detach & delete all children */
272     if (!device_get_children(dev, &children, &nchildren)) {
273 	for (i = 0; i < nchildren; i++)
274 	    if (children[i])
275 		device_delete_child(dev, children[i]);
276 	free(children, M_TEMP);
277     }
278 #endif
279     taskqueue_drain(taskqueue_thread, &ch->conntask);
280 
281 #ifdef ATA_CAM
282 	mtx_lock(&ch->state_mtx);
283 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
284 	xpt_free_path(ch->path);
285 	xpt_bus_deregister(cam_sim_path(ch->sim));
286 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
287 	ch->sim = NULL;
288 	mtx_unlock(&ch->state_mtx);
289 #endif
290 
291     /* release resources */
292     bus_teardown_intr(dev, ch->r_irq, ch->ih);
293     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
294     ch->r_irq = NULL;
295 
296     /* free DMA resources if DMA HW present */
297     if (ch->dma.free)
298 	ch->dma.free(dev);
299 
300     mtx_destroy(&ch->state_mtx);
301     mtx_destroy(&ch->queue_mtx);
302     return 0;
303 }
304 
305 static void
306 ata_conn_event(void *context, int dummy)
307 {
308 	device_t dev = (device_t)context;
309 #ifdef ATA_CAM
310 	struct ata_channel *ch = device_get_softc(dev);
311 	union ccb *ccb;
312 
313 	mtx_lock(&ch->state_mtx);
314 	if (ch->sim == NULL) {
315 		mtx_unlock(&ch->state_mtx);
316 		return;
317 	}
318 	ata_reinit(dev);
319 	if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
		/* don't return with the channel state lock still held */
		mtx_unlock(&ch->state_mtx);
320 		return;
	}
321 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
322 	    cam_sim_path(ch->sim),
323 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
324 		xpt_free_ccb(ccb);
		mtx_unlock(&ch->state_mtx);
325 		return;
326 	}
327 	xpt_rescan(ccb);
328 	mtx_unlock(&ch->state_mtx);
329 #else
330 	ata_reinit(dev);
331 #endif
332 }
333 
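/*
 * Reinitialize a channel after an error or a connect/disconnect event:
 * stall the queue, reset the controller and its device(s), and either
 * requeue (legacy path) or complete with ERESTART (ATA_CAM path) any
 * request that was running when the reset hit.
 */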
334 int
335 ata_reinit(device_t dev)
336 {
337     struct ata_channel *ch = device_get_softc(dev);
338     struct ata_request *request;
339 #ifndef ATA_CAM
340     device_t *children;
341     int nchildren, i;
342 
343     /* check that we have a valid channel to reinit */
344     if (!ch || !ch->r_irq)
345 	return ENXIO;
346 
347     if (bootverbose)
348 	device_printf(dev, "reinitializing channel ..\n");
349 
350     /* poll for locking the channel */
351     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
352 	pause("atarini", 1);
353 
354     /* catch any request currently in ch->running */
355     mtx_lock(&ch->state_mtx);
356     if (ch->state & ATA_STALL_QUEUE) {
357 	/* Recursive reinits and reinits during detach prohibited. */
358 	mtx_unlock(&ch->state_mtx);
359 	return (ENXIO);
360     }
361     if ((request = ch->running))
362 	callout_stop(&request->callout);
363     ch->running = NULL;
364 
365     /* unconditionally grab the channel lock */
366     ch->state |= ATA_STALL_QUEUE;
367     mtx_unlock(&ch->state_mtx);
368 
369     /* reset the controller HW, the channel and device(s) */
370     ATA_RESET(dev);
371 
372     /* reinit the children and delete any that fail */
373     if (!device_get_children(dev, &children, &nchildren)) {
374 	mtx_lock(&Giant);       /* newbus suckage it needs Giant */
375 	for (i = 0; i < nchildren; i++) {
376 	    /* did any children go missing ? */
377 	    if (children[i] && device_is_attached(children[i]) &&
378 		ATA_REINIT(children[i])) {
379 		/*
380 		 * if we had a running request and its device matches
381 		 * this child we need to inform the request that the
382 		 * device is gone.
383 		 */
384 		if (request && request->dev == children[i]) {
385 		    request->result = ENXIO;
386 		    device_printf(request->dev, "FAILURE - device detached\n");
387 
388 		    /* if this was not a timeout, finish the request here */
389 		    if (!(request->flags & ATA_R_TIMEOUT))
390 			    ata_finish(request);
391 		    request = NULL;
392 		}
393 		device_delete_child(dev, children[i]);
394 	    }
395 	}
396 	free(children, M_TEMP);
397 	mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
398     }
399 
400     /* if we still have a good request put it on the queue again */
401     if (request && !(request->flags & ATA_R_TIMEOUT)) {
402 	device_printf(request->dev,
403 		      "WARNING - %s requeued due to channel reset",
404 		      ata_cmd2str(request));
405 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
406 	    printf(" LBA=%ju", request->u.ata.lba);
407 	printf("\n");
408 	request->flags |= ATA_R_REQUEUE;
409 	ata_queue_request(request);
410     }
411 
412     /* we're done, release the channel for new work */
413     mtx_lock(&ch->state_mtx);
414     ch->state = ATA_IDLE;
415     mtx_unlock(&ch->state_mtx);
416     ATA_LOCKING(dev, ATA_LF_UNLOCK);
417 
418     /* Add new children. */
419 /*    ata_identify(dev); */
420 
421     if (bootverbose)
422 	device_printf(dev, "reinit done ..\n");
423 
424     /* kick off requests on the queue */
425     ata_start(dev);
426 #else
427 	xpt_freeze_simq(ch->sim, 1);
428 	if ((request = ch->running)) {
429 		ch->running = NULL;
430 		if (ch->state == ATA_ACTIVE)
431 		    ch->state = ATA_IDLE;
432 		callout_stop(&request->callout);
433 		if (ch->dma.unload)
434 		    ch->dma.unload(request);
435 		request->result = ERESTART;
436 		ata_cam_end_transaction(dev, request);
437 	}
438 	/* reset the controller HW, the channel and device(s) */
439 	ATA_RESET(dev);
440 	/* Tell the XPT about the event */
441 	xpt_async(AC_BUS_RESET, ch->path, NULL);
442 	xpt_release_simq(ch->sim, TRUE);
443 #endif
444 	return(0);
445 }
446 
447 int
448 ata_suspend(device_t dev)
449 {
450     struct ata_channel *ch;
451 
452     /* check for valid device */
453     if (!dev || !(ch = device_get_softc(dev)))
454 	return ENXIO;
455 
456 #ifdef ATA_CAM
457 	mtx_lock(&ch->state_mtx);
458 	xpt_freeze_simq(ch->sim, 1);
459 	while (ch->state != ATA_IDLE)
460 		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
461 	mtx_unlock(&ch->state_mtx);
462 #else
463     /* wait for the channel to be IDLE or detached before suspending */
464     while (ch->r_irq) {
465 	mtx_lock(&ch->state_mtx);
466 	if (ch->state == ATA_IDLE) {
467 	    ch->state = ATA_ACTIVE;
468 	    mtx_unlock(&ch->state_mtx);
469 	    break;
470 	}
471 	mtx_unlock(&ch->state_mtx);
472 	tsleep(ch, PRIBIO, "atasusp", hz/10);
473     }
474     ATA_LOCKING(dev, ATA_LF_UNLOCK);
475 #endif
476     return(0);
477 }
478 
479 int
480 ata_resume(device_t dev)
481 {
482     struct ata_channel *ch;
483     int error;
484 
485     /* check for valid device */
486     if (!dev || !(ch = device_get_softc(dev)))
487 	return ENXIO;
488 
489 #ifdef ATA_CAM
490 	mtx_lock(&ch->state_mtx);
491 	error = ata_reinit(dev);
492 	xpt_release_simq(ch->sim, TRUE);
493 	mtx_unlock(&ch->state_mtx);
494 #else
495     /* reinit the devices, we don't know what mode/state they are in */
496     error = ata_reinit(dev);
497     /* kick off requests on the queue */
498     ata_start(dev);
499 #endif
500     return error;
501 }
502 
503 void
504 ata_interrupt(void *data)
505 {
506 #ifdef ATA_CAM
507     struct ata_channel *ch = (struct ata_channel *)data;
508 
509     mtx_lock(&ch->state_mtx);
510 #endif
511     ata_interrupt_locked(data);
512 #ifdef ATA_CAM
513     mtx_unlock(&ch->state_mtx);
514 #endif
515 }
516 
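/*
 * Interrupt bottom half, entered with the channel state lock held:
 * ignore interrupts that are not ours or arrive on an idle channel, then
 * let the controller's end_transaction method decide whether the running
 * request is finished or needs another interrupt.
 */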
517 static void
518 ata_interrupt_locked(void *data)
519 {
520     struct ata_channel *ch = (struct ata_channel *)data;
521     struct ata_request *request;
522 
523 #ifndef ATA_CAM
524     mtx_lock(&ch->state_mtx);
525 #endif
526     do {
527 	/* ignore the interrupt if it's not for us */
528 	if (ch->hw.status && !ch->hw.status(ch->dev))
529 	    break;
530 
531 	/* do we have a running request */
532 	if (!(request = ch->running))
533 	    break;
534 
535 	ATA_DEBUG_RQ(request, "interrupt");
536 
537 	/* safety check for the right state */
538 	if (ch->state == ATA_IDLE) {
539 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
540 	    break;
541 	}
542 
543 	/*
544 	 * we have the HW locks, so end the transaction for this request
545 	 * if it finishes immediately; otherwise wait for the next interrupt
546 	 */
547 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
548 	    ch->running = NULL;
549 	    if (ch->state == ATA_ACTIVE)
550 		ch->state = ATA_IDLE;
551 #ifdef ATA_CAM
552 	    ata_cam_end_transaction(ch->dev, request);
553 #else
554 	    mtx_unlock(&ch->state_mtx);
555 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
556 	    ata_finish(request);
557 #endif
558 	    return;
559 	}
560     } while (0);
561 #ifndef ATA_CAM
562     mtx_unlock(&ch->state_mtx);
563 #endif
564 }
565 
566 void
567 ata_print_cable(device_t dev, u_int8_t *who)
568 {
569     device_printf(dev,
570                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
571 }
572 
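/*
 * Clamp modes above UDMA33 back to UDMA33 when the IDENTIFY data does not
 * indicate an 80-conductor cable, unless the hw.ata.ata_dma_check_80pin
 * knob has been turned off.
 */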
573 int
574 ata_check_80pin(device_t dev, int mode)
575 {
576     struct ata_device *atadev = device_get_softc(dev);
577 
578     if (!ata_dma_check_80pin) {
579         if (bootverbose)
580             device_printf(dev, "Skipping 80pin cable check\n");
581         return mode;
582     }
583 
584     if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
585         ata_print_cable(dev, "device");
586         mode = ATA_UDMA2;
587     }
588     return mode;
589 }
590 
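/*
 * Negotiate a transfer mode: limit the requested mode to what the device
 * advertises, let the controller pick its best match, re-check the cable
 * on PATA channels, and loop until the value settles before programming
 * the device via SET FEATURES / SET TRANSFER MODE.
 */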
591 void
592 ata_setmode(device_t dev)
593 {
594 	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
595 	struct ata_device *atadev = device_get_softc(dev);
596 	int error, mode, pmode;
597 
598 	mode = atadev->mode;
599 	do {
600 		pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
601 		mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
602 		if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
603 			mode = ata_check_80pin(dev, mode);
604 	} while (pmode != mode); /* Iterate until successful negotiation. */
605 	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
606 	if (bootverbose)
607 	        device_printf(dev, "%ssetting %s\n",
608 		    (error) ? "FAILURE " : "", ata_mode2str(mode));
609 	atadev->mode = mode;
610 }
611 
612 /*
613  * device related interfaces
614  */
615 #ifndef ATA_CAM
616 static int
617 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
618 	  int32_t flag, struct thread *td)
619 {
620     device_t device, *children;
621     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
622     int *value = (int *)data;
623     int i, nchildren, error = ENOTTY;
624 
625     switch (cmd) {
626     case IOCATAGMAXCHANNEL:
627 	/* In case we have channel 0..n this will return n+1. */
628 	*value = devclass_get_maxunit(ata_devclass);
629 	error = 0;
630 	break;
631 
632     case IOCATAREINIT:
633 	if (*value >= devclass_get_maxunit(ata_devclass) ||
634 	    !(device = devclass_get_device(ata_devclass, *value)) ||
635 	    !device_is_attached(device))
636 	    return ENXIO;
637 	error = ata_reinit(device);
638 	break;
639 
640     case IOCATAATTACH:
641 	if (*value >= devclass_get_maxunit(ata_devclass) ||
642 	    !(device = devclass_get_device(ata_devclass, *value)) ||
643 	    !device_is_attached(device))
644 	    return ENXIO;
645 	error = DEVICE_ATTACH(device);
646 	break;
647 
648     case IOCATADETACH:
649 	if (*value >= devclass_get_maxunit(ata_devclass) ||
650 	    !(device = devclass_get_device(ata_devclass, *value)) ||
651 	    !device_is_attached(device))
652 	    return ENXIO;
653 	error = DEVICE_DETACH(device);
654 	break;
655 
656     case IOCATADEVICES:
657 	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
658 	    !(device = devclass_get_device(ata_devclass, devices->channel)) ||
659 	    !device_is_attached(device))
660 	    return ENXIO;
661 	bzero(devices->name[0], 32);
662 	bzero(&devices->params[0], sizeof(struct ata_params));
663 	bzero(devices->name[1], 32);
664 	bzero(&devices->params[1], sizeof(struct ata_params));
665 	if (!device_get_children(device, &children, &nchildren)) {
666 	    for (i = 0; i < nchildren; i++) {
667 		if (children[i] && device_is_attached(children[i])) {
668 		    struct ata_device *atadev = device_get_softc(children[i]);
669 
670 		    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
671 			strncpy(devices->name[0],
672 				device_get_nameunit(children[i]), 32);
673 			bcopy(&atadev->param, &devices->params[0],
674 			      sizeof(struct ata_params));
675 		    }
676 		    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
677 			strncpy(devices->name[1],
678 				device_get_nameunit(children[i]), 32);
679 			bcopy(&atadev->param, &devices->params[1],
680 			      sizeof(struct ata_params));
681 		    }
682 		}
683 	    }
684 	    free(children, M_TEMP);
685 	    error = 0;
686 	}
687 	else
688 	    error = ENODEV;
689 	break;
690 
691     default:
692 	if (ata_raid_ioctl_func)
693 	    error = ata_raid_ioctl_func(cmd, data);
694     }
695     return error;
696 }
697 #endif
698 
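/*
 * Per-device ioctl backend.  IOCATAREQUEST marshals a user-supplied ATA
 * or ATAPI command into an ata_request, bounces the data through a
 * kernel buffer, queues it and copies the results (plus sense data for
 * ATAPI) back out; the remaining ioctls get/set identify data, transfer
 * mode and spindown settings.
 */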
699 int
700 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
701 {
702     struct ata_device *atadev = device_get_softc(dev);
703     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
704     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
705     struct ata_params *params = (struct ata_params *)data;
706     int *mode = (int *)data;
707     struct ata_request *request;
708     caddr_t buf;
709     int error;
710 
711     switch (cmd) {
712     case IOCATAREQUEST:
713 	if (ioc_request->count >
714 	    (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
715 		return (EFBIG);
716 	}
717 	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
718 	    return ENOMEM;
719 	}
720 	if (!(request = ata_alloc_request())) {
721 	    free(buf, M_ATA);
722 	    return  ENOMEM;
723 	}
724 	request->dev = atadev->dev;
725 	if (ioc_request->flags & ATA_CMD_WRITE) {
726 	    error = copyin(ioc_request->data, buf, ioc_request->count);
727 	    if (error) {
728 		free(buf, M_ATA);
729 		ata_free_request(request);
730 		return error;
731 	    }
732 	}
733 	if (ioc_request->flags & ATA_CMD_ATAPI) {
734 	    request->flags = ATA_R_ATAPI;
735 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
736 	}
737 	else {
738 	    request->u.ata.command = ioc_request->u.ata.command;
739 	    request->u.ata.feature = ioc_request->u.ata.feature;
740 	    request->u.ata.lba = ioc_request->u.ata.lba;
741 	    request->u.ata.count = ioc_request->u.ata.count;
742 	}
743 	request->timeout = ioc_request->timeout;
744 	request->data = buf;
745 	request->bytecount = ioc_request->count;
746 	request->transfersize = request->bytecount;
747 	if (ioc_request->flags & ATA_CMD_CONTROL)
748 	    request->flags |= ATA_R_CONTROL;
749 	if (ioc_request->flags & ATA_CMD_READ)
750 	    request->flags |= ATA_R_READ;
751 	if (ioc_request->flags & ATA_CMD_WRITE)
752 	    request->flags |= ATA_R_WRITE;
753 	ata_queue_request(request);
754 	if (request->flags & ATA_R_ATAPI) {
755 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
756 		  sizeof(struct atapi_sense));
757 	}
758 	else {
759 	    ioc_request->u.ata.command = request->u.ata.command;
760 	    ioc_request->u.ata.feature = request->u.ata.feature;
761 	    ioc_request->u.ata.lba = request->u.ata.lba;
762 	    ioc_request->u.ata.count = request->u.ata.count;
763 	}
764 	ioc_request->error = request->result;
765 	if (ioc_request->flags & ATA_CMD_READ)
766 	    error = copyout(buf, ioc_request->data, ioc_request->count);
767 	else
768 	    error = 0;
769 	free(buf, M_ATA);
770 	ata_free_request(request);
771 	return error;
772 
773     case IOCATAGPARM:
774 	ata_getparam(atadev, 0);
775 	bcopy(&atadev->param, params, sizeof(struct ata_params));
776 	return 0;
777 
778     case IOCATASMODE:
779 	atadev->mode = *mode;
780 	ata_setmode(dev);
781 	return 0;
782 
783     case IOCATAGMODE:
784 	*mode = atadev->mode |
785 	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
786 	return 0;
787     case IOCATASSPINDOWN:
788 	atadev->spindown = *mode;
789 	return 0;
790     case IOCATAGSPINDOWN:
791 	*mode = atadev->spindown;
792 	return 0;
793     default:
794 	return ENOTTY;
795     }
796 }
797 
798 #ifndef ATA_CAM
799 static void
800 ata_boot_attach(void)
801 {
802     struct ata_channel *ch;
803     int ctlr;
804 
805     mtx_lock(&Giant);       /* newbus suckage it needs Giant */
806 
807     /* kick off probe and attach on all channels */
808     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
809 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
810 	    ata_identify(ch->dev);
811 	}
812     }
813 
814     /* release the hook that got us here, we are only needed once during boot */
815     if (ata_delayed_attach) {
816 	config_intrhook_disestablish(ata_delayed_attach);
817 	free(ata_delayed_attach, M_TEMP);
818 	ata_delayed_attach = NULL;
819     }
820 
821     mtx_unlock(&Giant);     /* newbus suckage dealt with, release Giant */
822 }
823 #endif
824 
825 /*
826  * misc support functions
827  */
828 #ifndef ATA_CAM
829 static device_t
830 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
831 {
832     device_t child;
833 
834     if ((child = device_add_child(parent, NULL, unit))) {
835 	device_set_softc(child, atadev);
836 	device_quiet(child);
837 	atadev->dev = child;
838 	atadev->max_iosize = DEV_BSIZE;
839 	atadev->mode = ATA_PIO_MAX;
840     }
841     return child;
842 }
843 #endif
844 
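/*
 * Issue ATA/ATAPI IDENTIFY (retrying once) and post-process the result:
 * words are converted from little endian, the model/revision/serial
 * strings are byte-swapped unless the drive is a known exception, then
 * trimmed and packed, and on initial attach the default transfer mode
 * and any "mode"/"devN.mode" hint override are applied.
 */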
845 int
846 ata_getparam(struct ata_device *atadev, int init)
847 {
848     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
849     struct ata_request *request;
850     const char *res;
851     char buf[64];
852     u_int8_t command = 0;
853     int error = ENOMEM, retries = 2, mode = -1;
854 
855     if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
856 	command = ATA_ATA_IDENTIFY;
857     if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
858 	command = ATA_ATAPI_IDENTIFY;
859     if (!command)
860 	return ENXIO;
861 
862     while (retries-- > 0 && error) {
863 	if (!(request = ata_alloc_request()))
864 	    break;
865 	request->dev = atadev->dev;
866 	request->timeout = 1;
867 	request->retries = 0;
868 	request->u.ata.command = command;
869 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
870 	if (!bootverbose)
871 	    request->flags |= ATA_R_QUIET;
872 	request->data = (void *)&atadev->param;
873 	request->bytecount = sizeof(struct ata_params);
874 	request->donecount = 0;
875 	request->transfersize = DEV_BSIZE;
876 	ata_queue_request(request);
877 	error = request->result;
878 	ata_free_request(request);
879     }
880 
881     if (!error && (isprint(atadev->param.model[0]) ||
882 		   isprint(atadev->param.model[1]))) {
883 	struct ata_params *atacap = &atadev->param;
884 	int16_t *ptr;
885 
886 	for (ptr = (int16_t *)atacap;
887 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
888 	    *ptr = le16toh(*ptr);
889 	}
890 	if (!(!strncmp(atacap->model, "FX", 2) ||
891 	      !strncmp(atacap->model, "NEC", 3) ||
892 	      !strncmp(atacap->model, "Pioneer", 7) ||
893 	      !strncmp(atacap->model, "SHARP", 5))) {
894 	    bswap(atacap->model, sizeof(atacap->model));
895 	    bswap(atacap->revision, sizeof(atacap->revision));
896 	    bswap(atacap->serial, sizeof(atacap->serial));
897 	}
898 	btrim(atacap->model, sizeof(atacap->model));
899 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
900 	btrim(atacap->revision, sizeof(atacap->revision));
901 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
902 	btrim(atacap->serial, sizeof(atacap->serial));
903 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
904 
905 	if (bootverbose)
906 	    printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
907 		   device_get_unit(ch->dev),
908 		   ata_unit2str(atadev),
909 		   ata_mode2str(ata_pmode(atacap)),
910 		   ata_mode2str(ata_wmode(atacap)),
911 		   ata_mode2str(ata_umode(atacap)),
912 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
913 
914 	if (init) {
915 	    char buffer[64];
916 
917 	    sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
918 	    device_set_desc_copy(atadev->dev, buffer);
919 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
920 		(atadev->param.config != ATA_CFA_MAGIC1) &&
921 		(atadev->param.config != ATA_CFA_MAGIC2)) {
922 		if (atapi_dma &&
923 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
924 		    ata_umode(&atadev->param) >= ATA_UDMA2)
925 		    atadev->mode = ATA_DMA_MAX;
926 	    }
927 	    else {
928 		if (ata_dma &&
929 		    (ata_umode(&atadev->param) > 0 ||
930 		     ata_wmode(&atadev->param) > 0))
931 		    atadev->mode = ATA_DMA_MAX;
932 	    }
933 	    snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
934 	    if (resource_string_value(device_get_name(ch->dev),
935 	        device_get_unit(ch->dev), buf, &res) == 0)
936 		    mode = ata_str2mode(res);
937 	    else if (resource_string_value(device_get_name(ch->dev),
938 		device_get_unit(ch->dev), "mode", &res) == 0)
939 		    mode = ata_str2mode(res);
940 	    if (mode >= 0)
941 		    atadev->mode = mode;
942 	}
943     }
944     else {
945 	if (!error)
946 	    error = ENXIO;
947     }
948     return error;
949 }
950 
951 #ifndef ATA_CAM
952 int
953 ata_identify(device_t dev)
954 {
955     struct ata_channel *ch = device_get_softc(dev);
956     struct ata_device *atadev;
957     device_t *children;
958     device_t child, master = NULL;
959     int nchildren, i, n = ch->devices;
960 
961     if (bootverbose)
962 	device_printf(dev, "Identifying devices: %08x\n", ch->devices);
963 
964     mtx_lock(&Giant);
965     /* Skip existing devices. */
966     if (!device_get_children(dev, &children, &nchildren)) {
967 	for (i = 0; i < nchildren; i++) {
968 	    if (children[i] && (atadev = device_get_softc(children[i])))
969 		n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
970 	}
971 	free(children, M_TEMP);
972     }
973     /* Create new devices. */
974     if (bootverbose)
975 	device_printf(dev, "New devices: %08x\n", n);
976     if (n == 0) {
977 	mtx_unlock(&Giant);
978 	return (0);
979     }
980     for (i = 0; i < ATA_PM; ++i) {
981 	if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
982 	    int unit = -1;
983 
984 	    if (!(atadev = malloc(sizeof(struct ata_device),
985 				  M_ATA, M_NOWAIT | M_ZERO))) {
986 		device_printf(dev, "out of memory\n");
987 		return ENOMEM;
988 	    }
989 	    atadev->unit = i;
990 #ifdef ATA_STATIC_ID
991 	    if (n & (ATA_ATA_MASTER << i))
992 		unit = (device_get_unit(dev) << 1) + i;
993 #endif
994 	    if ((child = ata_add_child(dev, atadev, unit))) {
995 		/*
996 		 * PATA slave should be identified first, to allow
997 		 * device cable detection on master to work properly.
998 		 */
999 		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1000 			(n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
1001 		    master = child;
1002 		    continue;
1003 		}
1004 		if (ata_getparam(atadev, 1)) {
1005 		    device_delete_child(dev, child);
1006 		    free(atadev, M_ATA);
1007 		}
1008 	    }
1009 	    else
1010 		free(atadev, M_ATA);
1011 	}
1012     }
1013     if (master) {
1014 	atadev = device_get_softc(master);
1015 	if (ata_getparam(atadev, 1)) {
1016 	    device_delete_child(dev, master);
1017 	    free(atadev, M_ATA);
1018 	}
1019     }
1020     bus_generic_probe(dev);
1021     bus_generic_attach(dev);
1022     mtx_unlock(&Giant);
1023     return 0;
1024 }
1025 #endif
1026 
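/*
 * Several taskfile registers share an address with a write-only
 * counterpart; alias ERROR/IREASON/STATUS/ALTSTAT onto the resources
 * already set up for FEATURE/COUNT/COMMAND/CONTROL.
 */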
1027 void
1028 ata_default_registers(device_t dev)
1029 {
1030     struct ata_channel *ch = device_get_softc(dev);
1031 
1032     /* fill in the defaults from what's set up already */
1033     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1034     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1035     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1036     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1037     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1038     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1039     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1040     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
1041 }
1042 
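/*
 * Promote a taskfile command to its 48-bit LBA variant when the transfer
 * reaches beyond the 28-bit boundary (or moves more than 256 sectors)
 * and the device advertises ADDRESS48 support.  Controllers flagged
 * ATA_NO_48BIT_DMA fall back from DMA to the PIO/multi-sector variants.
 */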
1043 void
1044 ata_modify_if_48bit(struct ata_request *request)
1045 {
1046     struct ata_channel *ch = device_get_softc(request->parent);
1047     struct ata_device *atadev = device_get_softc(request->dev);
1048 
1049     request->flags &= ~ATA_R_48BIT;
1050 
1051     if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1052 	 request->u.ata.count > 256) &&
1053 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1054 
1055 	/* translate command into 48bit version */
1056 	switch (request->u.ata.command) {
1057 	case ATA_READ:
1058 	    request->u.ata.command = ATA_READ48;
1059 	    break;
1060 	case ATA_READ_MUL:
1061 	    request->u.ata.command = ATA_READ_MUL48;
1062 	    break;
1063 	case ATA_READ_DMA:
1064 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1065 		if (request->transfersize > DEV_BSIZE)
1066 		    request->u.ata.command = ATA_READ_MUL48;
1067 		else
1068 		    request->u.ata.command = ATA_READ48;
1069 		request->flags &= ~ATA_R_DMA;
1070 	    }
1071 	    else
1072 		request->u.ata.command = ATA_READ_DMA48;
1073 	    break;
1074 	case ATA_READ_DMA_QUEUED:
1075 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1076 		if (request->transfersize > DEV_BSIZE)
1077 		    request->u.ata.command = ATA_READ_MUL48;
1078 		else
1079 		    request->u.ata.command = ATA_READ48;
1080 		request->flags &= ~ATA_R_DMA;
1081 	    }
1082 	    else
1083 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
1084 	    break;
1085 	case ATA_WRITE:
1086 	    request->u.ata.command = ATA_WRITE48;
1087 	    break;
1088 	case ATA_WRITE_MUL:
1089 	    request->u.ata.command = ATA_WRITE_MUL48;
1090 	    break;
1091 	case ATA_WRITE_DMA:
1092 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1093 		if (request->transfersize > DEV_BSIZE)
1094 		    request->u.ata.command = ATA_WRITE_MUL48;
1095 		else
1096 		    request->u.ata.command = ATA_WRITE48;
1097 		request->flags &= ~ATA_R_DMA;
1098 	    }
1099 	    else
1100 		request->u.ata.command = ATA_WRITE_DMA48;
1101 	    break;
1102 	case ATA_WRITE_DMA_QUEUED:
1103 	    if (ch->flags & ATA_NO_48BIT_DMA) {
1104 		if (request->transfersize > DEV_BSIZE)
1105 		    request->u.ata.command = ATA_WRITE_MUL48;
1106 		else
1107 		    request->u.ata.command = ATA_WRITE48;
1109 		request->flags &= ~ATA_R_DMA;
1110 	    }
1111 	    else
1112 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1113 	    break;
1114 	case ATA_FLUSHCACHE:
1115 	    request->u.ata.command = ATA_FLUSHCACHE48;
1116 	    break;
1117 	case ATA_SET_MAX_ADDRESS:
1118 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1119 	    break;
1120 	default:
1121 	    return;
1122 	}
1123 	request->flags |= ATA_R_48BIT;
1124     }
1125     else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1126 
1127 	/* translate command into 48bit version */
1128 	switch (request->u.ata.command) {
1129 	case ATA_FLUSHCACHE:
1130 	    request->u.ata.command = ATA_FLUSHCACHE48;
1131 	    break;
1132 	case ATA_READ_NATIVE_MAX_ADDRESS:
1133 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1134 	    break;
1135 	case ATA_SET_MAX_ADDRESS:
1136 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1137 	    break;
1138 	default:
1139 	    return;
1140 	}
1141 	request->flags |= ATA_R_48BIT;
1142     }
1143 }
1144 
1145 void
1146 ata_udelay(int interval)
1147 {
1148     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
1149     if (1 || interval < (1000000/hz) || ata_delayed_attach)
1150 	DELAY(interval);
1151     else
1152 	pause("ataslp", interval/(1000000/hz));
1153 }
1154 
1155 char *
1156 ata_unit2str(struct ata_device *atadev)
1157 {
1158     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1159     static char str[8];
1160 
1161     if (ch->devices & ATA_PORTMULTIPLIER)
1162 	sprintf(str, "port%d", atadev->unit);
1163     else
1164 	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
1165     return str;
1166 }
1167 
1168 const char *
1169 ata_mode2str(int mode)
1170 {
1171     switch (mode) {
1172     case -1: return "UNSUPPORTED";
1173     case ATA_PIO0: return "PIO0";
1174     case ATA_PIO1: return "PIO1";
1175     case ATA_PIO2: return "PIO2";
1176     case ATA_PIO3: return "PIO3";
1177     case ATA_PIO4: return "PIO4";
1178     case ATA_WDMA0: return "WDMA0";
1179     case ATA_WDMA1: return "WDMA1";
1180     case ATA_WDMA2: return "WDMA2";
1181     case ATA_UDMA0: return "UDMA16";
1182     case ATA_UDMA1: return "UDMA25";
1183     case ATA_UDMA2: return "UDMA33";
1184     case ATA_UDMA3: return "UDMA40";
1185     case ATA_UDMA4: return "UDMA66";
1186     case ATA_UDMA5: return "UDMA100";
1187     case ATA_UDMA6: return "UDMA133";
1188     case ATA_SA150: return "SATA150";
1189     case ATA_SA300: return "SATA300";
1190     default:
1191 	if (mode & ATA_DMA_MASK)
1192 	    return "BIOSDMA";
1193 	else
1194 	    return "BIOSPIO";
1195     }
1196 }
1197 
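/*
 * Parse a transfer mode name back into an ATA_* constant; used for the
 * "mode" and "devN.mode" hints consulted in ata_attach() and
 * ata_getparam(), e.g. hint.ata.0.dev0.mode="UDMA100" in device.hints
 * (assuming the channel device is named ata0).  Returns -1 for unknown
 * strings.
 */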
1198 int
1199 ata_str2mode(const char *str)
1200 {
1201 
1202 	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1203 	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1204 	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1205 	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1206 	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1207 	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1208 	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1209 	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1210 	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1211 	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1212 	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1213 	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1214 	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1215 	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1216 	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1217 	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1218 	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1219 	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1220 	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1221 	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1222 	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1223 	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
1224 	return (-1);
1225 }
1226 
1227 const char *
1228 ata_satarev2str(int rev)
1229 {
1230 	switch (rev) {
1231 	case 0: return "";
1232 	case 1: return "SATA 1.5Gb/s";
1233 	case 2: return "SATA 3Gb/s";
1234 	case 3: return "SATA 6Gb/s";
1235 	case 0xff: return "SATA";
1236 	default: return "???";
1237 	}
1238 }
1239 
1240 int
1241 ata_atapi(device_t dev, int target)
1242 {
1243     struct ata_channel *ch = device_get_softc(dev);
1244 
1245     return (ch->devices & (ATA_ATAPI_MASTER << target));
1246 }
1247 
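/*
 * ata_pmode/ata_wmode/ata_umode derive the best supported PIO, WDMA and
 * UDMA modes from the IDENTIFY data (falling back to the retired PIO
 * mode words on very old devices); ata_wmode() and ata_umode() return -1
 * when that transfer class is not supported at all.
 */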
1248 int
1249 ata_pmode(struct ata_params *ap)
1250 {
1251     if (ap->atavalid & ATA_FLAG_64_70) {
1252 	if (ap->apiomodes & 0x02)
1253 	    return ATA_PIO4;
1254 	if (ap->apiomodes & 0x01)
1255 	    return ATA_PIO3;
1256     }
1257     if (ap->mwdmamodes & 0x04)
1258 	return ATA_PIO4;
1259     if (ap->mwdmamodes & 0x02)
1260 	return ATA_PIO3;
1261     if (ap->mwdmamodes & 0x01)
1262 	return ATA_PIO2;
1263     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1264 	return ATA_PIO2;
1265     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1266 	return ATA_PIO1;
1267     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
1268 	return ATA_PIO0;
1269     return ATA_PIO0;
1270 }
1271 
1272 int
1273 ata_wmode(struct ata_params *ap)
1274 {
1275     if (ap->mwdmamodes & 0x04)
1276 	return ATA_WDMA2;
1277     if (ap->mwdmamodes & 0x02)
1278 	return ATA_WDMA1;
1279     if (ap->mwdmamodes & 0x01)
1280 	return ATA_WDMA0;
1281     return -1;
1282 }
1283 
1284 int
1285 ata_umode(struct ata_params *ap)
1286 {
1287     if (ap->atavalid & ATA_FLAG_88) {
1288 	if (ap->udmamodes & 0x40)
1289 	    return ATA_UDMA6;
1290 	if (ap->udmamodes & 0x20)
1291 	    return ATA_UDMA5;
1292 	if (ap->udmamodes & 0x10)
1293 	    return ATA_UDMA4;
1294 	if (ap->udmamodes & 0x08)
1295 	    return ATA_UDMA3;
1296 	if (ap->udmamodes & 0x04)
1297 	    return ATA_UDMA2;
1298 	if (ap->udmamodes & 0x02)
1299 	    return ATA_UDMA1;
1300 	if (ap->udmamodes & 0x01)
1301 	    return ATA_UDMA0;
1302     }
1303     return -1;
1304 }
1305 
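/*
 * Clip a requested transfer mode against both an explicit maximum and
 * the capabilities the device reports, preferring the highest class
 * (UDMA over WDMA over PIO) the device actually supports.
 */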
1306 int
1307 ata_limit_mode(device_t dev, int mode, int maxmode)
1308 {
1309     struct ata_device *atadev = device_get_softc(dev);
1310 
1311     if (maxmode && mode > maxmode)
1312 	mode = maxmode;
1313 
1314     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1315 	return min(mode, ata_umode(&atadev->param));
1316 
1317     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1318 	return min(mode, ata_wmode(&atadev->param));
1319 
1320     if (mode > ata_pmode(&atadev->param))
1321 	return min(mode, ata_pmode(&atadev->param));
1322 
1323     return mode;
1324 }
1325 
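/*
 * bswap/btrim/bpack clean up the fixed-width, space-padded ASCII strings
 * in the IDENTIFY data: swap the 16-bit words to host order, turn NULs
 * and underscores into spaces, strip trailing blanks and collapse runs
 * of spaces.
 */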
1326 static void
1327 bswap(int8_t *buf, int len)
1328 {
1329     u_int16_t *ptr = (u_int16_t*)(buf + len);
1330 
1331     while (--ptr >= (u_int16_t*)buf)
1332 	*ptr = ntohs(*ptr);
1333 }
1334 
1335 static void
1336 btrim(int8_t *buf, int len)
1337 {
1338     int8_t *ptr;
1339 
1340     for (ptr = buf; ptr < buf+len; ++ptr)
1341 	if (!*ptr || *ptr == '_')
1342 	    *ptr = ' ';
1343     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
1344 	*ptr = 0;
1345 }
1346 
1347 static void
1348 bpack(int8_t *src, int8_t *dst, int len)
1349 {
1350     int i, j, blank;
1351 
1352     for (i = j = blank = 0 ; i < len; i++) {
1353 	if (blank && src[i] == ' ') continue;
1354 	if (blank && src[i] != ' ') {
1355 	    dst[j++] = src[i];
1356 	    blank = 0;
1357 	    continue;
1358 	}
1359 	if (src[i] == ' ') {
1360 	    blank = 1;
1361 	    if (i == 0)
1362 		continue;
1363 	}
1364 	dst[j++] = src[i];
1365     }
1366     if (j < len)
1367 	dst[j] = 0x00;
1368 }
1369 
1370 #ifdef ATA_CAM
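/*
 * Translate a CAM ccb (XPT_ATA_IO or XPT_SCSI_IO) into an ata_request
 * and hand it to the controller's begin_transaction method; if the
 * hardware finishes it immediately, the completion path runs right away.
 */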
1371 void
1372 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1373 {
1374 	struct ata_channel *ch = device_get_softc(dev);
1375 	struct ata_request *request;
1376 
1377 	if (!(request = ata_alloc_request())) {
1378 		device_printf(dev, "FAILURE - out of memory in start\n");
1379 		ccb->ccb_h.status = CAM_REQ_INVALID;
1380 		xpt_done(ccb);
1381 		return;
1382 	}
1383 	bzero(request, sizeof(*request));
1384 
1385 	/* setup request */
1386 	request->dev = NULL;
1387 	request->parent = dev;
1388 	request->unit = ccb->ccb_h.target_id;
1389 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1390 		request->data = ccb->ataio.data_ptr;
1391 		request->bytecount = ccb->ataio.dxfer_len;
1392 		request->u.ata.command = ccb->ataio.cmd.command;
1393 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1394 					  (uint16_t)ccb->ataio.cmd.features;
1395 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1396 					(uint16_t)ccb->ataio.cmd.sector_count;
1397 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1398 			request->flags |= ATA_R_48BIT;
1399 			request->u.ata.lba =
1400 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1401 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1402 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
1403 		} else {
1404 			request->u.ata.lba =
1405 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1406 		}
1407 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1408 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1409 				       (uint64_t)ccb->ataio.cmd.lba_low;
1410 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1411 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1412 			request->flags |= ATA_R_DMA;
1413 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1414 			request->flags |= ATA_R_READ;
1415 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1416 			request->flags |= ATA_R_WRITE;
1417 	} else {
1418 		request->data = ccb->csio.data_ptr;
1419 		request->bytecount = ccb->csio.dxfer_len;
1420 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1421 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1422 		    request->u.atapi.ccb, ccb->csio.cdb_len);
1423 		request->flags |= ATA_R_ATAPI;
1424 		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1425 			request->flags |= ATA_R_ATAPI16;
1426 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1427 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1428 			request->flags |= ATA_R_DMA;
1429 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1430 			request->flags |= ATA_R_READ;
1431 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1432 			request->flags |= ATA_R_WRITE;
1433 	}
1434 	request->transfersize = min(request->bytecount,
1435 	    ch->curr[ccb->ccb_h.target_id].bytecount);
1436 	request->retries = 0;
1437 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1438 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1439 	request->ccb = ccb;
1440 
1441 	ch->running = request;
1442 	ch->state = ATA_ACTIVE;
1443 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1444 	    ch->running = NULL;
1445 	    ch->state = ATA_IDLE;
1446 	    ata_cam_end_transaction(dev, request);
1447 	    return;
1448 	}
1449 }
1450 
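/*
 * Complete a request on behalf of CAM: map the driver status onto CAM
 * status codes, fill in the ATA result registers on error or when
 * explicitly requested, freeze the device queue on failures, and
 * trigger a channel reinit after a command timeout.
 */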
1451 void
1452 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1453 {
1454 	struct ata_channel *ch = device_get_softc(dev);
1455 	union ccb *ccb = request->ccb;
1456 	int fatalerr = 0;
1457 
1458 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1459 	if (request->flags & ATA_R_TIMEOUT) {
1460 		xpt_freeze_simq(ch->sim, 1);
1461 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1462 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1463 		fatalerr = 1;
1464 	} else if (request->status & ATA_S_ERROR) {
1465 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1466 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1467 		} else {
1468 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1469 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1470 		}
1471 	} else if (request->result == ERESTART)
1472 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1473 	else if (request->result != 0)
1474 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1475 	else
1476 		ccb->ccb_h.status |= CAM_REQ_CMP;
1477 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1478 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1479 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1480 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1481 	}
1482 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1483 	    ((request->status & ATA_S_ERROR) ||
1484 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1485 		struct ata_res *res = &ccb->ataio.res;
1486 		res->status = request->status;
1487 		res->error = request->error;
1488 		res->lba_low = request->u.ata.lba;
1489 		res->lba_mid = request->u.ata.lba >> 8;
1490 		res->lba_high = request->u.ata.lba >> 16;
1491 		res->device = request->u.ata.lba >> 24;
1492 		res->lba_low_exp = request->u.ata.lba >> 24;
1493 		res->lba_mid_exp = request->u.ata.lba >> 32;
1494 		res->lba_high_exp = request->u.ata.lba >> 40;
1495 		res->sector_count = request->u.ata.count;
1496 		res->sector_count_exp = request->u.ata.count >> 8;
1497 	}
1498 	ata_free_request(request);
1499 	xpt_done(ccb);
1500 	/* Do error recovery if needed. */
1501 	if (fatalerr)
1502 		ata_reinit(dev);
1503 }
1504 
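/*
 * Validate the target/lun of a ccb against what the channel supports
 * (one or two targets, lun 0 only); invalid ccbs are completed here and
 * must not be touched again by the caller.
 */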
1505 static int
1506 ata_check_ids(device_t dev, union ccb *ccb)
1507 {
1508 	struct ata_channel *ch = device_get_softc(dev);
1509 
1510 	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1511 		ccb->ccb_h.status = CAM_TID_INVALID;
1512 		xpt_done(ccb);
1513 		return (-1);
1514 	}
1515 	if (ccb->ccb_h.target_lun != 0) {
1516 		ccb->ccb_h.status = CAM_LUN_INVALID;
1517 		xpt_done(ccb);
1518 		return (-1);
1519 	}
1520 	return (0);
1521 }
1522 
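/*
 * CAM SIM action entry point: dispatch I/O ccbs into
 * ata_cam_begin_transaction(), handle transfer-setting get/set, bus
 * reset and path inquiry locally, and reject everything else.
 */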
1523 static void
1524 ataaction(struct cam_sim *sim, union ccb *ccb)
1525 {
1526 	device_t dev;
1527 	struct ata_channel *ch;
1528 
1529 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1530 	    ccb->ccb_h.func_code));
1531 
1532 	ch = (struct ata_channel *)cam_sim_softc(sim);
1533 	dev = ch->dev;
1534 	switch (ccb->ccb_h.func_code) {
1535 	/* Common cases first */
1536 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
1537 	case XPT_SCSI_IO:
1538 		if (ata_check_ids(dev, ccb))
1539 			return;
1540 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1541 		    << ccb->ccb_h.target_id)) == 0) {
1542 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1543 			break;
1544 		}
1545 		if (ch->running)
1546 			device_printf(dev, "already running!\n");
1547 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1548 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1549 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
1550 			struct ata_res *res = &ccb->ataio.res;
1551 
1552 			bzero(res, sizeof(*res));
1553 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
1554 				res->lba_high = 0;
1555 				res->lba_mid = 0;
1556 			} else {
1557 				res->lba_high = 0xeb;
1558 				res->lba_mid = 0x14;
1559 			}
1560 			ccb->ccb_h.status = CAM_REQ_CMP;
1561 			break;
1562 		}
1563 		ata_cam_begin_transaction(dev, ccb);
1564 		return;
1565 	case XPT_EN_LUN:		/* Enable LUN as a target */
1566 	case XPT_TARGET_IO:		/* Execute target I/O request */
1567 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
1568 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
1569 	case XPT_ABORT:			/* Abort the specified CCB */
1570 		/* XXX Implement */
1571 		ccb->ccb_h.status = CAM_REQ_INVALID;
1572 		break;
1573 	case XPT_SET_TRAN_SETTINGS:
1574 	{
1575 		struct	ccb_trans_settings *cts = &ccb->cts;
1576 		struct	ata_cam_device *d;
1577 
1578 		if (ata_check_ids(dev, ccb))
1579 			return;
1580 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1581 			d = &ch->curr[ccb->ccb_h.target_id];
1582 		else
1583 			d = &ch->user[ccb->ccb_h.target_id];
1584 		if (ch->flags & ATA_SATA) {
1585 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1586 				d->revision = cts->xport_specific.sata.revision;
1587 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
1588 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1589 					d->mode = ATA_SETMODE(ch->dev,
1590 					    ccb->ccb_h.target_id,
1591 					    cts->xport_specific.sata.mode);
1592 				} else
1593 					d->mode = cts->xport_specific.sata.mode;
1594 			}
1595 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1596 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1597 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1598 				d->atapi = cts->xport_specific.sata.atapi;
1599 		} else {
1600 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1601 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1602 					d->mode = ATA_SETMODE(ch->dev,
1603 					    ccb->ccb_h.target_id,
1604 					    cts->xport_specific.ata.mode);
1605 				} else
1606 					d->mode = cts->xport_specific.ata.mode;
1607 			}
1608 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1609 				d->bytecount = cts->xport_specific.ata.bytecount;
1610 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1611 				d->atapi = cts->xport_specific.ata.atapi;
1612 		}
1613 		ccb->ccb_h.status = CAM_REQ_CMP;
1614 		break;
1615 	}
1616 	case XPT_GET_TRAN_SETTINGS:
1617 	{
1618 		struct	ccb_trans_settings *cts = &ccb->cts;
1619 		struct  ata_cam_device *d;
1620 
1621 		if (ata_check_ids(dev, ccb))
1622 			return;
1623 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1624 			d = &ch->curr[ccb->ccb_h.target_id];
1625 		else
1626 			d = &ch->user[ccb->ccb_h.target_id];
1627 		cts->protocol = PROTO_ATA;
1628 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1629 		if (ch->flags & ATA_SATA) {
1630 			cts->transport = XPORT_SATA;
1631 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1632 			cts->xport_specific.sata.valid = 0;
1633 			cts->xport_specific.sata.mode = d->mode;
1634 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1635 			cts->xport_specific.sata.bytecount = d->bytecount;
1636 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1637 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1638 				cts->xport_specific.sata.revision =
1639 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1640 				if (cts->xport_specific.sata.revision != 0xff) {
1641 					cts->xport_specific.sata.valid |=
1642 					    CTS_SATA_VALID_REVISION;
1643 				}
1644 			} else {
1645 				cts->xport_specific.sata.revision = d->revision;
1646 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1647 			}
1648 			cts->xport_specific.sata.atapi = d->atapi;
1649 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1650 		} else {
1651 			cts->transport = XPORT_ATA;
1652 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1653 			cts->xport_specific.ata.valid = 0;
1654 			cts->xport_specific.ata.mode = d->mode;
1655 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1656 			cts->xport_specific.ata.bytecount = d->bytecount;
1657 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1658 			cts->xport_specific.ata.atapi = d->atapi;
1659 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1660 		}
1661 		ccb->ccb_h.status = CAM_REQ_CMP;
1662 		break;
1663 	}
1664 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1665 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1666 		ata_reinit(dev);
1667 		ccb->ccb_h.status = CAM_REQ_CMP;
1668 		break;
1669 	case XPT_TERM_IO:		/* Terminate the I/O process */
1670 		/* XXX Implement */
1671 		ccb->ccb_h.status = CAM_REQ_INVALID;
1672 		break;
1673 	case XPT_PATH_INQ:		/* Path routing inquiry */
1674 	{
1675 		struct ccb_pathinq *cpi = &ccb->cpi;
1676 
1677 		cpi->version_num = 1; /* XXX??? */
1678 		cpi->hba_inquiry = PI_SDTR_ABLE;
1679 		cpi->target_sprt = 0;
1680 		cpi->hba_misc = PIM_SEQSCAN;
1681 		cpi->hba_eng_cnt = 0;
1682 		if (ch->flags & ATA_NO_SLAVE)
1683 			cpi->max_target = 0;
1684 		else
1685 			cpi->max_target = 1;
1686 		cpi->max_lun = 0;
1687 		cpi->initiator_id = 0;
1688 		cpi->bus_id = cam_sim_bus(sim);
1689 		if (ch->flags & ATA_SATA)
1690 			cpi->base_transfer_speed = 150000;
1691 		else
1692 			cpi->base_transfer_speed = 3300;
1693 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1694 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1695 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1696 		cpi->unit_number = cam_sim_unit(sim);
1697 		if (ch->flags & ATA_SATA)
1698 			cpi->transport = XPORT_SATA;
1699 		else
1700 			cpi->transport = XPORT_ATA;
1701 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1702 		cpi->protocol = PROTO_ATA;
1703 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1704 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1705 		cpi->ccb_h.status = CAM_REQ_CMP;
1706 		break;
1707 	}
1708 	default:
1709 		ccb->ccb_h.status = CAM_REQ_INVALID;
1710 		break;
1711 	}
1712 	xpt_done(ccb);
1713 }
1714 
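/* Polled completion hook used by CAM when interrupts are unavailable. */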
1715 static void
1716 atapoll(struct cam_sim *sim)
1717 {
1718 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1719 
1720 	ata_interrupt_locked(ch);
1721 }
1722 #endif
1723 
1724 /*
1725  * module handling
1726  */
1727 static int
1728 ata_module_event_handler(module_t mod, int what, void *arg)
1729 {
1730 #ifndef ATA_CAM
1731     static struct cdev *atacdev;
1732 #endif
1733 
1734     switch (what) {
1735     case MOD_LOAD:
1736 #ifndef ATA_CAM
1737 	/* register controlling device */
1738 	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1739 
1740 	if (cold) {
1741 	    /* register boot attach to be run when interrupts are enabled */
1742 	    if (!(ata_delayed_attach = (struct intr_config_hook *)
1743 				       malloc(sizeof(struct intr_config_hook),
1744 					      M_TEMP, M_NOWAIT | M_ZERO))) {
1745 		printf("ata: malloc of delayed attach hook failed\n");
1746 		return EIO;
1747 	    }
1748 	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1749 	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
1750 		printf("ata: config_intrhook_establish failed\n");
1751 		free(ata_delayed_attach, M_TEMP);
1752 	    }
1753 	}
1754 #endif
1755 	return 0;
1756 
1757     case MOD_UNLOAD:
1758 #ifndef ATA_CAM
1759 	/* deregister controlling device */
1760 	destroy_dev(atacdev);
1761 #endif
1762 	return 0;
1763 
1764     default:
1765 	return EOPNOTSUPP;
1766     }
1767 }
1768 
1769 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1770 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1771 MODULE_VERSION(ata, 1);
1772 #ifdef ATA_CAM
1773 MODULE_DEPEND(ata, cam, 1, 1, 1);
1774 #endif
1775 
1776 static void
1777 ata_init(void)
1778 {
1779     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1780 				   NULL, NULL, NULL, NULL, 0, 0);
1781     ata_composite_zone = uma_zcreate("ata_composite",
1782 				     sizeof(struct ata_composite),
1783 				     NULL, NULL, NULL, NULL, 0, 0);
1784 }
1785 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1786 
1787 static void
1788 ata_uninit(void)
1789 {
1790     uma_zdestroy(ata_composite_zone);
1791     uma_zdestroy(ata_request_zone);
1792 }
1793 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1794