xref: /freebsd/sys/dev/ata/ata-all.c (revision a18eacbefdfa1085ca3db829e86ece78cd416493)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/ata.h>
33 #include <sys/kernel.h>
34 #include <sys/module.h>
35 #include <sys/endian.h>
36 #include <sys/ctype.h>
37 #include <sys/conf.h>
38 #include <sys/bus.h>
39 #include <sys/bio.h>
40 #include <sys/malloc.h>
41 #include <sys/sysctl.h>
42 #include <sys/sema.h>
43 #include <sys/taskqueue.h>
44 #include <vm/uma.h>
45 #include <machine/stdarg.h>
46 #include <machine/resource.h>
47 #include <machine/bus.h>
48 #include <sys/rman.h>
49 #include <dev/ata/ata-all.h>
50 #include <dev/pci/pcivar.h>
51 #include <ata_if.h>
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_xpt_sim.h>
57 #include <cam/cam_debug.h>
58 
59 /* prototypes */
60 static void ataaction(struct cam_sim *sim, union ccb *ccb);
61 static void atapoll(struct cam_sim *sim);
62 static void ata_cam_begin_transaction(device_t dev, union ccb *ccb);
63 static void ata_cam_end_transaction(device_t dev, struct ata_request *request);
64 static void ata_cam_request_sense(device_t dev, struct ata_request *request);
65 static int ata_check_ids(device_t dev, union ccb *ccb);
66 static void ata_conn_event(void *context, int dummy);
67 static void ata_init(void);
68 static void ata_interrupt_locked(void *data);
69 static int ata_module_event_handler(module_t mod, int what, void *arg);
70 static void ata_periodic_poll(void *data);
71 static int ata_str2mode(const char *str);
72 static void ata_uninit(void);
73 
74 /* global vars */
75 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
76 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
77 devclass_t ata_devclass;
78 uma_zone_t ata_request_zone;
79 int ata_dma_check_80pin = 1;
80 
81 /* sysctl vars */
82 static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
83 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
84 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
85 	   CTLFLAG_RW, &ata_dma_check_80pin, 1,
86 	   "Check for 80pin cable before setting ATA DMA mode");
87 FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");
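/*
 * Example (illustrative only): the 80-pin cable check can be disabled from
 * loader.conf(5) via the tunable registered above, e.g.
 *	hw.ata.ata_dma_check_80pin="0"
 * or changed at runtime through the matching read-write sysctl
 * hw.ata.ata_dma_check_80pin.
 */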
88 
89 /*
90  * newbus device interface related functions
91  */
92 int
93 ata_probe(device_t dev)
94 {
95     return (BUS_PROBE_DEFAULT);
96 }
97 
98 int
99 ata_attach(device_t dev)
100 {
101     struct ata_channel *ch = device_get_softc(dev);
102     int error, rid;
103     struct cam_devq *devq;
104     const char *res;
105     char buf[64];
106     int i, mode;
107 
108     /* check that we have a virgin channel to attach */
109     if (ch->r_irq)
110 	return EEXIST;
111 
112     /* initialize the softc basics */
113     ch->dev = dev;
114     ch->state = ATA_IDLE;
115     bzero(&ch->state_mtx, sizeof(struct mtx));
116     mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
117     TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
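	/*
	 * Initial per-device transfer settings can be supplied as hints; a
	 * sketch, assuming the channel attaches as ata0 and the usual
	 * hint.<name>.<unit>.<resource> mapping:
	 *	hint.ata.0.dev0.mode="UDMA100"	(falls back to "mode")
	 *	hint.ata.0.dev0.sata_rev="1"	(falls back to "sata_rev")
	 */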
118 	for (i = 0; i < 16; i++) {
119 		ch->user[i].revision = 0;
120 		snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
121 		if (resource_int_value(device_get_name(dev),
122 		    device_get_unit(dev), buf, &mode) != 0 &&
123 		    resource_int_value(device_get_name(dev),
124 		    device_get_unit(dev), "sata_rev", &mode) != 0)
125 			mode = -1;
126 		if (mode >= 0)
127 			ch->user[i].revision = mode;
128 		ch->user[i].mode = 0;
129 		snprintf(buf, sizeof(buf), "dev%d.mode", i);
130 		if (resource_string_value(device_get_name(dev),
131 		    device_get_unit(dev), buf, &res) == 0)
132 			mode = ata_str2mode(res);
133 		else if (resource_string_value(device_get_name(dev),
134 		    device_get_unit(dev), "mode", &res) == 0)
135 			mode = ata_str2mode(res);
136 		else
137 			mode = -1;
138 		if (mode >= 0)
139 			ch->user[i].mode = mode;
140 		if (ch->flags & ATA_SATA)
141 			ch->user[i].bytecount = 8192;
142 		else
143 			ch->user[i].bytecount = MAXPHYS;
144 		ch->user[i].caps = 0;
145 		ch->curr[i] = ch->user[i];
146 		if (ch->flags & ATA_SATA) {
147 			if (ch->pm_level > 0)
148 				ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
149 			if (ch->pm_level > 1)
150 				ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
151 		} else {
152 			if (!(ch->flags & ATA_NO_48BIT_DMA))
153 				ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
154 		}
155 	}
156 	callout_init(&ch->poll_callout, 1);
157 
158     /* allocate DMA resources if DMA HW is present */
159     if (ch->dma.alloc)
160 	ch->dma.alloc(dev);
161 
162     /* set up interrupt delivery */
163     rid = ATA_IRQ_RID;
164     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
165 				       RF_SHAREABLE | RF_ACTIVE);
166     if (!ch->r_irq) {
167 	device_printf(dev, "unable to allocate interrupt\n");
168 	return ENXIO;
169     }
170     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
171 				ata_interrupt, ch, &ch->ih))) {
172 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
173 	device_printf(dev, "unable to setup interrupt\n");
174 	return error;
175     }
176 
177 	if (ch->flags & ATA_PERIODIC_POLL)
178 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
179 	mtx_lock(&ch->state_mtx);
180 	/* Create the device queue for our SIM. */
181 	devq = cam_simq_alloc(1);
182 	if (devq == NULL) {
183 		device_printf(dev, "Unable to allocate simq\n");
184 		error = ENOMEM;
185 		goto err1;
186 	}
187 	/* Construct SIM entry */
188 	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
189 	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
190 	if (ch->sim == NULL) {
191 		device_printf(dev, "unable to allocate sim\n");
192 		cam_simq_free(devq);
193 		error = ENOMEM;
194 		goto err1;
195 	}
196 	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
197 		device_printf(dev, "unable to register xpt bus\n");
198 		error = ENXIO;
199 		goto err2;
200 	}
201 	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
202 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
203 		device_printf(dev, "unable to create path\n");
204 		error = ENXIO;
205 		goto err3;
206 	}
207 	mtx_unlock(&ch->state_mtx);
208 	return (0);
209 
210 err3:
211 	xpt_bus_deregister(cam_sim_path(ch->sim));
212 err2:
213 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
214 	ch->sim = NULL;
215 err1:
216 	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
217 	mtx_unlock(&ch->state_mtx);
218 	if (ch->flags & ATA_PERIODIC_POLL)
219 		callout_drain(&ch->poll_callout);
220 	return (error);
221 }
222 
223 int
224 ata_detach(device_t dev)
225 {
226     struct ata_channel *ch = device_get_softc(dev);
227 
228     /* check that we have a valid channel to detach */
229     if (!ch->r_irq)
230 	return ENXIO;
231 
232     /* grab the channel lock so no new requests get launched */
233     mtx_lock(&ch->state_mtx);
234     ch->state |= ATA_STALL_QUEUE;
235     mtx_unlock(&ch->state_mtx);
236     if (ch->flags & ATA_PERIODIC_POLL)
237 	callout_drain(&ch->poll_callout);
238 
239     taskqueue_drain(taskqueue_thread, &ch->conntask);
240 
241 	mtx_lock(&ch->state_mtx);
242 	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
243 	xpt_free_path(ch->path);
244 	xpt_bus_deregister(cam_sim_path(ch->sim));
245 	cam_sim_free(ch->sim, /*free_devq*/TRUE);
246 	ch->sim = NULL;
247 	mtx_unlock(&ch->state_mtx);
248 
249     /* release resources */
250     bus_teardown_intr(dev, ch->r_irq, ch->ih);
251     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
252     ch->r_irq = NULL;
253 
254     /* free DMA resources if DMA HW is present */
255     if (ch->dma.free)
256 	ch->dma.free(dev);
257 
258     mtx_destroy(&ch->state_mtx);
259     return 0;
260 }
261 
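/*
 * Taskqueue handler for ch->conntask (set up in ata_attach(), drained in
 * ata_detach()): reinitialize the channel and, if a CCB can be allocated,
 * ask CAM to rescan the bus so connected or removed devices are noticed.
 */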
262 static void
263 ata_conn_event(void *context, int dummy)
264 {
265 	device_t dev = (device_t)context;
266 	struct ata_channel *ch = device_get_softc(dev);
267 	union ccb *ccb;
268 
269 	mtx_lock(&ch->state_mtx);
270 	if (ch->sim == NULL) {
271 		mtx_unlock(&ch->state_mtx);
272 		return;
273 	}
274 	ata_reinit(dev);
275 	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
276 		return;
277 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
278 	    cam_sim_path(ch->sim),
279 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
280 		xpt_free_ccb(ccb);
281 		return;
282 	}
283 	xpt_rescan(ccb);
284 	mtx_unlock(&ch->state_mtx);
285 }
286 
287 int
288 ata_reinit(device_t dev)
289 {
290     struct ata_channel *ch = device_get_softc(dev);
291     struct ata_request *request;
292 
293 	xpt_freeze_simq(ch->sim, 1);
294 	if ((request = ch->running)) {
295 		ch->running = NULL;
296 		if (ch->state == ATA_ACTIVE)
297 		    ch->state = ATA_IDLE;
298 		callout_stop(&request->callout);
299 		if (ch->dma.unload)
300 		    ch->dma.unload(request);
301 		request->result = ERESTART;
302 		ata_cam_end_transaction(dev, request);
303 	}
304 	/* reset the controller HW, the channel and device(s) */
305 	ATA_RESET(dev);
306 	/* Tell the XPT about the event */
307 	xpt_async(AC_BUS_RESET, ch->path, NULL);
308 	xpt_release_simq(ch->sim, TRUE);
309 	return(0);
310 }
311 
312 int
313 ata_suspend(device_t dev)
314 {
315     struct ata_channel *ch;
316 
317     /* check for valid device */
318     if (!dev || !(ch = device_get_softc(dev)))
319 	return ENXIO;
320 
321 	if (ch->flags & ATA_PERIODIC_POLL)
322 		callout_drain(&ch->poll_callout);
323 	mtx_lock(&ch->state_mtx);
324 	xpt_freeze_simq(ch->sim, 1);
325 	while (ch->state != ATA_IDLE)
326 		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
327 	mtx_unlock(&ch->state_mtx);
328     return(0);
329 }
330 
331 int
332 ata_resume(device_t dev)
333 {
334     struct ata_channel *ch;
335     int error;
336 
337     /* check for valid device */
338     if (!dev || !(ch = device_get_softc(dev)))
339 	return ENXIO;
340 
341 	mtx_lock(&ch->state_mtx);
342 	error = ata_reinit(dev);
343 	xpt_release_simq(ch->sim, TRUE);
344 	mtx_unlock(&ch->state_mtx);
345 	if (ch->flags & ATA_PERIODIC_POLL)
346 		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
347     return error;
348 }
349 
350 void
351 ata_interrupt(void *data)
352 {
353     struct ata_channel *ch = (struct ata_channel *)data;
354 
355     mtx_lock(&ch->state_mtx);
356     ata_interrupt_locked(data);
357     mtx_unlock(&ch->state_mtx);
358 }
359 
360 static void
361 ata_interrupt_locked(void *data)
362 {
363     struct ata_channel *ch = (struct ata_channel *)data;
364     struct ata_request *request;
365 
366     do {
367 	/* ignore the interrupt if it's not for us */
368 	if (ch->hw.status && !ch->hw.status(ch->dev))
369 	    break;
370 
371 	/* do we have a running request? */
372 	if (!(request = ch->running))
373 	    break;
374 
375 	ATA_DEBUG_RQ(request, "interrupt");
376 
377 	/* safety check for the right state */
378 	if (ch->state == ATA_IDLE) {
379 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
380 	    break;
381 	}
382 
383 	/*
384 	 * we have the HW locks, so end the transaction for this request
385 	 * if it finishes immediately; otherwise wait for the next interrupt
386 	 */
387 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
388 	    ch->running = NULL;
389 	    if (ch->state == ATA_ACTIVE)
390 		ch->state = ATA_IDLE;
391 	    ata_cam_end_transaction(ch->dev, request);
392 	    return;
393 	}
394     } while (0);
395 }
396 
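/*
 * 1 Hz poll armed from ata_attach() for channels flagged ATA_PERIODIC_POLL:
 * re-arm the callout and run the interrupt handler to pick up completions
 * whose interrupt may have been missed.
 */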
397 static void
398 ata_periodic_poll(void *data)
399 {
400     struct ata_channel *ch = (struct ata_channel *)data;
401 
402     callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
403     ata_interrupt(ch);
404 }
405 
406 void
407 ata_print_cable(device_t dev, u_int8_t *who)
408 {
409     device_printf(dev,
410                   "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
411 }
412 
413 /*
414  * misc support functions
415  */
416 void
417 ata_default_registers(device_t dev)
418 {
419     struct ata_channel *ch = device_get_softc(dev);
420 
421     /* fill in the defaults from what's already set up */
422     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
423     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
424     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
425     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
426     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
427     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
428     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
429     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
430 }
431 
432 void
433 ata_udelay(int interval)
434 {
435     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
436     if (1 || interval < (1000000/hz) || ata_delayed_attach)
437 	DELAY(interval);
438     else
439 	pause("ataslp", interval/(1000000/hz));
440 }
441 
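/*
 * Note: the fallback strings below are formatted into a static buffer, so
 * the returned pointer is only valid until the next call and the function
 * is not reentrant.
 */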
442 const char *
443 ata_cmd2str(struct ata_request *request)
444 {
445 	static char buffer[20];
446 
447 	if (request->flags & ATA_R_ATAPI) {
448 		switch (request->u.atapi.sense.key ?
449 		    request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
450 		case 0x00: return ("TEST_UNIT_READY");
451 		case 0x01: return ("REZERO");
452 		case 0x03: return ("REQUEST_SENSE");
453 		case 0x04: return ("FORMAT");
454 		case 0x08: return ("READ");
455 		case 0x0a: return ("WRITE");
456 		case 0x10: return ("WEOF");
457 		case 0x11: return ("SPACE");
458 		case 0x12: return ("INQUIRY");
459 		case 0x15: return ("MODE_SELECT");
460 		case 0x19: return ("ERASE");
461 		case 0x1a: return ("MODE_SENSE");
462 		case 0x1b: return ("START_STOP");
463 		case 0x1e: return ("PREVENT_ALLOW");
464 		case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
465 		case 0x25: return ("READ_CAPACITY");
466 		case 0x28: return ("READ_BIG");
467 		case 0x2a: return ("WRITE_BIG");
468 		case 0x2b: return ("LOCATE");
469 		case 0x34: return ("READ_POSITION");
470 		case 0x35: return ("SYNCHRONIZE_CACHE");
471 		case 0x3b: return ("WRITE_BUFFER");
472 		case 0x3c: return ("READ_BUFFER");
473 		case 0x42: return ("READ_SUBCHANNEL");
474 		case 0x43: return ("READ_TOC");
475 		case 0x45: return ("PLAY_10");
476 		case 0x47: return ("PLAY_MSF");
477 		case 0x48: return ("PLAY_TRACK");
478 		case 0x4b: return ("PAUSE");
479 		case 0x51: return ("READ_DISK_INFO");
480 		case 0x52: return ("READ_TRACK_INFO");
481 		case 0x53: return ("RESERVE_TRACK");
482 		case 0x54: return ("SEND_OPC_INFO");
483 		case 0x55: return ("MODE_SELECT_BIG");
484 		case 0x58: return ("REPAIR_TRACK");
485 		case 0x59: return ("READ_MASTER_CUE");
486 		case 0x5a: return ("MODE_SENSE_BIG");
487 		case 0x5b: return ("CLOSE_TRACK/SESSION");
488 		case 0x5c: return ("READ_BUFFER_CAPACITY");
489 		case 0x5d: return ("SEND_CUE_SHEET");
490 		case 0x96: return ("SERVICE_ACTION_IN");
491 		case 0xa1: return ("BLANK_CMD");
492 		case 0xa3: return ("SEND_KEY");
493 		case 0xa4: return ("REPORT_KEY");
494 		case 0xa5: return ("PLAY_12");
495 		case 0xa6: return ("LOAD_UNLOAD");
496 		case 0xad: return ("READ_DVD_STRUCTURE");
497 		case 0xb4: return ("PLAY_CD");
498 		case 0xbb: return ("SET_SPEED");
499 		case 0xbd: return ("MECH_STATUS");
500 		case 0xbe: return ("READ_CD");
501 		case 0xff: return ("POLL_DSC");
502 		}
503 	} else {
504 		switch (request->u.ata.command) {
505 		case 0x00: return ("NOP");
506 		case 0x08: return ("DEVICE_RESET");
507 		case 0x20: return ("READ");
508 		case 0x24: return ("READ48");
509 		case 0x25: return ("READ_DMA48");
510 		case 0x26: return ("READ_DMA_QUEUED48");
511 		case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
512 		case 0x29: return ("READ_MUL48");
513 		case 0x30: return ("WRITE");
514 		case 0x34: return ("WRITE48");
515 		case 0x35: return ("WRITE_DMA48");
516 		case 0x36: return ("WRITE_DMA_QUEUED48");
517 		case 0x37: return ("SET_MAX_ADDRESS48");
518 		case 0x39: return ("WRITE_MUL48");
519 		case 0x70: return ("SEEK");
520 		case 0xa0: return ("PACKET_CMD");
521 		case 0xa1: return ("ATAPI_IDENTIFY");
522 		case 0xa2: return ("SERVICE");
523 		case 0xb0: return ("SMART");
524 		case 0xc0: return ("CFA ERASE");
525 		case 0xc4: return ("READ_MUL");
526 		case 0xc5: return ("WRITE_MUL");
527 		case 0xc6: return ("SET_MULTI");
528 		case 0xc7: return ("READ_DMA_QUEUED");
529 		case 0xc8: return ("READ_DMA");
530 		case 0xca: return ("WRITE_DMA");
531 		case 0xcc: return ("WRITE_DMA_QUEUED");
532 		case 0xe6: return ("SLEEP");
533 		case 0xe7: return ("FLUSHCACHE");
534 		case 0xea: return ("FLUSHCACHE48");
535 		case 0xec: return ("ATA_IDENTIFY");
536 		case 0xef:
537 			switch (request->u.ata.feature) {
538 			case 0x03: return ("SETFEATURES SET TRANSFER MODE");
539 			case 0x02: return ("SETFEATURES ENABLE WCACHE");
540 			case 0x82: return ("SETFEATURES DISABLE WCACHE");
541 			case 0xaa: return ("SETFEATURES ENABLE RCACHE");
542 			case 0x55: return ("SETFEATURES DISABLE RCACHE");
543 			}
544 			sprintf(buffer, "SETFEATURES 0x%02x",
545 			    request->u.ata.feature);
546 			return (buffer);
547 		case 0xf5: return ("SECURITY_FREE_LOCK");
548 		case 0xf8: return ("READ_NATIVE_MAX_ADDRESS");
549 		case 0xf9: return ("SET_MAX_ADDRESS");
550 		}
551 	}
552 	sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
553 	return (buffer);
554 }
555 
556 const char *
557 ata_mode2str(int mode)
558 {
559     switch (mode) {
560     case -1: return "UNSUPPORTED";
561     case ATA_PIO0: return "PIO0";
562     case ATA_PIO1: return "PIO1";
563     case ATA_PIO2: return "PIO2";
564     case ATA_PIO3: return "PIO3";
565     case ATA_PIO4: return "PIO4";
566     case ATA_WDMA0: return "WDMA0";
567     case ATA_WDMA1: return "WDMA1";
568     case ATA_WDMA2: return "WDMA2";
569     case ATA_UDMA0: return "UDMA16";
570     case ATA_UDMA1: return "UDMA25";
571     case ATA_UDMA2: return "UDMA33";
572     case ATA_UDMA3: return "UDMA40";
573     case ATA_UDMA4: return "UDMA66";
574     case ATA_UDMA5: return "UDMA100";
575     case ATA_UDMA6: return "UDMA133";
576     case ATA_SA150: return "SATA150";
577     case ATA_SA300: return "SATA300";
578     default:
579 	if (mode & ATA_DMA_MASK)
580 	    return "BIOSDMA";
581 	else
582 	    return "BIOSPIO";
583     }
584 }
585 
586 static int
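/*
 * Parse a transfer mode name from a hint string; both the numeric ("UDMA5")
 * and the speed-based ("UDMA100") spellings are accepted.  Returns -1 if
 * the string matches no known mode.
 */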
587 ata_str2mode(const char *str)
588 {
589 
590 	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
591 	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
592 	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
593 	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
594 	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
595 	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
596 	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
597 	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
598 	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
599 	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
600 	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
601 	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
602 	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
603 	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
604 	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
605 	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
606 	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
607 	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
608 	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
609 	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
610 	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
611 	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
612 	return (-1);
613 }
614 
615 int
616 ata_atapi(device_t dev, int target)
617 {
618     struct ata_channel *ch = device_get_softc(dev);
619 
620     return (ch->devices & (ATA_ATAPI_MASTER << target));
621 }
622 
623 void
624 ata_timeout(struct ata_request *request)
625 {
626 	struct ata_channel *ch;
627 
628 	ch = device_get_softc(request->parent);
629 	//request->flags |= ATA_R_DEBUG;
630 	ATA_DEBUG_RQ(request, "timeout");
631 
632 	/*
633 	 * If we have an ATA_ACTIVE request running, we flag the request
634 	 * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly.
635 	 * Also, NULL out the running request so we won't lose the race with
636 	 * a late-arriving interrupt.
637 	 */
638 	if (ch->state == ATA_ACTIVE) {
639 		request->flags |= ATA_R_TIMEOUT;
640 		if (ch->dma.unload)
641 			ch->dma.unload(request);
642 		ch->running = NULL;
643 		ch->state = ATA_IDLE;
644 		ata_cam_end_transaction(ch->dev, request);
645 	}
646 	mtx_unlock(&ch->state_mtx);
647 }
648 
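/*
 * Translate a CAM CCB (XPT_ATA_IO or XPT_SCSI_IO) into an ata_request and
 * hand it to the controller's begin_transaction hook; the result is
 * delivered later through ata_cam_end_transaction().
 */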
649 static void
650 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
651 {
652 	struct ata_channel *ch = device_get_softc(dev);
653 	struct ata_request *request;
654 
655 	if (!(request = ata_alloc_request())) {
656 		device_printf(dev, "FAILURE - out of memory in start\n");
657 		ccb->ccb_h.status = CAM_REQ_INVALID;
658 		xpt_done(ccb);
659 		return;
660 	}
661 	bzero(request, sizeof(*request));
662 
663 	/* setup request */
664 	request->dev = NULL;
665 	request->parent = dev;
666 	request->unit = ccb->ccb_h.target_id;
667 	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
668 		request->data = ccb->ataio.data_ptr;
669 		request->bytecount = ccb->ataio.dxfer_len;
670 		request->u.ata.command = ccb->ataio.cmd.command;
671 		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
672 					  (uint16_t)ccb->ataio.cmd.features;
673 		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
674 					(uint16_t)ccb->ataio.cmd.sector_count;
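		/*
		 * Assemble the 64-bit LBA: for 48-bit commands the *_exp
		 * fields supply bits 24-47, otherwise the low nibble of the
		 * device register supplies bits 24-27; bits 0-23 always come
		 * from lba_low/lba_mid/lba_high below.
		 */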
675 		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
676 			request->flags |= ATA_R_48BIT;
677 			request->u.ata.lba =
678 				     ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
679 				     ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
680 				     ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
681 		} else {
682 			request->u.ata.lba =
683 				     ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
684 		}
685 		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
686 				      ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
687 				       (uint64_t)ccb->ataio.cmd.lba_low;
688 		if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
689 			request->flags |= ATA_R_NEEDRESULT;
690 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
691 		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
692 			request->flags |= ATA_R_DMA;
693 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
694 			request->flags |= ATA_R_READ;
695 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
696 			request->flags |= ATA_R_WRITE;
697 		if (ccb->ataio.cmd.command == ATA_READ_MUL ||
698 		    ccb->ataio.cmd.command == ATA_READ_MUL48 ||
699 		    ccb->ataio.cmd.command == ATA_WRITE_MUL ||
700 		    ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
701 			request->transfersize = min(request->bytecount,
702 			    ch->curr[ccb->ccb_h.target_id].bytecount);
703 		} else
704 			request->transfersize = min(request->bytecount, 512);
705 	} else {
706 		request->data = ccb->csio.data_ptr;
707 		request->bytecount = ccb->csio.dxfer_len;
708 		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
709 		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
710 		    request->u.atapi.ccb, ccb->csio.cdb_len);
711 		request->flags |= ATA_R_ATAPI;
712 		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
713 			request->flags |= ATA_R_ATAPI16;
714 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
715 		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
716 			request->flags |= ATA_R_DMA;
717 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
718 			request->flags |= ATA_R_READ;
719 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
720 			request->flags |= ATA_R_WRITE;
721 		request->transfersize = min(request->bytecount,
722 		    ch->curr[ccb->ccb_h.target_id].bytecount);
723 	}
724 	request->retries = 0;
725 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
726 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
727 	request->ccb = ccb;
728 	request->flags |= ATA_R_DATA_IN_CCB;
729 
730 	ch->running = request;
731 	ch->state = ATA_ACTIVE;
732 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
733 	    ch->running = NULL;
734 	    ch->state = ATA_IDLE;
735 	    ata_cam_end_transaction(dev, request);
736 	    return;
737 	}
738 }
739 
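/*
 * Reuse the just-completed ATAPI request to issue a REQUEST SENSE into the
 * CCB's sense buffer (autosense); ch->requestsense marks the channel so the
 * completion is routed to ata_cam_process_sense().
 */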
740 static void
741 ata_cam_request_sense(device_t dev, struct ata_request *request)
742 {
743 	struct ata_channel *ch = device_get_softc(dev);
744 	union ccb *ccb = request->ccb;
745 
746 	ch->requestsense = 1;
747 
748 	bzero(request, sizeof(*request));
749 	request->dev = NULL;
750 	request->parent = dev;
751 	request->unit = ccb->ccb_h.target_id;
752 	request->data = (void *)&ccb->csio.sense_data;
753 	request->bytecount = ccb->csio.sense_len;
754 	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
755 	request->u.atapi.ccb[4] = ccb->csio.sense_len;
756 	request->flags |= ATA_R_ATAPI;
757 	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
758 		request->flags |= ATA_R_ATAPI16;
759 	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
760 		request->flags |= ATA_R_DMA;
761 	request->flags |= ATA_R_READ;
762 	request->transfersize = min(request->bytecount,
763 	    ch->curr[ccb->ccb_h.target_id].bytecount);
764 	request->retries = 0;
765 	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
766 	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
767 	request->ccb = ccb;
768 
769 	ch->running = request;
770 	ch->state = ATA_ACTIVE;
771 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
772 		ch->running = NULL;
773 		ch->state = ATA_IDLE;
774 		ata_cam_end_transaction(dev, request);
775 		return;
776 	}
777 }
778 
779 static void
780 ata_cam_process_sense(device_t dev, struct ata_request *request)
781 {
782 	struct ata_channel *ch = device_get_softc(dev);
783 	union ccb *ccb = request->ccb;
784 	int fatalerr = 0;
785 
786 	ch->requestsense = 0;
787 
788 	if (request->flags & ATA_R_TIMEOUT)
789 		fatalerr = 1;
790 	if ((request->flags & ATA_R_TIMEOUT) == 0 &&
791 	    (request->status & ATA_S_ERROR) == 0 &&
792 	    request->result == 0) {
793 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
794 	} else {
795 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
796 		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
797 	}
798 
799 	ata_free_request(request);
800 	xpt_done(ccb);
801 	/* Do error recovery if needed. */
802 	if (fatalerr)
803 		ata_reinit(dev);
804 }
805 
806 static void
807 ata_cam_end_transaction(device_t dev, struct ata_request *request)
808 {
809 	struct ata_channel *ch = device_get_softc(dev);
810 	union ccb *ccb = request->ccb;
811 	int fatalerr = 0;
812 
813 	if (ch->requestsense) {
814 		ata_cam_process_sense(dev, request);
815 		return;
816 	}
817 
818 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
819 	if (request->flags & ATA_R_TIMEOUT) {
820 		xpt_freeze_simq(ch->sim, 1);
821 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
822 		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
823 		fatalerr = 1;
824 	} else if (request->status & ATA_S_ERROR) {
825 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
826 			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
827 		} else {
828 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
829 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
830 		}
831 	} else if (request->result == ERESTART)
832 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
833 	else if (request->result != 0)
834 		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
835 	else
836 		ccb->ccb_h.status |= CAM_REQ_CMP;
837 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
838 	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
839 		xpt_freeze_devq(ccb->ccb_h.path, 1);
840 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
841 	}
842 	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
843 	    ((request->status & ATA_S_ERROR) ||
844 	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
845 		struct ata_res *res = &ccb->ataio.res;
846 		res->status = request->status;
847 		res->error = request->error;
848 		res->lba_low = request->u.ata.lba;
849 		res->lba_mid = request->u.ata.lba >> 8;
850 		res->lba_high = request->u.ata.lba >> 16;
851 		res->device = request->u.ata.lba >> 24;
852 		res->lba_low_exp = request->u.ata.lba >> 24;
853 		res->lba_mid_exp = request->u.ata.lba >> 32;
854 		res->lba_high_exp = request->u.ata.lba >> 40;
855 		res->sector_count = request->u.ata.count;
856 		res->sector_count_exp = request->u.ata.count >> 8;
857 	}
858 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
859 		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
860 			ccb->ataio.resid =
861 			    ccb->ataio.dxfer_len - request->donecount;
862 		} else {
863 			ccb->csio.resid =
864 			    ccb->csio.dxfer_len - request->donecount;
865 		}
866 	}
867 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
868 	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
869 		ata_cam_request_sense(dev, request);
870 	else {
871 		ata_free_request(request);
872 		xpt_done(ccb);
873 	}
874 	/* Do error recovery if needed. */
875 	if (fatalerr)
876 		ata_reinit(dev);
877 }
878 
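/*
 * Validate the CCB's target and LUN: channels flagged ATA_NO_SLAVE accept
 * only target 0, and only LUN 0 is supported.  Invalid IDs complete the CCB
 * here and return -1 so the caller can bail out.
 */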
879 static int
880 ata_check_ids(device_t dev, union ccb *ccb)
881 {
882 	struct ata_channel *ch = device_get_softc(dev);
883 
884 	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
885 		ccb->ccb_h.status = CAM_TID_INVALID;
886 		xpt_done(ccb);
887 		return (-1);
888 	}
889 	if (ccb->ccb_h.target_lun != 0) {
890 		ccb->ccb_h.status = CAM_LUN_INVALID;
891 		xpt_done(ccb);
892 		return (-1);
893 	}
894 	return (0);
895 }
896 
897 static void
898 ataaction(struct cam_sim *sim, union ccb *ccb)
899 {
900 	device_t dev, parent;
901 	struct ata_channel *ch;
902 
903 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
904 	    ccb->ccb_h.func_code));
905 
906 	ch = (struct ata_channel *)cam_sim_softc(sim);
907 	dev = ch->dev;
908 	switch (ccb->ccb_h.func_code) {
909 	/* Common cases first */
910 	case XPT_ATA_IO:	/* Execute the requested I/O operation */
911 	case XPT_SCSI_IO:
912 		if (ata_check_ids(dev, ccb))
913 			return;
914 		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
915 		    << ccb->ccb_h.target_id)) == 0) {
916 			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
917 			break;
918 		}
919 		if (ch->running)
920 			device_printf(dev, "already running!\n");
921 		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
922 		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
923 		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
924 			struct ata_res *res = &ccb->ataio.res;
925 
926 			bzero(res, sizeof(*res));
927 			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
928 				res->lba_high = 0;
929 				res->lba_mid = 0;
930 			} else {
931 				res->lba_high = 0xeb;
932 				res->lba_mid = 0x14;
933 			}
934 			ccb->ccb_h.status = CAM_REQ_CMP;
935 			break;
936 		}
937 		ata_cam_begin_transaction(dev, ccb);
938 		return;
939 	case XPT_EN_LUN:		/* Enable LUN as a target */
940 	case XPT_TARGET_IO:		/* Execute target I/O request */
941 	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
942 	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
943 	case XPT_ABORT:			/* Abort the specified CCB */
944 		/* XXX Implement */
945 		ccb->ccb_h.status = CAM_REQ_INVALID;
946 		break;
947 	case XPT_SET_TRAN_SETTINGS:
948 	{
949 		struct	ccb_trans_settings *cts = &ccb->cts;
950 		struct	ata_cam_device *d;
951 
952 		if (ata_check_ids(dev, ccb))
953 			return;
954 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
955 			d = &ch->curr[ccb->ccb_h.target_id];
956 		else
957 			d = &ch->user[ccb->ccb_h.target_id];
958 		if (ch->flags & ATA_SATA) {
959 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
960 				d->revision = cts->xport_specific.sata.revision;
961 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
962 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
963 					d->mode = ATA_SETMODE(ch->dev,
964 					    ccb->ccb_h.target_id,
965 					    cts->xport_specific.sata.mode);
966 				} else
967 					d->mode = cts->xport_specific.sata.mode;
968 			}
969 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
970 				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
971 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
972 				d->atapi = cts->xport_specific.sata.atapi;
973 			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
974 				d->caps = cts->xport_specific.sata.caps;
975 		} else {
976 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
977 				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
978 					d->mode = ATA_SETMODE(ch->dev,
979 					    ccb->ccb_h.target_id,
980 					    cts->xport_specific.ata.mode);
981 				} else
982 					d->mode = cts->xport_specific.ata.mode;
983 			}
984 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
985 				d->bytecount = cts->xport_specific.ata.bytecount;
986 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
987 				d->atapi = cts->xport_specific.ata.atapi;
988 			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS)
989 				d->caps = cts->xport_specific.ata.caps;
990 		}
991 		ccb->ccb_h.status = CAM_REQ_CMP;
992 		break;
993 	}
994 	case XPT_GET_TRAN_SETTINGS:
995 	{
996 		struct	ccb_trans_settings *cts = &ccb->cts;
997 		struct  ata_cam_device *d;
998 
999 		if (ata_check_ids(dev, ccb))
1000 			return;
1001 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1002 			d = &ch->curr[ccb->ccb_h.target_id];
1003 		else
1004 			d = &ch->user[ccb->ccb_h.target_id];
1005 		cts->protocol = PROTO_UNSPECIFIED;
1006 		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1007 		if (ch->flags & ATA_SATA) {
1008 			cts->transport = XPORT_SATA;
1009 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1010 			cts->xport_specific.sata.valid = 0;
1011 			cts->xport_specific.sata.mode = d->mode;
1012 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1013 			cts->xport_specific.sata.bytecount = d->bytecount;
1014 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1015 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1016 				cts->xport_specific.sata.revision =
1017 				    ATA_GETREV(dev, ccb->ccb_h.target_id);
1018 				if (cts->xport_specific.sata.revision != 0xff) {
1019 					cts->xport_specific.sata.valid |=
1020 					    CTS_SATA_VALID_REVISION;
1021 				}
1022 				cts->xport_specific.sata.caps =
1023 				    d->caps & CTS_SATA_CAPS_D;
1024 				if (ch->pm_level) {
1025 					cts->xport_specific.sata.caps |=
1026 					    CTS_SATA_CAPS_H_PMREQ;
1027 				}
1028 				cts->xport_specific.sata.caps &=
1029 				    ch->user[ccb->ccb_h.target_id].caps;
1030 			} else {
1031 				cts->xport_specific.sata.revision = d->revision;
1032 				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1033 				cts->xport_specific.sata.caps = d->caps;
1034 			}
1035 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
1036 			cts->xport_specific.sata.atapi = d->atapi;
1037 			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1038 		} else {
1039 			cts->transport = XPORT_ATA;
1040 			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1041 			cts->xport_specific.ata.valid = 0;
1042 			cts->xport_specific.ata.mode = d->mode;
1043 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1044 			cts->xport_specific.ata.bytecount = d->bytecount;
1045 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1046 			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1047 				cts->xport_specific.ata.caps =
1048 				    d->caps & CTS_ATA_CAPS_D;
1049 				if (!(ch->flags & ATA_NO_48BIT_DMA))
1050 					cts->xport_specific.ata.caps |=
1051 					    CTS_ATA_CAPS_H_DMA48;
1052 				cts->xport_specific.ata.caps &=
1053 				    ch->user[ccb->ccb_h.target_id].caps;
1054 			} else
1055 				cts->xport_specific.ata.caps = d->caps;
1056 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
1057 			cts->xport_specific.ata.atapi = d->atapi;
1058 			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1059 		}
1060 		ccb->ccb_h.status = CAM_REQ_CMP;
1061 		break;
1062 	}
1063 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
1064 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
1065 		ata_reinit(dev);
1066 		ccb->ccb_h.status = CAM_REQ_CMP;
1067 		break;
1068 	case XPT_TERM_IO:		/* Terminate the I/O process */
1069 		/* XXX Implement */
1070 		ccb->ccb_h.status = CAM_REQ_INVALID;
1071 		break;
1072 	case XPT_PATH_INQ:		/* Path routing inquiry */
1073 	{
1074 		struct ccb_pathinq *cpi = &ccb->cpi;
1075 
1076 		parent = device_get_parent(dev);
1077 		cpi->version_num = 1; /* XXX??? */
1078 		cpi->hba_inquiry = PI_SDTR_ABLE;
1079 		cpi->target_sprt = 0;
1080 		cpi->hba_misc = PIM_SEQSCAN;
1081 		cpi->hba_eng_cnt = 0;
1082 		if (ch->flags & ATA_NO_SLAVE)
1083 			cpi->max_target = 0;
1084 		else
1085 			cpi->max_target = 1;
1086 		cpi->max_lun = 0;
1087 		cpi->initiator_id = 0;
1088 		cpi->bus_id = cam_sim_bus(sim);
1089 		if (ch->flags & ATA_SATA)
1090 			cpi->base_transfer_speed = 150000;
1091 		else
1092 			cpi->base_transfer_speed = 3300;
1093 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1094 		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1095 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1096 		cpi->unit_number = cam_sim_unit(sim);
1097 		if (ch->flags & ATA_SATA)
1098 			cpi->transport = XPORT_SATA;
1099 		else
1100 			cpi->transport = XPORT_ATA;
1101 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1102 		cpi->protocol = PROTO_ATA;
1103 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1104 		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1105 		if (device_get_devclass(device_get_parent(parent)) ==
1106 		    devclass_find("pci")) {
1107 			cpi->hba_vendor = pci_get_vendor(parent);
1108 			cpi->hba_device = pci_get_device(parent);
1109 			cpi->hba_subvendor = pci_get_subvendor(parent);
1110 			cpi->hba_subdevice = pci_get_subdevice(parent);
1111 		}
1112 		cpi->ccb_h.status = CAM_REQ_CMP;
1113 		break;
1114 	}
1115 	default:
1116 		ccb->ccb_h.status = CAM_REQ_INVALID;
1117 		break;
1118 	}
1119 	xpt_done(ccb);
1120 }
1121 
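/*
 * CAM polling entry point (used when interrupts are unavailable, e.g. during
 * crash dumps); the SIM lock, ch->state_mtx here, is expected to be held, so
 * call the locked interrupt handler directly.
 */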
1122 static void
1123 atapoll(struct cam_sim *sim)
1124 {
1125 	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1126 
1127 	ata_interrupt_locked(ch);
1128 }
1129 
1130 /*
1131  * module handling
1132  */
1133 static int
1134 ata_module_event_handler(module_t mod, int what, void *arg)
1135 {
1136 
1137     switch (what) {
1138     case MOD_LOAD:
1139 	return 0;
1140 
1141     case MOD_UNLOAD:
1142 	return 0;
1143 
1144     default:
1145 	return EOPNOTSUPP;
1146     }
1147 }
1148 
1149 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1150 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1151 MODULE_VERSION(ata, 1);
1152 MODULE_DEPEND(ata, cam, 1, 1, 1);
1153 
1154 static void
1155 ata_init(void)
1156 {
1157     ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1158 				   NULL, NULL, NULL, NULL, 0, 0);
1159 }
1160 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1161 
1162 static void
1163 ata_uninit(void)
1164 {
1165     uma_zdestroy(ata_request_zone);
1166 }
1167 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1168