xref: /freebsd/sys/dev/ida/ida.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Copyright (c) 1999,2000 Jonathan Lemon
3  * All rights reserved.
4  *
5  * Derived from the original IDA Compaq RAID driver, which is
6  * Copyright (c) 1996, 1997, 1998, 1999
7  *    Mark Dawson and David James. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Generic driver for Compaq SMART RAID adapters.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/stat.h>
43 
44 #include <sys/bio.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/endian.h>
48 
49 #include <machine/bus_memio.h>
50 #include <machine/bus_pio.h>
51 #include <machine/bus.h>
52 #include <sys/rman.h>
53 
54 #include <geom/geom_disk.h>
55 
56 #include <dev/ida/idareg.h>
57 #include <dev/ida/idavar.h>
58 #include <dev/ida/idaio.h>
59 
60 /* prototypes */
61 static void ida_alloc_qcb(struct ida_softc *ida);
62 static void ida_construct_qcb(struct ida_softc *ida);
63 static void ida_start(struct ida_softc *ida);
64 static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
65 static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);
66 static void ida_timeout(void *arg);
67 
68 static d_ioctl_t ida_ioctl;
69 static struct cdevsw ida_cdevsw = {
70 	.d_version =	D_VERSION,
71 	.d_flags =	D_NEEDGIANT,
72 	.d_ioctl =	ida_ioctl,
73 	.d_name =	"ida",
74 };
75 
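/*
 * Release every resource set up for this controller: the per-QCB DMA maps,
 * the hardware QCB memory and its mapping, the DMA tags, the interrupt,
 * and the register window.
 */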
76 void
77 ida_free(struct ida_softc *ida)
78 {
79 	int i;
80 
81 	callout_stop(&ida->ch);
82 
83 	for (i = 0; i < ida->num_qcbs; i++)
84 		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);
85 
86 	if (ida->hwqcb_busaddr)
87 		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);
88 
89 	if (ida->hwqcbs)
90 		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
91 		    ida->hwqcb_dmamap);
92 
93 	if (ida->buffer_dmat)
94 		bus_dma_tag_destroy(ida->buffer_dmat);
95 
96 	if (ida->hwqcb_dmat)
97 		bus_dma_tag_destroy(ida->hwqcb_dmat);
98 
99 	if (ida->qcbs != NULL)
100 		free(ida->qcbs, M_DEVBUF);
101 
102 	if (ida->ih != NULL)
103 		bus_teardown_intr(ida->dev, ida->irq, ida->ih);
104 
105 	if (ida->irq != NULL)
106 		bus_release_resource(ida->dev, ida->irq_res_type,
107 		    0, ida->irq);
108 
109 	if (ida->parent_dmat != NULL)
110 		bus_dma_tag_destroy(ida->parent_dmat);
111 
112 	if (ida->regs != NULL)
113 		bus_release_resource(ida->dev, ida->regs_res_type,
114 		    ida->regs_res_id, ida->regs);
115 }
116 
117 /*
118  * record bus address from bus_dmamap_load
119  */
120 static void
121 ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
122 {
123 	bus_addr_t *baddr;
124 
125 	baddr = (bus_addr_t *)arg;
126 	*baddr = segs->ds_addr;
127 }
128 
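/*
 * Pop a QCB from the free list, allocating a fresh one on demand if the
 * list is empty.  Returns NULL when no QCB is available.
 */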
129 static __inline struct ida_qcb *
130 ida_get_qcb(struct ida_softc *ida)
131 {
132 	struct ida_qcb *qcb;
133 
134 	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
135 		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
136 	} else {
137 		ida_alloc_qcb(ida);
138 		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
139 			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
140 	}
141 	return (qcb);
142 }
143 
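/* Translate a hardware QCB's kernel virtual address into its bus address. */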
144 static __inline bus_addr_t
145 idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
146 {
147 	return (ida->hwqcb_busaddr +
148 	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
149 }
150 
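/* Translate a completed hardware QCB bus address back to its driver QCB. */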
151 static __inline struct ida_qcb *
152 idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
153 {
154 	struct ida_hardware_qcb *hwqcb;
155 
156 	hwqcb = (struct ida_hardware_qcb *)
157 	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
158 	return (hwqcb->qcb);
159 }
160 
161 /*
162  * XXX
163  * Since we allocate all QCB space up front during initialization, why
164  * bother with this routine?
165  */
166 static void
167 ida_alloc_qcb(struct ida_softc *ida)
168 {
169 	struct ida_qcb *qcb;
170 	int error;
171 
172 	if (ida->num_qcbs >= IDA_QCB_MAX)
173 		return;
174 
175 	qcb = &ida->qcbs[ida->num_qcbs];
176 
177 	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
178 	if (error != 0)
179 		return;
180 
181 	qcb->flags = QCB_FREE;
182 	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
183 	qcb->hwqcb->qcb = qcb;
184 	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
185 	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
186 	ida->num_qcbs++;
187 }
188 
189 int
190 ida_init(struct ida_softc *ida)
191 {
192 	int error;
193 
194 	ida->unit = device_get_unit(ida->dev);
195 	ida->tag = rman_get_bustag(ida->regs);
196 	ida->bsh = rman_get_bushandle(ida->regs);
197 
198 	SLIST_INIT(&ida->free_qcbs);
199 	STAILQ_INIT(&ida->qcb_queue);
200 	bioq_init(&ida->bio_queue);
201 
202 	ida->qcbs = (struct ida_qcb *)
203 	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
204 		M_NOWAIT | M_ZERO);
205 	if (ida->qcbs == NULL)
206 		return (ENOMEM);
207 
208 	/*
209 	 * Create our DMA tags
210 	 */
211 
212 	/* DMA tag for our hardware QCB structures */
213 	error = bus_dma_tag_create(
214 		/* parent	*/ ida->parent_dmat,
215 		/* alignment	*/ 1,
216 		/* boundary	*/ 0,
217 		/* lowaddr	*/ BUS_SPACE_MAXADDR,
218 		/* highaddr	*/ BUS_SPACE_MAXADDR,
219 		/* filter	*/ NULL,
220 		/* filterarg	*/ NULL,
221 		/* maxsize	*/ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
222 		/* nsegments	*/ 1,
223 		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
224 		/* flags	*/ 0,
225 		/* lockfunc	*/ busdma_lock_mutex,
226 		/* lockarg	*/ &Giant,
227 		&ida->hwqcb_dmat);
228 	if (error)
229 		return (ENOMEM);
230 
231 	/* DMA tag for mapping buffers into device space */
232 	error = bus_dma_tag_create(
233 		/* parent 	*/ ida->parent_dmat,
234 		/* alignment	*/ 1,
235 		/* boundary	*/ 0,
236 		/* lowaddr	*/ BUS_SPACE_MAXADDR,
237 		/* highaddr	*/ BUS_SPACE_MAXADDR,
238 		/* filter	*/ NULL,
239 		/* filterarg	*/ NULL,
240 		/* maxsize	*/ MAXBSIZE,
241 		/* nsegments	*/ IDA_NSEG,
242 		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
243 		/* flags	*/ 0,
244 		/* lockfunc	*/ busdma_lock_mutex,
245 		/* lockarg	*/ &Giant,
246 		&ida->buffer_dmat);
247 	if (error)
248 		return (ENOMEM);
249 
250 	/* Allocation of hardware QCBs */
251 	/* XXX allocation is rounded to hardware page size */
252 	error = bus_dmamem_alloc(ida->hwqcb_dmat,
253 	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
254 	if (error)
255 		return (ENOMEM);
256 
257 	/* And permanently map them in */
258 	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
259 	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
260 	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);
261 
262 	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));
263 
264 	ida_alloc_qcb(ida);		/* allocate an initial qcb */
265 
266 	callout_init(&ida->ch, CALLOUT_MPSAFE);
267 
268 	return (0);
269 }
270 
271 void
272 ida_attach(struct ida_softc *ida)
273 {
274 	struct ida_controller_info cinfo;
275 	int error, i;
276 
277 	ida->cmd.int_enable(ida, 0);
278 
279 	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
280 	    IDA_CONTROLLER, 0, DMA_DATA_IN);
281 	if (error) {
282 		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
283 		return;
284 	}
285 
286 	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
287 	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
288 	    cinfo.firm_rev[2], cinfo.firm_rev[3]);
289 
290 	if (ida->flags & IDA_FIRMWARE) {
291 		int data;
292 
293 		error = ida_command(ida, CMD_START_FIRMWARE,
294 		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
295 		if (error) {
296 			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
297 			return;
298 		}
299 	}
300 
301 	ida->ida_dev_t = make_dev(&ida_cdevsw, ida->unit,
302 				 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
303 				 "ida%d", ida->unit);
304 	ida->ida_dev_t->si_drv1 = ida;
305 
306 	ida->num_drives = 0;
307 	for (i = 0; i < cinfo.num_drvs; i++)
308 		device_add_child(ida->dev, /*"idad"*/NULL, -1);
309 
310 	bus_generic_attach(ida->dev);
311 
312 	ida->cmd.int_enable(ida, 1);
313 }
314 
315 int
316 ida_detach(device_t dev)
317 {
318 	struct ida_softc *ida;
319 	int error = 0;
320 
321 	ida = (struct ida_softc *)device_get_softc(dev);
322 
323 	/*
324 	 * XXX
325 	 * before detaching, we must make sure that the system is
326 	 * quiescent; nothing mounted, no pending activity.
327 	 */
328 
329 	/*
330 	 * XXX
331 	 * now, how are we supposed to maintain a list of our drives?
332 	 * iterate over our "child devices"?
333 	 */
334 
335 	destroy_dev(ida->ida_dev_t);
336 	ida_free(ida);
337 	return (error);
338 }
339 
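/*
 * busdma callback: fill in the request header size and copy the
 * scatter/gather segments produced by bus_dmamap_load() into the
 * hardware QCB, recording the segment count.
 */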
340 static void
341 ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
342 {
343 	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
344 	int i;
345 
346 	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
347 	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);
348 
349 	for (i = 0; i < nsegments; i++) {
350 		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
351 		hwqcb->seg[i].length = htole32(segs[i].ds_len);
352 	}
353 	hwqcb->req.sgcount = nsegments;
354 }
355 
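/*
 * Issue a single controller command synchronously: grab a QCB, map the
 * data buffer, queue the command, and wait for completion via ida_wait().
 */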
356 int
357 ida_command(struct ida_softc *ida, int command, void *data, int datasize,
358 	int drive, u_int32_t pblkno, int flags)
359 {
360 	struct ida_hardware_qcb *hwqcb;
361 	struct ida_qcb *qcb;
362 	bus_dmasync_op_t op;
363 	int s, error;
364 
365 	s = splbio();
366 	qcb = ida_get_qcb(ida);
367 	splx(s);
368 
369 	if (qcb == NULL) {
370 		printf("ida_command: out of QCBs\n");
371 		return (EAGAIN);
372 	}
373 
374 	hwqcb = qcb->hwqcb;
375 	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
376 
377 	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
378 	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
379 	op = flags & DMA_DATA_IN ?
380 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
381 	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
382 
383 	hwqcb->hdr.drive = drive;
384 	hwqcb->req.blkno = htole32(pblkno);
385 	hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
386 	hwqcb->req.command = command;
387 
388 	qcb->flags = flags | IDA_COMMAND;
389 
390 	s = splbio();
391 	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
392 	ida_start(ida);
393 	error = ida_wait(ida, qcb);
394 	splx(s);
395 
396 	/* XXX should have status returned here? */
397 	/* XXX have "status pointer" area in QCB? */
398 
399 	return (error);
400 }
401 
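/*
 * Accept a bio from the logical-drive (idad) layer: queue it, convert
 * queued bios into QCBs, and kick the controller.
 */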
402 void
403 ida_submit_buf(struct ida_softc *ida, struct bio *bp)
404 {
405 	bioq_insert_tail(&ida->bio_queue, bp);
406 	ida_construct_qcb(ida);
407 	ida_start(ida);
408 }
409 
410 static void
411 ida_construct_qcb(struct ida_softc *ida)
412 {
413 	struct ida_hardware_qcb *hwqcb;
414 	struct ida_qcb *qcb;
415 	bus_dmasync_op_t op;
416 	struct bio *bp;
417 
418 	bp = bioq_first(&ida->bio_queue);
419 	if (bp == NULL)
420 		return;				/* no more buffers */
421 
422 	qcb = ida_get_qcb(ida);
423 	if (qcb == NULL)
424 		return;				/* out of resources */
425 
426 	bioq_remove(&ida->bio_queue, bp);
427 	qcb->buf = bp;
428 	qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;
429 
430 	hwqcb = qcb->hwqcb;
431 	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
432 
433 	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
434 	    (void *)bp->bio_data, bp->bio_bcount, ida_setup_dmamap, hwqcb, 0);
435 	op = qcb->flags & DMA_DATA_IN ?
436 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
437 	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
438 
439 	{
440 		struct idad_softc *drv = (struct idad_softc *)bp->bio_driver1;
441 		hwqcb->hdr.drive = drv->drive;
442 	}
443 
444 	hwqcb->req.blkno = htole32(bp->bio_pblkno);
445 	hwqcb->req.bcount = htole16(howmany(bp->bio_bcount, DEV_BSIZE));
446 	hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;
447 
448 	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
449 }
450 
451 /*
452  * This routine will be called from ida_intr in order to queue up more
453  * I/O, meaning that we may be in an interrupt context.  Hence, we should
454  * not muck around with spl() in this routine.
455  */
456 static void
457 ida_start(struct ida_softc *ida)
458 {
459 	struct ida_qcb *qcb;
460 
461 	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
462 		if (ida->cmd.fifo_full(ida))
463 			break;
464 		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
465 		/*
466 		 * XXX
467 		 * place the qcb on an active list?
468 		 */
469 
470 		/* Set a timeout. */
471 		if (!ida->qactive)
472 			callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
473 		ida->qactive++;
474 
475 		qcb->state = QCB_ACTIVE;
476 		ida->cmd.submit(ida, qcb);
477 	}
478 }
479 
480 static int
481 ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
482 {
483 	struct ida_qcb *qcb_done = NULL;
484 	bus_addr_t completed;
485 	int delay;
486 
487 	if (ida->flags & IDA_INTERRUPTS) {
488 		if (tsleep(qcb, PRIBIO, "idacmd", 5 * hz))
489 			return (ETIMEDOUT);
490 		return (0);
491 	}
492 
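	/*
	 * Polled path (used when the IDA_INTERRUPTS flag is clear): spin on
	 * the completion FIFO until the completion for our QCB appears.
	 */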
493 again:
494 	delay = 5 * 1000 * 100;			/* 5 sec delay */
495 	while ((completed = ida->cmd.done(ida)) == 0) {
496 		if (delay-- == 0)
497 			return (ETIMEDOUT);
498 		DELAY(10);
499 	}
500 
501 	qcb_done = idahwqcbptov(ida, completed & ~3);
502 	if (qcb_done != qcb)
503 		goto again;
504 	ida_done(ida, qcb);
505 	return (0);
506 }
507 
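/*
 * Interrupt handler: drain the completion FIFO, finish each completed
 * QCB, then try to submit more queued work.
 */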
508 void
509 ida_intr(void *data)
510 {
511 	struct ida_softc *ida;
512 	struct ida_qcb *qcb;
513 	bus_addr_t completed;
514 
515 	ida = (struct ida_softc *)data;
516 
517 	if (ida->cmd.int_pending(ida) == 0)
518 		return;				/* not our interrupt */
519 
520 	while ((completed = ida->cmd.done(ida)) != 0) {
521 		qcb = idahwqcbptov(ida, completed & ~3);
522 
523 		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
524 			device_printf(ida->dev,
525 			    "ignoring completion %jx\n", (intmax_t)completed);
526 			continue;
527 		}
528 		/* Handle "Bad Command List" errors. */
529 		if ((completed & 3) && (qcb->hwqcb->req.error == 0))
530 			qcb->hwqcb->req.error = CMD_REJECTED;
531 		ida_done(ida, qcb);
532 	}
533 	ida_start(ida);
534 }
535 
536 /*
537  * Should switch on the command type here; it may be status, not just I/O.
538  */
539 static void
540 ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
541 {
542 	int error = 0;
543 
544 	/*
545 	 * finish up command
546 	 */
547 	if (qcb->flags & DMA_DATA_TRANSFER) {
548 		bus_dmasync_op_t op;
549 
550 		op = qcb->flags & DMA_DATA_IN ?
551 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
552 		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
553 		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
554 	}
555 
556 	if (qcb->hwqcb->req.error & SOFT_ERROR) {
557 		if (qcb->buf)
558 			device_printf(ida->dev, "soft %s error\n",
559 				qcb->buf->bio_cmd == BIO_READ ?
560 					"read" : "write");
561 		else
562 			device_printf(ida->dev, "soft error\n");
563 	}
564 	if (qcb->hwqcb->req.error & HARD_ERROR) {
565 		error = 1;
566 		if (qcb->buf)
567 			device_printf(ida->dev, "hard %s error\n",
568 				qcb->buf->bio_cmd == BIO_READ ?
569 					"read" : "write");
570 		else
571 			device_printf(ida->dev, "hard error\n");
572 	}
573 	if (qcb->hwqcb->req.error & CMD_REJECTED) {
574 		error = 1;
575 		device_printf(ida->dev, "invalid request\n");
576 	}
577 
578 	if (qcb->flags & IDA_COMMAND) {
579 		if (ida->flags & IDA_INTERRUPTS)
580 			wakeup(qcb);
581 	} else {
582 		if (error)
583 			qcb->buf->bio_flags |= BIO_ERROR;
584 		idad_intr(qcb->buf);
585 	}
586 
587 	ida->qactive--;
588 	/* Reschedule or cancel timeout */
589 	if (ida->qactive)
590 		callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
591 	else
592 		callout_stop(&ida->ch);
593 
594 	qcb->state = QCB_FREE;
595 	qcb->buf = NULL;
596 	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
597 	ida_construct_qcb(ida);
598 }
599 
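/*
 * Watchdog callout: fires when queued commands have been outstanding for
 * five seconds; dump the controller registers to aid debugging.
 */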
600 static void
601 ida_timeout(void *arg)
602 {
603 	struct ida_softc *ida;
604 
605 	ida = (struct ida_softc *)arg;
606 	device_printf(ida->dev, "%s() qactive %d\n", __func__, ida->qactive);
607 
608 	if (ida->flags & IDA_INTERRUPTS)
609 		device_printf(ida->dev, "IDA_INTERRUPTS\n");
610 
611 	device_printf(ida->dev,	"\t   R_CMD_FIFO: %08x\n"
612 				"\t  R_DONE_FIFO: %08x\n"
613 				"\t   R_INT_MASK: %08x\n"
614 				"\t     R_STATUS: %08x\n"
615 				"\tR_INT_PENDING: %08x\n",
616 					ida_inl(ida, R_CMD_FIFO),
617 					ida_inl(ida, R_DONE_FIFO),
618 					ida_inl(ida, R_INT_MASK),
619 					ida_inl(ida, R_STATUS),
620 					ida_inl(ida, R_INT_PENDING));
621 
622 	return;
623 }
624 
625 /*
626  * IOCTL stuff follows.
627  */
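/*
 * A minimal, illustrative sketch (not part of the driver) of how a userland
 * tool might exercise IDAIO_COMMAND through the /dev/ida%d node created in
 * ida_attach().  It assumes the definitions from idaio.h and idareg.h; the
 * device path and the absence of error handling are assumptions made for
 * the example only:
 *
 *	struct ida_user_command uc;
 *	int fd = open("/dev/ida0", O_RDWR);
 *
 *	memset(&uc, 0, sizeof(uc));
 *	uc.command = CMD_GET_CTRL_INFO;
 *	uc.drive = IDA_CONTROLLER;
 *	uc.blkno = 0;
 *	if (fd >= 0 && ioctl(fd, IDAIO_COMMAND, &uc) == 0) {
 *		... the controller info has been copied into uc.d.buf ...
 *	}
 */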
628 struct cmd_info {
629 	int	cmd;
630 	int	len;
631 	int	flags;
632 };
633 static struct cmd_info *ida_cmd_lookup(int);
634 
635 static int
636 ida_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
637 {
638 	struct ida_softc *sc;
639 	struct ida_user_command *uc;
640 	struct cmd_info *ci;
641 	int len;
642 	int flags;
643 	int error;
644 	int data;
645 	void *daddr;
646 
647 	sc = (struct ida_softc *)dev->si_drv1;
648 	uc = (struct ida_user_command *)addr;
649 	error = 0;
650 
651 	switch (cmd) {
652 	case IDAIO_COMMAND:
653 		ci = ida_cmd_lookup(uc->command);
654 		if (ci == NULL) {
655 			error = EINVAL;
656 			break;
657 		}
658 		len = ci->len;
659 		flags = ci->flags;
660 		if (len)
661 			daddr = &uc->d.buf;
662 		else {
663 			daddr = &data;
664 			len = sizeof(data);
665 		}
666 		error = ida_command(sc, uc->command, daddr, len,
667 				    uc->drive, uc->blkno, flags);
668 		break;
669 	default:
670 		error = ENOIOCTL;
671 		break;
672 	}
673 	return (error);
674 }
675 
676 static struct cmd_info ci_list[] = {
677 	{ CMD_GET_LOG_DRV_INFO,
678 			sizeof(struct ida_drive_info), DMA_DATA_IN },
679 	{ CMD_GET_CTRL_INFO,
680 			sizeof(struct ida_controller_info), DMA_DATA_IN },
681 	{ CMD_SENSE_DRV_STATUS,
682 			sizeof(struct ida_drive_status), DMA_DATA_IN },
683 	{ CMD_START_RECOVERY,		0, 0 },
684 	{ CMD_GET_PHYS_DRV_INFO,
685 			sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER },
686 	{ CMD_BLINK_DRV_LEDS,
687 			sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT },
688 	{ CMD_SENSE_DRV_LEDS,
689 			sizeof(struct ida_blink_drv_leds), DMA_DATA_IN },
690 	{ CMD_GET_LOG_DRV_EXT,
691 			sizeof(struct ida_drive_info_ext), DMA_DATA_IN },
692 	{ CMD_RESET_CTRL,		0, 0 },
693 	{ CMD_GET_CONFIG,		0, 0 },
694 	{ CMD_SET_CONFIG,		0, 0 },
695 	{ CMD_LABEL_LOG_DRV,
696 			sizeof(struct ida_label_logical), DMA_DATA_OUT },
697 	{ CMD_SET_SURFACE_DELAY,	0, 0 },
698 	{ CMD_SENSE_BUS_PARAMS,		0, 0 },
699 	{ CMD_SENSE_SUBSYS_INFO,	0, 0 },
700 	{ CMD_SENSE_SURFACE_ATS,	0, 0 },
701 	{ CMD_PASSTHROUGH,		0, 0 },
702 	{ CMD_RESET_SCSI_DEV,		0, 0 },
703 	{ CMD_PAUSE_BG_ACT,		0, 0 },
704 	{ CMD_RESUME_BG_ACT,		0, 0 },
705 	{ CMD_START_FIRMWARE,		0, 0 },
706 	{ CMD_SENSE_DRV_ERR_LOG,	0, 0 },
707 	{ CMD_START_CPM,		0, 0 },
708 	{ CMD_SENSE_CP,			0, 0 },
709 	{ CMD_STOP_CPM,			0, 0 },
710 	{ CMD_FLUSH_CACHE,		0, 0 },
711 	{ CMD_ACCEPT_MEDIA_EXCH,	0, 0 },
712 	{ 0, 0, 0 }
713 };
714 
715 static struct cmd_info *
716 ida_cmd_lookup(int command)
717 {
718 	struct cmd_info *ci;
719 
720 	ci = ci_list;
721 	while (ci->cmd) {
722 		if (ci->cmd == command)
723 			return (ci);
724 		ci++;
725 	}
726 	return (NULL);
727 }
728