xref: /freebsd/sys/dev/ida/ida.c (revision 06064893b3c62c648518be78604fac29fc0d9d61)
/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *    Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/stat.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <geom/geom_disk.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>
#include <dev/ida/idaio.h>

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

static d_ioctl_t ida_ioctl;
static struct cdevsw ida_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_ioctl =	ida_ioctl,
	.d_name =	"ida",
};

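/*
 * Undo controller setup: destroy the per-QCB DMA maps, unload and free
 * the hardware QCB area, destroy the DMA tags, tear down the interrupt
 * handler and release the bus resources.
 */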
void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		free(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

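/*
 * Grab a QCB from the free list, lazily allocating a new one when the
 * list is empty.  Returns NULL if no QCB is available.
 */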
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

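/*
 * Translate between the kernel virtual address of a hardware QCB and the
 * bus address the controller reports on completion, using the offset from
 * the start of the contiguously allocated hardware QCB array.
 */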
static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * XXX
 * since we allocate all QCB space up front during initialization, then
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}

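/*
 * One-time controller initialization: allocate the QCB array, create the
 * DMA tags, allocate and permanently map the hardware QCB area, and seed
 * the free list with an initial QCB.
 */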
int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

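/*
 * Finish bringing the controller up: query the controller information,
 * start the firmware on adapters that need it, create the ida%d control
 * device, add a child device per logical drive and enable interrupts.
 */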
void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	ida->ida_dev_t = make_dev(&ida_cdevsw, ida->unit,
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
	    "ida%d", ida->unit);
	ida->ida_dev_t->si_drv1 = ida;

	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}

int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error = 0;

	ida = (struct ida_softc *)device_get_softc(dev);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	destroy_dev(ida->ida_dev_t);
	ida_free(ida);
	return (error);
}

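/*
 * bus_dmamap_load() callback: fill in the scatter/gather segments and
 * segment count of the hardware QCB being constructed.
 */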
static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
		hwqcb->seg[i].length = htole32(segs[i].ds_len);
	}
	hwqcb->req.sgcount = nsegments;
}

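/*
 * Issue a control command synchronously: build a QCB for the request,
 * queue it to the adapter and wait for completion via ida_wait().  Used
 * from attach and from the ioctl path.
 */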
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int32_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int s, error;

	s = splbio();
	qcb = ida_get_qcb(ida);
	splx(s);

	if (qcb == NULL) {
		printf("ida_command: out of QCBs\n");
		return (EAGAIN);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	/* set the transfer flags before the sync op is derived from them */
	qcb->flags = flags | IDA_COMMAND;

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = htole32(pblkno);
	hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
	hwqcb->req.command = command;

	s = splbio();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	splx(s);

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}

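/*
 * Entry point for disk I/O: queue the bio, convert as much of the queue
 * as possible into QCBs and kick the adapter.
 */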
void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
	bioq_insert_tail(&ida->bio_queue, bp);
	ida_construct_qcb(ida);
	ida_start(ida);
}

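/*
 * Take the next bio off the queue, build a hardware QCB for it (DMA map,
 * scatter/gather list, drive, block number and count) and append it to
 * the submission queue.
 */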
static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct bio *bp;

	bp = bioq_first(&ida->bio_queue);
	if (bp == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bp);
	qcb->buf = bp;
	qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->bio_data, bp->bio_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	{
		struct idad_softc *drv = (struct idad_softc *)bp->bio_driver1;
		hwqcb->hdr.drive = drv->drive;
	}

	hwqcb->req.blkno = bp->bio_pblkno;
	hwqcb->req.bcount = howmany(bp->bio_bcount, DEV_BSIZE);
	hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

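/*
 * Wait for a specific QCB to complete.  With interrupts enabled this just
 * sleeps on the QCB; otherwise the completion FIFO is polled, with a
 * 5 second timeout, until this QCB's completion shows up.
 */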
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (ida->flags & IDA_INTERRUPTS) {
		if (tsleep(qcb, PRIBIO, "idacmd", 5 * hz))
			return (ETIMEDOUT);
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0)
			return (ETIMEDOUT);
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}

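/*
 * Interrupt handler: drain the completion FIFO, finish each completed
 * QCB and then try to start more queued work.
 */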
void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (intmax_t)completed);
			continue;
		}
		/* Handle "Bad Command List" errors. */
		if ((completed & 3) && (qcb->hwqcb->req.error == 0))
			qcb->hwqcb->req.error = CMD_REJECTED;
		ida_done(ida, qcb);
	}
	ida_start(ida);
}

/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	if (qcb->hwqcb->req.error & SOFT_ERROR) {
		if (qcb->buf)
			device_printf(ida->dev, "soft %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "soft error\n");
	}
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		if (qcb->buf)
			device_printf(ida->dev, "hard %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->buf->bio_flags |= BIO_ERROR;
		idad_intr(qcb->buf);
	}

	qcb->state = QCB_FREE;
	qcb->buf = NULL;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}

/*
 * IOCTL stuff follows.
 */
struct cmd_info {
	int	cmd;
	int	len;
	int	flags;
};
static struct cmd_info *ida_cmd_lookup(int);

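/*
 * IDAIO_COMMAND pass-through: look up the command's transfer size and
 * direction, then hand the request to ida_command() on behalf of the user.
 */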
static int
ida_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
{
	struct ida_softc *sc;
	struct ida_user_command *uc;
	struct cmd_info *ci;
	int len;
	int flags;
	int error;
	int data;
	void *daddr;

	sc = (struct ida_softc *)dev->si_drv1;
	uc = (struct ida_user_command *)addr;
	error = 0;

	switch (cmd) {
	case IDAIO_COMMAND:
		ci = ida_cmd_lookup(uc->command);
		if (ci == NULL) {
			error = EINVAL;
			break;
		}
		len = ci->len;
		flags = ci->flags;
		if (len)
			daddr = &uc->d.buf;
		else {
			daddr = &data;
			len = sizeof(data);
		}
		error = ida_command(sc, uc->command, daddr, len,
				    uc->drive, uc->blkno, flags);
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return (error);
}

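/*
 * Commands that may be issued through the ioctl interface, with the
 * length and direction of their data phase.
 */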
static struct cmd_info ci_list[] = {
	{ CMD_GET_LOG_DRV_INFO,
			sizeof(struct ida_drive_info), DMA_DATA_IN },
	{ CMD_GET_CTRL_INFO,
			sizeof(struct ida_controller_info), DMA_DATA_IN },
	{ CMD_SENSE_DRV_STATUS,
			sizeof(struct ida_drive_status), DMA_DATA_IN },
	{ CMD_START_RECOVERY,		0, 0 },
	{ CMD_GET_PHYS_DRV_INFO,
			sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER },
	{ CMD_BLINK_DRV_LEDS,
			sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT },
	{ CMD_SENSE_DRV_LEDS,
			sizeof(struct ida_blink_drv_leds), DMA_DATA_IN },
	{ CMD_GET_LOG_DRV_EXT,
			sizeof(struct ida_drive_info_ext), DMA_DATA_IN },
	{ CMD_RESET_CTRL,		0, 0 },
	{ CMD_GET_CONFIG,		0, 0 },
	{ CMD_SET_CONFIG,		0, 0 },
	{ CMD_LABEL_LOG_DRV,
			sizeof(struct ida_label_logical), DMA_DATA_OUT },
	{ CMD_SET_SURFACE_DELAY,	0, 0 },
	{ CMD_SENSE_BUS_PARAMS,		0, 0 },
	{ CMD_SENSE_SUBSYS_INFO,	0, 0 },
	{ CMD_SENSE_SURFACE_ATS,	0, 0 },
	{ CMD_PASSTHROUGH,		0, 0 },
	{ CMD_RESET_SCSI_DEV,		0, 0 },
	{ CMD_PAUSE_BG_ACT,		0, 0 },
	{ CMD_RESUME_BG_ACT,		0, 0 },
	{ CMD_START_FIRMWARE,		0, 0 },
	{ CMD_SENSE_DRV_ERR_LOG,	0, 0 },
	{ CMD_START_CPM,		0, 0 },
	{ CMD_SENSE_CP,			0, 0 },
	{ CMD_STOP_CPM,			0, 0 },
	{ CMD_FLUSH_CACHE,		0, 0 },
	{ CMD_ACCEPT_MEDIA_EXCH,	0, 0 },
	{ 0, 0, 0 }
};

static struct cmd_info *
ida_cmd_lookup(int command)
{
	struct cmd_info *ci;

	ci = ci_list;
	while (ci->cmd) {
		if (ci->cmd == command)
			return (ci);
		ci++;
	}
	return (NULL);
}
670