xref: /freebsd/sys/dev/ida/ida.c (revision 5d3e7166f6a0187fa3f8831b16a06bd9955c21ff)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *    Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/stat.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <sys/rman.h>

#include <geom/geom_disk.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>
#include <dev/ida/idaio.h>

/* prototypes */
static int ida_alloc_qcbs(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static void ida_start(struct ida_softc *ida);
static void ida_startio(struct ida_softc *ida);
static void ida_startup(void *arg);
static void ida_timeout(void *arg);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

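/*
 * Character device interface; ida_setup() creates one /dev/ida<unit> node
 * per controller so userland can issue IDAIO_COMMAND ioctls (see
 * ida_ioctl() below).
 */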
static d_ioctl_t ida_ioctl;
static struct cdevsw ida_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	ida_ioctl,
	.d_name =	"ida",
};

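/*
 * Tear down controller resources: interrupt handler, watchdog callout,
 * DMA tags and maps, QCB storage, bus resources and the softc lock.
 */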
void
ida_free(struct ida_softc *ida)
{
	int i;

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	mtx_lock(&ida->lock);
	callout_stop(&ida->ch);
	mtx_unlock(&ida->lock);
	callout_drain(&ida->ch);

	if (ida->buffer_dmat) {
		for (i = 0; i < IDA_QCB_MAX; i++)
			bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);
		bus_dma_tag_destroy(ida->buffer_dmat);
	}

	if (ida->hwqcb_dmat) {
		if (ida->hwqcb_busaddr)
			bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);
		if (ida->hwqcbs)
			bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
			    ida->hwqcb_dmamap);
		bus_dma_tag_destroy(ida->hwqcb_dmat);
	}

	if (ida->qcbs != NULL)
		free(ida->qcbs, M_DEVBUF);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);

	mtx_destroy(&ida->lock);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

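/*
 * Grab a QCB from the free list, clearing the header and request portion
 * of its hardware QCB; returns NULL when no QCBs are available.
 * ida_free_qcb() below puts a completed QCB back on the list.
 */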
static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
		bzero(qcb->hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));
	}
	return (qcb);
}

static __inline void
ida_free_qcb(struct ida_softc *ida, struct ida_qcb *qcb)
{

	qcb->state = QCB_FREE;
	qcb->buf = NULL;
	qcb->error = 0;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
}

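/*
 * Convert the kernel virtual address of a hardware QCB to the bus address
 * the controller uses; idahwqcbptov() below does the reverse, mapping a
 * completed bus address back to the owning driver QCB.
 */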
static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

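/*
 * Create a DMA map for each QCB, tie it to its hardware QCB, record the
 * hardware QCB's bus address and put the QCB on the free list.
 */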
static int
ida_alloc_qcbs(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error, i;

	for (i = 0; i < IDA_QCB_MAX; i++) {
		qcb = &ida->qcbs[i];

		error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
		if (error != 0)
			return (error);

		qcb->ida = ida;
		qcb->flags = QCB_FREE;
		qcb->hwqcb = &ida->hwqcbs[i];
		qcb->hwqcb->qcb = qcb;
		qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
		SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	}
	return (0);
}

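/*
 * One-time controller setup: allocate the QCBs and their DMA resources,
 * query the controller for its drive count and firmware revision, add one
 * child device per logical drive and create the /dev/ida<unit> control
 * node.  Attaching the children is deferred to ida_startup() through a
 * config intrhook.
 */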
int
ida_setup(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	device_t child;
	int error, i, unit;

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(
		/* parent	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
		/* nsegments	*/ 1,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ NULL,
		/* lockarg	*/ NULL,
		&ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(
		/* parent 	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ DFLTPHYS,
		/* nsegments	*/ IDA_NSEG,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ busdma_lock_mutex,
		/* lockarg	*/ &ida->lock,
		&ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	error = ida_alloc_qcbs(ida);
	if (error)
		return (error);

	mtx_lock(&ida->lock);
	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		mtx_unlock(&ida->lock);
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return (error);
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			mtx_unlock(&ida->lock);
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return (error);
		}
	}

	ida->cmd.int_enable(ida, 1);
	ida->flags |= IDA_ATTACHED;
	mtx_unlock(&ida->lock);

	for (i = 0; i < cinfo.num_drvs; i++) {
		child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
		if (child != NULL)
			device_set_ivars(child, (void *)(intptr_t)i);
	}

	ida->ich.ich_func = ida_startup;
	ida->ich.ich_arg = ida;
	if (config_intrhook_establish(&ida->ich) != 0) {
		device_delete_children(ida->dev);
		device_printf(ida->dev, "Cannot establish configuration hook\n");
		return (error);
	}

	unit = device_get_unit(ida->dev);
	ida->ida_dev_t = make_dev(&ida_cdevsw, unit,
				 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
				 "ida%d", unit);
	ida->ida_dev_t->si_drv1 = ida;

	return (0);
}

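/*
 * Deferred attach, run from the config intrhook: probe and attach the
 * logical-drive children under the bus topology lock.
 */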
static void
ida_startup(void *arg)
{
	struct ida_softc *ida;

	ida = arg;

	config_intrhook_disestablish(&ida->ich);

	bus_topo_lock();
	bus_generic_attach(ida->dev);
	bus_topo_unlock();
}

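/*
 * Detach the controller: detach and delete the logical-drive children,
 * destroy the control device node and release all controller resources.
 */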
int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error;

	ida = (struct ida_softc *)device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error)
		return (error);
	error = device_delete_children(dev);
	if (error)
		return (error);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	destroy_dev(ida->ida_dev_t);
	ida_free(ida);
	return (error);
}

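/*
 * bus_dmamap_load() callback: fill in the scatter/gather list of the
 * hardware QCB, synchronize the DMA maps, queue the QCB for submission,
 * start it and unfreeze the queue.
 */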
static void
ida_data_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int i;

	qcb = arg;
	ida = qcb->ida;
	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	if (error) {
		qcb->error = error;
		ida_done(ida, qcb);
		return;
	}

	hwqcb = qcb->hwqcb;
	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
		hwqcb->seg[i].length = htole32(segs[i].ds_len);
	}
	hwqcb->req.sgcount = nsegments;
	if (qcb->flags & DMA_DATA_TRANSFER) {
		switch (qcb->flags & DMA_DATA_TRANSFER) {
		case DMA_DATA_TRANSFER:
			op = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE;
			break;
		case DMA_DATA_IN:
			op = BUS_DMASYNC_PREREAD;
			break;
		default:
			KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
			    DMA_DATA_OUT, ("bad DMA data flags"));
			op = BUS_DMASYNC_PREWRITE;
			break;
		}
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
	}
	bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	ida->flags &= ~IDA_QFROZEN;
}

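/*
 * Load a data buffer for DMA.  If the load is deferred (EINPROGRESS), the
 * queue is frozen until ida_data_cb() runs and unfreezes it.
 */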
static int
ida_map_qcb(struct ida_softc *ida, struct ida_qcb *qcb, void *data,
    bus_size_t datasize)
{
	int error, flags;

	if (ida->flags & IDA_INTERRUPTS)
		flags = BUS_DMA_WAITOK;
	else
		flags = BUS_DMA_NOWAIT;
	error = bus_dmamap_load(ida->buffer_dmat, qcb->dmamap, data, datasize,
	    ida_data_cb, qcb, flags);
	if (error == EINPROGRESS) {
		ida->flags |= IDA_QFROZEN;
		error = 0;
	}
	return (error);
}

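/*
 * Issue a single controller command and wait for it to complete, sleeping
 * on the QCB when interrupts are enabled or polling the done FIFO
 * otherwise.  For example, ida_setup() fetches the controller information
 * with:
 *
 *	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
 *	    IDA_CONTROLLER, 0, DMA_DATA_IN);
 */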
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int32_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	int error;

	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	qcb = ida_get_qcb(ida);

	if (qcb == NULL) {
		device_printf(ida->dev, "out of QCBs\n");
		return (EAGAIN);
	}

	qcb->flags = flags | IDA_COMMAND;
	hwqcb = qcb->hwqcb;
	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = htole32(pblkno);
	hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
	hwqcb->req.command = command;

	error = ida_map_qcb(ida, qcb, data, datasize);
	if (error == 0) {
		error = ida_wait(ida, qcb);
		/* Don't free QCB on a timeout in case it later completes. */
		if (error)
			return (error);
		error = qcb->error;
	}

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	ida_free_qcb(ida, qcb);
	return (error);
}

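/*
 * Entry point for the logical-drive strategy path: queue the bio on the
 * controller and try to start I/O immediately.
 */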
void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
	mtx_lock(&ida->lock);
	bioq_insert_tail(&ida->bio_queue, bp);
	ida_startio(ida);
	mtx_unlock(&ida->lock);
}

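/*
 * Drain the bio queue, turning each bio into a read or write QCB, for as
 * long as QCBs are available and the queue is not frozen behind a
 * deferred DMA load.
 */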
static void
ida_startio(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	struct idad_softc *drv;
	struct bio *bp;
	int error;

	mtx_assert(&ida->lock, MA_OWNED);
	for (;;) {
		if (ida->flags & IDA_QFROZEN)
			return;
		bp = bioq_first(&ida->bio_queue);
		if (bp == NULL)
			return;				/* no more buffers */

		qcb = ida_get_qcb(ida);
		if (qcb == NULL)
			return;				/* out of resources */

		bioq_remove(&ida->bio_queue, bp);
		qcb->buf = bp;
		qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;

		hwqcb = qcb->hwqcb;
		drv = bp->bio_driver1;
		hwqcb->hdr.drive = drv->drive;
		hwqcb->req.blkno = bp->bio_pblkno;
		hwqcb->req.bcount = howmany(bp->bio_bcount, DEV_BSIZE);
		hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;

		error = ida_map_qcb(ida, qcb, bp->bio_data, bp->bio_bcount);
		if (error) {
			qcb->error = error;
			ida_done(ida, qcb);
		}
	}
}

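/*
 * Push queued QCBs into the controller until its command FIFO fills,
 * arming the watchdog when the controller goes from idle to busy.
 */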
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list?
		 */

		/* Set a timeout. */
		if (!ida->qactive && !dumping)
			callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
		ida->qactive++;

		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

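/*
 * Wait for a specific QCB to complete.  With interrupts enabled this
 * sleeps on the QCB; otherwise the done FIFO is polled, for up to five
 * seconds, until this QCB's completion appears.
 */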
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	if (ida->flags & IDA_INTERRUPTS) {
		if (mtx_sleep(qcb, &ida->lock, PRIBIO, "idacmd", 5 * hz)) {
			qcb->state = QCB_TIMEDOUT;
			return (ETIMEDOUT);
		}
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0) {
			qcb->state = QCB_TIMEDOUT;
			return (ETIMEDOUT);
		}
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}

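/*
 * Interrupt handler: retire every completed QCB reported by the done
 * FIFO, flag rejected command lists and restart queued I/O.
 */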
void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	mtx_lock(&ida->lock);
	if (ida->cmd.int_pending(ida) == 0) {
		mtx_unlock(&ida->lock);
		return;				/* not our interrupt */
	}

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (intmax_t)completed);
			continue;
		}
		/* Handle "Bad Command List" errors. */
		if ((completed & 3) && (qcb->hwqcb->req.error == 0))
			qcb->hwqcb->req.error = CMD_REJECTED;
		ida_done(ida, qcb);
	}
	ida_startio(ida);
	mtx_unlock(&ida->lock);
}

/*
 * Complete a QCB: sync and unload its DMA maps, report any errors, then
 * either wake the ida_command() waiter or complete the bio via
 * idad_intr(), and rearm or cancel the watchdog.
 *
 * XXX should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	bus_dmasync_op_t op;
	int active, error = 0;

	/*
	 * finish up command
	 */
	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	active = (qcb->state != QCB_FREE);
	if (qcb->flags & DMA_DATA_TRANSFER && active) {
		switch (qcb->flags & DMA_DATA_TRANSFER) {
		case DMA_DATA_TRANSFER:
			op = BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
			break;
		case DMA_DATA_IN:
			op = BUS_DMASYNC_POSTREAD;
			break;
		default:
			KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
			    DMA_DATA_OUT, ("bad DMA data flags"));
			op = BUS_DMASYNC_POSTWRITE;
			break;
		}
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}
	if (active)
		bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (qcb->hwqcb->req.error & SOFT_ERROR) {
		if (qcb->buf)
			device_printf(ida->dev, "soft %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "soft error\n");
	}
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		if (qcb->buf)
			device_printf(ida->dev, "hard %s error\n",
				qcb->buf->bio_cmd == BIO_READ ?
					"read" : "write");
		else
			device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}
	if (qcb->error) {
		error = 1;
		device_printf(ida->dev, "request failed to map: %d\n", qcb->error);
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
		if (qcb->state == QCB_TIMEDOUT)
			ida_free_qcb(ida, qcb);
	} else {
		KASSERT(qcb->buf != NULL, ("ida_done(): qcb->buf is NULL!"));
		if (error)
			qcb->buf->bio_flags |= BIO_ERROR;
		idad_intr(qcb->buf);
		ida_free_qcb(ida, qcb);
	}

	if (!active)
		return;

	ida->qactive--;
	/* Reschedule or cancel timeout */
	if (ida->qactive)
		callout_reset(&ida->ch, hz * 5, ida_timeout, ida);
	else
		callout_stop(&ida->ch);
}

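/*
 * Watchdog: a submitted command has not completed within five seconds;
 * dump the controller registers to aid debugging.
 */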
static void
ida_timeout(void *arg)
{
	struct ida_softc *ida;

	ida = (struct ida_softc *)arg;
	device_printf(ida->dev, "%s() qactive %d\n", __func__, ida->qactive);

	if (ida->flags & IDA_INTERRUPTS)
		device_printf(ida->dev, "IDA_INTERRUPTS\n");

	device_printf(ida->dev,	"\t   R_CMD_FIFO: %08x\n"
				"\t  R_DONE_FIFO: %08x\n"
				"\t   R_INT_MASK: %08x\n"
				"\t     R_STATUS: %08x\n"
				"\tR_INT_PENDING: %08x\n",
					ida_inl(ida, R_CMD_FIFO),
					ida_inl(ida, R_DONE_FIFO),
					ida_inl(ida, R_INT_MASK),
					ida_inl(ida, R_STATUS),
					ida_inl(ida, R_INT_PENDING));

	return;
}

/*
 * IOCTL stuff follows.
 */
struct cmd_info {
	int	cmd;
	int	len;
	int	flags;
};
static struct cmd_info *ida_cmd_lookup(int);

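/*
 * IDAIO_COMMAND handler: look the command up in ci_list for its transfer
 * length and DMA direction, then pass it through to ida_command().
 * Commands without a payload are issued with a small scratch buffer.
 */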
static int
ida_ioctl (struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
{
	struct ida_softc *sc;
	struct ida_user_command *uc;
	struct cmd_info *ci;
	int len;
	int flags;
	int error;
	int data;
	void *daddr;

	sc = (struct ida_softc *)dev->si_drv1;
	uc = (struct ida_user_command *)addr;
	error = 0;

	switch (cmd) {
	case IDAIO_COMMAND:
		ci = ida_cmd_lookup(uc->command);
		if (ci == NULL) {
			error = EINVAL;
			break;
		}
		len = ci->len;
		flags = ci->flags;
		if (len)
			daddr = &uc->d.buf;
		else {
			daddr = &data;
			len = sizeof(data);
		}
		mtx_lock(&sc->lock);
		error = ida_command(sc, uc->command, daddr, len,
				    uc->drive, uc->blkno, flags);
		mtx_unlock(&sc->lock);
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return (error);
}

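/*
 * Commands that may be issued through IDAIO_COMMAND, with the expected
 * transfer length and DMA direction of each.
 */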
static struct cmd_info ci_list[] = {
	{ CMD_GET_LOG_DRV_INFO,
			sizeof(struct ida_drive_info), DMA_DATA_IN },
	{ CMD_GET_CTRL_INFO,
			sizeof(struct ida_controller_info), DMA_DATA_IN },
	{ CMD_SENSE_DRV_STATUS,
			sizeof(struct ida_drive_status), DMA_DATA_IN },
	{ CMD_START_RECOVERY,		0, 0 },
	{ CMD_GET_PHYS_DRV_INFO,
			sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER },
	{ CMD_BLINK_DRV_LEDS,
			sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT },
	{ CMD_SENSE_DRV_LEDS,
			sizeof(struct ida_blink_drv_leds), DMA_DATA_IN },
	{ CMD_GET_LOG_DRV_EXT,
			sizeof(struct ida_drive_info_ext), DMA_DATA_IN },
	{ CMD_RESET_CTRL,		0, 0 },
	{ CMD_GET_CONFIG,		0, 0 },
	{ CMD_SET_CONFIG,		0, 0 },
	{ CMD_LABEL_LOG_DRV,
			sizeof(struct ida_label_logical), DMA_DATA_OUT },
	{ CMD_SET_SURFACE_DELAY,	0, 0 },
	{ CMD_SENSE_BUS_PARAMS,		0, 0 },
	{ CMD_SENSE_SUBSYS_INFO,	0, 0 },
	{ CMD_SENSE_SURFACE_ATS,	0, 0 },
	{ CMD_PASSTHROUGH,		0, 0 },
	{ CMD_RESET_SCSI_DEV,		0, 0 },
	{ CMD_PAUSE_BG_ACT,		0, 0 },
	{ CMD_RESUME_BG_ACT,		0, 0 },
	{ CMD_START_FIRMWARE,		0, 0 },
	{ CMD_SENSE_DRV_ERR_LOG,	0, 0 },
	{ CMD_START_CPM,		0, 0 },
	{ CMD_SENSE_CP,			0, 0 },
	{ CMD_STOP_CPM,			0, 0 },
	{ CMD_FLUSH_CACHE,		0, 0 },
	{ CMD_ACCEPT_MEDIA_EXCH,	0, 0 },
	{ 0, 0, 0 }
};

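/*
 * Map a command code to its ci_list entry; returns NULL for commands that
 * are not in the table.
 */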
static struct cmd_info *
ida_cmd_lookup (int command)
{
	struct cmd_info *ci;

	ci = ci_list;
	while (ci->cmd) {
		if (ci->cmd == command)
			return (ci);
		ci++;
	}
	return (NULL);
}