/*-
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "cudavar.h"
#include "viareg.h"

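/*
 * The Cuda is a small microcontroller found on many Power Macintosh
 * systems; among other duties it acts as the host's ADB (Apple Desktop
 * Bus) controller. The host talks to it through a VIA (hence the
 * "via-cuda" OFW node) using a byte-at-a-time handshake over the VIA
 * shift register; the helpers and interrupt-time state machine below
 * implement that handshake.
 */
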
/*
 * MacIO interface
 */
static int	cuda_probe(device_t);
static int	cuda_attach(device_t);
static int	cuda_detach(device_t);

static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
    u_char *data, u_char poll);
static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int	cuda_poll(device_t dev);
static void	cuda_send_inbound(struct cuda_softc *sc);
static void	cuda_send_outbound(struct cuda_softc *sc);

static device_method_t  cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* bus interface, for ADB root */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	{ 0, 0 },
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

static devclass_t cuda_devclass;

DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0);

static void cuda_intr(void *arg);
static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
static void cuda_idle(struct cuda_softc *);
static void cuda_tip(struct cuda_softc *);
static void cuda_clear_tip(struct cuda_softc *);
static void cuda_in(struct cuda_softc *);
static void cuda_out(struct cuda_softc *);
static void cuda_toggle_ack(struct cuda_softc *);
static void cuda_ack_off(struct cuda_softc *);
static int cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (type == NULL || strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not set up interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

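	/*
	 * Per the 6522 ACR layout, bits 4:2 select the shift-register mode;
	 * setting 0x0c and clearing 0x10 selects "shift in under external
	 * clock", so the Cuda clocks each byte it sends to us.
	 */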
	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

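/*
 * Low-level VIA handshake helpers. As used below, PB5 is the host's
 * "transfer in progress" (TIP) output, PB4 is the per-byte acknowledge
 * output that is toggled once for every byte shifted, and PB3 is the
 * Cuda's active-low attention line, sampled by cuda_intr_state().
 * Data moves through the VIA shift register; vSR_OUT in the ACR selects
 * the shift direction.
 */
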
/* Release TIP and the byte-ack line; the bus returns to its idle state. */
static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

/* Assert TIP (drive PB5 low) to start a transfer. */
static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

/* Deassert TIP at the end of a transfer. */
static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

/* Point the shift register inward (host receives). */
static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

/* Point the shift register outward (host transmits). */
static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

/* Toggle the byte-ack line; each transition hands off one byte. */
static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

/* Drive the byte-ack line high (its inactive level). */
static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

/* Nonzero while the Cuda is asserting its (active-low) attention line. */
static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

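/*
 * Queue a message for transmission. The packet is copied onto sc_outq;
 * if no transfer is in flight, cuda_send_outbound() starts shifting it
 * out immediately and cuda_intr() completes the transfer byte by byte.
 * In polled or early-boot (cold) contexts the transfer is driven
 * synchronously via cuda_poll().
 */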
static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we are already sending a packet, bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

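/*
 * Deliver completed packets from sc_inq to their consumers: ADB replies
 * go to the child ADB bus, pseudo-command replies update driver state,
 * and error packets are dropped. This runs outside the interrupt-time
 * state machine so the mutex can be released around the upcalls.
 */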
static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			if (pkt->data[0] == CMD_AUTOPOLL)
				sc->sc_autopoll = 1;
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA sends an error packet when we lose the race
			 * between sending and receiving. cuda_intr() already
			 * backs out of the aborted transmit when that
			 * happens, so we simply ignore these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

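/*
 * Run the state machine synchronously by calling the interrupt handler
 * directly. Used when polling is requested or before interrupts are
 * available (cold boot).
 */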
static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

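/*
 * Interrupt-time state machine. CUDA_IDLE: the Cuda raised attention, so
 * start reading a frame (or restart a pending write on a false start).
 * CUDA_IN: accumulate bytes in sc_in until the attention line drops, then
 * queue the packet and, if a write is pending, begin it. CUDA_OUT: shift
 * out the next byte of sc_out, backing off to a read if the Cuda started
 * talking at the same time.
 */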
static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;

	int i, ending, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* warn only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* refuse payloads that will not fit in the fixed-size packet */
	if (len < 0 || len > (int)sizeof(packet) - 2)
		return (EINVAL);

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

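	/*
	 * The Cuda autopoll pseudo-command takes a single on/off flag, so
	 * any nonzero device mask simply enables autopolling.
	 */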
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}