/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

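/*
 * The Cuda is the system-management microcontroller found on many Old World
 * Power Macs.  It sits behind a 6522-compatible VIA and provides the ADB
 * host bus, the real-time clock and power-off/restart control.  This driver
 * speaks the Cuda byte-shift protocol over the VIA shift register and
 * exposes the ADB and clock interfaces to the rest of the kernel.
 */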
/*
 * MacIO interface
 */
static int	cuda_probe(device_t);
static int	cuda_attach(device_t);
static int	cuda_detach(device_t);

static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
    u_char *data, u_char poll);
static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int	cuda_poll(device_t dev);
static void	cuda_send_inbound(struct cuda_softc *sc);
static void	cuda_send_outbound(struct cuda_softc *sc);
static void	cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int cuda_gettime(device_t dev, struct timespec *ts);
static int cuda_settime(device_t dev, struct timespec *ts);

static device_method_t  cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime,	cuda_gettime),
	DEVMETHOD(clock_settime,	cuda_settime),

	DEVMETHOD_END
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

DRIVER_MODULE(cuda, macio, cuda_driver, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, 0, 0);

static void cuda_intr(void *arg);
static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
static void cuda_idle(struct cuda_softc *);
static void cuda_tip(struct cuda_softc *);
static void cuda_clear_tip(struct cuda_softc *);
static void cuda_in(struct cuda_softc *);
static void cuda_out(struct cuda_softc *);
static void cuda_toggle_ack(struct cuda_softc *);
static void cuda_ack_off(struct cuda_softc *);
static int cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not setup interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		return (ENXIO);
	}

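	/*
	 * The mutex is MTX_RECURSE because cuda_send() takes it and can be
	 * re-entered with it already held, e.g. from cuda_adb_autopoll()
	 * and cuda_gettime(), and via the polling path
	 * (cuda_send_outbound() -> cuda_poll() -> cuda_intr()).
	 */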
	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

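	/*
	 * VIA port B carries the Cuda handshake lines: PB4 (byte ack) and
	 * PB5 (TIP, "transfer in progress") are driven by the host and are
	 * configured as outputs below, while PB3 (the Cuda's transfer
	 * request line, polled via cuda_intr_state()) is an input.  Data
	 * bytes themselves move through the VIA shift register.
	 */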
	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

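	/*
	 * Register with the kernel RTC subsystem; the 1000 is the
	 * advertised clock resolution in microseconds.
	 */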
	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

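/*
 * Low-level handshake helpers.  TIP (PB5) is active-low: cuda_tip() pulls it
 * low to claim the bus, cuda_clear_tip() and cuda_idle() release it.
 * cuda_toggle_ack() flips PB4 to acknowledge each byte and cuda_ack_off()
 * drives it high again.  cuda_in()/cuda_out() select the shift register
 * direction, and cuda_intr_state() returns nonzero while the Cuda asserts
 * its (active-low) transfer request on PB3.
 */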
static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

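/*
 * Queue a raw Cuda packet for transmission.  msg[0] is the packet type
 * (CUDA_ADB, CUDA_PSEUDO, ...) and the remaining length - 1 bytes are the
 * payload.  If no transfer is already in flight the packet is pushed out
 * immediately; when polling is requested, or during early boot, the
 * transfer is driven synchronously through cuda_poll().
 */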
static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we already are sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

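/*
 * Dispatch completed inbound packets.  ADB replies are handed to the child
 * adb(4) bus via adb_receive_raw_packet(), while CUDA_PSEUDO replies
 * complete autopoll and RTC requests.  The softc lock is dropped while each
 * packet is dispatched and re-taken to return the packet to the free queue.
 */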
static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

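/*
 * Interrupt handler: advances the Cuda transfer state machine on each shift
 * register interrupt.  In CUDA_IN, every byte is acknowledged by toggling
 * PB4 until the Cuda drops its transfer request, which marks the end of the
 * frame.  In CUDA_OUT, the next byte is loaded into the shift register; if
 * the Cuda asserts its request mid-write, the partially sent packet is
 * abandoned and retried once the incoming frame has been read.
 */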
static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;
	int ending, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* bitch only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

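/*
 * Cuda autopolling is all-or-nothing: the per-device mask from the ADB
 * layer is collapsed to a single on/off flag.  sc_autopoll is set to -1 to
 * mark the request as in flight until a CMD_AUTOPOLL reply arrives (see
 * cuda_send_inbound()).
 */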
static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

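/*
 * shutdown_final handler: ask the Cuda to power the machine off when
 * halting (RB_HALT) or to reset it otherwise, then spin in cuda_poll()
 * until the Cuda acts on the request.
 */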
static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	while (1)
		cuda_poll(sc->sc_dev);
}

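/*
 * Seconds between the Mac OS epoch used by the Cuda RTC (1904-01-01) and
 * the Unix epoch (1970-01-01): 24107 days, including 17 leap days.
 */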
#define DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}