xref: /freebsd/sys/powerpc/powermac/cuda.c (revision d8a0fe102c0cfdfcd5b818f850eff09d8536c9bc)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2006 Michael Lorenz
5  * Copyright 2008 by Nathan Whitehorn
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/module.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <sys/kernel.h>
42 #include <sys/clock.h>
43 #include <sys/reboot.h>
44 
45 #include <dev/ofw/ofw_bus.h>
46 #include <dev/ofw/openfirm.h>
47 
48 #include <machine/bus.h>
49 #include <machine/intr_machdep.h>
50 #include <machine/md_var.h>
51 #include <machine/pio.h>
52 #include <machine/resource.h>
53 
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 
57 #include <sys/rman.h>
58 
59 #include <dev/adb/adb.h>
60 
61 #include "clock_if.h"
62 #include "cudavar.h"
63 #include "viareg.h"
64 
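/*
 * Overview: the CUDA microcontroller on Old World Power Macintoshes is
 * reached through a VIA (6522-style) interface.  Bytes are exchanged one
 * at a time through the VIA shift register, with port B bits used for
 * the transfer handshake.  CUDA fronts the ADB bus, the real-time clock
 * and system power control, which is why this driver implements both the
 * ADB host-bus and the clock interfaces below.
 */
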
65 /*
66  * MacIO interface
67  */
68 static int	cuda_probe(device_t);
69 static int	cuda_attach(device_t);
70 static int	cuda_detach(device_t);
71 
72 static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
73     u_char *data, u_char poll);
74 static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
75 static u_int	cuda_poll(device_t dev);
76 static void	cuda_send_inbound(struct cuda_softc *sc);
77 static void	cuda_send_outbound(struct cuda_softc *sc);
78 static void	cuda_shutdown(void *xsc, int howto);
79 
80 /*
81  * Clock interface
82  */
83 static int cuda_gettime(device_t dev, struct timespec *ts);
84 static int cuda_settime(device_t dev, struct timespec *ts);
85 
86 static device_method_t  cuda_methods[] = {
87 	/* Device interface */
88 	DEVMETHOD(device_probe,		cuda_probe),
89 	DEVMETHOD(device_attach,	cuda_attach),
90 	DEVMETHOD(device_detach,	cuda_detach),
91 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
92 	DEVMETHOD(device_suspend,	bus_generic_suspend),
93 	DEVMETHOD(device_resume,	bus_generic_resume),
94 
95 	/* ADB bus interface */
96 	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
97 	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
98 	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),
99 
100 	/* Clock interface */
101 	DEVMETHOD(clock_gettime,	cuda_gettime),
102 	DEVMETHOD(clock_settime,	cuda_settime),
103 
104 	DEVMETHOD_END
105 };
106 
107 static driver_t cuda_driver = {
108 	"cuda",
109 	cuda_methods,
110 	sizeof(struct cuda_softc),
111 };
112 
113 static devclass_t cuda_devclass;
114 
115 DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0);
116 DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0);
117 
118 static void cuda_intr(void *arg);
119 static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
120 static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
121 static void cuda_idle(struct cuda_softc *);
122 static void cuda_tip(struct cuda_softc *);
123 static void cuda_clear_tip(struct cuda_softc *);
124 static void cuda_in(struct cuda_softc *);
125 static void cuda_out(struct cuda_softc *);
126 static void cuda_toggle_ack(struct cuda_softc *);
127 static void cuda_ack_off(struct cuda_softc *);
128 static int cuda_intr_state(struct cuda_softc *);
129 
130 static int
131 cuda_probe(device_t dev)
132 {
133 	const char *type = ofw_bus_get_type(dev);
134 
135 	if (strcmp(type, "via-cuda") != 0)
136 		return (ENXIO);
137 
138 	device_set_desc(dev, CUDA_DEVSTR);
139 	return (0);
140 }
141 
142 static int
143 cuda_attach(device_t dev)
144 {
145 	struct cuda_softc *sc;
146 
147 	volatile int i;
148 	uint8_t reg;
149 	phandle_t node, child;
150 
151 	sc = device_get_softc(dev);
152 	sc->sc_dev = dev;
153 
154 	sc->sc_memrid = 0;
155 	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
156 	    &sc->sc_memrid, RF_ACTIVE);
157 
158 	if (sc->sc_memr == NULL) {
159 		device_printf(dev, "Could not alloc mem resource!\n");
160 		return (ENXIO);
161 	}
162 
163 	sc->sc_irqrid = 0;
164 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
165 	    RF_ACTIVE);
166 	if (sc->sc_irq == NULL) {
167 		device_printf(dev, "could not allocate interrupt\n");
168 		return (ENXIO);
169 	}
170 
171 	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
172 	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
173 		device_printf(dev, "could not setup interrupt\n");
174 		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
175 		    sc->sc_irq);
176 		return (ENXIO);
177 	}
178 
179 	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);
180 
181 	sc->sc_sent = 0;
182 	sc->sc_received = 0;
183 	sc->sc_waiting = 0;
184 	sc->sc_polling = 0;
185 	sc->sc_state = CUDA_NOTREADY;
186 	sc->sc_autopoll = 0;
187 	sc->sc_rtc = -1;
188 
189 	STAILQ_INIT(&sc->sc_inq);
190 	STAILQ_INIT(&sc->sc_outq);
191 	STAILQ_INIT(&sc->sc_freeq);
192 
193 	for (i = 0; i < CUDA_MAXPACKETS; i++)
194 		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);
195 
196 	/* Init CUDA */
197 
198 	reg = cuda_read_reg(sc, vDirB);
199 	reg |= 0x30;	/* register B bits 4 and 5: outputs */
200 	cuda_write_reg(sc, vDirB, reg);
201 
202 	reg = cuda_read_reg(sc, vDirB);
203 	reg &= 0xf7;	/* register B bit 3: input */
204 	cuda_write_reg(sc, vDirB, reg);
205 
206 	reg = cuda_read_reg(sc, vACR);
207 	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
208 	cuda_write_reg(sc, vACR, reg);
209 
210 	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10); /* SR: shift in, ext. clock */
211 
212 	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */
213 
214 	cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */
215 
216 	cuda_idle(sc);	/* reset ADB */
217 
218 	/* Reset CUDA */
219 
220 	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
221 	cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */
222 	cuda_idle(sc);	/* reset state to idle */
223 	DELAY(150);
224 	cuda_tip(sc);	/* signal start of frame */
225 	DELAY(150);
226 	cuda_toggle_ack(sc);
227 	DELAY(150);
228 	cuda_clear_tip(sc);
229 	DELAY(150);
230 	cuda_idle(sc);	/* back to idle state */
231 	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
232 	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */
233 
234 	/* Initialize child buses (ADB) */
235 	node = ofw_bus_get_node(dev);
236 
237 	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
238 		char name[32];
239 
240 		memset(name, 0, sizeof(name));
241 		OF_getprop(child, "name", name, sizeof(name));
242 
243 		if (bootverbose)
244 			device_printf(dev, "CUDA child <%s>\n", name);
245 
246 		if (strncmp(name, "adb", 4) == 0) {
247 			sc->adb_bus = device_add_child(dev, "adb", -1);
248 		}
249 	}
250 
251 	clock_register(dev, 1000);
252 	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
253 	    SHUTDOWN_PRI_LAST);
254 
255 	return (bus_generic_attach(dev));
256 }
257 
258 static int cuda_detach(device_t dev) {
259 	struct cuda_softc *sc;
260 
261 	sc = device_get_softc(dev);
262 
263 	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
264 	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
265 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
266 	mtx_destroy(&sc->sc_mutex);
267 
268 	return (bus_generic_detach(dev));
269 }
270 
271 static uint8_t
272 cuda_read_reg(struct cuda_softc *sc, u_int offset) {
273 	return (bus_read_1(sc->sc_memr, offset));
274 }
275 
276 static void
277 cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value) {
278 	bus_write_1(sc->sc_memr, offset, value);
279 }
280 
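/*
 * Low-level VIA handshake helpers.  cuda_tip()/cuda_clear_tip() drive
 * vPB5 (transfer-in-progress, active low), cuda_toggle_ack()/cuda_ack_off()
 * drive vPB4 (the per-byte acknowledge), and cuda_intr_state() samples
 * vPB3, which CUDA holds low while it wants to talk to us.
 */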
281 static void
282 cuda_idle(struct cuda_softc *sc)
283 {
284 	uint8_t reg;
285 
286 	reg = cuda_read_reg(sc, vBufB);
287 	reg |= (vPB4 | vPB5);
288 	cuda_write_reg(sc, vBufB, reg);
289 }
290 
291 static void
292 cuda_tip(struct cuda_softc *sc)
293 {
294 	uint8_t reg;
295 
296 	reg = cuda_read_reg(sc, vBufB);
297 	reg &= ~vPB5;
298 	cuda_write_reg(sc, vBufB, reg);
299 }
300 
301 static void
302 cuda_clear_tip(struct cuda_softc *sc)
303 {
304 	uint8_t reg;
305 
306 	reg = cuda_read_reg(sc, vBufB);
307 	reg |= vPB5;
308 	cuda_write_reg(sc, vBufB, reg);
309 }
310 
311 static void
312 cuda_in(struct cuda_softc *sc)
313 {
314 	uint8_t reg;
315 
316 	reg = cuda_read_reg(sc, vACR);
317 	reg &= ~vSR_OUT;
318 	cuda_write_reg(sc, vACR, reg);
319 }
320 
321 static void
322 cuda_out(struct cuda_softc *sc)
323 {
324 	uint8_t reg;
325 
326 	reg = cuda_read_reg(sc, vACR);
327 	reg |= vSR_OUT;
328 	cuda_write_reg(sc, vACR, reg);
329 }
330 
331 static void
332 cuda_toggle_ack(struct cuda_softc *sc)
333 {
334 	uint8_t reg;
335 
336 	reg = cuda_read_reg(sc, vBufB);
337 	reg ^= vPB4;
338 	cuda_write_reg(sc, vBufB, reg);
339 }
340 
341 static void
342 cuda_ack_off(struct cuda_softc *sc)
343 {
344 	uint8_t reg;
345 
346 	reg = cuda_read_reg(sc, vBufB);
347 	reg |= vPB4;
348 	cuda_write_reg(sc, vBufB, reg);
349 }
350 
351 static int
352 cuda_intr_state(struct cuda_softc *sc)
353 {
354 	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
355 }
356 
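/*
 * Queue a complete CUDA packet for transmission.  msg[0] is the packet
 * type (e.g. CUDA_ADB or CUDA_PSEUDO) and the remaining length - 1 bytes
 * are the payload; the packet is copied onto sc_outq and, if no write is
 * already in flight, cuda_send_outbound() starts shifting it out.
 */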
357 static int
358 cuda_send(void *cookie, int poll, int length, uint8_t *msg)
359 {
360 	struct cuda_softc *sc = cookie;
361 	device_t dev = sc->sc_dev;
362 	struct cuda_packet *pkt;
363 
364 	if (sc->sc_state == CUDA_NOTREADY)
365 		return (-1);
366 
367 	mtx_lock(&sc->sc_mutex);
368 
369 	pkt = STAILQ_FIRST(&sc->sc_freeq);
370 	if (pkt == NULL) {
371 		mtx_unlock(&sc->sc_mutex);
372 		return (-1);
373 	}
374 
375 	pkt->len = length - 1;
376 	pkt->type = msg[0];
377 	memcpy(pkt->data, &msg[1], pkt->len);
378 
379 	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
380 	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);
381 
382 	/*
383 	 * If we already are sending a packet, we should bail now that this
384 	 * one has been added to the queue.
385 	 */
386 
387 	if (sc->sc_waiting) {
388 		mtx_unlock(&sc->sc_mutex);
389 		return (0);
390 	}
391 
392 	cuda_send_outbound(sc);
393 	mtx_unlock(&sc->sc_mutex);
394 
395 	if (sc->sc_polling || poll || cold)
396 		cuda_poll(dev);
397 
398 	return (0);
399 }
400 
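/*
 * Pull the head of sc_outq into the flat sc_out buffer and, if the bus
 * is idle and CUDA is not already requesting our attention, begin the
 * write: switch the shift register to output, load the first byte and
 * assert TIP.  Otherwise the interrupt handler restarts the write later.
 */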
401 static void
402 cuda_send_outbound(struct cuda_softc *sc)
403 {
404 	struct cuda_packet *pkt;
405 
406 	mtx_assert(&sc->sc_mutex, MA_OWNED);
407 
408 	pkt = STAILQ_FIRST(&sc->sc_outq);
409 	if (pkt == NULL)
410 		return;
411 
412 	sc->sc_out_length = pkt->len + 1;
413 	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
414 	sc->sc_sent = 0;
415 
416 	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
417 	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
418 
419 	sc->sc_waiting = 1;
420 
421 	cuda_poll(sc->sc_dev);
422 
423 	DELAY(150);
424 
425 	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
426 		sc->sc_state = CUDA_OUT;
427 		cuda_out(sc);
428 		cuda_write_reg(sc, vSR, sc->sc_out[0]);
429 		cuda_ack_off(sc);
430 		cuda_tip(sc);
431 	}
432 }
433 
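/*
 * Dispatch completed packets from sc_inq.  The mutex is dropped around
 * the upcall into the ADB layer, so receive handlers are free to submit
 * new commands while we process the queue.
 */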
434 static void
435 cuda_send_inbound(struct cuda_softc *sc)
436 {
437 	device_t dev;
438 	struct cuda_packet *pkt;
439 
440 	dev = sc->sc_dev;
441 
442 	mtx_lock(&sc->sc_mutex);
443 
444 	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
445 		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);
446 
447 		mtx_unlock(&sc->sc_mutex);
448 
449 		/* check if we have a handler for this message */
450 		switch (pkt->type) {
451 		   case CUDA_ADB:
452 			if (pkt->len > 2) {
453 				adb_receive_raw_packet(sc->adb_bus,
454 				    pkt->data[0], pkt->data[1],
455 				    pkt->len - 2, &pkt->data[2]);
456 			} else {
457 				adb_receive_raw_packet(sc->adb_bus,
458 				    pkt->data[0], pkt->data[1], 0, NULL);
459 			}
460 			break;
461 		   case CUDA_PSEUDO:
462 			mtx_lock(&sc->sc_mutex);
463 			switch (pkt->data[1]) {
464 			case CMD_AUTOPOLL:
465 				sc->sc_autopoll = 1;
466 				break;
467 			case CMD_READ_RTC:
468 				memcpy(&sc->sc_rtc, &pkt->data[2],
469 				    sizeof(sc->sc_rtc));
470 				wakeup(&sc->sc_rtc);
471 				break;
472 			case CMD_WRITE_RTC:
473 				break;
474 			}
475 			mtx_unlock(&sc->sc_mutex);
476 			break;
477 		   case CUDA_ERROR:
478 			/*
479 			 * CUDA will throw errors if we miss a race between
480 			 * sending and receiving packets. This is already
481 			 * handled when we abort packet output to handle
482 			 * this packet in cuda_intr(). Thus, we ignore
483 			 * these messages.
484 			 */
485 			break;
486 		   default:
487 			device_printf(dev, "unknown CUDA command %d\n",
488 			    pkt->type);
489 			break;
490 		}
491 
492 		mtx_lock(&sc->sc_mutex);
493 
494 		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
495 	}
496 
497 	mtx_unlock(&sc->sc_mutex);
498 }
499 
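/*
 * Run the interrupt handler synchronously if the controller needs
 * service; used when interrupts cannot be relied on (early boot, forced
 * polling, the shutdown path).
 */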
500 static u_int
501 cuda_poll(device_t dev)
502 {
503 	struct cuda_softc *sc = device_get_softc(dev);
504 
505 	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
506 	    !sc->sc_waiting)
507 		return (0);
508 
509 	cuda_intr(dev);
510 	return (0);
511 }
512 
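/*
 * Byte-level state machine, driven by the VIA shift-register interrupt:
 * CUDA_IDLE picks up an unsolicited transfer from the controller,
 * CUDA_IN accumulates bytes into sc_in until vPB3 is released at end of
 * frame, and CUDA_OUT shifts sc_out out one byte per interrupt, backing
 * off into a read whenever the controller wins the race for the bus.
 */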
513 static void
514 cuda_intr(void *arg)
515 {
516 	device_t        dev;
517 	struct cuda_softc *sc;
518 
519 	int i, ending, restart_send, process_inbound;
520 	uint8_t reg;
521 
522 	dev = (device_t)arg;
523 	sc = device_get_softc(dev);
524 
525 	mtx_lock(&sc->sc_mutex);
526 
527 	restart_send = 0;
528 	process_inbound = 0;
529 	reg = cuda_read_reg(sc, vIFR);
530 	if ((reg & vSR_INT) != vSR_INT) {
531 		mtx_unlock(&sc->sc_mutex);
532 		return;
533 	}
534 
535 	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */
536 
537 switch_start:
538 	switch (sc->sc_state) {
539 	case CUDA_IDLE:
540 		/*
541 		 * This is an unexpected packet, so grab the first (dummy)
542 		 * byte, set up the proper vars, and tell the chip we are
543 		 * starting to receive the packet by setting the TIP bit.
544 		 */
545 		sc->sc_in[1] = cuda_read_reg(sc, vSR);
546 
547 		if (cuda_intr_state(sc) == 0) {
548 			/* must have been a fake start */
549 
550 			if (sc->sc_waiting) {
551 				/* start over */
552 				DELAY(150);
553 				sc->sc_state = CUDA_OUT;
554 				sc->sc_sent = 0;
555 				cuda_out(sc);
556 				cuda_write_reg(sc, vSR, sc->sc_out[1]);
557 				cuda_ack_off(sc);
558 				cuda_tip(sc);
559 			}
560 			break;
561 		}
562 
563 		cuda_in(sc);
564 		cuda_tip(sc);
565 
566 		sc->sc_received = 1;
567 		sc->sc_state = CUDA_IN;
568 		break;
569 
570 	case CUDA_IN:
571 		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
572 		ending = 0;
573 
574 		if (sc->sc_received > 255) {
575 			/* complain only once */
576 			if (sc->sc_received == 256) {
577 				device_printf(dev, "input overflow\n");
578 				ending = 1;
579 			}
580 		} else
581 			sc->sc_received++;
582 
583 		/* intr off means this is the last byte (end of frame) */
584 		if (cuda_intr_state(sc) == 0) {
585 			ending = 1;
586 		} else {
587 			cuda_toggle_ack(sc);
588 		}
589 
590 		if (ending == 1) {	/* end of message? */
591 			struct cuda_packet *pkt;
592 
593 			/* reset vars and signal the end of this frame */
594 			cuda_idle(sc);
595 
596 			/* Queue up the packet */
597 			pkt = STAILQ_FIRST(&sc->sc_freeq);
598 			if (pkt != NULL) {
599 				/* If we have a free packet, process it */
600 
601 				pkt->len = sc->sc_received - 2;
602 				pkt->type = sc->sc_in[1];
603 				memcpy(pkt->data, &sc->sc_in[2], pkt->len);
604 
605 				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
606 				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);
607 
608 				process_inbound = 1;
609 			}
610 
611 			sc->sc_state = CUDA_IDLE;
612 			sc->sc_received = 0;
613 
614 			/*
615 			 * If there is something waiting to be sent out,
616 			 * set everything up and send the first byte.
617 			 */
618 			if (sc->sc_waiting == 1) {
619 				DELAY(1500);	/* required */
620 				sc->sc_sent = 0;
621 				sc->sc_state = CUDA_OUT;
622 
623 				/*
624 				 * If the interrupt is on, we were too slow
625 				 * and the chip has already started to send
626 				 * something to us, so back out of the write
627 				 * and start a read cycle.
628 				 */
629 				if (cuda_intr_state(sc)) {
630 					cuda_in(sc);
631 					cuda_idle(sc);
632 					sc->sc_sent = 0;
633 					sc->sc_state = CUDA_IDLE;
634 					sc->sc_received = 0;
635 					DELAY(150);
636 					goto switch_start;
637 				}
638 
639 				/*
640 				 * If we got here, it's ok to start sending
641 				 * so load the first byte and tell the chip
642 				 * we want to send.
643 				 */
644 				cuda_out(sc);
645 				cuda_write_reg(sc, vSR,
646 				    sc->sc_out[sc->sc_sent]);
647 				cuda_ack_off(sc);
648 				cuda_tip(sc);
649 			}
650 		}
651 		break;
652 
653 	case CUDA_OUT:
654 		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */
655 
656 		sc->sc_sent++;
657 		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
658 			cuda_in(sc);	/* make sure SR is set to IN */
659 			cuda_idle(sc);
660 			sc->sc_sent = 0;	/* must start all over */
661 			sc->sc_state = CUDA_IDLE;	/* new state */
662 			sc->sc_received = 0;
663 			sc->sc_waiting = 1;	/* must retry when done with
664 						 * read */
665 			DELAY(150);
666 			goto switch_start;	/* process next state right
667 						 * now */
668 			break;
669 		}
670 		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
671 			sc->sc_waiting = 0;	/* done writing */
672 			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
673 			cuda_in(sc);
674 			cuda_idle(sc);
675 		} else {
676 			/* send next byte */
677 			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
678 			cuda_toggle_ack(sc);	/* signal byte ready to
679 							 * shift */
680 		}
681 		break;
682 
683 	case CUDA_NOTREADY:
684 		break;
685 
686 	default:
687 		break;
688 	}
689 
690 	mtx_unlock(&sc->sc_mutex);
691 
692 	if (process_inbound)
693 		cuda_send_inbound(sc);
694 
695 	mtx_lock(&sc->sc_mutex);
696 	/* If we have another packet waiting, set it up */
697 	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
698 		cuda_send_outbound(sc);
699 
700 	mtx_unlock(&sc->sc_mutex);
701 
702 }
703 
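/*
 * ADB host-bus interface: the command byte and data are wrapped in a
 * CUDA_ADB packet, e.g. { CUDA_ADB, 0x2c } for a Talk Register 0 to the
 * device at ADB address 2 (illustrative encoding; the command byte is
 * supplied by the ADB layer).
 */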
704 static u_int
705 cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
706     u_char poll)
707 {
708 	struct cuda_softc *sc = device_get_softc(dev);
709 	uint8_t packet[16];
710 	int i;
711 
712 	/* construct an ADB command packet and send it */
713 	packet[0] = CUDA_ADB;
714 	packet[1] = command_byte;
715 	for (i = 0; i < len; i++)
716 		packet[i + 2] = data[i];
717 
718 	cuda_send(sc, poll, len + 2, packet);
719 
720 	return (0);
721 }
722 
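/*
 * Autopolling is all-or-nothing on CUDA: any nonzero ADB device mask
 * turns the controller's own polling on, zero turns it off.  sc_autopoll
 * is parked at -1 until the CMD_AUTOPOLL reply comes back through
 * cuda_send_inbound().
 */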
723 static u_int
724 cuda_adb_autopoll(device_t dev, uint16_t mask) {
725 	struct cuda_softc *sc = device_get_softc(dev);
726 
727 	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};
728 
729 	mtx_lock(&sc->sc_mutex);
730 
731 	if (cmd[2] == sc->sc_autopoll) {
732 		mtx_unlock(&sc->sc_mutex);
733 		return (0);
734 	}
735 
736 	sc->sc_autopoll = -1;
737 	cuda_send(sc, 1, 3, cmd);
738 
739 	mtx_unlock(&sc->sc_mutex);
740 
741 	return (0);
742 }
743 
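/*
 * shutdown_final hook: ask CUDA to power the machine off (RB_HALT) or
 * reset it, then spin in polled mode forever; the controller performs
 * the actual power transition.
 */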
744 static void
745 cuda_shutdown(void *xsc, int howto)
746 {
747 	struct cuda_softc *sc = xsc;
748 	uint8_t cmd[] = {CUDA_PSEUDO, 0};
749 
750 	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
751 	cuda_poll(sc->sc_dev);
752 	cuda_send(sc, 1, 2, cmd);
753 
754 	while (1)
755 		cuda_poll(sc->sc_dev);
756 }
757 
758 #define DIFF19041970	2082844800
759 
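/*
 * The CUDA RTC counts seconds from 1904-01-01, the classic Mac OS epoch;
 * DIFF19041970 (24107 days) converts between that and the Unix epoch.
 */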
760 static int
761 cuda_gettime(device_t dev, struct timespec *ts)
762 {
763 	struct cuda_softc *sc = device_get_softc(dev);
764 	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};
765 
766 	mtx_lock(&sc->sc_mutex);
767 	sc->sc_rtc = -1;
768 	cuda_send(sc, 1, 2, cmd);
769 	if (sc->sc_rtc == -1)
770 		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);
771 
772 	ts->tv_sec = sc->sc_rtc - DIFF19041970;
773 	ts->tv_nsec = 0;
774 	mtx_unlock(&sc->sc_mutex);
775 
776 	return (0);
777 }
778 
779 static int
780 cuda_settime(device_t dev, struct timespec *ts)
781 {
782 	struct cuda_softc *sc = device_get_softc(dev);
783 	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
784 	uint32_t sec;
785 
786 	sec = ts->tv_sec + DIFF19041970;
787 	memcpy(&cmd[2], &sec, sizeof(sec));
788 
789 	mtx_lock(&sc->sc_mutex);
790 	cuda_send(sc, 0, 6, cmd);
791 	mtx_unlock(&sc->sc_mutex);
792 
793 	return (0);
794 }
795 
796