/*-
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

/*
 * MacIO interface
 */
static int	cuda_probe(device_t);
static int	cuda_attach(device_t);
static int	cuda_detach(device_t);

static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
    u_char *data, u_char poll);
static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int	cuda_poll(device_t dev);
static void	cuda_send_inbound(struct cuda_softc *sc);
static void	cuda_send_outbound(struct cuda_softc *sc);
static void	cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int cuda_gettime(device_t dev, struct timespec *ts);
static int cuda_settime(device_t dev, struct timespec *ts);

static device_method_t  cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime,	cuda_gettime),
	DEVMETHOD(clock_settime,	cuda_settime),

	DEVMETHOD_END
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

static devclass_t cuda_devclass;

DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0);

static void cuda_intr(void *arg);
static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
static void cuda_idle(struct cuda_softc *);
static void cuda_tip(struct cuda_softc *);
static void cuda_clear_tip(struct cuda_softc *);
static void cuda_in(struct cuda_softc *);
static void cuda_out(struct cuda_softc *);
static void cuda_toggle_ack(struct cuda_softc *);
static void cuda_ack_off(struct cuda_softc *);
static int cuda_intr_state(struct cuda_softc *);

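/*
 * The Cuda microcontroller sits behind a 6522-style VIA: command and
 * reply bytes travel one at a time through the VIA shift register (vSR),
 * paced by the port B handshake bits (see the helpers further down).
 */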
static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not setup interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

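	/*
	 * Three packet queues: free packet buffers, commands waiting to be
	 * sent to the Cuda, and received packets waiting to be dispatched.
	 */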
	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

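	/*
	 * Shift register control: set ACR bits 4..2 to 011, which on the
	 * 6522 selects "shift in under control of the external clock".
	 */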
	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

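	/*
	 * VIA interrupt enable register: bit 7 selects set (0x80) versus
	 * clear, and bit 2 is the shift-register interrupt, so 0x84 enables
	 * it and 0x04 (below) masks it.
	 */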
	cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

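/*
 * Low-level helpers for the VIA port B handshake lines configured in
 * cuda_attach(): PB5 ("transfer in progress") and PB4 (byte acknowledge)
 * are active-low outputs driven by the host, while PB3 is the input the
 * Cuda pulls low when it wants attention or has more bytes to deliver.
 */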
static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

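/*
 * Queue a command for the Cuda and kick off transmission if the bus is
 * idle.  When polling is requested (or during early boot), the interrupt
 * handler is additionally run by hand via cuda_poll().
 */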
static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we already are sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

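/*
 * Deliver completed reply packets: ADB data is passed up to the ADB bus,
 * while pseudo-command replies (autopoll, RTC) update driver state.  The
 * mutex is dropped around the upcalls.
 */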
static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

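/*
 * Run the interrupt handler by hand; used when interrupts cannot be
 * relied upon, e.g. during early boot and at shutdown.
 */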
static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

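/*
 * Interrupt handler: a state machine that moves one byte per interrupt
 * through the VIA shift register.  CUDA_IDLE picks up the start of an
 * unsolicited frame, CUDA_IN collects reply bytes until the Cuda
 * deasserts its request line, and CUDA_OUT shifts out the next byte of
 * a queued command.
 */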
static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;

	int i, ending, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* bitch only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

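/*
 * Wrap an ADB command in a Cuda packet.  The 16-byte buffer leaves room
 * for the Cuda type byte, the ADB command byte, and the data payload
 * (ADB register transfers carry at most 8 bytes).
 */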
static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

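/*
 * shutdown_final hook: ask the Cuda to power the machine off (or reset
 * it), then keep polling by hand until it does.
 */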
static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	while (1)
		cuda_poll(sc->sc_dev);
}

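/*
 * Seconds between the Cuda RTC epoch (1 Jan 1904) and the Unix epoch
 * (1 Jan 1970).
 */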
#define DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}