/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

/*
 * MacIO interface
 */
static int cuda_probe(device_t);
static int cuda_attach(device_t);
static int cuda_detach(device_t);

static u_int cuda_adb_send(device_t dev, u_char command_byte, int len,
    u_char *data, u_char poll);
static u_int cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int cuda_poll(device_t dev);
static void cuda_send_inbound(struct cuda_softc *sc);
static void cuda_send_outbound(struct cuda_softc *sc);
static void cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int cuda_gettime(device_t dev, struct timespec *ts);
static int cuda_settime(device_t dev, struct timespec *ts);

static device_method_t cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime,	cuda_gettime),
	DEVMETHOD(clock_settime,	cuda_settime),

	DEVMETHOD_END
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

DRIVER_MODULE(cuda, macio, cuda_driver, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, 0, 0);

static void cuda_intr(void *arg);
static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
static void cuda_idle(struct cuda_softc *);
static void cuda_tip(struct cuda_softc *);
static void cuda_clear_tip(struct cuda_softc *);
static void cuda_in(struct cuda_softc *);
static void cuda_out(struct cuda_softc *);
static void cuda_toggle_ack(struct cuda_softc *);
static void cuda_ack_off(struct cuda_softc *);
static int cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (type == NULL || strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not setup interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84);	/* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04);	/* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb",
			    DEVICE_UNIT_ANY);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	bus_attach_children(dev);
	return (0);
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (0);
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

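/*
 * Low-level helpers for the handshake lines on VIA port B.  As the code
 * below shows, vPB5 and vPB4 are driven by the host (vPB5 is the
 * transfer-in-progress line and vPB4 the byte-acknowledge line, usually
 * called TIP and TACK in Cuda documentation), while vPB3 is driven by the
 * Cuda to request a transfer (TREQ, active low); cuda_intr_state() reports
 * whether that request is currently asserted.
 */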
static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

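/*
 * Queue a message for transmission.  msg[0] is the Cuda packet type
 * (CUDA_ADB or CUDA_PSEUDO) and the remaining length - 1 bytes are the
 * payload.  If no transmission is already in progress the write is started
 * immediately; when polling is requested, or the kernel is still cold, the
 * transfer is driven synchronously through cuda_poll().
 */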
static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we already are sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

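/*
 * Copy the packet at the head of the output queue into sc_out and, if the
 * bus is idle and the Cuda is not already requesting a transfer, start the
 * write cycle by loading the first byte into the shift register.
 */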
static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

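/*
 * Drain the input queue and dispatch each completed packet: ADB replies
 * are passed up to the ADB bus, pseudo-command replies update driver state
 * (autopoll flag, RTC value), and error packets are ignored.  The mutex is
 * dropped around the upcall into the ADB code.
 */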
static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

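/*
 * Interrupt handler and transfer state machine.  Each shift-register
 * interrupt moves one byte and advances sc_state:
 *
 *   CUDA_IDLE  an unsolicited transfer request from the Cuda starts a read
 *              (or, on a false start, a pending write is begun instead);
 *   CUDA_IN    bytes accumulate in sc_in until the Cuda drops its request
 *              line, then the frame is queued for cuda_send_inbound();
 *   CUDA_OUT   bytes from sc_out are shifted out; if the Cuda asserts its
 *              request mid-write, the write is aborted and retried once
 *              the incoming frame has been read.
 */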
static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;
	int ending, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* complain only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

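/*
 * ADB interface: wrap an ADB command byte and its data in a CUDA_ADB
 * packet and hand it to cuda_send().
 */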
static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

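/*
 * Autopoll control.  The CMD_AUTOPOLL pseudo command only takes an on/off
 * flag, so the per-device ADB mask is reduced to a boolean here.
 * sc_autopoll is set to -1 while the request is in flight so that a
 * repeated request is not short-circuited before the Cuda's reply has been
 * processed in cuda_send_inbound().
 */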
static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

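/*
 * shutdown_final handler: ask the Cuda to power the machine off or to
 * restart it (plain halts are left alone), then spin in cuda_poll() so the
 * command is pushed out without relying on interrupt delivery while the
 * machine goes down.
 */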
static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	if ((howto & RB_POWEROFF) != 0)
		cmd[1] = CMD_POWEROFF;
	else if ((howto & RB_HALT) == 0)
		cmd[1] = CMD_RESET;
	else
		return;

	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	while (1)
		cuda_poll(sc->sc_dev);
}

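/*
 * The Cuda RTC counts seconds from the Mac OS epoch, 1904-01-01; Unix time
 * starts at 1970-01-01.  The difference is 24107 days, i.e. 2082844800
 * seconds.
 */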
#define DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}