/*-
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

/*
 * MacIO interface
 */
static int	cuda_probe(device_t);
static int	cuda_attach(device_t);
static int	cuda_detach(device_t);

static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
		    u_char *data, u_char poll);
static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int	cuda_poll(device_t dev);
static void	cuda_send_inbound(struct cuda_softc *sc);
static void	cuda_send_outbound(struct cuda_softc *sc);
static void	cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int	cuda_gettime(device_t dev, struct timespec *ts);
static int	cuda_settime(device_t dev, struct timespec *ts);

static device_method_t cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* bus interface, for ADB root */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime,	cuda_gettime),
	DEVMETHOD(clock_settime,	cuda_settime),

	{ 0, 0 },
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

static devclass_t cuda_devclass;

DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0);

static void	cuda_intr(void *arg);
static uint8_t	cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void	cuda_write_reg(struct cuda_softc *sc, u_int offset,
		    uint8_t value);
static void	cuda_idle(struct cuda_softc *);
static void	cuda_tip(struct cuda_softc *);
static void	cuda_clear_tip(struct cuda_softc *);
static void	cuda_in(struct cuda_softc *);
static void	cuda_out(struct cuda_softc *);
static void	cuda_toggle_ack(struct cuda_softc *);
static void	cuda_ack_off(struct cuda_softc *);
static int	cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (type == NULL || strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not set up interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84);	/* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04);	/* no interrupts while clearing */
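	/*
	 * Walk the TIP and ACK lines through one dummy transaction to step
	 * the Cuda state machine back to idle before re-enabling interrupts.
	 */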
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

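	/* Move the packet from the free list onto the outbound queue. */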
	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we are already sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;

	int i, ending, restart_send, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	restart_send = 0;
	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* complain only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	/* Spin until the command takes effect and the machine goes down. */
	while (1)
		cuda_poll(sc->sc_dev);
}

/*
 * Seconds between the Mac OS RTC epoch (1904-01-01) and the Unix epoch
 * (1970-01-01).
 */
#define	DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}