1 /*- 2 * Copyright (c) 2006 Michael Lorenz 3 * Copyright 2008 by Nathan Whitehorn 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/module.h> 37 #include <sys/bus.h> 38 #include <sys/conf.h> 39 #include <sys/kernel.h> 40 #include <sys/clock.h> 41 42 #include <dev/ofw/ofw_bus.h> 43 #include <dev/ofw/openfirm.h> 44 45 #include <machine/bus.h> 46 #include <machine/intr.h> 47 #include <machine/intr_machdep.h> 48 #include <machine/md_var.h> 49 #include <machine/pio.h> 50 #include <machine/resource.h> 51 52 #include <vm/vm.h> 53 #include <vm/pmap.h> 54 55 #include <sys/rman.h> 56 57 #include <dev/adb/adb.h> 58 59 #include "clock_if.h" 60 #include "cudavar.h" 61 #include "viareg.h" 62 63 /* 64 * MacIO interface 65 */ 66 static int cuda_probe(device_t); 67 static int cuda_attach(device_t); 68 static int cuda_detach(device_t); 69 70 static u_int cuda_adb_send(device_t dev, u_char command_byte, int len, 71 u_char *data, u_char poll); 72 static u_int cuda_adb_autopoll(device_t dev, uint16_t mask); 73 static u_int cuda_poll(device_t dev); 74 static void cuda_send_inbound(struct cuda_softc *sc); 75 static void cuda_send_outbound(struct cuda_softc *sc); 76 77 /* 78 * Clock interface 79 */ 80 static int cuda_gettime(device_t dev, struct timespec *ts); 81 static int cuda_settime(device_t dev, struct timespec *ts); 82 83 static device_method_t cuda_methods[] = { 84 /* Device interface */ 85 DEVMETHOD(device_probe, cuda_probe), 86 DEVMETHOD(device_attach, cuda_attach), 87 DEVMETHOD(device_detach, cuda_detach), 88 DEVMETHOD(device_shutdown, bus_generic_shutdown), 89 DEVMETHOD(device_suspend, bus_generic_suspend), 90 DEVMETHOD(device_resume, bus_generic_resume), 91 92 /* bus interface, for ADB root */ 93 DEVMETHOD(bus_print_child, bus_generic_print_child), 94 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 95 96 /* ADB bus interface */ 97 DEVMETHOD(adb_hb_send_raw_packet, cuda_adb_send), 98 DEVMETHOD(adb_hb_controller_poll, cuda_poll), 99 DEVMETHOD(adb_hb_set_autopoll_mask, cuda_adb_autopoll), 100 101 /* Clock interface */ 102 DEVMETHOD(clock_gettime, cuda_gettime), 103 DEVMETHOD(clock_settime, cuda_settime), 104 105 { 0, 0 }, 106 }; 107 108 static driver_t cuda_driver = { 109 "cuda", 110 cuda_methods, 111 sizeof(struct cuda_softc), 112 }; 113 114 static devclass_t cuda_devclass; 115 116 DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0); 117 DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0); 118 119 static void cuda_intr(void *arg); 120 static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset); 121 static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value); 122 static void cuda_idle(struct cuda_softc *); 123 static void cuda_tip(struct cuda_softc *); 124 static void cuda_clear_tip(struct cuda_softc *); 125 static void cuda_in(struct cuda_softc *); 126 static void cuda_out(struct cuda_softc *); 127 static void cuda_toggle_ack(struct cuda_softc *); 128 static void cuda_ack_off(struct cuda_softc *); 129 static int cuda_intr_state(struct cuda_softc *); 130 131 static int 132 cuda_probe(device_t dev) 133 { 134 const char *type = ofw_bus_get_type(dev); 135 136 if (strcmp(type, "via-cuda") != 0) 137 return (ENXIO); 138 139 device_set_desc(dev, CUDA_DEVSTR); 140 return (0); 141 } 142 143 static int 144 cuda_attach(device_t dev) 145 { 146 struct cuda_softc *sc; 147 148 volatile int i; 149 uint8_t reg; 150 phandle_t node,child; 151 152 sc = device_get_softc(dev); 153 sc->sc_dev = dev; 154 155 sc->sc_memrid = 0; 156 sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 157 &sc->sc_memrid, RF_ACTIVE); 158 159 if (sc->sc_memr == NULL) { 160 device_printf(dev, "Could not alloc mem resource!\n"); 161 return (ENXIO); 162 } 163 164 sc->sc_irqrid = 0; 165 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid, 166 RF_ACTIVE); 167 if (sc->sc_irq == NULL) { 168 device_printf(dev, "could not allocate interrupt\n"); 169 return (ENXIO); 170 } 171 172 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE 173 | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) { 174 device_printf(dev, "could not setup interrupt\n"); 175 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, 176 sc->sc_irq); 177 return (ENXIO); 178 } 179 180 mtx_init(&sc->sc_mutex,"cuda",NULL,MTX_DEF | MTX_RECURSE); 181 182 sc->sc_sent = 0; 183 sc->sc_received = 0; 184 sc->sc_waiting = 0; 185 sc->sc_polling = 0; 186 sc->sc_state = CUDA_NOTREADY; 187 sc->sc_autopoll = 0; 188 sc->sc_rtc = -1; 189 190 STAILQ_INIT(&sc->sc_inq); 191 STAILQ_INIT(&sc->sc_outq); 192 STAILQ_INIT(&sc->sc_freeq); 193 194 for (i = 0; i < CUDA_MAXPACKETS; i++) 195 STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q); 196 197 /* Init CUDA */ 198 199 reg = cuda_read_reg(sc, vDirB); 200 reg |= 0x30; /* register B bits 4 and 5: outputs */ 201 cuda_write_reg(sc, vDirB, reg); 202 203 reg = cuda_read_reg(sc, vDirB); 204 reg &= 0xf7; /* register B bit 3: input */ 205 cuda_write_reg(sc, vDirB, reg); 206 207 reg = cuda_read_reg(sc, vACR); 208 reg &= ~vSR_OUT; /* make sure SR is set to IN */ 209 cuda_write_reg(sc, vACR, reg); 210 211 cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10); 212 213 sc->sc_state = CUDA_IDLE; /* used by all types of hardware */ 214 215 cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */ 216 217 cuda_idle(sc); /* reset ADB */ 218 219 /* Reset CUDA */ 220 221 i = cuda_read_reg(sc, vSR); /* clear interrupt */ 222 cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */ 223 cuda_idle(sc); /* reset state to idle */ 224 DELAY(150); 225 cuda_tip(sc); /* signal start of frame */ 226 DELAY(150); 227 cuda_toggle_ack(sc); 228 DELAY(150); 229 cuda_clear_tip(sc); 230 DELAY(150); 231 cuda_idle(sc); /* back to idle state */ 232 i = cuda_read_reg(sc, vSR); /* clear interrupt */ 233 cuda_write_reg(sc, vIER, 0x84); /* ints ok now */ 234 235 /* Initialize child buses (ADB) */ 236 node = ofw_bus_get_node(dev); 237 238 for (child = OF_child(node); child != 0; child = OF_peer(child)) { 239 char name[32]; 240 241 memset(name, 0, sizeof(name)); 242 OF_getprop(child, "name", name, sizeof(name)); 243 244 if (bootverbose) 245 device_printf(dev, "CUDA child <%s>\n",name); 246 247 if (strncmp(name, "adb", 4) == 0) { 248 sc->adb_bus = device_add_child(dev,"adb",-1); 249 } 250 } 251 252 clock_register(dev, 1000); 253 254 return (bus_generic_attach(dev)); 255 } 256 257 static int cuda_detach(device_t dev) { 258 struct cuda_softc *sc; 259 260 sc = device_get_softc(dev); 261 262 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 263 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); 264 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr); 265 mtx_destroy(&sc->sc_mutex); 266 267 return (bus_generic_detach(dev)); 268 } 269 270 static uint8_t 271 cuda_read_reg(struct cuda_softc *sc, u_int offset) { 272 return (bus_read_1(sc->sc_memr, offset)); 273 } 274 275 static void 276 cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value) { 277 bus_write_1(sc->sc_memr, offset, value); 278 } 279 280 static void 281 cuda_idle(struct cuda_softc *sc) 282 { 283 uint8_t reg; 284 285 reg = cuda_read_reg(sc, vBufB); 286 reg |= (vPB4 | vPB5); 287 cuda_write_reg(sc, vBufB, reg); 288 } 289 290 static void 291 cuda_tip(struct cuda_softc *sc) 292 { 293 uint8_t reg; 294 295 reg = cuda_read_reg(sc, vBufB); 296 reg &= ~vPB5; 297 cuda_write_reg(sc, vBufB, reg); 298 } 299 300 static void 301 cuda_clear_tip(struct cuda_softc *sc) 302 { 303 uint8_t reg; 304 305 reg = cuda_read_reg(sc, vBufB); 306 reg |= vPB5; 307 cuda_write_reg(sc, vBufB, reg); 308 } 309 310 static void 311 cuda_in(struct cuda_softc *sc) 312 { 313 uint8_t reg; 314 315 reg = cuda_read_reg(sc, vACR); 316 reg &= ~vSR_OUT; 317 cuda_write_reg(sc, vACR, reg); 318 } 319 320 static void 321 cuda_out(struct cuda_softc *sc) 322 { 323 uint8_t reg; 324 325 reg = cuda_read_reg(sc, vACR); 326 reg |= vSR_OUT; 327 cuda_write_reg(sc, vACR, reg); 328 } 329 330 static void 331 cuda_toggle_ack(struct cuda_softc *sc) 332 { 333 uint8_t reg; 334 335 reg = cuda_read_reg(sc, vBufB); 336 reg ^= vPB4; 337 cuda_write_reg(sc, vBufB, reg); 338 } 339 340 static void 341 cuda_ack_off(struct cuda_softc *sc) 342 { 343 uint8_t reg; 344 345 reg = cuda_read_reg(sc, vBufB); 346 reg |= vPB4; 347 cuda_write_reg(sc, vBufB, reg); 348 } 349 350 static int 351 cuda_intr_state(struct cuda_softc *sc) 352 { 353 return ((cuda_read_reg(sc, vBufB) & vPB3) == 0); 354 } 355 356 static int 357 cuda_send(void *cookie, int poll, int length, uint8_t *msg) 358 { 359 struct cuda_softc *sc = cookie; 360 device_t dev = sc->sc_dev; 361 struct cuda_packet *pkt; 362 363 if (sc->sc_state == CUDA_NOTREADY) 364 return (-1); 365 366 mtx_lock(&sc->sc_mutex); 367 368 pkt = STAILQ_FIRST(&sc->sc_freeq); 369 if (pkt == NULL) { 370 mtx_unlock(&sc->sc_mutex); 371 return (-1); 372 } 373 374 pkt->len = length - 1; 375 pkt->type = msg[0]; 376 memcpy(pkt->data, &msg[1], pkt->len); 377 378 STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q); 379 STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q); 380 381 /* 382 * If we already are sending a packet, we should bail now that this 383 * one has been added to the queue. 384 */ 385 386 if (sc->sc_waiting) { 387 mtx_unlock(&sc->sc_mutex); 388 return (0); 389 } 390 391 cuda_send_outbound(sc); 392 mtx_unlock(&sc->sc_mutex); 393 394 if (sc->sc_polling || poll || cold) 395 cuda_poll(dev); 396 397 return (0); 398 } 399 400 static void 401 cuda_send_outbound(struct cuda_softc *sc) 402 { 403 struct cuda_packet *pkt; 404 405 mtx_assert(&sc->sc_mutex, MA_OWNED); 406 407 pkt = STAILQ_FIRST(&sc->sc_outq); 408 if (pkt == NULL) 409 return; 410 411 sc->sc_out_length = pkt->len + 1; 412 memcpy(sc->sc_out, &pkt->type, pkt->len + 1); 413 sc->sc_sent = 0; 414 415 STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q); 416 STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q); 417 418 sc->sc_waiting = 1; 419 420 cuda_poll(sc->sc_dev); 421 422 DELAY(150); 423 424 if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) { 425 sc->sc_state = CUDA_OUT; 426 cuda_out(sc); 427 cuda_write_reg(sc, vSR, sc->sc_out[0]); 428 cuda_ack_off(sc); 429 cuda_tip(sc); 430 } 431 } 432 433 static void 434 cuda_send_inbound(struct cuda_softc *sc) 435 { 436 device_t dev; 437 struct cuda_packet *pkt; 438 439 dev = sc->sc_dev; 440 441 mtx_lock(&sc->sc_mutex); 442 443 while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) { 444 STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q); 445 446 mtx_unlock(&sc->sc_mutex); 447 448 /* check if we have a handler for this message */ 449 switch (pkt->type) { 450 case CUDA_ADB: 451 if (pkt->len > 2) { 452 adb_receive_raw_packet(sc->adb_bus, 453 pkt->data[0],pkt->data[1], 454 pkt->len - 2,&pkt->data[2]); 455 } else { 456 adb_receive_raw_packet(sc->adb_bus, 457 pkt->data[0],pkt->data[1],0,NULL); 458 } 459 break; 460 case CUDA_PSEUDO: 461 mtx_lock(&sc->sc_mutex); 462 switch (pkt->data[1]) { 463 case CMD_AUTOPOLL: 464 sc->sc_autopoll = 1; 465 break; 466 case CMD_READ_RTC: 467 memcpy(&sc->sc_rtc, &pkt->data[2], 468 sizeof(sc->sc_rtc)); 469 wakeup(&sc->sc_rtc); 470 break; 471 case CMD_WRITE_RTC: 472 break; 473 } 474 mtx_unlock(&sc->sc_mutex); 475 break; 476 case CUDA_ERROR: 477 /* 478 * CUDA will throw errors if we miss a race between 479 * sending and receiving packets. This is already 480 * handled when we abort packet output to handle 481 * this packet in cuda_intr(). Thus, we ignore 482 * these messages. 483 */ 484 break; 485 default: 486 device_printf(dev,"unknown CUDA command %d\n", 487 pkt->type); 488 break; 489 } 490 491 mtx_lock(&sc->sc_mutex); 492 493 STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q); 494 } 495 496 mtx_unlock(&sc->sc_mutex); 497 } 498 499 static u_int 500 cuda_poll(device_t dev) 501 { 502 struct cuda_softc *sc = device_get_softc(dev); 503 504 if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) && 505 !sc->sc_waiting) 506 return (0); 507 508 cuda_intr(dev); 509 return (0); 510 } 511 512 static void 513 cuda_intr(void *arg) 514 { 515 device_t dev; 516 struct cuda_softc *sc; 517 518 int i, ending, restart_send, process_inbound; 519 uint8_t reg; 520 521 dev = (device_t)arg; 522 sc = device_get_softc(dev); 523 524 mtx_lock(&sc->sc_mutex); 525 526 restart_send = 0; 527 process_inbound = 0; 528 reg = cuda_read_reg(sc, vIFR); 529 if ((reg & vSR_INT) != vSR_INT) { 530 mtx_unlock(&sc->sc_mutex); 531 return; 532 } 533 534 cuda_write_reg(sc, vIFR, 0x7f); /* Clear interrupt */ 535 536 switch_start: 537 switch (sc->sc_state) { 538 case CUDA_IDLE: 539 /* 540 * This is an unexpected packet, so grab the first (dummy) 541 * byte, set up the proper vars, and tell the chip we are 542 * starting to receive the packet by setting the TIP bit. 543 */ 544 sc->sc_in[1] = cuda_read_reg(sc, vSR); 545 546 if (cuda_intr_state(sc) == 0) { 547 /* must have been a fake start */ 548 549 if (sc->sc_waiting) { 550 /* start over */ 551 DELAY(150); 552 sc->sc_state = CUDA_OUT; 553 sc->sc_sent = 0; 554 cuda_out(sc); 555 cuda_write_reg(sc, vSR, sc->sc_out[1]); 556 cuda_ack_off(sc); 557 cuda_tip(sc); 558 } 559 break; 560 } 561 562 cuda_in(sc); 563 cuda_tip(sc); 564 565 sc->sc_received = 1; 566 sc->sc_state = CUDA_IN; 567 break; 568 569 case CUDA_IN: 570 sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR); 571 ending = 0; 572 573 if (sc->sc_received > 255) { 574 /* bitch only once */ 575 if (sc->sc_received == 256) { 576 device_printf(dev,"input overflow\n"); 577 ending = 1; 578 } 579 } else 580 sc->sc_received++; 581 582 /* intr off means this is the last byte (end of frame) */ 583 if (cuda_intr_state(sc) == 0) { 584 ending = 1; 585 } else { 586 cuda_toggle_ack(sc); 587 } 588 589 if (ending == 1) { /* end of message? */ 590 struct cuda_packet *pkt; 591 592 /* reset vars and signal the end of this frame */ 593 cuda_idle(sc); 594 595 /* Queue up the packet */ 596 pkt = STAILQ_FIRST(&sc->sc_freeq); 597 if (pkt != NULL) { 598 /* If we have a free packet, process it */ 599 600 pkt->len = sc->sc_received - 2; 601 pkt->type = sc->sc_in[1]; 602 memcpy(pkt->data, &sc->sc_in[2], pkt->len); 603 604 STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q); 605 STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q); 606 607 process_inbound = 1; 608 } 609 610 sc->sc_state = CUDA_IDLE; 611 sc->sc_received = 0; 612 613 /* 614 * If there is something waiting to be sent out, 615 * set everything up and send the first byte. 616 */ 617 if (sc->sc_waiting == 1) { 618 DELAY(1500); /* required */ 619 sc->sc_sent = 0; 620 sc->sc_state = CUDA_OUT; 621 622 /* 623 * If the interrupt is on, we were too slow 624 * and the chip has already started to send 625 * something to us, so back out of the write 626 * and start a read cycle. 627 */ 628 if (cuda_intr_state(sc)) { 629 cuda_in(sc); 630 cuda_idle(sc); 631 sc->sc_sent = 0; 632 sc->sc_state = CUDA_IDLE; 633 sc->sc_received = 0; 634 DELAY(150); 635 goto switch_start; 636 } 637 638 /* 639 * If we got here, it's ok to start sending 640 * so load the first byte and tell the chip 641 * we want to send. 642 */ 643 cuda_out(sc); 644 cuda_write_reg(sc, vSR, 645 sc->sc_out[sc->sc_sent]); 646 cuda_ack_off(sc); 647 cuda_tip(sc); 648 } 649 } 650 break; 651 652 case CUDA_OUT: 653 i = cuda_read_reg(sc, vSR); /* reset SR-intr in IFR */ 654 655 sc->sc_sent++; 656 if (cuda_intr_state(sc)) { /* ADB intr low during write */ 657 cuda_in(sc); /* make sure SR is set to IN */ 658 cuda_idle(sc); 659 sc->sc_sent = 0; /* must start all over */ 660 sc->sc_state = CUDA_IDLE; /* new state */ 661 sc->sc_received = 0; 662 sc->sc_waiting = 1; /* must retry when done with 663 * read */ 664 DELAY(150); 665 goto switch_start; /* process next state right 666 * now */ 667 break; 668 } 669 if (sc->sc_out_length == sc->sc_sent) { /* check for done */ 670 sc->sc_waiting = 0; /* done writing */ 671 sc->sc_state = CUDA_IDLE; /* signal bus is idle */ 672 cuda_in(sc); 673 cuda_idle(sc); 674 } else { 675 /* send next byte */ 676 cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]); 677 cuda_toggle_ack(sc); /* signal byte ready to 678 * shift */ 679 } 680 break; 681 682 case CUDA_NOTREADY: 683 break; 684 685 default: 686 break; 687 } 688 689 mtx_unlock(&sc->sc_mutex); 690 691 if (process_inbound) 692 cuda_send_inbound(sc); 693 694 mtx_lock(&sc->sc_mutex); 695 /* If we have another packet waiting, set it up */ 696 if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE) 697 cuda_send_outbound(sc); 698 699 mtx_unlock(&sc->sc_mutex); 700 701 } 702 703 static u_int 704 cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data, 705 u_char poll) 706 { 707 struct cuda_softc *sc = device_get_softc(dev); 708 uint8_t packet[16]; 709 int i; 710 711 /* construct an ADB command packet and send it */ 712 packet[0] = CUDA_ADB; 713 packet[1] = command_byte; 714 for (i = 0; i < len; i++) 715 packet[i + 2] = data[i]; 716 717 cuda_send(sc, poll, len + 2, packet); 718 719 return (0); 720 } 721 722 static u_int 723 cuda_adb_autopoll(device_t dev, uint16_t mask) { 724 struct cuda_softc *sc = device_get_softc(dev); 725 726 uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0}; 727 728 mtx_lock(&sc->sc_mutex); 729 730 if (cmd[2] == sc->sc_autopoll) { 731 mtx_unlock(&sc->sc_mutex); 732 return (0); 733 } 734 735 sc->sc_autopoll = -1; 736 cuda_send(sc, 1, 3, cmd); 737 738 mtx_unlock(&sc->sc_mutex); 739 740 return (0); 741 } 742 743 #define DIFF19041970 2082844800 744 745 static int 746 cuda_gettime(device_t dev, struct timespec *ts) 747 { 748 struct cuda_softc *sc = device_get_softc(dev); 749 uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC}; 750 751 mtx_lock(&sc->sc_mutex); 752 sc->sc_rtc = -1; 753 cuda_send(sc, 1, 2, cmd); 754 if (sc->sc_rtc == -1) 755 mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100); 756 757 ts->tv_sec = sc->sc_rtc - DIFF19041970; 758 ts->tv_nsec = 0; 759 mtx_unlock(&sc->sc_mutex); 760 761 return (0); 762 } 763 764 static int 765 cuda_settime(device_t dev, struct timespec *ts) 766 { 767 struct cuda_softc *sc = device_get_softc(dev); 768 uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0}; 769 uint32_t sec; 770 771 sec = ts->tv_sec + DIFF19041970; 772 memcpy(&cmd[2], &sec, sizeof(sec)); 773 774 mtx_lock(&sc->sc_mutex); 775 cuda_send(sc, 0, 6, cmd); 776 mtx_unlock(&sc->sc_mutex); 777 778 return (0); 779 } 780 781