/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

/*
 * MacIO interface
 */
static int cuda_probe(device_t);
static int cuda_attach(device_t);
static int cuda_detach(device_t);

static u_int cuda_adb_send(device_t dev, u_char command_byte, int len,
    u_char *data, u_char poll);
static u_int cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int cuda_poll(device_t dev);
static void cuda_send_inbound(struct cuda_softc *sc);
static void cuda_send_outbound(struct cuda_softc *sc);
static void cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int cuda_gettime(device_t dev, struct timespec *ts);
static int cuda_settime(device_t dev, struct timespec *ts);

static device_method_t cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, cuda_probe),
	DEVMETHOD(device_attach, cuda_attach),
	DEVMETHOD(device_detach, cuda_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet, cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll, cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask, cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime, cuda_gettime),
	DEVMETHOD(clock_settime, cuda_settime),

	DEVMETHOD_END
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

DRIVER_MODULE(cuda, macio, cuda_driver, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, 0, 0);

static void cuda_intr(void *arg);
static uint8_t cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value);
static void cuda_idle(struct cuda_softc *);
static void cuda_tip(struct cuda_softc *);
static void cuda_clear_tip(struct cuda_softc *);
static void cuda_in(struct cuda_softc *);
static void cuda_out(struct cuda_softc *);
static void cuda_toggle_ack(struct cuda_softc *);
static void cuda_ack_off(struct cuda_softc *);
static int cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not setup interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	/* shift register mode: shift in under external clock (ACR[4:2] = 011) */
	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84);	/* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
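	/*
	 * With the shift-register interrupt masked, walk the bus through an
	 * idle -> TIP -> ACK-toggle -> idle sequence to abort anything the
	 * Cuda may have in flight, then clear the stale interrupt and
	 * re-enable it.
	 */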
	cuda_write_reg(sc, vIER, 0x04);	/* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we already are sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

/*
 * Shift-register state machine: CUDA_IDLE fields an unsolicited frame from
 * the chip, CUDA_IN clocks bytes in until the interrupt line drops at the
 * end of the frame, and CUDA_OUT clocks a queued packet out one byte per
 * interrupt.
 */
static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;
	int ending, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* complain only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	while (1)
		cuda_poll(sc->sc_dev);
}

/* Seconds between the Mac OS epoch (1 Jan 1904) and the Unix epoch (1 Jan 1970) */
#define DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}