/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2006 Michael Lorenz
 * Copyright 2008 by Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/clock.h>
#include <sys/reboot.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/rman.h>

#include <dev/adb/adb.h>

#include "clock_if.h"
#include "cudavar.h"
#include "viareg.h"

/*
 * MacIO interface
 */
static int	cuda_probe(device_t);
static int	cuda_attach(device_t);
static int	cuda_detach(device_t);

static u_int	cuda_adb_send(device_t dev, u_char command_byte, int len,
		    u_char *data, u_char poll);
static u_int	cuda_adb_autopoll(device_t dev, uint16_t mask);
static u_int	cuda_poll(device_t dev);
static void	cuda_send_inbound(struct cuda_softc *sc);
static void	cuda_send_outbound(struct cuda_softc *sc);
static void	cuda_shutdown(void *xsc, int howto);

/*
 * Clock interface
 */
static int	cuda_gettime(device_t dev, struct timespec *ts);
static int	cuda_settime(device_t dev, struct timespec *ts);

static device_method_t cuda_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cuda_probe),
	DEVMETHOD(device_attach,	cuda_attach),
	DEVMETHOD(device_detach,	cuda_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* ADB bus interface */
	DEVMETHOD(adb_hb_send_raw_packet,	cuda_adb_send),
	DEVMETHOD(adb_hb_controller_poll,	cuda_poll),
	DEVMETHOD(adb_hb_set_autopoll_mask,	cuda_adb_autopoll),

	/* Clock interface */
	DEVMETHOD(clock_gettime,	cuda_gettime),
	DEVMETHOD(clock_settime,	cuda_settime),

	DEVMETHOD_END
};

static driver_t cuda_driver = {
	"cuda",
	cuda_methods,
	sizeof(struct cuda_softc),
};

static devclass_t cuda_devclass;

DRIVER_MODULE(cuda, macio, cuda_driver, cuda_devclass, 0, 0);
DRIVER_MODULE(adb, cuda, adb_driver, adb_devclass, 0, 0);

static void	cuda_intr(void *arg);
static uint8_t	cuda_read_reg(struct cuda_softc *sc, u_int offset);
static void	cuda_write_reg(struct cuda_softc *sc, u_int offset,
		    uint8_t value);
static void	cuda_idle(struct cuda_softc *);
static void	cuda_tip(struct cuda_softc *);
static void	cuda_clear_tip(struct cuda_softc *);
static void	cuda_in(struct cuda_softc *);
static void	cuda_out(struct cuda_softc *);
static void	cuda_toggle_ack(struct cuda_softc *);
static void	cuda_ack_off(struct cuda_softc *);
static int	cuda_intr_state(struct cuda_softc *);

static int
cuda_probe(device_t dev)
{
	const char *type = ofw_bus_get_type(dev);

	if (strcmp(type, "via-cuda") != 0)
		return (ENXIO);

	device_set_desc(dev, CUDA_DEVSTR);
	return (0);
}

static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node, child;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
		device_printf(dev, "could not setup interrupt\n");
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid,
		    sc->sc_memr);
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
		    sc->sc_irq);
		return (ENXIO);
	}

	mtx_init(&sc->sc_mutex, "cuda", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84);	/* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */
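	/*
	 * The reset sequence below drives the same VIA lines that the
	 * helpers further down manipulate: cuda_tip()/cuda_clear_tip()
	 * toggle vPB5 (transfer in progress), cuda_toggle_ack() pulses
	 * vPB4 (byte acknowledge), and reads of vSR clear the pending
	 * shift-register interrupt.
	 */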

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04);	/* no interrupts while clearing */
	cuda_idle(sc);			/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);			/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);			/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n", name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev, "adb", -1);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}

static int
cuda_detach(device_t dev)
{
	struct cuda_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	mtx_destroy(&sc->sc_mutex);

	return (bus_generic_detach(dev));
}

static uint8_t
cuda_read_reg(struct cuda_softc *sc, u_int offset)
{
	return (bus_read_1(sc->sc_memr, offset));
}

static void
cuda_write_reg(struct cuda_softc *sc, u_int offset, uint8_t value)
{
	bus_write_1(sc->sc_memr, offset, value);
}

/* Release both handshake lines: return the bus to its idle state. */
static void
cuda_idle(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= (vPB4 | vPB5);
	cuda_write_reg(sc, vBufB, reg);
}

/* Pull vPB5 low: signal that a transfer is in progress. */
static void
cuda_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg &= ~vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

/* Raise vPB5 again: end of transfer. */
static void
cuda_clear_tip(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB5;
	cuda_write_reg(sc, vBufB, reg);
}

/* Switch the VIA shift register to input (receive from CUDA). */
static void
cuda_in(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

/* Switch the VIA shift register to output (send to CUDA). */
static void
cuda_out(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vACR);
	reg |= vSR_OUT;
	cuda_write_reg(sc, vACR, reg);
}

/* Toggle vPB4 to acknowledge a byte. */
static void
cuda_toggle_ack(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg ^= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

/* Drive vPB4 high (acknowledge deasserted). */
static void
cuda_ack_off(struct cuda_softc *sc)
{
	uint8_t reg;

	reg = cuda_read_reg(sc, vBufB);
	reg |= vPB4;
	cuda_write_reg(sc, vBufB, reg);
}

/* Nonzero while CUDA is requesting attention (vPB3 low). */
static int
cuda_intr_state(struct cuda_softc *sc)
{
	return ((cuda_read_reg(sc, vBufB) & vPB3) == 0);
}

static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
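		/* All packet buffers are in use; this message is dropped. */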
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we are already sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}

static void
cuda_send_outbound(struct cuda_softc *sc)
{
	struct cuda_packet *pkt;

	mtx_assert(&sc->sc_mutex, MA_OWNED);

	pkt = STAILQ_FIRST(&sc->sc_outq);
	if (pkt == NULL)
		return;

	sc->sc_out_length = pkt->len + 1;
	memcpy(sc->sc_out, &pkt->type, pkt->len + 1);
	sc->sc_sent = 0;

	STAILQ_REMOVE_HEAD(&sc->sc_outq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);

	sc->sc_waiting = 1;

	cuda_poll(sc->sc_dev);

	DELAY(150);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc)) {
		sc->sc_state = CUDA_OUT;
		cuda_out(sc);
		cuda_write_reg(sc, vSR, sc->sc_out[0]);
		cuda_ack_off(sc);
		cuda_tip(sc);
	}
}

static void
cuda_send_inbound(struct cuda_softc *sc)
{
	device_t dev;
	struct cuda_packet *pkt;

	dev = sc->sc_dev;

	mtx_lock(&sc->sc_mutex);

	while ((pkt = STAILQ_FIRST(&sc->sc_inq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_inq, pkt_q);

		mtx_unlock(&sc->sc_mutex);

		/* check if we have a handler for this message */
		switch (pkt->type) {
		case CUDA_ADB:
			if (pkt->len > 2) {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1],
				    pkt->len - 2, &pkt->data[2]);
			} else {
				adb_receive_raw_packet(sc->adb_bus,
				    pkt->data[0], pkt->data[1], 0, NULL);
			}
			break;
		case CUDA_PSEUDO:
			mtx_lock(&sc->sc_mutex);
			switch (pkt->data[1]) {
			case CMD_AUTOPOLL:
				sc->sc_autopoll = 1;
				break;
			case CMD_READ_RTC:
				memcpy(&sc->sc_rtc, &pkt->data[2],
				    sizeof(sc->sc_rtc));
				wakeup(&sc->sc_rtc);
				break;
			case CMD_WRITE_RTC:
				break;
			}
			mtx_unlock(&sc->sc_mutex);
			break;
		case CUDA_ERROR:
			/*
			 * CUDA will throw errors if we miss a race between
			 * sending and receiving packets. This is already
			 * handled when we abort packet output to handle
			 * this packet in cuda_intr(). Thus, we ignore
			 * these messages.
			 */
			break;
		default:
			device_printf(dev, "unknown CUDA command %d\n",
			    pkt->type);
			break;
		}

		mtx_lock(&sc->sc_mutex);

		STAILQ_INSERT_TAIL(&sc->sc_freeq, pkt, pkt_q);
	}

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_poll(device_t dev)
{
	struct cuda_softc *sc = device_get_softc(dev);

	if (sc->sc_state == CUDA_IDLE && !cuda_intr_state(sc) &&
	    !sc->sc_waiting)
		return (0);

	cuda_intr(dev);
	return (0);
}

static void
cuda_intr(void *arg)
{
	device_t dev;
	struct cuda_softc *sc;

	int i, ending, restart_send, process_inbound;
	uint8_t reg;

	dev = (device_t)arg;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	restart_send = 0;
	process_inbound = 0;
	reg = cuda_read_reg(sc, vIFR);
	if ((reg & vSR_INT) != vSR_INT) {
		mtx_unlock(&sc->sc_mutex);
		return;
	}

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear interrupt */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);

		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */

			if (sc->sc_waiting) {
				/* start over */
				DELAY(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		ending = 0;

		if (sc->sc_received > 255) {
			/* warn only once */
			if (sc->sc_received == 256) {
				device_printf(dev, "input overflow\n");
				ending = 1;
			}
		} else
			sc->sc_received++;

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
		} else {
			cuda_toggle_ack(sc);
		}

		if (ending == 1) {	/* end of message? */
			struct cuda_packet *pkt;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* Queue up the packet */
			pkt = STAILQ_FIRST(&sc->sc_freeq);
			if (pkt != NULL) {
				/* If we have a free packet, process it */

				pkt->len = sc->sc_received - 2;
				pkt->type = sc->sc_in[1];
				memcpy(pkt->data, &sc->sc_in[2], pkt->len);

				STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
				STAILQ_INSERT_TAIL(&sc->sc_inq, pkt, pkt_q);

				process_inbound = 1;
			}

			sc->sc_state = CUDA_IDLE;
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {
				DELAY(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					DELAY(150);
					goto switch_start;
				}

				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			DELAY(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */
			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
						 * shift */
		}
		break;

	case CUDA_NOTREADY:
		break;

	default:
		break;
	}

	mtx_unlock(&sc->sc_mutex);

	if (process_inbound)
		cuda_send_inbound(sc);

	mtx_lock(&sc->sc_mutex);
	/* If we have another packet waiting, set it up */
	if (!sc->sc_waiting && sc->sc_state == CUDA_IDLE)
		cuda_send_outbound(sc);

	mtx_unlock(&sc->sc_mutex);
}

static u_int
cuda_adb_send(device_t dev, u_char command_byte, int len, u_char *data,
    u_char poll)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t packet[16];
	int i;

	/* construct an ADB command packet and send it */
	packet[0] = CUDA_ADB;
	packet[1] = command_byte;
	for (i = 0; i < len; i++)
		packet[i + 2] = data[i];

	cuda_send(sc, poll, len + 2, packet);

	return (0);
}

static u_int
cuda_adb_autopoll(device_t dev, uint16_t mask)
{
	struct cuda_softc *sc = device_get_softc(dev);

	uint8_t cmd[] = {CUDA_PSEUDO, CMD_AUTOPOLL, mask != 0};

	mtx_lock(&sc->sc_mutex);

	if (cmd[2] == sc->sc_autopoll) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	sc->sc_autopoll = -1;
	cuda_send(sc, 1, 3, cmd);

	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static void
cuda_shutdown(void *xsc, int howto)
{
	struct cuda_softc *sc = xsc;
	uint8_t cmd[] = {CUDA_PSEUDO, 0};

	cmd[1] = (howto & RB_HALT) ? CMD_POWEROFF : CMD_RESET;
	cuda_poll(sc->sc_dev);
	cuda_send(sc, 1, 2, cmd);

	while (1)
		cuda_poll(sc->sc_dev);
}

/* Seconds between the Mac OS epoch (1904) and the Unix epoch (1970). */
#define DIFF19041970	2082844800

static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}

static int
cuda_settime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_WRITE_RTC, 0, 0, 0, 0};
	uint32_t sec;

	sec = ts->tv_sec + DIFF19041970;
	memcpy(&cmd[2], &sec, sizeof(sec));

	mtx_lock(&sc->sc_mutex);
	cuda_send(sc, 0, 6, cmd);
	mtx_unlock(&sc->sc_mutex);

	return (0);
}