/*-
 * Copyright (c) 2008 Benno Rice. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for SMSC LAN91C111, may work for older variants.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_mib.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/smc/if_smcreg.h>
#include <dev/smc/if_smcvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define	SMC_LOCK(sc)		mtx_lock(&(sc)->smc_mtx)
#define	SMC_UNLOCK(sc)		mtx_unlock(&(sc)->smc_mtx)
#define	SMC_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->smc_mtx, MA_OWNED)

#define	SMC_INTR_PRIORITY	0
#define	SMC_RX_PRIORITY		5
#define	SMC_TX_PRIORITY		10

devclass_t	smc_devclass;

static const char *smc_chip_ids[16] = {
	NULL, NULL, NULL,
	/* 3 */ "SMSC LAN91C90 or LAN91C92",
	/* 4 */ "SMSC LAN91C94",
	/* 5 */ "SMSC LAN91C95",
	/* 6 */ "SMSC LAN91C96",
	/* 7 */ "SMSC LAN91C100",
	/* 8 */ "SMSC LAN91C100FD",
	/* 9 */ "SMSC LAN91C110FD or LAN91C111FD",
	NULL, NULL, NULL,
	NULL, NULL, NULL
};

static void	smc_init(void *);
static void	smc_start(struct ifnet *);
static void	smc_stop(struct smc_softc *);
static int	smc_ioctl(struct ifnet *, u_long, caddr_t);

static void	smc_init_locked(struct smc_softc *);
static void	smc_start_locked(struct ifnet *);
static void	smc_reset(struct smc_softc *);
static int	smc_mii_ifmedia_upd(struct ifnet *);
static void	smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	smc_mii_tick(void *);
static void	smc_mii_mediachg(struct smc_softc *);
static int	smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long);

static void	smc_task_intr(void *, int);
static void	smc_task_rx(void *, int);
static void	smc_task_tx(void *, int);

static driver_filter_t	smc_intr;
static timeout_t	smc_watchdog;
#ifdef DEVICE_POLLING
static poll_handler_t	smc_poll;
#endif
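
/*
 * The LAN91C111 exposes a small window of banked registers.  The Bank
 * Select Register (BSR) is visible at the same offset in every bank;
 * everything else is only meaningful once the right bank (0-3) has been
 * selected, which is why the helpers below and the KASSERTs elsewhere in
 * this file are careful about which bank is current.
 */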

static __inline void
smc_select_bank(struct smc_softc *sc, uint16_t bank)
{

	bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
}

/* Never call this when not in bank 2. */
static __inline void
smc_mmu_wait(struct smc_softc *sc)
{

	KASSERT((bus_read_2(sc->smc_reg, BSR) &
	    BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2",
	    device_get_nameunit(sc->smc_dev)));
	while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY)
		;
}

static __inline uint8_t
smc_read_1(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_1(sc->smc_reg, offset));
}

static __inline void
smc_write_1(struct smc_softc *sc, bus_addr_t offset, uint8_t val)
{

	bus_write_1(sc->smc_reg, offset, val);
}

static __inline uint16_t
smc_read_2(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_2(sc->smc_reg, offset));
}

static __inline void
smc_write_2(struct smc_softc *sc, bus_addr_t offset, uint16_t val)
{

	bus_write_2(sc->smc_reg, offset, val);
}

static __inline void
smc_read_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_read_multi_2(sc->smc_reg, offset, datap, count);
}

static __inline void
smc_write_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_write_multi_2(sc->smc_reg, offset, datap, count);
}

int
smc_probe(device_t dev)
{
	int			rid, type, error;
	uint16_t		val;
	struct smc_softc	*sc;
	struct resource		*reg;

	sc = device_get_softc(dev);
	rid = 0;
	type = SYS_RES_IOPORT;
	error = 0;

	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	reg = bus_alloc_resource(dev, type, &rid, 0, ~0, 16, RF_ACTIVE);
	if (reg == NULL) {
		if (bootverbose)
			device_printf(dev,
			    "could not allocate I/O resource for probe\n");
		return (ENXIO);
	}

	/* Check for the identification value in the BSR. */
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev, "identification value not in BSR\n");
		error = ENXIO;
		goto done;
	}

	/*
	 * Try switching banks and make sure we still get the identification
	 * value.
	 */
	bus_write_2(reg, BSR, 0);
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev,
			    "identification value not in BSR after write\n");
		error = ENXIO;
		goto done;
	}

#if 0
	/* Check the BAR. */
	bus_write_2(reg, BSR, 1);
	val = bus_read_2(reg, BAR);
	val = BAR_ADDRESS(val);
	if (rman_get_start(reg) != val) {
		if (bootverbose)
			device_printf(dev, "BAR address %x does not match "
			    "I/O resource address %lx\n", val,
			    rman_get_start(reg));
		error = ENXIO;
		goto done;
	}
#endif

	/* Compare REV against known chip revisions. */
	bus_write_2(reg, BSR, 3);
	val = bus_read_2(reg, REV);
	val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	if (smc_chip_ids[val] == NULL) {
		if (bootverbose)
			device_printf(dev, "Unknown chip revision: %d\n", val);
		error = ENXIO;
		goto done;
	}

	device_set_desc(dev, smc_chip_ids[val]);

done:
	bus_release_resource(dev, type, rid, reg);
	return (error);
}

int
smc_attach(device_t dev)
{
	int			type, error;
	uint16_t		val;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct smc_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		mii_phy_probe(dev, &sc->smc_miibus, smc_mii_ifmedia_upd,
		    smc_mii_ifmedia_sts);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}

int
smc_detach(device_t dev)
{
	int			type;
	struct smc_softc	*sc;

	sc = device_get_softc(dev);
	SMC_LOCK(sc);
	smc_stop(sc);
	SMC_UNLOCK(sc);

	if (sc->smc_ifp != NULL) {
		ether_ifdetach(sc->smc_ifp);
	}

	callout_drain(&sc->smc_watchdog);
	callout_drain(&sc->smc_mii_tick_ch);

#ifdef DEVICE_POLLING
	if (sc->smc_ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->smc_ifp);
#endif

	if (sc->smc_ih != NULL)
		bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);

	if (sc->smc_tq != NULL) {
		taskqueue_drain(sc->smc_tq, &sc->smc_intr);
		taskqueue_drain(sc->smc_tq, &sc->smc_rx);
		taskqueue_drain(sc->smc_tq, &sc->smc_tx);
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}

	if (sc->smc_ifp != NULL) {
		if_free(sc->smc_ifp);
	}

	if (sc->smc_miibus != NULL) {
		device_delete_child(sc->smc_dev, sc->smc_miibus);
		bus_generic_detach(sc->smc_dev);
	}

	if (sc->smc_reg != NULL) {
		type = SYS_RES_IOPORT;
		if (sc->smc_usemem)
			type = SYS_RES_MEMORY;

		bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
		    sc->smc_reg);
	}

	if (sc->smc_irq != NULL)
		bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
		    sc->smc_irq);

	if (mtx_initialized(&sc->smc_mtx))
		mtx_destroy(&sc->smc_mtx);

	return (0);
}

static void
smc_start(struct ifnet *ifp)
{
	struct smc_softc	*sc;

	sc = ifp->if_softc;
	SMC_LOCK(sc);
	smc_start_locked(ifp);
	SMC_UNLOCK(sc);
}
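
/*
 * Transmit path: smc_start_locked() asks the chip's MMU to allocate
 * packet memory (in 256 byte pages) for the head of the send queue.  If
 * the allocation completes quickly the TX task is enqueued directly;
 * otherwise ALLOC_INT is unmasked and the interrupt task kicks the TX
 * task once the controller has the memory ready.
 */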

static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc	*sc;
	struct mbuf		*m;
	u_int			len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return;		/* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}

static void
smc_task_tx(void *context, int pending)
{
	struct ifnet		*ifp;
	struct smc_softc	*sc;
	struct mbuf		*m, *m0;
	u_int			packet, len, last_len;
	uint8_t			*data;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);

	if (sc->smc_pending == NULL) {
		SMC_UNLOCK(sc);
		goto next_packet;
	}

	m = m0 = sc->smc_pending;
	sc->smc_pending = NULL;
	smc_select_bank(sc, 2);

	/*
	 * Check the allocation result.
	 */
	packet = smc_read_1(sc, ARR);

	/*
	 * If the allocation failed, requeue the packet and retry.
	 */
	if (packet & ARR_FAILED) {
		IFQ_DRV_PREPEND(&ifp->if_snd, m);
		++ifp->if_oerrors;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		smc_start_locked(ifp);
		SMC_UNLOCK(sc);
		return;
	}

	/*
	 * Tell the device to write to our packet number.
	 */
	smc_write_1(sc, PNR, packet);
	smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR);

	/*
	 * Tell the device how long the packet is (including control data).
	 */
	len = m_length(m, NULL);
	len += PKT_CTRL_DATA_LEN;
	smc_write_2(sc, DATA0, 0);
	smc_write_2(sc, DATA0, len);

	/*
	 * Push the data out to the device, remembering the length of the
	 * last mbuf so we can find the trailing odd byte afterwards.
	 */
	data = NULL;
	last_len = 0;
	for (; m != NULL; m = m->m_next) {
		data = mtod(m, uint8_t *);
		smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2);
		last_len = m->m_len;
	}

	/*
	 * Push out the control byte and the odd byte if needed.
	 */
	if ((len & 1) != 0 && data != NULL)
		smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]);
	else
		smc_write_2(sc, DATA0, 0);

	/*
	 * Unmask the TX empty interrupt.
	 */
	sc->smc_mask |= TX_EMPTY_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	/*
	 * Enqueue the packet.
	 */
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE);
	callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc);

	/*
	 * Finish up.
	 */
	ifp->if_opackets++;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	SMC_UNLOCK(sc);
	BPF_MTAP(ifp, m0);
	m_freem(m0);

next_packet:
	/*
	 * See if there's anything else to do.
	 */
	smc_start(ifp);
}
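
/*
 * Receive path: for each packet in the chip's RX FIFO the driver reads a
 * status word and a byte count (the count covers the status word, the
 * count itself and a trailing control word, hence the "len -= 6" below),
 * copies the frame into an mbuf cluster, releases the packet memory back
 * to the MMU and hands the completed chain up to the stack outside the
 * driver lock.
 */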

static void
smc_task_rx(void *context, int pending)
{
	u_int			packet, status, len;
	uint8_t			*data;
	struct ifnet		*ifp;
	struct smc_softc	*sc;
	struct mbuf		*m, *mhead, *mtail;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;
	mhead = mtail = NULL;

	SMC_LOCK(sc);

	packet = smc_read_1(sc, FIFO_RX);
	while ((packet & FIFO_EMPTY) == 0) {
		/*
		 * Grab an mbuf and attach a cluster.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			break;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			break;
		}

		/*
		 * Point to the start of the packet.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);

		/*
		 * Grab status and packet length.
		 */
		status = smc_read_2(sc, DATA0);
		len = smc_read_2(sc, DATA0) & RX_LEN_MASK;
		len -= 6;
		if (status & RX_ODDFRM)
			len += 1;

		/*
		 * Check for errors.
		 */
		if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) {
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
			ifp->if_ierrors++;
			m_freem(m);
			break;
		}

		/*
		 * Set the mbuf up the way we want it.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len + 2;	/* XXX: Is this right? */
		m_adj(m, ETHER_ALIGN);

		/*
		 * Pull the packet out of the device.  Make sure we're in the
		 * right bank first as things may have changed while we were
		 * allocating our mbuf.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
		data = mtod(m, uint8_t *);
		smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1);
		if (len & 1) {
			data += len & ~1;
			*data = smc_read_1(sc, DATA0);
		}

		/*
		 * Tell the device we're done.
		 */
		smc_mmu_wait(sc);
		smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
		if (m == NULL) {
			break;
		}

		if (mhead == NULL) {
			mhead = mtail = m;
			m->m_next = NULL;
		} else {
			mtail->m_next = m;
			mtail = m;
		}
		packet = smc_read_1(sc, FIFO_RX);
	}

	sc->smc_mask |= RCV_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);

	while (mhead != NULL) {
		m = mhead;
		mhead = mhead->m_next;
		m->m_next = NULL;
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}
}
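
/*
 * Interrupt handling: the filter routine (smc_intr) and the polling
 * handler (smc_poll) both just enqueue smc_task_intr on the driver's
 * taskqueue.  smc_task_intr reads the interrupt status register and
 * dispatches to the RX and TX tasks as needed.  When polling is enabled
 * the hardware interrupt mask (MSK) is left at zero, so sc->smc_mask only
 * tracks which events the driver currently cares about.
 */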

#ifdef DEVICE_POLLING
static void
smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct smc_softc	*sc;

	sc = ifp->if_softc;

	SMC_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		SMC_UNLOCK(sc);
		return;
	}
	SMC_UNLOCK(sc);

	if (cmd == POLL_AND_CHECK_STATUS)
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
}
#endif

static int
smc_intr(void *context)
{
	struct smc_softc	*sc;

	sc = (struct smc_softc *)context;
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
	return (FILTER_HANDLED);
}

static void
smc_task_intr(void *context, int pending)
{
	struct smc_softc	*sc;
	struct ifnet		*ifp;
	u_int			status, packet, counter, tcr;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);

	smc_select_bank(sc, 2);

	/*
	 * Get the current mask, and then block all interrupts while we're
	 * working.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, 0);

	/*
	 * Find out what interrupts are flagged.
	 */
	status = smc_read_1(sc, IST) & sc->smc_mask;

	/*
	 * Transmit error
	 */
	if (status & TX_INT) {
		/*
		 * Kill off the packet if there is one and re-enable transmit.
		 */
		packet = smc_read_1(sc, FIFO_TX);
		if ((packet & FIFO_EMPTY) == 0) {
			smc_write_1(sc, PNR, packet);
			smc_write_2(sc, PTR, 0 | PTR_READ |
			    PTR_AUTO_INCR);
			tcr = smc_read_2(sc, DATA0);
			if ((tcr & EPHSR_TX_SUC) == 0)
				device_printf(sc->smc_dev,
				    "bad packet\n");
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT);

			smc_select_bank(sc, 0);
			tcr = smc_read_2(sc, TCR);
			tcr |= TCR_TXENA | TCR_PAD_EN;
			smc_write_2(sc, TCR, tcr);
			smc_select_bank(sc, 2);
			taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
		}

		/*
		 * Ack the interrupt.
		 */
		smc_write_1(sc, ACK, TX_INT);
	}

	/*
	 * Receive
	 */
	if (status & RCV_INT) {
		smc_write_1(sc, ACK, RCV_INT);
		sc->smc_mask &= ~RCV_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx);
	}

	/*
	 * Allocation
	 */
	if (status & ALLOC_INT) {
		smc_write_1(sc, ACK, ALLOC_INT);
		sc->smc_mask &= ~ALLOC_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Receive overrun
	 */
	if (status & RX_OVRN_INT) {
		smc_write_1(sc, ACK, RX_OVRN_INT);
		ifp->if_ierrors++;
	}

	/*
	 * Transmit empty
	 */
	if (status & TX_EMPTY_INT) {
		smc_write_1(sc, ACK, TX_EMPTY_INT);
		sc->smc_mask &= ~TX_EMPTY_INT;
		callout_stop(&sc->smc_watchdog);

		/*
		 * Update collision stats.
		 */
		smc_select_bank(sc, 0);
		counter = smc_read_2(sc, ECR);
		smc_select_bank(sc, 2);
		ifp->if_collisions +=
		    (counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT;
		ifp->if_collisions +=
		    (counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT;

		/*
		 * See if there are any packets to transmit.
		 */
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Update the interrupt mask.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);
}
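
/*
 * MII management (MDIO) access is bit-banged through the MGMT register:
 * MGMT_MDO drives the data line, MGMT_MDOE enables the output driver,
 * MGMT_MCLK toggles the clock and MGMT_MDI samples the PHY's output.
 * The register access routines below build standard MII management
 * frames (IEEE 802.3 clause 22): a 32-bit preamble, a start/opcode/
 * address header, a turnaround and 16 bits of data.
 */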

static u_int
smc_mii_readbits(struct smc_softc *sc, int nbits)
{
	u_int	mgmt, mask, val;

	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_readbits called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	/*
	 * Set up the MGMT (aka MII) register.
	 */
	mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
	smc_write_2(sc, MGMT, mgmt);

	/*
	 * Read the bits in.
	 */
	for (mask = 1 << (nbits - 1), val = 0; mask; mask >>= 1) {
		if (smc_read_2(sc, MGMT) & MGMT_MDI)
			val |= mask;

		smc_write_2(sc, MGMT, mgmt);
		DELAY(1);
		smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
		DELAY(1);
	}

	return (val);
}

static void
smc_mii_writebits(struct smc_softc *sc, u_int val, int nbits)
{
	u_int	mgmt, mask;

	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_writebits called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	/*
	 * Set up the MGMT (aka MII) register.
	 */
	mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
	mgmt |= MGMT_MDOE;

	/*
	 * Push the bits out.
	 */
	for (mask = 1 << (nbits - 1); mask; mask >>= 1) {
		if (val & mask)
			mgmt |= MGMT_MDO;
		else
			mgmt &= ~MGMT_MDO;

		smc_write_2(sc, MGMT, mgmt);
		DELAY(1);
		smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
		DELAY(1);
	}
}

int
smc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct smc_softc	*sc;
	int			val;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	smc_select_bank(sc, 3);

	/*
	 * Send out the idle pattern.
	 */
	smc_mii_writebits(sc, 0xffffffff, 32);

	/*
	 * Start code + read opcode + phy address + phy register
	 */
	smc_mii_writebits(sc, 6 << 10 | phy << 5 | reg, 14);

	/*
	 * Turnaround + data
	 */
	val = smc_mii_readbits(sc, 18);

	/*
	 * Reset the MDIO interface.
	 */
	smc_write_2(sc, MGMT,
	    smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));

	SMC_UNLOCK(sc);
	return (val);
}

int
smc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct smc_softc	*sc;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	smc_select_bank(sc, 3);

	/*
	 * Send idle pattern.
	 */
	smc_mii_writebits(sc, 0xffffffff, 32);

	/*
	 * Start code + write opcode + phy address + phy register + turnaround
	 * + data.
	 */
	smc_mii_writebits(sc, 5 << 28 | phy << 23 | reg << 18 | 2 << 16 | data,
	    32);

	/*
	 * Reset MDIO interface.
	 */
	smc_write_2(sc, MGMT,
	    smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));

	SMC_UNLOCK(sc);
	return (0);
}

void
smc_miibus_statchg(device_t dev)
{
	struct smc_softc	*sc;
	struct mii_data		*mii;
	uint16_t		tcr;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->smc_miibus);

	SMC_LOCK(sc);

	smc_select_bank(sc, 0);
	tcr = smc_read_2(sc, TCR);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		tcr |= TCR_SWFDUP;
	else
		tcr &= ~TCR_SWFDUP;

	smc_write_2(sc, TCR, tcr);

	SMC_UNLOCK(sc);
}

static int
smc_mii_ifmedia_upd(struct ifnet *ifp)
{
	struct smc_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	if (sc->smc_miibus == NULL)
		return (ENXIO);

	mii = device_get_softc(sc->smc_miibus);
	return (mii_mediachg(mii));
}

static void
smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct smc_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	if (sc->smc_miibus == NULL)
		return;

	mii = device_get_softc(sc->smc_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
smc_mii_tick(void *context)
{
	struct smc_softc	*sc;

	sc = (struct smc_softc *)context;

	if (sc->smc_miibus == NULL)
		return;

	SMC_UNLOCK(sc);

	mii_tick(device_get_softc(sc->smc_miibus));
	callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc);
}

static void
smc_mii_mediachg(struct smc_softc *sc)
{

	if (sc->smc_miibus == NULL)
		return;
	mii_mediachg(device_get_softc(sc->smc_miibus));
}

static int
smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command)
{
	struct mii_data	*mii;

	if (sc->smc_miibus == NULL)
		return (EINVAL);

	mii = device_get_softc(sc->smc_miibus);
	return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command));
}

static void
smc_reset(struct smc_softc *sc)
{
	u_int	ctr;

	SMC_ASSERT_LOCKED(sc);

	smc_select_bank(sc, 2);

	/*
	 * Mask all interrupts.
	 */
	smc_write_1(sc, MSK, 0);

	/*
	 * Tell the device to reset.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RCR, RCR_SOFT_RST);

	/*
	 * Set up the configuration register.
	 */
	smc_select_bank(sc, 1);
	smc_write_2(sc, CR, CR_EPH_POWER_EN);
	DELAY(1);

	/*
	 * Turn off transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	/*
	 * Set up the control register.
	 */
	smc_select_bank(sc, 1);
	ctr = smc_read_2(sc, CTR);
	ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE;
	smc_write_2(sc, CTR, ctr);

	/*
	 * Reset the MMU.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET);
}

static void
smc_enable(struct smc_softc *sc)
{
	struct ifnet	*ifp;

	SMC_ASSERT_LOCKED(sc);
	ifp = sc->smc_ifp;

	/*
	 * Set up the receive/PHY control register.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT)
	    | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT));

	/*
	 * Set up the transmit and receive control registers.
	 */
	smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN);
	smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC);

	/*
	 * Set up the interrupt mask.  As elsewhere, only write the hardware
	 * mask when we are not polling.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);
}

static void
smc_stop(struct smc_softc *sc)
{

	SMC_ASSERT_LOCKED(sc);

	/*
	 * Turn off callouts.
	 */
	callout_stop(&sc->smc_watchdog);
	callout_stop(&sc->smc_mii_tick_ch);

	/*
	 * Mask all interrupts.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);
#ifdef DEVICE_POLLING
	ether_poll_deregister(sc->smc_ifp);
	sc->smc_ifp->if_capenable &= ~IFCAP_POLLING;
	sc->smc_ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
#endif

	/*
	 * Disable transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}
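
/*
 * TX watchdog: smc_task_tx arms this callout for two seconds each time a
 * packet is handed to the chip and smc_task_intr cancels it when the
 * TX_EMPTY interrupt arrives.  If it ever fires we log the timeout and
 * re-run the interrupt task to pick up whatever state the chip is in.
 */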

static void
smc_watchdog(void *arg)
{
	struct smc_softc	*sc;

	sc = (struct smc_softc *)arg;
	device_printf(sc->smc_dev, "watchdog timeout\n");
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
}

static void
smc_init(void *context)
{
	struct smc_softc	*sc;

	sc = (struct smc_softc *)context;
	SMC_LOCK(sc);
	smc_init_locked(sc);
	SMC_UNLOCK(sc);
}

static void
smc_init_locked(struct smc_softc *sc)
{
	struct ifnet	*ifp;

	ifp = sc->smc_ifp;

	SMC_ASSERT_LOCKED(sc);

	smc_reset(sc);
	smc_enable(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	smc_start_locked(ifp);

	if (sc->smc_mii_tick != NULL)
		callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc);

#ifdef DEVICE_POLLING
	SMC_UNLOCK(sc);
	ether_poll_register(smc_poll, ifp);
	SMC_LOCK(sc);
	ifp->if_capenable |= IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_POLLING_NOCOUNT;
#endif
}

static int
smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct smc_softc	*sc;
	int			error;

	sc = ifp->if_softc;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			SMC_LOCK(sc);
			smc_stop(sc);
			SMC_UNLOCK(sc);
		} else {
			smc_init(sc);
			if (sc->smc_mii_mediachg != NULL)
				sc->smc_mii_mediachg(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX
		SMC_LOCK(sc);
		smc_setmcast(sc);
		SMC_UNLOCK(sc);
		*/
		error = EINVAL;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->smc_mii_mediaioctl == NULL) {
			error = EINVAL;
			break;
		}
		sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}