/*-
 * Copyright (c) 2008 Benno Rice. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for SMSC LAN91C111, may work for older variants.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_mib.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/smc/if_smcreg.h>
#include <dev/smc/if_smcvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define	SMC_LOCK(sc)		mtx_lock(&(sc)->smc_mtx)
#define	SMC_UNLOCK(sc)		mtx_unlock(&(sc)->smc_mtx)
#define	SMC_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->smc_mtx, MA_OWNED)

#define	SMC_INTR_PRIORITY	0
#define	SMC_RX_PRIORITY		5
#define	SMC_TX_PRIORITY		10

devclass_t	smc_devclass;

static const char *smc_chip_ids[16] = {
	NULL, NULL, NULL,
	/* 3 */ "SMSC LAN91C90 or LAN91C92",
	/* 4 */ "SMSC LAN91C94",
	/* 5 */ "SMSC LAN91C95",
	/* 6 */ "SMSC LAN91C96",
	/* 7 */ "SMSC LAN91C100",
	/* 8 */ "SMSC LAN91C100FD",
	/* 9 */ "SMSC LAN91C110FD or LAN91C111FD",
	NULL, NULL, NULL,
	NULL, NULL, NULL
};

static void	smc_init(void *);
static void	smc_start(struct ifnet *);
static void	smc_stop(struct smc_softc *);
static int	smc_ioctl(struct ifnet *, u_long, caddr_t);

static void	smc_init_locked(struct smc_softc *);
static void	smc_start_locked(struct ifnet *);
static void	smc_reset(struct smc_softc *);
static int	smc_mii_ifmedia_upd(struct ifnet *);
static void	smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	smc_mii_tick(void *);
static void	smc_mii_mediachg(struct smc_softc *);
static int	smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long);

static void	smc_task_intr(void *, int);
static void	smc_task_rx(void *, int);
static void	smc_task_tx(void *, int);

static driver_filter_t	smc_intr;
static timeout_t	smc_watchdog;
#ifdef DEVICE_POLLING
static poll_handler_t	smc_poll;
#endif

static __inline void
smc_select_bank(struct smc_softc *sc, uint16_t bank)
{

	bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
}

/* Never call this when not in bank 2. */
static __inline void
smc_mmu_wait(struct smc_softc *sc)
{

	KASSERT((bus_read_2(sc->smc_reg, BSR) &
	    BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2",
	    device_get_nameunit(sc->smc_dev)));
	while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY)
		;
}

static __inline uint8_t
smc_read_1(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_1(sc->smc_reg, offset));
}

static __inline void
smc_write_1(struct smc_softc *sc, bus_addr_t offset, uint8_t val)
{

	bus_write_1(sc->smc_reg, offset, val);
}

static __inline uint16_t
smc_read_2(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_2(sc->smc_reg, offset));
}

static __inline void
smc_write_2(struct smc_softc *sc, bus_addr_t offset, uint16_t val)
{

	bus_write_2(sc->smc_reg, offset, val);
}

static __inline void
smc_read_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_read_multi_2(sc->smc_reg, offset, datap, count);
}

static __inline void
smc_write_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_write_multi_2(sc->smc_reg, offset, datap, count);
}

int
smc_probe(device_t dev)
{
	int rid, type, error;
	uint16_t val;
	struct smc_softc *sc;
	struct resource *reg;

	sc = device_get_softc(dev);
	rid = 0;
	type = SYS_RES_IOPORT;
	error = 0;

	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	reg = bus_alloc_resource(dev, type, &rid, 0, ~0, 16, RF_ACTIVE);
	if (reg == NULL) {
		if (bootverbose)
			device_printf(dev,
			    "could not allocate I/O resource for probe\n");
		return (ENXIO);
	}

	/* Check for the identification value in the BSR. */
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev, "identification value not in BSR\n");
		error = ENXIO;
		goto done;
	}

	/*
	 * Try switching banks and make sure we still get the identification
	 * value.
	 */
	bus_write_2(reg, BSR, 0);
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev,
			    "identification value not in BSR after write\n");
		error = ENXIO;
		goto done;
	}

#if 0
	/* Check the BAR. */
	bus_write_2(reg, BSR, 1);
	val = bus_read_2(reg, BAR);
	val = BAR_ADDRESS(val);
	if (rman_get_start(reg) != val) {
		if (bootverbose)
			device_printf(dev, "BAR address %x does not match "
			    "I/O resource address %lx\n", val,
			    rman_get_start(reg));
		error = ENXIO;
		goto done;
	}
#endif

	/* Compare REV against known chip revisions. */
	bus_write_2(reg, BSR, 3);
	val = bus_read_2(reg, REV);
	val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	if (smc_chip_ids[val] == NULL) {
		if (bootverbose)
			device_printf(dev, "Unknown chip revision: %d\n", val);
		error = ENXIO;
		goto done;
	}

	device_set_desc(dev, smc_chip_ids[val]);

done:
	bus_release_resource(dev, type, rid, reg);
	return (error);
}

int
smc_attach(device_t dev)
{
	int type, error;
	uint16_t val;
	u_char eaddr[ETHER_ADDR_LEN];
	struct smc_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	error = 0;

	sc->smc_dev = dev;

	ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		error = ENOSPC;
		goto done;
	}

	mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	/* Set up watchdog callout. */
	callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

	type = SYS_RES_IOPORT;
	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	sc->smc_reg_rid = 0;
	sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
	    16, RF_ACTIVE);
	if (sc->smc_reg == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
	    ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->smc_irq == NULL) {
		error = ENXIO;
		goto done;
	}

	SMC_LOCK(sc);
	smc_reset(sc);
	SMC_UNLOCK(sc);

	smc_select_bank(sc, 3);
	val = smc_read_2(sc, REV);
	sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
	if (bootverbose)
		device_printf(dev, "revision %x\n", sc->smc_rev);

	callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
	    CALLOUT_RETURNUNLOCKED);
	if (sc->smc_chip >= REV_CHIP_91110FD) {
		mii_phy_probe(dev, &sc->smc_miibus, smc_mii_ifmedia_upd,
		    smc_mii_ifmedia_sts);
		if (sc->smc_miibus != NULL) {
			sc->smc_mii_tick = smc_mii_tick;
			sc->smc_mii_mediachg = smc_mii_mediachg;
			sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
		}
	}

	smc_select_bank(sc, 1);
	eaddr[0] = smc_read_1(sc, IAR0);
	eaddr[1] = smc_read_1(sc, IAR1);
	eaddr[2] = smc_read_1(sc, IAR2);
	eaddr[3] = smc_read_1(sc, IAR3);
	eaddr[4] = smc_read_1(sc, IAR4);
	eaddr[5] = smc_read_1(sc, IAR5);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = smc_init;
	ifp->if_ioctl = smc_ioctl;
	ifp->if_start = smc_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ether_ifattach(ifp, eaddr);

	/* Set up taskqueue */
	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->smc_tq);
	taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->smc_dev));

	/* Mask all interrupts. */
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);

	/* Wire up interrupt */
	error = bus_setup_intr(dev, sc->smc_irq,
	    INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
	if (error != 0)
		goto done;

done:
	if (error != 0)
		smc_detach(dev);
	return (error);
}

int
smc_detach(device_t dev)
{
	int type;
	struct smc_softc *sc;

	sc = device_get_softc(dev);
	SMC_LOCK(sc);
	smc_stop(sc);
	SMC_UNLOCK(sc);

	if (sc->smc_ifp != NULL) {
		ether_ifdetach(sc->smc_ifp);
	}

	callout_drain(&sc->smc_watchdog);
	callout_drain(&sc->smc_mii_tick_ch);

#ifdef DEVICE_POLLING
	if (sc->smc_ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->smc_ifp);
#endif

	if (sc->smc_ih != NULL)
		bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);

	if (sc->smc_tq != NULL) {
		taskqueue_drain(sc->smc_tq, &sc->smc_intr);
		taskqueue_drain(sc->smc_tq, &sc->smc_rx);
		taskqueue_drain(sc->smc_tq, &sc->smc_tx);
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}

	if (sc->smc_ifp != NULL) {
		if_free(sc->smc_ifp);
	}

	if (sc->smc_miibus != NULL) {
		device_delete_child(sc->smc_dev, sc->smc_miibus);
		bus_generic_detach(sc->smc_dev);
	}

	if (sc->smc_reg != NULL) {
		type = SYS_RES_IOPORT;
		if (sc->smc_usemem)
			type = SYS_RES_MEMORY;

		bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
		    sc->smc_reg);
	}

	if (sc->smc_irq != NULL)
		bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
		    sc->smc_irq);

	if (mtx_initialized(&sc->smc_mtx))
		mtx_destroy(&sc->smc_mtx);

	return (0);
}

static void
smc_start(struct ifnet *ifp)
{
	struct smc_softc *sc;

	sc = ifp->if_softc;
	SMC_LOCK(sc);
	smc_start_locked(ifp);
	SMC_UNLOCK(sc);
}

static void
smc_start_locked(struct ifnet *ifp)
{
	struct smc_softc *sc;
	struct mbuf *m;
	u_int len, npages, spin_count;

	sc = ifp->if_softc;
	SMC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * Grab the next packet.  If it's too big, drop it.
	 */
	IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
	len = m_length(m, NULL);
	len += (len & 1);
	if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(ifp, "large packet discarded\n");
		++ifp->if_oerrors;
		m_freem(m);
		return;		/* XXX readcheck? */
	}

	/*
	 * Flag that we're busy.
	 */
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	sc->smc_pending = m;

	/*
	 * Work out how many 256 byte "pages" we need.  We have to include the
	 * control data for the packet in this calculation.
	 */
	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	if (npages == 0)
		npages = 1;

	/*
	 * Request memory.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);

	/*
	 * Spin briefly to see if the allocation succeeds.
	 */
	spin_count = TX_ALLOC_WAIT_TIME;
	do {
		if (smc_read_1(sc, IST) & ALLOC_INT) {
			smc_write_1(sc, ACK, ALLOC_INT);
			break;
		}
	} while (--spin_count);

	/*
	 * If the allocation is taking too long, unmask the alloc interrupt
	 * and wait.
	 */
	if (spin_count == 0) {
		sc->smc_mask |= ALLOC_INT;
		if ((ifp->if_capenable & IFCAP_POLLING) == 0)
			smc_write_1(sc, MSK, sc->smc_mask);
		return;
	}

	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
}

static void
smc_task_tx(void *context, int pending)
{
	struct ifnet *ifp;
	struct smc_softc *sc;
	struct mbuf *m, *m0, *mlast;
	u_int packet, len;
	uint8_t *data;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);

	if (sc->smc_pending == NULL) {
		SMC_UNLOCK(sc);
		goto next_packet;
	}

	m = m0 = sc->smc_pending;
	sc->smc_pending = NULL;
	smc_select_bank(sc, 2);

	/*
	 * Check the allocation result.
	 */
	packet = smc_read_1(sc, ARR);

	/*
	 * If the allocation failed, requeue the packet and retry.
	 */
	if (packet & ARR_FAILED) {
		IFQ_DRV_PREPEND(&ifp->if_snd, m);
		++ifp->if_oerrors;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		smc_start_locked(ifp);
		SMC_UNLOCK(sc);
		return;
	}

	/*
	 * Tell the device to write to our packet number.
	 */
	smc_write_1(sc, PNR, packet);
	smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR);

	/*
	 * Tell the device how long the packet is (including control data).
	 */
	len = m_length(m, 0);
	len += PKT_CTRL_DATA_LEN;
	smc_write_2(sc, DATA0, 0);
	smc_write_2(sc, DATA0, len);

	/*
	 * Push the data out to the device.  Remember the last mbuf in the
	 * chain so we can pick the trailing odd byte out of it below.
	 */
	data = NULL;
	mlast = NULL;
	for (; m != NULL; m = m->m_next) {
		data = mtod(m, uint8_t *);
		smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2);
		mlast = m;
	}

	/*
	 * Push out the control byte and the odd byte if needed.
	 */
	if ((len & 1) != 0 && data != NULL)
		smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[mlast->m_len - 1]);
	else
		smc_write_2(sc, DATA0, 0);

	/*
	 * Unmask the TX empty interrupt.
	 */
	sc->smc_mask |= TX_EMPTY_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	/*
	 * Enqueue the packet.
	 */
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE);
	callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc);

	/*
	 * Finish up.
	 */
	ifp->if_opackets++;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	SMC_UNLOCK(sc);
	BPF_MTAP(ifp, m0);
	m_freem(m0);

next_packet:
	/*
	 * See if there's anything else to do.
	 */
	smc_start(ifp);
}

static void
smc_task_rx(void *context, int pending)
{
	u_int packet, status, len;
	uint8_t *data;
	struct ifnet *ifp;
	struct smc_softc *sc;
	struct mbuf *m, *mhead, *mtail;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;
	mhead = mtail = NULL;

	SMC_LOCK(sc);

	packet = smc_read_1(sc, FIFO_RX);
	while ((packet & FIFO_EMPTY) == 0) {
		/*
		 * Grab an mbuf and attach a cluster.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			break;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			break;
		}

		/*
		 * Point to the start of the packet.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);

		/*
		 * Grab status and packet length.
		 */
		status = smc_read_2(sc, DATA0);
		len = smc_read_2(sc, DATA0) & RX_LEN_MASK;
		len -= 6;
		if (status & RX_ODDFRM)
			len += 1;

		/*
		 * Check for errors.
		 */
		if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) {
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
			ifp->if_ierrors++;
			m_freem(m);
			break;
		}

		/*
		 * Set the mbuf up the way we want it.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len + 2;	/* XXX: Is this right? */
		m_adj(m, ETHER_ALIGN);

		/*
		 * Pull the packet out of the device.  Make sure we're in the
		 * right bank first as things may have changed while we were
		 * allocating our mbuf.
		 */
		smc_select_bank(sc, 2);
		smc_write_1(sc, PNR, packet);
		smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
		data = mtod(m, uint8_t *);
		smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1);
		if (len & 1) {
			data += len & ~1;
			*data = smc_read_1(sc, DATA0);
		}

		/*
		 * Tell the device we're done.
		 */
		smc_mmu_wait(sc);
		smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
		if (m == NULL) {
			break;
		}

		if (mhead == NULL) {
			mhead = mtail = m;
			m->m_next = NULL;
		} else {
			mtail->m_next = m;
			mtail = m;
		}
		packet = smc_read_1(sc, FIFO_RX);
	}

	sc->smc_mask |= RCV_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);

	while (mhead != NULL) {
		m = mhead;
		mhead = mhead->m_next;
		m->m_next = NULL;
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}
}

#ifdef DEVICE_POLLING
static void
smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct smc_softc *sc;

	sc = ifp->if_softc;

	SMC_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		SMC_UNLOCK(sc);
		return;
	}
	SMC_UNLOCK(sc);

	if (cmd == POLL_AND_CHECK_STATUS)
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
}
#endif

static int
smc_intr(void *context)
{
	struct smc_softc *sc;

	sc = (struct smc_softc *)context;
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
	return (FILTER_HANDLED);
}

static void
smc_task_intr(void *context, int pending)
{
	struct smc_softc *sc;
	struct ifnet *ifp;
	u_int status, packet, counter, tcr;

	(void)pending;
	ifp = (struct ifnet *)context;
	sc = ifp->if_softc;

	SMC_LOCK(sc);

	smc_select_bank(sc, 2);

	/*
	 * Get the current mask, and then block all interrupts while we're
	 * working.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, 0);

	/*
	 * Find out what interrupts are flagged.
	 */
	status = smc_read_1(sc, IST) & sc->smc_mask;

	/*
	 * Transmit error
	 */
	if (status & TX_INT) {
		/*
		 * Kill off the packet if there is one and re-enable transmit.
		 */
		packet = smc_read_1(sc, FIFO_TX);
		if ((packet & FIFO_EMPTY) == 0) {
			smc_write_1(sc, PNR, packet);
			smc_write_2(sc, PTR, 0 | PTR_READ |
			    PTR_AUTO_INCR);
			tcr = smc_read_2(sc, DATA0);
			if ((tcr & EPHSR_TX_SUC) == 0)
				device_printf(sc->smc_dev,
				    "bad packet\n");
			smc_mmu_wait(sc);
			smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT);

			smc_select_bank(sc, 0);
			tcr = smc_read_2(sc, TCR);
			tcr |= TCR_TXENA | TCR_PAD_EN;
			smc_write_2(sc, TCR, tcr);
			smc_select_bank(sc, 2);
			taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
		}

		/*
		 * Ack the interrupt.
		 */
		smc_write_1(sc, ACK, TX_INT);
	}

	/*
	 * Receive
	 */
	if (status & RCV_INT) {
		smc_write_1(sc, ACK, RCV_INT);
		sc->smc_mask &= ~RCV_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx);
	}

	/*
	 * Allocation
	 */
	if (status & ALLOC_INT) {
		smc_write_1(sc, ACK, ALLOC_INT);
		sc->smc_mask &= ~ALLOC_INT;
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Receive overrun
	 */
	if (status & RX_OVRN_INT) {
		smc_write_1(sc, ACK, RX_OVRN_INT);
		ifp->if_ierrors++;
	}

	/*
	 * Transmit empty
	 */
	if (status & TX_EMPTY_INT) {
		smc_write_1(sc, ACK, TX_EMPTY_INT);
		sc->smc_mask &= ~TX_EMPTY_INT;
		callout_stop(&sc->smc_watchdog);

		/*
		 * Update collision stats.
		 */
		smc_select_bank(sc, 0);
		counter = smc_read_2(sc, ECR);
		smc_select_bank(sc, 2);
		ifp->if_collisions +=
		    (counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT;
		ifp->if_collisions +=
		    (counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT;

		/*
		 * See if there are any packets to transmit.
		 */
		taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
	}

	/*
	 * Update the interrupt mask.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) == 0)
		smc_write_1(sc, MSK, sc->smc_mask);

	SMC_UNLOCK(sc);
}

static u_int
smc_mii_readbits(struct smc_softc *sc, int nbits)
{
	u_int mgmt, mask, val;

	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_readbits called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	/*
	 * Set up the MGMT (aka MII) register.
	 */
	mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
	smc_write_2(sc, MGMT, mgmt);

	/*
	 * Read the bits in.
	 */
	for (mask = 1 << (nbits - 1), val = 0; mask; mask >>= 1) {
		if (smc_read_2(sc, MGMT) & MGMT_MDI)
			val |= mask;

		smc_write_2(sc, MGMT, mgmt);
		DELAY(1);
		smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
		DELAY(1);
	}

	return (val);
}

static void
smc_mii_writebits(struct smc_softc *sc, u_int val, int nbits)
{
	u_int mgmt, mask;

	SMC_ASSERT_LOCKED(sc);
	KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
	    ("%s: smc_mii_writebits called with bank %d (!= 3)",
	    device_get_nameunit(sc->smc_dev),
	    smc_read_2(sc, BSR) & BSR_BANK_MASK));

	/*
	 * Set up the MGMT (aka MII) register.
	 */
	mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO);
	mgmt |= MGMT_MDOE;

	/*
	 * Push the bits out.
	 */
	for (mask = 1 << (nbits - 1); mask; mask >>= 1) {
		if (val & mask)
			mgmt |= MGMT_MDO;
		else
			mgmt &= ~MGMT_MDO;

		smc_write_2(sc, MGMT, mgmt);
		DELAY(1);
		smc_write_2(sc, MGMT, mgmt | MGMT_MCLK);
		DELAY(1);
	}
}

int
smc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct smc_softc *sc;
	int val;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	smc_select_bank(sc, 3);

	/*
	 * Send out the idle pattern.
	 */
	smc_mii_writebits(sc, 0xffffffff, 32);

	/*
	 * Start code + read opcode + phy address + phy register
	 */
	smc_mii_writebits(sc, 6 << 10 | phy << 5 | reg, 14);

	/*
	 * Turnaround + data
	 */
	val = smc_mii_readbits(sc, 18);

	/*
	 * Reset the MDIO interface.
	 */
	smc_write_2(sc, MGMT,
	    smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));

	SMC_UNLOCK(sc);
	return (val);
}

void
smc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct smc_softc *sc;

	sc = device_get_softc(dev);

	SMC_LOCK(sc);

	smc_select_bank(sc, 3);

	/*
	 * Send idle pattern.
	 */
	smc_mii_writebits(sc, 0xffffffff, 32);

	/*
	 * Start code + write opcode + phy address + phy register + turnaround
	 * + data.
	 */
	smc_mii_writebits(sc, 5 << 28 | phy << 23 | reg << 18 | 2 << 16 | data,
	    32);

	/*
	 * Reset MDIO interface.
	 */
	smc_write_2(sc, MGMT,
	    smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO));

	SMC_UNLOCK(sc);
}

void
smc_miibus_statchg(device_t dev)
{
	struct smc_softc *sc;
	struct mii_data *mii;
	uint16_t tcr;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->smc_miibus);

	SMC_LOCK(sc);

	smc_select_bank(sc, 0);
	tcr = smc_read_2(sc, TCR);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		tcr |= TCR_SWFDUP;
	else
		tcr &= ~TCR_SWFDUP;

	smc_write_2(sc, TCR, tcr);

	SMC_UNLOCK(sc);
}

static int
smc_mii_ifmedia_upd(struct ifnet *ifp)
{
	struct smc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	if (sc->smc_miibus == NULL)
		return (ENXIO);

	mii = device_get_softc(sc->smc_miibus);
	return (mii_mediachg(mii));
}

static void
smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct smc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	if (sc->smc_miibus == NULL)
		return;

	mii = device_get_softc(sc->smc_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
smc_mii_tick(void *context)
{
	struct smc_softc *sc;

	sc = (struct smc_softc *)context;

	if (sc->smc_miibus == NULL)
		return;

	SMC_UNLOCK(sc);

	mii_tick(device_get_softc(sc->smc_miibus));
	callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc);
}

static void
smc_mii_mediachg(struct smc_softc *sc)
{

	if (sc->smc_miibus == NULL)
		return;
	mii_mediachg(device_get_softc(sc->smc_miibus));
}

static int
smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command)
{
	struct mii_data *mii;

	if (sc->smc_miibus == NULL)
		return (EINVAL);

	mii = device_get_softc(sc->smc_miibus);
	return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command));
}

static void
smc_reset(struct smc_softc *sc)
{
	u_int ctr;

	SMC_ASSERT_LOCKED(sc);

	smc_select_bank(sc, 2);

	/*
	 * Mask all interrupts.
	 */
	smc_write_1(sc, MSK, 0);

	/*
	 * Tell the device to reset.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RCR, RCR_SOFT_RST);

	/*
	 * Set up the configuration register.
	 */
	smc_select_bank(sc, 1);
	smc_write_2(sc, CR, CR_EPH_POWER_EN);
	DELAY(1);

	/*
	 * Turn off transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	/*
	 * Set up the control register.
	 */
	smc_select_bank(sc, 1);
	ctr = smc_read_2(sc, CTR);
	ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE;
	smc_write_2(sc, CTR, ctr);

	/*
	 * Reset the MMU.
	 */
	smc_select_bank(sc, 2);
	smc_mmu_wait(sc);
	smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET);
}

static void
smc_enable(struct smc_softc *sc)
{
	struct ifnet *ifp;

	SMC_ASSERT_LOCKED(sc);
	ifp = sc->smc_ifp;

	/*
	 * Set up the receive/PHY control register.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT)
	    | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT));

	/*
	 * Set up the transmit and receive control registers.
	 */
	smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN);
	smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC);

	/*
	 * Set up the interrupt mask.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT;
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		smc_write_1(sc, MSK, sc->smc_mask);
}

static void
smc_stop(struct smc_softc *sc)
{

	SMC_ASSERT_LOCKED(sc);

	/*
	 * Turn off callouts.
	 */
	callout_stop(&sc->smc_watchdog);
	callout_stop(&sc->smc_mii_tick_ch);

	/*
	 * Mask all interrupts.
	 */
	smc_select_bank(sc, 2);
	sc->smc_mask = 0;
	smc_write_1(sc, MSK, 0);
#ifdef DEVICE_POLLING
	ether_poll_deregister(sc->smc_ifp);
	sc->smc_ifp->if_capenable &= ~IFCAP_POLLING;
#endif

	/*
	 * Disable transmit and receive.
	 */
	smc_select_bank(sc, 0);
	smc_write_2(sc, TCR, 0);
	smc_write_2(sc, RCR, 0);

	sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}

static void
smc_watchdog(void *arg)
{
	struct smc_softc *sc;

	sc = (struct smc_softc *)arg;
	device_printf(sc->smc_dev, "watchdog timeout\n");
	taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
}

static void
smc_init(void *context)
{
	struct smc_softc *sc;

	sc = (struct smc_softc *)context;
	SMC_LOCK(sc);
	smc_init_locked(sc);
	SMC_UNLOCK(sc);
}

static void
smc_init_locked(struct smc_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->smc_ifp;

	SMC_ASSERT_LOCKED(sc);

	smc_reset(sc);
	smc_enable(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	smc_start_locked(ifp);

	if (sc->smc_mii_tick != NULL)
		callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc);

#ifdef DEVICE_POLLING
	SMC_UNLOCK(sc);
	ether_poll_register(smc_poll, ifp);
	SMC_LOCK(sc);
	ifp->if_capenable |= IFCAP_POLLING;
#endif
}

static int
smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct smc_softc *sc;
	int error;

	sc = ifp->if_softc;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			SMC_LOCK(sc);
			smc_stop(sc);
			SMC_UNLOCK(sc);
		} else {
			smc_init(sc);
			if (sc->smc_mii_mediachg != NULL)
				sc->smc_mii_mediachg(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX
		SMC_LOCK(sc);
		smc_setmcast(sc);
		SMC_UNLOCK(sc);
		*/
		error = EINVAL;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->smc_mii_mediaioctl == NULL) {
			error = EINVAL;
			break;
		}
		sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}