/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Sun GEM ethernet controllers.
 */

#if 0
#define	GEM_DEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#if 0
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS	GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &Giant, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;
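
	/*
	 * Summary of the tags created above: sc_pdmatag is the parent of
	 * the other three; sc_rdmatag is used to map receive buffers,
	 * sc_tdmatag to map transmit buffers and sc_cdmatag to map the
	 * control data block that holds both descriptor rings.
	 */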
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ether_ifdetach(ifp);
	gem_stop(ifp, 1);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 0);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* gem_init() takes the softc, not the ifnet, as its argument. */
	if (ifp->if_flags & IFF_UP)
		gem_init(sc);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count"));
	rxs->rxs_paddr = segs[0].ds_addr;
}

static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
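	/*
	 * Example of the check below: with sc_txfree == 5, at most 4
	 * segments may be mapped for this packet, since one descriptor
	 * always stays reserved as the ring's termination point.
	 */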
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * Wait for the bits in clr to clear and the bits in set to become set
 * in register r, polling up to TRIES times with a 100us delay (about
 * one second in total).
 */
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
#endif

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}
/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
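	/*
	 * The low 16 bits of GEM_MAC_MAC_MAX_FRAME hold the maximum
	 * frame length, the upper 16 bits the maximum burst size
	 * (0x2000 here), matching the "max frame and max burst size"
	 * default programmed in gem_init_regs().
	 */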
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
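	/*
	 * The thresholds appear to be programmed in 64-byte units:
	 * sc_rxfifosize / 256 is one quarter of the FIFO expressed in
	 * such units, so the low field (OFF) works out to 3/4 of the
	 * FIFO and the field at bit 12 (ON) to 1/4.
	 */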
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}

static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_mbuf = m0;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
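	/*
	 * The 48-bit address is written 16 bits at a time: GEM_MAC_ADDR0
	 * gets the last two octets (laddr[4], laddr[5]) and GEM_MAC_ADDR2
	 * the first two.
	 */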
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
	}
}
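
/*
 * Note on the interplay between gem_start() and gem_tint(): gem_start()
 * sets IFF_OACTIVE once the descriptor ring fills up, and gem_tint()
 * clears it again after reclaiming descriptors and calls gem_start() to
 * restart the send queue.
 */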
/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
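		/*
		 * The tests below check whether txlast still lies inside
		 * [txs_firstdesc, txs_lastdesc]; the second branch handles
		 * packets whose descriptor range wraps around the end of
		 * the ring (firstdesc near the top, lastdesc near 0).
		 */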
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
}

#if 0
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
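		/*
		 * gem_init() programs a receive first-byte offset of 2
		 * (GEM_RX_CONFIG_FBOFF), which keeps the IP header
		 * 32-bit aligned; advance m_data to match.
		 */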
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
}
/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);
#endif

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
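	/*
	 * The handlers below recover from serious MAC faults (TX
	 * underrun, too-long packets, RX FIFO overflow) by simply
	 * reinitializing the whole chip via gem_init(); lesser faults
	 * are only reported.
	 */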
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));
#endif

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
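/*
 * Frame mode is what the routines below use: gem_mii_readreg() and
 * gem_mii_writereg() assemble a management frame from the opcode, PHY
 * address and register number, write it to GEM_MIF_FRAME, and then poll
 * GEM_MIF_FRAME_TA0 (the turnaround bit) until the access completes.
 */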
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int len;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */
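	/*
	 * Example: if crc >> 24 yields 0xd5, the word index below is
	 * 0xd (13) and the bit index is 0x5, so the filter update is
	 * hash[13] |= 1 << (15 - 5).
	 */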
	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define	MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}