/*
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define TRIES 10000

static void gem_start(struct ifnet *);
static void gem_stop(struct ifnet *, int);
static int gem_ioctl(struct ifnet *, u_long, caddr_t);
static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void gem_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void gem_tick(void *);
static void gem_watchdog(struct ifnet *);
static void gem_init(void *);
static void gem_init_regs(struct gem_softc *sc);
static int gem_ringsize(int sz);
static int gem_meminit(struct gem_softc *);
static int gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void gem_mifinit(struct gem_softc *);
static int gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int gem_reset_rx(struct gem_softc *);
static int gem_reset_tx(struct gem_softc *);
static int gem_disable_rx(struct gem_softc *);
static int gem_disable_tx(struct gem_softc *);
static void gem_rxdrain(struct gem_softc *);
static int gem_add_rxbuf(struct gem_softc *, int);
static void gem_setladrf(struct gem_softc *);

struct mbuf *gem_get(struct gem_softc *, int, int);
static void gem_eint(struct gem_softc *, u_int);
static void gem_rint(struct gem_softc *);
#if 0
static void gem_rint_timeout(void *);
#endif
static void gem_tint(struct gem_softc *);
#ifdef notyet
static void gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define KTR_GEM KTR_CT2
#endif

#define GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
    struct gem_softc *sc;
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct mii_softc *child;
    int i, error;
    u_int32_t v;

    /* Make sure the chip is stopped. */
    ifp->if_softc = sc;
    gem_reset(sc);

    error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
        BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
    if (error)
        return (error);

    error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
        1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
        &sc->sc_rdmatag);
    if (error)
        goto fail_ptag;

    error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        GEM_TD_BUFSIZE, GEM_NTXSEGS, BUS_SPACE_MAXSIZE_32BIT,
        BUS_DMA_ALLOCNOW, &sc->sc_tdmatag);
    if (error)
        goto fail_rtag;

    error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(struct gem_control_data), 1,
        sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
        &sc->sc_cdmatag);
    if (error)
        goto fail_ttag;

    /*
     * Allocate the control data structures, and create and load the
     * DMA map for it.
     */
    if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
        (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
        device_printf(sc->sc_dev, "unable to allocate control data,"
            " error = %d\n", error);
        goto fail_ctag;
    }

    sc->sc_cddma = 0;
    if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
        sc->sc_control_data, sizeof(struct gem_control_data),
        gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
        device_printf(sc->sc_dev, "unable to load control data DMA "
            "map, error = %d\n", error);
        goto fail_cmem;
    }

    /*
     * Initialize the transmit job descriptors.
     */
    STAILQ_INIT(&sc->sc_txfreeq);
    STAILQ_INIT(&sc->sc_txdirtyq);

    /*
     * Create the transmit buffer DMA maps.
     */
    error = ENOMEM;
    for (i = 0; i < GEM_TXQUEUELEN; i++) {
        struct gem_txsoft *txs;

        txs = &sc->sc_txsoft[i];
        txs->txs_mbuf = NULL;
        txs->txs_ndescs = 0;
        if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
            &txs->txs_dmamap)) != 0) {
            device_printf(sc->sc_dev, "unable to create tx DMA map "
                "%d, error = %d\n", i, error);
            goto fail_txd;
        }
        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
    }

    /*
     * Create the receive buffer DMA maps.
     */
    for (i = 0; i < GEM_NRXDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
            &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
            device_printf(sc->sc_dev, "unable to create rx DMA map "
                "%d, error = %d\n", i, error);
            goto fail_rxd;
        }
        sc->sc_rxsoft[i].rxs_mbuf = NULL;
    }

    gem_mifinit(sc);

    if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
        gem_mediastatus)) != 0) {
        device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
        goto fail_rxd;
    }
    sc->sc_mii = device_get_softc(sc->sc_miibus);

    /*
     * From this point forward, the attachment cannot fail. A failure
     * before this point releases all resources that may have been
     * allocated.
     */

    /* Announce ourselves. */
    device_printf(sc->sc_dev, "Ethernet address:");
    for (i = 0; i < 6; i++)
        printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

    /* Get RX FIFO size */
    sc->sc_rxfifosize = 64 *
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
    printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

    /* Get TX FIFO size */
    v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
    printf(", %uKB TX fifo\n", v / 16);

    /* Initialize ifnet structure. */
    ifp->if_softc = sc;
    ifp->if_unit = device_get_unit(sc->sc_dev);
    ifp->if_name = "gem";
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_start = gem_start;
    ifp->if_ioctl = gem_ioctl;
    ifp->if_watchdog = gem_watchdog;
    ifp->if_init = gem_init;
    ifp->if_output = ether_output;
    ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
    /*
     * Walk along the list of attached MII devices and
     * establish an `MII instance' to `phy number'
     * mapping. We'll use this mapping in media change
     * requests to determine which phy to use to program
     * the MIF configuration register.
     */
    for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
        child = LIST_NEXT(child, mii_list)) {
        /*
         * Note: we support just two PHYs: the built-in
         * internal device and an external on the MII
         * connector.
         */
        if (child->mii_phy > 1 || child->mii_inst > 1) {
            device_printf(sc->sc_dev, "cannot accommodate "
                "MII device %s at phy %d, instance %d\n",
                device_get_name(child->mii_dev),
                child->mii_phy, child->mii_inst);
            continue;
        }

        sc->sc_phys[child->mii_inst] = child->mii_phy;
    }

    /*
     * Now select and activate the PHY we will use.
     *
     * The order of preference is External (MDI1),
     * Internal (MDI0), Serial Link (no MII).
     */
    if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
        printf("using external phy\n");
#endif
        sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
    } else {
#ifdef GEM_DEBUG
        printf("using internal phy\n");
#endif
        sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
    }
    bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
        sc->sc_mif_config);
    /* Attach the interface. */
    ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

#ifdef notyet
    /*
     * Add a suspend hook to make sure we come back up after a
     * resume.
     */
    sc->sc_powerhook = powerhook_establish(gem_power, sc);
    if (sc->sc_powerhook == NULL)
        device_printf(sc->sc_dev, "WARNING: unable to establish power "
            "hook\n");
#endif

    callout_init(&sc->sc_tick_ch, 0);
    callout_init(&sc->sc_rx_ch, 0);
    return (0);

    /*
     * Free any resources we've allocated during the failed attach
     * attempt. Do this in reverse order and fall through.
     */
fail_rxd:
    for (i = 0; i < GEM_NRXDESC; i++) {
        if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_rdmatag,
                sc->sc_rxsoft[i].rxs_dmamap);
    }
fail_txd:
    for (i = 0; i < GEM_TXQUEUELEN; i++) {
        if (sc->sc_txsoft[i].txs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_tdmatag,
                sc->sc_txsoft[i].txs_dmamap);
    }
    bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
    bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
        sc->sc_cddmamap);
fail_ctag:
    bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
    bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
    bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
    bus_dma_tag_destroy(sc->sc_pdmatag);
    return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
    void *xsc;
    bus_dma_segment_t *segs;
    int nsegs;
    int error;
{
    struct gem_softc *sc = (struct gem_softc *)xsc;

    if (error != 0)
        return;
    if (nsegs != 1) {
        /* can't happen... */
        panic("gem_cddma_callback: bad control buffer segment count");
    }
    sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, totsz, error)
    void *xsc;
    bus_dma_segment_t *segs;
    int nsegs;
    bus_size_t totsz;
    int error;
{
    struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

    if (error != 0)
        return;
    KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count"));
    rxs->rxs_paddr = segs[0].ds_addr;
}

static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
    void *xsc;
    bus_dma_segment_t *segs;
    int nsegs;
    bus_size_t totsz;
    int error;
{
    struct gem_txdma *txd = (struct gem_txdma *)xsc;
    struct gem_softc *sc = txd->txd_sc;
    struct gem_txsoft *txs = txd->txd_txs;
    bus_size_t len = 0;
    uint64_t flags = 0;
    int seg, nexttx;

    if (error != 0)
        return;
    /*
     * Ensure we have enough descriptors free to describe
     * the packet. Note, we always reserve one descriptor
     * at the end of the ring as a termination point, to
     * prevent wrap-around.
     */
    if (nsegs > sc->sc_txfree - 1) {
        txs->txs_ndescs = -1;
        return;
    }
    txs->txs_ndescs = nsegs;

    nexttx = txs->txs_firstdesc;
    /*
     * Initialize the transmit descriptors.
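     * Each DMA segment becomes one descriptor: gd_addr gets the
     * buffer address (converted with GEM_DMA_WRITE) and gd_flags the
     * segment length. The first segment of a frame is tagged
     * GEM_TD_START_OF_PACKET, the last GEM_TD_END_OF_PACKET, and
     * GEM_TD_INTERRUPT_ME is requested only every 2/3 * GEM_NTXSEGS
     * frames (tracked in sc_txwin) so that completion interrupts are
     * batched rather than taken per packet.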
     */
    for (seg = 0; seg < nsegs;
        seg++, nexttx = GEM_NEXTTX(nexttx)) {
        CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
            "%lx, addr %#lx (%#lx)", seg, nexttx,
            segs[seg].ds_len, segs[seg].ds_addr,
            GEM_DMA_WRITE(sc, segs[seg].ds_addr));

        if (segs[seg].ds_len == 0)
            continue;
        sc->sc_txdescs[nexttx].gd_addr =
            GEM_DMA_WRITE(sc, segs[seg].ds_addr);
        KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
            ("gem_txdma_callback: segment size too large!"));
        flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
        if (len == 0) {
            CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
                "tx %d", seg, nexttx);
            flags |= GEM_TD_START_OF_PACKET;
            if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
                sc->sc_txwin = 0;
                flags |= GEM_TD_INTERRUPT_ME;
            }
        }
        if (len + segs[seg].ds_len == totsz) {
            CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
                "tx %d", seg, nexttx);
            flags |= GEM_TD_END_OF_PACKET;
        }
        sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
        txs->txs_lastdesc = nexttx;
        len += segs[seg].ds_len;
    }
    KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
        ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(arg)
    void *arg;
{
    struct gem_softc *sc = arg;
    int s;

    s = splnet();
    mii_tick(sc->sc_mii);
    splx(s);

    callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * Wait for the bits in clr to clear and the bits in set to be set in
 * register r; return 1 on success, 0 on timeout.
 */
static int
gem_bitwait(sc, r, clr, set)
    struct gem_softc *sc;
    bus_addr_t r;
    u_int32_t clr;
    u_int32_t set;
{
    int i;
    u_int32_t reg;

    for (i = TRIES; i--; DELAY(100)) {
        reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
        if ((reg & clr) == 0 && (reg & set) == set)
            return (1);
    }
    return (0);
}

void
gem_reset(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    int s;

    s = splnet();
    CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
    gem_reset_rx(sc);
    gem_reset_tx(sc);

    /* Do a full reset */
    bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
    if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
        device_printf(sc->sc_dev, "cannot reset device\n");
    splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
    struct gem_softc *sc;
{
    struct gem_rxsoft *rxs;
    int i;

    for (i = 0; i < GEM_NRXDESC; i++) {
        rxs = &sc->sc_rxsoft[i];
        if (rxs->rxs_mbuf != NULL) {
            bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
            m_freem(rxs->rxs_mbuf);
            rxs->rxs_mbuf = NULL;
        }
    }
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
    struct ifnet *ifp;
    int disable;
{
    struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
    struct gem_txsoft *txs;

    CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

    callout_stop(&sc->sc_tick_ch);

    /* XXX - Should we reset these instead? */
    gem_disable_tx(sc);
    gem_disable_rx(sc);

    /*
     * Release any queued transmit buffers.
     */
    while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
        STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
        if (txs->txs_ndescs != 0) {
            bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
            if (txs->txs_mbuf != NULL) {
                m_freem(txs->txs_mbuf);
                txs->txs_mbuf = NULL;
            }
        }
        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
    }

    if (disable)
        gem_rxdrain(sc);

    /*
     * Mark the interface down and cancel the watchdog timer.
     */
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;

    /*
     * Resetting while DMA is in progress can cause a bus hang, so we
     * disable DMA first.
     */
    gem_disable_rx(sc);
    bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
    /* Wait till it finishes */
    if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
        device_printf(sc->sc_dev, "cannot disable rx dma\n");

    /* Wait 5ms extra. */
    DELAY(5000);

    /* Finally, reset the ERX */
    bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
    /* Wait till it finishes */
    if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
        device_printf(sc->sc_dev, "cannot reset receiver\n");
        return (1);
    }
    return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    int i;

    /*
     * Resetting while DMA is in progress can cause a bus hang, so we
     * disable DMA first.
     */
    gem_disable_tx(sc);
    bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
    /* Wait till it finishes */
    if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
        device_printf(sc->sc_dev, "cannot disable tx dma\n");

    /* Wait 5ms extra. */
    DELAY(5000);

    /* Finally, reset the ETX */
    bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
    /* Wait till it finishes */
    for (i = TRIES; i--; DELAY(100))
        if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
            break;
    if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
        device_printf(sc->sc_dev, "cannot reset transmitter\n");
        return (1);
    }
    return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    u_int32_t cfg;

    /* Flip the enable bit */
    cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
    cfg &= ~GEM_MAC_RX_ENABLE;
    bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

    /* Wait for it to finish */
    return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    u_int32_t cfg;

    /* Flip the enable bit */
    cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
    cfg &= ~GEM_MAC_TX_ENABLE;
    bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

    /* Wait for it to finish */
    return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
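 * gem_meminit() zeroes the transmit descriptor ring and its software
 * state, then makes sure an mbuf cluster is loaded behind every receive
 * descriptor, (re)initializing each one.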
 */
static int
gem_meminit(sc)
    struct gem_softc *sc;
{
    struct gem_rxsoft *rxs;
    int i, error;

    /*
     * Initialize the transmit descriptor ring.
     */
    memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
    for (i = 0; i < GEM_NTXDESC; i++) {
        sc->sc_txdescs[i].gd_flags = 0;
        sc->sc_txdescs[i].gd_addr = 0;
    }
    GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
        BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    sc->sc_txfree = GEM_MAXTXFREE;
    sc->sc_txnext = 0;
    sc->sc_txwin = 0;

    /*
     * Initialize the receive descriptor and receive job
     * descriptor rings.
     */
    for (i = 0; i < GEM_NRXDESC; i++) {
        rxs = &sc->sc_rxsoft[i];
        if (rxs->rxs_mbuf == NULL) {
            if ((error = gem_add_rxbuf(sc, i)) != 0) {
                device_printf(sc->sc_dev, "unable to "
                    "allocate or map rx buffer %d, error = "
                    "%d\n", i, error);
                /*
                 * XXX Should attempt to run with fewer receive
                 * XXX buffers instead of just failing.
                 */
                gem_rxdrain(sc);
                return (1);
            }
        } else
            GEM_INIT_RXDESC(sc, i);
    }
    sc->sc_rxptr = 0;

    return (0);
}

static int
gem_ringsize(sz)
    int sz;
{
    int v = 0;

    switch (sz) {
    case 32:
        v = GEM_RING_SZ_32;
        break;
    case 64:
        v = GEM_RING_SZ_64;
        break;
    case 128:
        v = GEM_RING_SZ_128;
        break;
    case 256:
        v = GEM_RING_SZ_256;
        break;
    case 512:
        v = GEM_RING_SZ_512;
        break;
    case 1024:
        v = GEM_RING_SZ_1024;
        break;
    case 2048:
        v = GEM_RING_SZ_2048;
        break;
    case 4096:
        v = GEM_RING_SZ_4096;
        break;
    case 8192:
        v = GEM_RING_SZ_8192;
        break;
    default:
        printf("gem: invalid Receive Descriptor ring size\n");
        break;
    }
    return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
    void *xsc;
{
    struct gem_softc *sc = (struct gem_softc *)xsc;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    int s;
    u_int32_t v;

    s = splnet();

    CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
    /*
     * Initialization sequence. The numbered steps below correspond
     * to the sequence outlined in section 6.3.5.1 in the Ethernet
     * Channel Engine manual (part of the PCIO manual).
     * See also the STP2002-STQ document from Sun Microsystems.
     */

    /* step 1 & 2. Reset the Ethernet Channel */
    gem_stop(&sc->sc_arpcom.ac_if, 0);
    gem_reset(sc);
    CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));

    /* Re-initialize the MIF */
    gem_mifinit(sc);

    /* step 3. Setup data structures in host memory */
    gem_meminit(sc);

    /* step 4. TX MAC registers & counters */
    gem_init_regs(sc);
    /* XXX: VLAN code from NetBSD temporarily removed. */
    bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
        (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

    /* step 5. RX MAC registers & counters */
    gem_setladrf(sc);

    /* step 6 & 7. Program Descriptor Ring Base Addresses */
    /*
     * NOTE: we use only 32-bit DMA addresses here.
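     * The control data DMA tag was created with a lowaddr of
     * BUS_SPACE_MAXADDR_32BIT, so the rings are known to sit below
     * 4GB and the _HI halves of the ring base registers can simply
     * be cleared.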
     */
    bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
    bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

    bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
    bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
    CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
        GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

    /* step 8. Global Configuration & Interrupt Mask */
    bus_space_write_4(t, h, GEM_INTMASK,
        ~(GEM_INTR_TX_INTME|
        GEM_INTR_TX_EMPTY|
        GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
        GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
        GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
        GEM_INTR_BERR));
    bus_space_write_4(t, h, GEM_MAC_RX_MASK,
        GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
    bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
    bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

    /* step 9. ETX Configuration: use mostly default values */

    /* Enable DMA */
    v = gem_ringsize(GEM_NTXDESC /*XXX*/);
    bus_space_write_4(t, h, GEM_TX_CONFIG,
        v|GEM_TX_CONFIG_TXDMA_EN|
        ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

    /* step 10. ERX Configuration */

    /* Encode Receive Descriptor ring size */
    v = gem_ringsize(GEM_NRXDESC /*XXX*/);

    /* Enable DMA */
    bus_space_write_4(t, h, GEM_RX_CONFIG,
        v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
        (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
        (0<<GEM_RX_CONFIG_CXM_START_SHFT));
    /*
     * The following value is for an OFF Threshold of about 3/4 full
     * and an ON Threshold of 1/4 full.
     */
    bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
        (3 * sc->sc_rxfifosize / 256) |
        ((sc->sc_rxfifosize / 256) << 12));
    bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

    /* step 11. Configure Media */
    mii_mediachg(sc->sc_mii);

    /* step 12. RX_MAC Configuration Register */
    v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
    v |= GEM_MAC_RX_ENABLE;
    bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

    /* step 14. Issue Transmit Pending command */

    /* step 15. Give the receiver a swift kick */
    bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

    /* Start the one second timer. */
    callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;
    sc->sc_ifflags = ifp->if_flags;
    splx(s);
}

static int
gem_load_txmbuf(sc, m0)
    struct gem_softc *sc;
    struct mbuf *m0;
{
    struct gem_txdma txd;
    struct gem_txsoft *txs;
    int error;

    /* Get a work queue entry. */
    if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
        /* Ran out of descriptors. */
        return (-1);
    }
    txd.txd_sc = sc;
    txd.txd_txs = txs;
    txs->txs_mbuf = m0;
    txs->txs_firstdesc = sc->sc_txnext;
    error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
        gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
    if (error != 0)
        goto fail;
    if (txs->txs_ndescs == -1) {
        error = -1;
        goto fail;
    }

    /* Sync the DMA map. */
    bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
        BUS_DMASYNC_PREWRITE);

    CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
        "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
        txs->txs_ndescs);
    STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
    STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

    /*
     * Sync the descriptors we're using.
     */
    GEM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndescs,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
    sc->sc_txfree -= txs->txs_ndescs;
    return (0);

fail:
    CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
    bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
    return (error);
}

static void
gem_init_regs(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    const u_char *laddr = sc->sc_arpcom.ac_enaddr;
    u_int32_t v;

    /* These regs are not cleared on reset */
    if (!sc->sc_inited) {

        /* Wooo. Magic values. */
        bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
        bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
        bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

        bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
        /* Max frame and max burst size */
        bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
            ETHER_MAX_LEN | (0x2000<<16));

        bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
        bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
        bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
        /* Dunno.... */
        bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
        bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
            ((laddr[5]<<8)|laddr[4])&0x3ff);

        /* Secondary MAC addr set to 0:0:0:0:0:0 */
        bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
        bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
        bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

        /* MAC control addr set to 01:80:c2:00:00:01 */
        bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
        bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
        bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

        /* MAC filter addr set to 0:0:0:0:0:0 */
        bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
        bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
        bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

        bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
        bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

        sc->sc_inited = 1;
    }

    /* Counters need to be zeroed */
    bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
    bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
    bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
    bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
    bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

    /* Un-pause stuff */
#if 0
    bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
    bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

    /*
     * Set the station address.
     */
    bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
    bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
    bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

    /*
     * Enable MII outputs. Enable GMII if there is a gigabit PHY.
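     * GEM_MIF_CONFIG_MDI1 indicates an attached external transceiver;
     * for those we drive the full duplex LED and, on gigabit-capable
     * chips (GEM_GIGABIT), select GMII mode.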
     */
    sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
    v = GEM_MAC_XIF_TX_MII_ENA;
    if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
        v |= GEM_MAC_XIF_FDPLX_LED;
        if (sc->sc_flags & GEM_GIGABIT)
            v |= GEM_MAC_XIF_GMII_MODE;
    }
    bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(ifp)
    struct ifnet *ifp;
{
    struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
    struct mbuf *m0 = NULL;
    int firsttx, ntx, ofree, txmfail;

    if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
        return;

    /*
     * Remember the previous number of free descriptors and
     * the first descriptor we'll use.
     */
    ofree = sc->sc_txfree;
    firsttx = sc->sc_txnext;

    CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
        device_get_name(sc->sc_dev), ofree, firsttx);

    /*
     * Loop through the send queue, setting up transmit descriptors
     * until we drain the queue, or use up all available transmit
     * descriptors.
     */
    txmfail = 0;
    for (ntx = 0;; ntx++) {
        /*
         * Grab a packet off the queue.
         */
        IF_DEQUEUE(&ifp->if_snd, m0);
        if (m0 == NULL)
            break;

        txmfail = gem_load_txmbuf(sc, m0);
        if (txmfail > 0) {
            /* Drop the mbuf and complain. */
            printf("gem_start: error %d while loading mbuf dma "
                "map\n", txmfail);
            continue;
        }
        /* Not enough descriptors. */
        if (txmfail == -1) {
            if (sc->sc_txfree == GEM_MAXTXFREE)
                panic("gem_start: mbuf chain too long!");
            IF_PREPEND(&ifp->if_snd, m0);
            break;
        }

        /* Kick the transmitter. */
        CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
            device_get_name(sc->sc_dev), sc->sc_txnext);
        bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
            sc->sc_txnext);

        if (ifp->if_bpf != NULL)
            bpf_mtap(ifp->if_bpf, m0);
    }

    if (txmfail == -1 || sc->sc_txfree == 0) {
        /* No more slots left; notify upper layer. */
        ifp->if_flags |= IFF_OACTIVE;
    }

    if (ntx > 0) {
        CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
            device_get_name(sc->sc_dev), firsttx);

        /* Set a watchdog timer in case the chip flakes out. */
        ifp->if_timer = 5;
        CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
            device_get_name(sc->sc_dev), ifp->if_timer);
    }
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
    struct gem_softc *sc;
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t mac = sc->sc_h;
    struct gem_txsoft *txs;
    int txlast;
    int progress = 0;

    CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

    /*
     * Unload collision counters
     */
    ifp->if_collisions +=
        bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
        bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
        bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
        bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

    /*
     * then clear the hardware counters.
     */
    bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
    bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
    bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
    bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

    /*
     * Go through our Tx list and free mbufs for those
     * frames that have been transmitted.
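     * A job is complete once the hardware completion index
     * (GEM_TX_COMPLETION, which points one past the last processed
     * descriptor) has moved beyond the job's txs_lastdesc; the two
     * range checks below handle jobs whose descriptor span wraps
     * around the end of the ring.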
     */
    while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
        GEM_CDTXSYNC(sc, txs->txs_lastdesc,
            txs->txs_ndescs,
            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
        if (ifp->if_flags & IFF_DEBUG) {
            int i;
            printf("    txsoft %p transmit chain:\n", txs);
            for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
                printf("descriptor %d: ", i);
                printf("gd_flags: 0x%016llx\t", (long long)
                    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
                printf("gd_addr: 0x%016llx\n", (long long)
                    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
                if (i == txs->txs_lastdesc)
                    break;
            }
        }
#endif

        /*
         * In theory, we could harvest some descriptors before
         * the ring is empty, but that's a bit complicated.
         *
         * GEM_TX_COMPLETION points to the last descriptor
         * processed +1.
         */
        txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
        CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
            "txs->txs_lastdesc = %d, txlast = %d",
            txs->txs_firstdesc, txs->txs_lastdesc, txlast);
        if (txs->txs_firstdesc <= txs->txs_lastdesc) {
            if ((txlast >= txs->txs_firstdesc) &&
                (txlast <= txs->txs_lastdesc))
                break;
        } else {
            /* Ick -- this command wraps */
            if ((txlast >= txs->txs_firstdesc) ||
                (txlast <= txs->txs_lastdesc))
                break;
        }

        CTR0(KTR_GEM, "gem_tint: releasing a desc");
        STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

        sc->sc_txfree += txs->txs_ndescs;

        bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
        if (txs->txs_mbuf != NULL) {
            m_freem(txs->txs_mbuf);
            txs->txs_mbuf = NULL;
        }

        STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

        ifp->if_opackets++;
        progress = 1;
    }

    CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
        "GEM_TX_DATA_PTR %llx "
        "GEM_TX_COMPLETION %x",
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
        ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
        GEM_TX_DATA_PTR_HI) << 32) |
        bus_space_read_4(sc->sc_bustag, sc->sc_h,
        GEM_TX_DATA_PTR_LO),
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

    if (progress) {
        if (sc->sc_txfree == GEM_NTXDESC - 1)
            sc->sc_txwin = 0;

        /* Freed some descriptors, so reset IFF_OACTIVE and restart. */
        ifp->if_flags &= ~IFF_OACTIVE;
        gem_start(ifp);

        if (STAILQ_EMPTY(&sc->sc_txdirtyq))
            ifp->if_timer = 0;
    }

    CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
        device_get_name(sc->sc_dev), ifp->if_timer);
}

#if 0
static void
gem_rint_timeout(arg)
    void *arg;
{

    gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
    struct gem_softc *sc;
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    struct gem_rxsoft *rxs;
    struct mbuf *m;
    u_int64_t rxstat;
    u_int32_t rxcomp;
    int i, len, progress = 0;

    callout_stop(&sc->sc_rx_ch);
    CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));

    /*
     * Read the completion register once. This limits
     * how long the following loop can execute.
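     * Frames that complete while we are looping will be picked up by
     * a subsequent interrupt; re-reading the register inside the loop
     * could otherwise keep us here indefinitely under load.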
     */
    rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

    CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
        sc->sc_rxptr, rxcomp);
    for (i = sc->sc_rxptr; i != rxcomp;
        i = GEM_NEXTRX(i)) {
        rxs = &sc->sc_rxsoft[i];

        GEM_CDRXSYNC(sc, i,
            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

        rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

        if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
            /*
             * The descriptor is still marked as owned, although
             * it is supposed to have completed. This has been
             * observed on some machines. Just exiting here
             * might leave the packet sitting around until another
             * one arrives to trigger a new interrupt, which is
             * generally undesirable, so set up a timeout.
             */
            callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
                gem_rint_timeout, sc);
#endif
            break;
        }

        progress++;
        ifp->if_ipackets++;

        if (rxstat & GEM_RD_BAD_CRC) {
            ifp->if_ierrors++;
            device_printf(sc->sc_dev, "receive error: CRC error\n");
            GEM_INIT_RXDESC(sc, i);
            continue;
        }

        bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
            BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
        if (ifp->if_flags & IFF_DEBUG) {
            printf("    rxsoft %p descriptor %d: ", rxs, i);
            printf("gd_flags: 0x%016llx\t", (long long)
                GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
            printf("gd_addr: 0x%016llx\n", (long long)
                GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
        }
#endif

        /*
         * No errors; receive the packet. Note the Gem
         * includes the CRC with every packet.
         */
        len = GEM_RD_BUFLEN(rxstat);

        /*
         * Allocate a new mbuf cluster. If that fails, we are
         * out of memory, and must drop the packet and recycle
         * the buffer that's already attached to this descriptor.
         */
        m = rxs->rxs_mbuf;
        if (gem_add_rxbuf(sc, i) != 0) {
            ifp->if_ierrors++;
            GEM_INIT_RXDESC(sc, i);
            bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
                BUS_DMASYNC_PREREAD);
            continue;
        }
        m->m_data += 2; /* We're already off by two */

        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

        /* Pass it on. */
        (*ifp->if_input)(ifp, m);
    }

    if (progress) {
        /* Update the receive pointer. */
        if (i == sc->sc_rxptr) {
            device_printf(sc->sc_dev, "rint: ring wrap\n");
        }
        sc->sc_rxptr = i;
        bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
    }

    CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
        sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
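 *
 *	If an mbuf is still loaded at this slot, its DMA map is unloaded
 *	first; the fresh cluster is then mapped, synced for the chip and
 *	the descriptor rewritten via GEM_INIT_RXDESC.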
 */
static int
gem_add_rxbuf(sc, idx)
    struct gem_softc *sc;
    int idx;
{
    struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
    struct mbuf *m;
    int error;

    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
    /* bzero the packet to check dma */
    memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

    if (rxs->rxs_mbuf != NULL)
        bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);

    rxs->rxs_mbuf = m;

    error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap,
        m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT);
    if (error != 0 || rxs->rxs_paddr == 0) {
        device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
            "%d\n", idx, error);
        panic("gem_add_rxbuf");	/* XXX */
    }

    bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

    GEM_INIT_RXDESC(sc, idx);

    return (0);
}

static void
gem_eint(sc, status)
    struct gem_softc *sc;
    u_int status;
{

    if ((status & GEM_INTR_MIF) != 0) {
        device_printf(sc->sc_dev, "XXXlink status changed\n");
        return;
    }

    device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
    void *v;
{
    struct gem_softc *sc = (struct gem_softc *)v;
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t seb = sc->sc_h;
    u_int32_t status;

    status = bus_space_read_4(t, seb, GEM_STATUS);
    CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
        device_get_name(sc->sc_dev), (status>>19),
        (u_int)status);

    if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
        gem_eint(sc, status);

    if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
        gem_tint(sc);

    if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
        gem_rint(sc);

    /*
     * We should eventually do more than just print out error stats.
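     * For now, only the fatal conditions get real handling: a transmit
     * underrun or over-long packet and a receive overflow each cause a
     * full reinit via gem_init().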
     */
    if (status & GEM_INTR_TX_MAC) {
        int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
        if (txstat & ~GEM_MAC_TX_XMIT_DONE)
            device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
                txstat);
        if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
            gem_init(sc);
    }
    if (status & GEM_INTR_RX_MAC) {
        int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
        if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
            device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
                rxstat);
        if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
            gem_init(sc);
    }
}

static void
gem_watchdog(ifp)
    struct ifnet *ifp;
{
    struct gem_softc *sc = ifp->if_softc;

    CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
        "GEM_MAC_RX_CONFIG %x",
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
    CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
        "GEM_MAC_TX_CONFIG %x",
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
        bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

    device_printf(sc->sc_dev, "device timeout\n");
    ++ifp->if_oerrors;

    /* Try to get more packets going. */
    gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
    struct gem_softc *sc;
{
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t mif = sc->sc_h;

    /* Configure the MIF in frame mode */
    sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
    sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
    bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
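 *
 * This driver uses frame mode: the two functions below assemble a
 * command word from the GEM_MIF_FRAME_* fields (operation, PHY address,
 * register number and, for writes, the data) and then poll the
 * GEM_MIF_FRAME_TA0 (turnaround/valid) bit that signals completion.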
 *
 */
int
gem_mii_readreg(dev, phy, reg)
    device_t dev;
    int phy, reg;
{
    struct gem_softc *sc = device_get_softc(dev);
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t mif = sc->sc_h;
    int n;
    u_int32_t v;

#ifdef GEM_DEBUG_PHY
    printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
    /* Select the desired PHY in the MIF configuration register */
    v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
    /* Clear PHY select bit */
    v &= ~GEM_MIF_CONFIG_PHY_SEL;
    if (phy == GEM_PHYAD_EXTERNAL)
        /* Set PHY select bit to get at external device */
        v |= GEM_MIF_CONFIG_PHY_SEL;
    bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

    /* Construct the frame command */
    v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
        GEM_MIF_FRAME_READ;

    bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
    for (n = 0; n < 100; n++) {
        DELAY(1);
        v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
        if (v & GEM_MIF_FRAME_TA0)
            return (v & GEM_MIF_FRAME_DATA);
    }

    device_printf(sc->sc_dev, "mii_read timeout\n");
    return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
    device_t dev;
    int phy, reg, val;
{
    struct gem_softc *sc = device_get_softc(dev);
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t mif = sc->sc_h;
    int n;
    u_int32_t v;

#ifdef GEM_DEBUG_PHY
    printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
    /* Select the desired PHY in the MIF configuration register */
    v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
    /* Clear PHY select bit */
    v &= ~GEM_MIF_CONFIG_PHY_SEL;
    if (phy == GEM_PHYAD_EXTERNAL)
        /* Set PHY select bit to get at external device */
        v |= GEM_MIF_CONFIG_PHY_SEL;
    bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
    /* Construct the frame command */
    v = GEM_MIF_FRAME_WRITE |
        (phy << GEM_MIF_PHY_SHIFT) |
        (reg << GEM_MIF_REG_SHIFT) |
        (val & GEM_MIF_FRAME_DATA);

    bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
    for (n = 0; n < 100; n++) {
        DELAY(1);
        v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
        if (v & GEM_MIF_FRAME_TA0)
            return (1);
    }

    device_printf(sc->sc_dev, "mii_write timeout\n");
    return (0);
}

void
gem_mii_statchg(dev)
    device_t dev;
{
    struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
    int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t mac = sc->sc_h;
    u_int32_t v;

#ifdef GEM_DEBUG
    if (sc->sc_debug)
        printf("gem_mii_statchg: status change: phy = %d\n",
            sc->sc_phys[instance]);
#endif

    /* Set tx full duplex options */
    bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
    DELAY(10000); /* reg must be cleared and delay before changing. */
    v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
        GEM_MAC_TX_ENABLE;
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
        v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
    }
    bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

    /* XIF Configuration */
    /* We should really calculate all this rather than rely on defaults */
    v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
    v = GEM_MAC_XIF_LINK_LED;
    v |= GEM_MAC_XIF_TX_MII_ENA;

    /* If an external transceiver is connected, enable its MII drivers */
    sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
    if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
        /* External MII needs echo disable if half duplex. */
        if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
            /* turn on full duplex LED */
            v |= GEM_MAC_XIF_FDPLX_LED;
        else
            /* half duplex -- disable echo */
            v |= GEM_MAC_XIF_ECHO_DISABL;

        if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
            v |= GEM_MAC_XIF_GMII_MODE;
        else
            v &= ~GEM_MAC_XIF_GMII_MODE;
    } else {
        /* Internal MII needs buf enable */
        v |= GEM_MAC_XIF_MII_BUF_ENA;
    }
    bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
    struct ifnet *ifp;
{
    struct gem_softc *sc = ifp->if_softc;

    /* XXX Add support for serial media. */

    return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
    struct ifnet *ifp;
    struct ifmediareq *ifmr;
{
    struct gem_softc *sc = ifp->if_softc;

    if ((ifp->if_flags & IFF_UP) == 0)
        return;

    mii_pollstat(sc->sc_mii);
    ifmr->ifm_active = sc->sc_mii->mii_media_active;
    ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
    struct ifnet *ifp;
    u_long cmd;
    caddr_t data;
{
    struct gem_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
    int s, error = 0;

    s = splnet();

    switch (cmd) {
    case SIOCSIFADDR:
    case SIOCGIFADDR:
    case SIOCSIFMTU:
        error = ether_ioctl(ifp, cmd, data);
        break;
    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
                gem_setladrf(sc);
            else
                gem_init(sc);
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                gem_stop(ifp, 0);
        }
        sc->sc_ifflags = ifp->if_flags;
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        gem_setladrf(sc);
        error = 0;
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
        break;
    default:
        error = ENOTTY;
        break;
    }

    /* Try to get things going again */
    if (ifp->if_flags & IFF_UP)
        gem_start(ifp);
    splx(s);
    return (error);
}

/*
 * Set up the logical address filter.
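 *
 * The receive filter has three modes, selected below: promiscuous
 * (accept everything), promiscuous group (accept all multicast) and a
 * 256-bit hash filter that gem_setladrf() programs from the interface
 * multicast list using the CRC computed in the loop below.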
 */
static void
gem_setladrf(sc)
    struct gem_softc *sc;
{
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    struct ifmultiaddr *inm;
    struct sockaddr_dl *sdl;
    bus_space_tag_t t = sc->sc_bustag;
    bus_space_handle_t h = sc->sc_h;
    u_char *cp;
    u_int32_t crc;
    u_int32_t hash[16];
    u_int32_t v;
    int len;
    int i;

    /* Get current RX configuration */
    v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

    /*
     * Turn off promiscuous mode, promiscuous group mode (all multicast),
     * and hash filter. Depending on the case, the right bit will be
     * enabled.
     */
    v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
        GEM_MAC_RX_PROMISC_GRP);

    if ((ifp->if_flags & IFF_PROMISC) != 0) {
        /* Turn on promiscuous mode */
        v |= GEM_MAC_RX_PROMISCUOUS;
        goto chipit;
    }
    if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
        hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
        ifp->if_flags |= IFF_ALLMULTI;
        v |= GEM_MAC_RX_PROMISC_GRP;
        goto chipit;
    }

    /*
     * Set up multicast address filter by passing all multicast addresses
     * through a crc generator, and then using the high order 8 bits as an
     * index into the 256 bit logical address filter. The high order 4
     * bits selects the word, while the other 4 bits select the bit within
     * the word (where bit 0 is the MSB).
     */

    /* Clear hash table */
    memset(hash, 0, sizeof(hash));

    TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
        if (inm->ifma_addr->sa_family != AF_LINK)
            continue;
        sdl = (struct sockaddr_dl *)inm->ifma_addr;
        cp = LLADDR(sdl);
        crc = 0xffffffff;
        for (len = sdl->sdl_alen; --len >= 0;) {
            int octet = *cp++;
            int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
            for (i = 0; i < 8; i++) {
                if ((crc & 1) ^ (octet & 1)) {
                    crc >>= 1;
                    crc ^= MC_POLY_LE;
                } else {
                    crc >>= 1;
                }
                octet >>= 1;
            }
        }
        /* Just want the 8 most significant bits. */
        crc >>= 24;

        /* Set the corresponding bit in the filter. */
        hash[crc >> 4] |= 1 << (15 - (crc & 15));
    }

    v |= GEM_MAC_RX_HASH_FILTER;
    ifp->if_flags &= ~IFF_ALLMULTI;

    /* Now load the hash table into the chip (if we are using it) */
    for (i = 0; i < 16; i++) {
        bus_space_write_4(t, h,
            GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
            hash[i]);
    }

chipit:
    bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
    int why;
    void *arg;
{
    struct gem_softc *sc = arg;
    struct ifnet *ifp = &sc->sc_arpcom.ac_if;
    int s;

    s = splnet();
    switch (why) {
    case PWR_SUSPEND:
    case PWR_STANDBY:
        gem_stop(ifp, 1);
        if (sc->sc_power != NULL)
            (*sc->sc_power)(sc, why);
        break;
    case PWR_RESUME:
        if (ifp->if_flags & IFF_UP) {
            if (sc->sc_power != NULL)
                (*sc->sc_power)(sc, why);
            gem_init(ifp);
        }
        break;
    case PWR_SOFTSUSPEND:
    case PWR_SOFTSTANDBY:
    case PWR_SOFTRESUME:
        break;
    }
    splx(s);
}
#endif