1 /* 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 28 * 29 * $FreeBSD$ 30 */ 31 32 /* 33 * Driver for Sun GEM ethernet controllers. 34 */ 35 36 #define GEM_DEBUG 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/bus.h> 41 #include <sys/callout.h> 42 #include <sys/endian.h> 43 #include <sys/mbuf.h> 44 #include <sys/malloc.h> 45 #include <sys/kernel.h> 46 #include <sys/socket.h> 47 #include <sys/sockio.h> 48 49 #include <net/bpf.h> 50 #include <net/ethernet.h> 51 #include <net/if.h> 52 #include <net/if_arp.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 56 #include <machine/bus.h> 57 58 #include <dev/mii/mii.h> 59 #include <dev/mii/miivar.h> 60 61 #include <gem/if_gemreg.h> 62 #include <gem/if_gemvar.h> 63 64 #define TRIES 10000 65 66 static void gem_start(struct ifnet *); 67 static void gem_stop(struct ifnet *, int); 68 static int gem_ioctl(struct ifnet *, u_long, caddr_t); 69 static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); 70 static void gem_rxdma_callback(void *, bus_dma_segment_t *, int, 71 bus_size_t, int); 72 static void gem_txdma_callback(void *, bus_dma_segment_t *, int, 73 bus_size_t, int); 74 static void gem_tick(void *); 75 static void gem_watchdog(struct ifnet *); 76 static void gem_init(void *); 77 static void gem_init_regs(struct gem_softc *sc); 78 static int gem_ringsize(int sz); 79 static int gem_meminit(struct gem_softc *); 80 static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); 81 static void gem_mifinit(struct gem_softc *); 82 static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, 83 u_int32_t clr, u_int32_t set); 84 static int gem_reset_rx(struct gem_softc *); 85 static int gem_reset_tx(struct gem_softc *); 86 static int gem_disable_rx(struct gem_softc *); 87 static int gem_disable_tx(struct gem_softc *); 88 static void gem_rxdrain(struct gem_softc *); 89 static int gem_add_rxbuf(struct gem_softc *, int); 90 static void gem_setladrf(struct gem_softc *); 91 92 struct mbuf *gem_get(struct gem_softc *, int, int); 93 static void gem_eint(struct gem_softc *, u_int); 94 static void gem_rint(struct gem_softc *); 95 #if 0 96 static void gem_rint_timeout(void *); 97 #endif 98 static void gem_tint(struct gem_softc *); 99 #ifdef notyet 100 static void gem_power(int, void *); 101 #endif 102 103 devclass_t gem_devclass; 104 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 105 MODULE_DEPEND(gem, miibus, 1, 1, 1); 106 107 #ifdef GEM_DEBUG 108 #include <sys/ktr.h> 109 #define KTR_GEM KTR_CT2 110 #endif 111 112 #define GEM_NSEGS GEM_NTXSEGS 113 114 /* 115 * gem_attach: 116 * 117 * Attach a Gem interface to the system. 118 */ 119 int 120 gem_attach(sc) 121 struct gem_softc *sc; 122 { 123 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 124 struct mii_softc *child; 125 int i, error; 126 u_int32_t v; 127 128 /* Make sure the chip is stopped. */ 129 ifp->if_softc = sc; 130 gem_reset(sc); 131 132 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 133 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, 134 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag); 135 if (error) 136 return (error); 137 138 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 139 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 140 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, 141 &sc->sc_rdmatag); 142 if (error) 143 goto fail_ptag; 144 145 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 146 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 147 GEM_TD_BUFSIZE, GEM_NTXSEGS, BUS_SPACE_MAXSIZE_32BIT, 148 BUS_DMA_ALLOCNOW, &sc->sc_tdmatag); 149 if (error) 150 goto fail_rtag; 151 152 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 153 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 154 sizeof(struct gem_control_data), 1, 155 sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, 156 &sc->sc_cdmatag); 157 if (error) 158 goto fail_ttag; 159 160 /* 161 * Allocate the control data structures, and create and load the 162 * DMA map for it. 163 */ 164 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 165 (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { 166 device_printf(sc->sc_dev, "unable to allocate control data," 167 " error = %d\n", error); 168 goto fail_ctag; 169 } 170 171 sc->sc_cddma = 0; 172 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 173 sc->sc_control_data, sizeof(struct gem_control_data), 174 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 175 device_printf(sc->sc_dev, "unable to load control data DMA " 176 "map, error = %d\n", error); 177 goto fail_cmem; 178 } 179 180 /* 181 * Initialize the transmit job descriptors. 182 */ 183 STAILQ_INIT(&sc->sc_txfreeq); 184 STAILQ_INIT(&sc->sc_txdirtyq); 185 186 /* 187 * Create the transmit buffer DMA maps. 188 */ 189 error = ENOMEM; 190 for (i = 0; i < GEM_TXQUEUELEN; i++) { 191 struct gem_txsoft *txs; 192 193 txs = &sc->sc_txsoft[i]; 194 txs->txs_mbuf = NULL; 195 txs->txs_ndescs = 0; 196 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 197 &txs->txs_dmamap)) != 0) { 198 device_printf(sc->sc_dev, "unable to create tx DMA map " 199 "%d, error = %d\n", i, error); 200 goto fail_txd; 201 } 202 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 203 } 204 205 /* 206 * Create the receive buffer DMA maps. 207 */ 208 for (i = 0; i < GEM_NRXDESC; i++) { 209 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 210 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 211 device_printf(sc->sc_dev, "unable to create rx DMA map " 212 "%d, error = %d\n", i, error); 213 goto fail_rxd; 214 } 215 sc->sc_rxsoft[i].rxs_mbuf = NULL; 216 } 217 218 219 gem_mifinit(sc); 220 221 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, 222 gem_mediastatus)) != 0) { 223 device_printf(sc->sc_dev, "phy probe failed: %d\n", error); 224 goto fail_rxd; 225 } 226 sc->sc_mii = device_get_softc(sc->sc_miibus); 227 228 /* 229 * From this point forward, the attachment cannot fail. A failure 230 * before this point releases all resources that may have been 231 * allocated. 232 */ 233 234 /* Announce ourselves. */ 235 device_printf(sc->sc_dev, "Ethernet address:"); 236 for (i = 0; i < 6; i++) 237 printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]); 238 239 /* Get RX FIFO size */ 240 sc->sc_rxfifosize = 64 * 241 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); 242 printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024); 243 244 /* Get TX FIFO size */ 245 v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); 246 printf(", %uKB TX fifo\n", v / 16); 247 248 /* Initialize ifnet structure. */ 249 ifp->if_softc = sc; 250 ifp->if_unit = device_get_unit(sc->sc_dev); 251 ifp->if_name = "gem"; 252 ifp->if_mtu = ETHERMTU; 253 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 254 ifp->if_start = gem_start; 255 ifp->if_ioctl = gem_ioctl; 256 ifp->if_watchdog = gem_watchdog; 257 ifp->if_init = gem_init; 258 ifp->if_output = ether_output; 259 ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; 260 /* 261 * Walk along the list of attached MII devices and 262 * establish an `MII instance' to `phy number' 263 * mapping. We'll use this mapping in media change 264 * requests to determine which phy to use to program 265 * the MIF configuration register. 266 */ 267 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; 268 child = LIST_NEXT(child, mii_list)) { 269 /* 270 * Note: we support just two PHYs: the built-in 271 * internal device and an external on the MII 272 * connector. 273 */ 274 if (child->mii_phy > 1 || child->mii_inst > 1) { 275 device_printf(sc->sc_dev, "cannot accomodate " 276 "MII device %s at phy %d, instance %d\n", 277 device_get_name(child->mii_dev), 278 child->mii_phy, child->mii_inst); 279 continue; 280 } 281 282 sc->sc_phys[child->mii_inst] = child->mii_phy; 283 } 284 285 /* 286 * Now select and activate the PHY we will use. 287 * 288 * The order of preference is External (MDI1), 289 * Internal (MDI0), Serial Link (no MII). 290 */ 291 if (sc->sc_phys[1]) { 292 #ifdef GEM_DEBUG 293 printf("using external phy\n"); 294 #endif 295 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 296 } else { 297 #ifdef GEM_DEBUG 298 printf("using internal phy\n"); 299 #endif 300 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 301 } 302 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, 303 sc->sc_mif_config); 304 /* Attach the interface. */ 305 ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr); 306 307 #if notyet 308 /* 309 * Add a suspend hook to make sure we come back up after a 310 * resume. 311 */ 312 sc->sc_powerhook = powerhook_establish(gem_power, sc); 313 if (sc->sc_powerhook == NULL) 314 device_printf(sc->sc_dev, "WARNING: unable to establish power " 315 "hook\n"); 316 #endif 317 318 callout_init(&sc->sc_tick_ch, 0); 319 callout_init(&sc->sc_rx_ch, 0); 320 return (0); 321 322 /* 323 * Free any resources we've allocated during the failed attach 324 * attempt. Do this in reverse order and fall through. 325 */ 326 fail_rxd: 327 for (i = 0; i < GEM_NRXDESC; i++) { 328 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 329 bus_dmamap_destroy(sc->sc_rdmatag, 330 sc->sc_rxsoft[i].rxs_dmamap); 331 } 332 fail_txd: 333 for (i = 0; i < GEM_TXQUEUELEN; i++) { 334 if (sc->sc_txsoft[i].txs_dmamap != NULL) 335 bus_dmamap_destroy(sc->sc_tdmatag, 336 sc->sc_txsoft[i].txs_dmamap); 337 } 338 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 339 fail_cmem: 340 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 341 sc->sc_cddmamap); 342 fail_ctag: 343 bus_dma_tag_destroy(sc->sc_cdmatag); 344 fail_ttag: 345 bus_dma_tag_destroy(sc->sc_tdmatag); 346 fail_rtag: 347 bus_dma_tag_destroy(sc->sc_rdmatag); 348 fail_ptag: 349 bus_dma_tag_destroy(sc->sc_pdmatag); 350 return (error); 351 } 352 353 void 354 gem_detach(sc) 355 struct gem_softc *sc; 356 { 357 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 358 int i; 359 360 ether_ifdetach(ifp); 361 gem_stop(ifp, 1); 362 device_delete_child(sc->sc_dev, sc->sc_miibus); 363 364 for (i = 0; i < GEM_NRXDESC; i++) { 365 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 366 bus_dmamap_destroy(sc->sc_rdmatag, 367 sc->sc_rxsoft[i].rxs_dmamap); 368 } 369 for (i = 0; i < GEM_TXQUEUELEN; i++) { 370 if (sc->sc_txsoft[i].txs_dmamap != NULL) 371 bus_dmamap_destroy(sc->sc_tdmatag, 372 sc->sc_txsoft[i].txs_dmamap); 373 } 374 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 375 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 376 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 377 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 378 sc->sc_cddmamap); 379 bus_dma_tag_destroy(sc->sc_cdmatag); 380 bus_dma_tag_destroy(sc->sc_tdmatag); 381 bus_dma_tag_destroy(sc->sc_rdmatag); 382 bus_dma_tag_destroy(sc->sc_pdmatag); 383 } 384 385 void 386 gem_suspend(sc) 387 struct gem_softc *sc; 388 { 389 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 390 391 gem_stop(ifp, 0); 392 } 393 394 void 395 gem_resume(sc) 396 struct gem_softc *sc; 397 { 398 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 399 400 if (ifp->if_flags & IFF_UP) 401 gem_init(ifp); 402 } 403 404 static void 405 gem_cddma_callback(xsc, segs, nsegs, error) 406 void *xsc; 407 bus_dma_segment_t *segs; 408 int nsegs; 409 int error; 410 { 411 struct gem_softc *sc = (struct gem_softc *)xsc; 412 413 if (error != 0) 414 return; 415 if (nsegs != 1) { 416 /* can't happen... */ 417 panic("gem_cddma_callback: bad control buffer segment count"); 418 } 419 sc->sc_cddma = segs[0].ds_addr; 420 } 421 422 static void 423 gem_rxdma_callback(xsc, segs, nsegs, totsz, error) 424 void *xsc; 425 bus_dma_segment_t *segs; 426 int nsegs; 427 bus_size_t totsz; 428 int error; 429 { 430 struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc; 431 432 if (error != 0) 433 return; 434 KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count")); 435 rxs->rxs_paddr = segs[0].ds_addr; 436 } 437 438 static void 439 gem_txdma_callback(xsc, segs, nsegs, totsz, error) 440 void *xsc; 441 bus_dma_segment_t *segs; 442 int nsegs; 443 bus_size_t totsz; 444 int error; 445 { 446 struct gem_txdma *txd = (struct gem_txdma *)xsc; 447 struct gem_softc *sc = txd->txd_sc; 448 struct gem_txsoft *txs = txd->txd_txs; 449 bus_size_t len = 0; 450 uint64_t flags = 0; 451 int seg, nexttx; 452 453 if (error != 0) 454 return; 455 /* 456 * Ensure we have enough descriptors free to describe 457 * the packet. Note, we always reserve one descriptor 458 * at the end of the ring as a termination point, to 459 * prevent wrap-around. 460 */ 461 if (nsegs > sc->sc_txfree - 1) { 462 txs->txs_ndescs = -1; 463 return; 464 } 465 txs->txs_ndescs = nsegs; 466 467 nexttx = txs->txs_firstdesc; 468 /* 469 * Initialize the transmit descriptors. 470 */ 471 for (seg = 0; seg < nsegs; 472 seg++, nexttx = GEM_NEXTTX(nexttx)) { 473 CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " 474 "%lx, addr %#lx (%#lx)", seg, nexttx, 475 segs[seg].ds_len, segs[seg].ds_addr, 476 GEM_DMA_WRITE(sc, segs[seg].ds_addr)); 477 478 if (segs[seg].ds_len == 0) 479 continue; 480 sc->sc_txdescs[nexttx].gd_addr = 481 GEM_DMA_WRITE(sc, segs[seg].ds_addr); 482 KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, 483 ("gem_txdma_callback: segment size too large!")); 484 flags = segs[seg].ds_len & GEM_TD_BUFSIZE; 485 if (len == 0) { 486 CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " 487 "tx %d", seg, nexttx); 488 flags |= GEM_TD_START_OF_PACKET; 489 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 490 sc->sc_txwin = 0; 491 flags |= GEM_TD_INTERRUPT_ME; 492 } 493 } 494 if (len + segs[seg].ds_len == totsz) { 495 CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " 496 "tx %d", seg, nexttx); 497 flags |= GEM_TD_END_OF_PACKET; 498 } 499 sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); 500 txs->txs_lastdesc = nexttx; 501 len += segs[seg].ds_len; 502 } 503 KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, 504 ("gem_txdma_callback: missed end of packet!")); 505 } 506 507 static void 508 gem_tick(arg) 509 void *arg; 510 { 511 struct gem_softc *sc = arg; 512 int s; 513 514 s = splnet(); 515 mii_tick(sc->sc_mii); 516 splx(s); 517 518 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 519 } 520 521 static int 522 gem_bitwait(sc, r, clr, set) 523 struct gem_softc *sc; 524 bus_addr_t r; 525 u_int32_t clr; 526 u_int32_t set; 527 { 528 int i; 529 u_int32_t reg; 530 531 for (i = TRIES; i--; DELAY(100)) { 532 reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); 533 if ((r & clr) == 0 && (r & set) == set) 534 return (1); 535 } 536 return (0); 537 } 538 539 void 540 gem_reset(sc) 541 struct gem_softc *sc; 542 { 543 bus_space_tag_t t = sc->sc_bustag; 544 bus_space_handle_t h = sc->sc_h; 545 int s; 546 547 s = splnet(); 548 CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); 549 gem_reset_rx(sc); 550 gem_reset_tx(sc); 551 552 /* Do a full reset */ 553 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 554 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 555 device_printf(sc->sc_dev, "cannot reset device\n"); 556 splx(s); 557 } 558 559 560 /* 561 * gem_rxdrain: 562 * 563 * Drain the receive queue. 564 */ 565 static void 566 gem_rxdrain(sc) 567 struct gem_softc *sc; 568 { 569 struct gem_rxsoft *rxs; 570 int i; 571 572 for (i = 0; i < GEM_NRXDESC; i++) { 573 rxs = &sc->sc_rxsoft[i]; 574 if (rxs->rxs_mbuf != NULL) { 575 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 576 BUS_DMASYNC_POSTREAD); 577 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 578 m_freem(rxs->rxs_mbuf); 579 rxs->rxs_mbuf = NULL; 580 } 581 } 582 } 583 584 /* 585 * Reset the whole thing. 586 */ 587 static void 588 gem_stop(ifp, disable) 589 struct ifnet *ifp; 590 int disable; 591 { 592 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 593 struct gem_txsoft *txs; 594 595 CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); 596 597 callout_stop(&sc->sc_tick_ch); 598 599 /* XXX - Should we reset these instead? */ 600 gem_disable_tx(sc); 601 gem_disable_rx(sc); 602 603 /* 604 * Release any queued transmit buffers. 605 */ 606 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 607 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 608 if (txs->txs_ndescs != 0) { 609 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 610 BUS_DMASYNC_POSTWRITE); 611 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 612 if (txs->txs_mbuf != NULL) { 613 m_freem(txs->txs_mbuf); 614 txs->txs_mbuf = NULL; 615 } 616 } 617 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 618 } 619 620 if (disable) 621 gem_rxdrain(sc); 622 623 /* 624 * Mark the interface down and cancel the watchdog timer. 625 */ 626 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 627 ifp->if_timer = 0; 628 } 629 630 /* 631 * Reset the receiver 632 */ 633 int 634 gem_reset_rx(sc) 635 struct gem_softc *sc; 636 { 637 bus_space_tag_t t = sc->sc_bustag; 638 bus_space_handle_t h = sc->sc_h; 639 640 /* 641 * Resetting while DMA is in progress can cause a bus hang, so we 642 * disable DMA first. 643 */ 644 gem_disable_rx(sc); 645 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 646 /* Wait till it finishes */ 647 if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) 648 device_printf(sc->sc_dev, "cannot disable read dma\n"); 649 650 /* Wait 5ms extra. */ 651 DELAY(5000); 652 653 /* Finally, reset the ERX */ 654 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); 655 /* Wait till it finishes */ 656 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 657 device_printf(sc->sc_dev, "cannot reset receiver\n"); 658 return (1); 659 } 660 return (0); 661 } 662 663 664 /* 665 * Reset the transmitter 666 */ 667 static int 668 gem_reset_tx(sc) 669 struct gem_softc *sc; 670 { 671 bus_space_tag_t t = sc->sc_bustag; 672 bus_space_handle_t h = sc->sc_h; 673 int i; 674 675 /* 676 * Resetting while DMA is in progress can cause a bus hang, so we 677 * disable DMA first. 678 */ 679 gem_disable_tx(sc); 680 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 681 /* Wait till it finishes */ 682 if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) 683 device_printf(sc->sc_dev, "cannot disable read dma\n"); 684 685 /* Wait 5ms extra. */ 686 DELAY(5000); 687 688 /* Finally, reset the ETX */ 689 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); 690 /* Wait till it finishes */ 691 for (i = TRIES; i--; DELAY(100)) 692 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) 693 break; 694 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 695 device_printf(sc->sc_dev, "cannot reset receiver\n"); 696 return (1); 697 } 698 return (0); 699 } 700 701 /* 702 * disable receiver. 703 */ 704 static int 705 gem_disable_rx(sc) 706 struct gem_softc *sc; 707 { 708 bus_space_tag_t t = sc->sc_bustag; 709 bus_space_handle_t h = sc->sc_h; 710 u_int32_t cfg; 711 712 /* Flip the enable bit */ 713 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 714 cfg &= ~GEM_MAC_RX_ENABLE; 715 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 716 717 /* Wait for it to finish */ 718 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 719 } 720 721 /* 722 * disable transmitter. 723 */ 724 static int 725 gem_disable_tx(sc) 726 struct gem_softc *sc; 727 { 728 bus_space_tag_t t = sc->sc_bustag; 729 bus_space_handle_t h = sc->sc_h; 730 u_int32_t cfg; 731 732 /* Flip the enable bit */ 733 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 734 cfg &= ~GEM_MAC_TX_ENABLE; 735 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 736 737 /* Wait for it to finish */ 738 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 739 } 740 741 /* 742 * Initialize interface. 743 */ 744 static int 745 gem_meminit(sc) 746 struct gem_softc *sc; 747 { 748 struct gem_rxsoft *rxs; 749 int i, error; 750 751 /* 752 * Initialize the transmit descriptor ring. 753 */ 754 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 755 for (i = 0; i < GEM_NTXDESC; i++) { 756 sc->sc_txdescs[i].gd_flags = 0; 757 sc->sc_txdescs[i].gd_addr = 0; 758 } 759 sc->sc_txfree = GEM_MAXTXFREE; 760 sc->sc_txnext = 0; 761 sc->sc_txwin = 0; 762 763 /* 764 * Initialize the receive descriptor and receive job 765 * descriptor rings. 766 */ 767 for (i = 0; i < GEM_NRXDESC; i++) { 768 rxs = &sc->sc_rxsoft[i]; 769 if (rxs->rxs_mbuf == NULL) { 770 if ((error = gem_add_rxbuf(sc, i)) != 0) { 771 device_printf(sc->sc_dev, "unable to " 772 "allocate or map rx buffer %d, error = " 773 "%d\n", i, error); 774 /* 775 * XXX Should attempt to run with fewer receive 776 * XXX buffers instead of just failing. 777 */ 778 gem_rxdrain(sc); 779 return (1); 780 } 781 } else 782 GEM_INIT_RXDESC(sc, i); 783 } 784 sc->sc_rxptr = 0; 785 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 786 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 787 788 return (0); 789 } 790 791 static int 792 gem_ringsize(sz) 793 int sz; 794 { 795 int v = 0; 796 797 switch (sz) { 798 case 32: 799 v = GEM_RING_SZ_32; 800 break; 801 case 64: 802 v = GEM_RING_SZ_64; 803 break; 804 case 128: 805 v = GEM_RING_SZ_128; 806 break; 807 case 256: 808 v = GEM_RING_SZ_256; 809 break; 810 case 512: 811 v = GEM_RING_SZ_512; 812 break; 813 case 1024: 814 v = GEM_RING_SZ_1024; 815 break; 816 case 2048: 817 v = GEM_RING_SZ_2048; 818 break; 819 case 4096: 820 v = GEM_RING_SZ_4096; 821 break; 822 case 8192: 823 v = GEM_RING_SZ_8192; 824 break; 825 default: 826 printf("gem: invalid Receive Descriptor ring size\n"); 827 break; 828 } 829 return (v); 830 } 831 832 /* 833 * Initialization of interface; set up initialization block 834 * and transmit/receive descriptor rings. 835 */ 836 static void 837 gem_init(xsc) 838 void *xsc; 839 { 840 struct gem_softc *sc = (struct gem_softc *)xsc; 841 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 842 bus_space_tag_t t = sc->sc_bustag; 843 bus_space_handle_t h = sc->sc_h; 844 int s; 845 u_int32_t v; 846 847 s = splnet(); 848 849 CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); 850 /* 851 * Initialization sequence. The numbered steps below correspond 852 * to the sequence outlined in section 6.3.5.1 in the Ethernet 853 * Channel Engine manual (part of the PCIO manual). 854 * See also the STP2002-STQ document from Sun Microsystems. 855 */ 856 857 /* step 1 & 2. Reset the Ethernet Channel */ 858 gem_stop(&sc->sc_arpcom.ac_if, 0); 859 gem_reset(sc); 860 CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); 861 862 /* Re-initialize the MIF */ 863 gem_mifinit(sc); 864 865 /* step 3. Setup data structures in host memory */ 866 gem_meminit(sc); 867 868 /* step 4. TX MAC registers & counters */ 869 gem_init_regs(sc); 870 /* XXX: VLAN code from NetBSD temporarily removed. */ 871 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 872 (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); 873 874 /* step 5. RX MAC registers & counters */ 875 gem_setladrf(sc); 876 877 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 878 /* NOTE: we use only 32-bit DMA addresses here. */ 879 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); 880 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 881 882 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 883 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 884 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", 885 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 886 887 /* step 8. Global Configuration & Interrupt Mask */ 888 bus_space_write_4(t, h, GEM_INTMASK, 889 ~(GEM_INTR_TX_INTME| 890 GEM_INTR_TX_EMPTY| 891 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| 892 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| 893 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| 894 GEM_INTR_BERR)); 895 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 896 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); 897 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ 898 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ 899 900 /* step 9. ETX Configuration: use mostly default values */ 901 902 /* Enable DMA */ 903 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 904 bus_space_write_4(t, h, GEM_TX_CONFIG, 905 v|GEM_TX_CONFIG_TXDMA_EN| 906 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 907 908 /* step 10. ERX Configuration */ 909 910 /* Encode Receive Descriptor ring size: four possible values */ 911 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 912 913 /* Enable DMA */ 914 bus_space_write_4(t, h, GEM_RX_CONFIG, 915 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 916 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| 917 (0<<GEM_RX_CONFIG_CXM_START_SHFT)); 918 /* 919 * The following value is for an OFF Threshold of about 3/4 full 920 * and an ON Threshold of 1/4 full. 921 */ 922 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 923 (3 * sc->sc_rxfifosize / 256) | 924 ( (sc->sc_rxfifosize / 256) << 12)); 925 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); 926 927 /* step 11. Configure Media */ 928 mii_mediachg(sc->sc_mii); 929 930 /* step 12. RX_MAC Configuration Register */ 931 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 932 v |= GEM_MAC_RX_ENABLE; 933 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 934 935 /* step 14. Issue Transmit Pending command */ 936 937 /* step 15. Give the reciever a swift kick */ 938 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 939 940 /* Start the one second timer. */ 941 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 942 943 ifp->if_flags |= IFF_RUNNING; 944 ifp->if_flags &= ~IFF_OACTIVE; 945 ifp->if_timer = 0; 946 sc->sc_ifflags = ifp->if_flags; 947 splx(s); 948 } 949 950 static int 951 gem_load_txmbuf(sc, m0) 952 struct gem_softc *sc; 953 struct mbuf *m0; 954 { 955 struct gem_txdma txd; 956 struct gem_txsoft *txs; 957 int error; 958 959 /* Get a work queue entry. */ 960 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 961 /* Ran out of descriptors. */ 962 return (-1); 963 } 964 txd.txd_sc = sc; 965 txd.txd_txs = txs; 966 txs->txs_mbuf = m0; 967 txs->txs_firstdesc = sc->sc_txnext; 968 error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, 969 gem_txdma_callback, &txd, BUS_DMA_NOWAIT); 970 if (error != 0) 971 goto fail; 972 if (txs->txs_ndescs == -1) { 973 error = -1; 974 goto fail; 975 } 976 977 /* Sync the DMA map. */ 978 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 979 BUS_DMASYNC_PREWRITE); 980 981 CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " 982 "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, 983 txs->txs_ndescs); 984 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 985 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 986 987 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 988 sc->sc_txfree -= txs->txs_ndescs; 989 return (0); 990 991 fail: 992 CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); 993 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 994 return (error); 995 } 996 997 static void 998 gem_init_regs(sc) 999 struct gem_softc *sc; 1000 { 1001 bus_space_tag_t t = sc->sc_bustag; 1002 bus_space_handle_t h = sc->sc_h; 1003 const u_char *laddr = sc->sc_arpcom.ac_enaddr; 1004 u_int32_t v; 1005 1006 /* These regs are not cleared on reset */ 1007 if (!sc->sc_inited) { 1008 1009 /* Wooo. Magic values. */ 1010 bus_space_write_4(t, h, GEM_MAC_IPG0, 0); 1011 bus_space_write_4(t, h, GEM_MAC_IPG1, 8); 1012 bus_space_write_4(t, h, GEM_MAC_IPG2, 4); 1013 1014 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1015 /* Max frame and max burst size */ 1016 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1017 ETHER_MAX_LEN | (0x2000<<16)); 1018 1019 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); 1020 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); 1021 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1022 /* Dunno.... */ 1023 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 1024 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 1025 ((laddr[5]<<8)|laddr[4])&0x3ff); 1026 1027 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1028 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 1029 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 1030 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 1031 1032 /* MAC control addr set to 01:80:c2:00:00:01 */ 1033 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 1034 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 1035 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 1036 1037 /* MAC filter addr set to 0:0:0:0:0:0 */ 1038 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 1039 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 1040 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 1041 1042 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 1043 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 1044 1045 sc->sc_inited = 1; 1046 } 1047 1048 /* Counters need to be zeroed */ 1049 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 1050 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 1051 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 1052 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 1053 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 1054 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 1055 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 1056 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1057 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1058 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1059 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1060 1061 /* Un-pause stuff */ 1062 #if 0 1063 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1064 #else 1065 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); 1066 #endif 1067 1068 /* 1069 * Set the station address. 1070 */ 1071 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1072 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1073 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1074 1075 /* 1076 * Enable MII outputs. Enable GMII if there is a gigabit PHY. 1077 */ 1078 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); 1079 v = GEM_MAC_XIF_TX_MII_ENA; 1080 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 1081 v |= GEM_MAC_XIF_FDPLX_LED; 1082 if (sc->sc_flags & GEM_GIGABIT) 1083 v |= GEM_MAC_XIF_GMII_MODE; 1084 } 1085 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); 1086 } 1087 1088 static void 1089 gem_start(ifp) 1090 struct ifnet *ifp; 1091 { 1092 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1093 struct mbuf *m0 = NULL; 1094 int firsttx, ntx, ofree, txmfail; 1095 1096 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1097 return; 1098 1099 /* 1100 * Remember the previous number of free descriptors and 1101 * the first descriptor we'll use. 1102 */ 1103 ofree = sc->sc_txfree; 1104 firsttx = sc->sc_txnext; 1105 1106 CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", 1107 device_get_name(sc->sc_dev), ofree, firsttx); 1108 1109 /* 1110 * Loop through the send queue, setting up transmit descriptors 1111 * until we drain the queue, or use up all available transmit 1112 * descriptors. 1113 */ 1114 txmfail = 0; 1115 for (ntx = 0;; ntx++) { 1116 /* 1117 * Grab a packet off the queue. 1118 */ 1119 IF_DEQUEUE(&ifp->if_snd, m0); 1120 if (m0 == NULL) 1121 break; 1122 1123 txmfail = gem_load_txmbuf(sc, m0); 1124 if (txmfail > 0) { 1125 /* Drop the mbuf and complain. */ 1126 printf("gem_start: error %d while loading mbuf dma " 1127 "map\n", txmfail); 1128 continue; 1129 } 1130 /* Not enough descriptors. */ 1131 if (txmfail == -1) { 1132 if (sc->sc_txfree == GEM_MAXTXFREE) 1133 panic("gem_start: mbuf chain too long!"); 1134 IF_PREPEND(&ifp->if_snd, m0); 1135 break; 1136 } 1137 1138 /* Kick the transmitter. */ 1139 CTR2(KTR_GEM, "%s: gem_start: kicking tx %d", 1140 device_get_name(sc->sc_dev), sc->sc_txnext); 1141 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, 1142 sc->sc_txnext); 1143 1144 if (ifp->if_bpf != NULL) 1145 bpf_mtap(ifp->if_bpf, m0); 1146 } 1147 1148 if (txmfail == -1 || sc->sc_txfree == 0) { 1149 /* No more slots left; notify upper layer. */ 1150 ifp->if_flags |= IFF_OACTIVE; 1151 } 1152 1153 if (ntx > 0) { 1154 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1155 1156 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1157 device_get_name(sc->sc_dev), firsttx); 1158 1159 /* Set a watchdog timer in case the chip flakes out. */ 1160 ifp->if_timer = 5; 1161 CTR2(KTR_GEM, "%s: gem_start: watchdog %d", 1162 device_get_name(sc->sc_dev), ifp->if_timer); 1163 } 1164 } 1165 1166 /* 1167 * Transmit interrupt. 1168 */ 1169 static void 1170 gem_tint(sc) 1171 struct gem_softc *sc; 1172 { 1173 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1174 bus_space_tag_t t = sc->sc_bustag; 1175 bus_space_handle_t mac = sc->sc_h; 1176 struct gem_txsoft *txs; 1177 int txlast; 1178 int progress = 0; 1179 1180 1181 CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev)); 1182 1183 /* 1184 * Unload collision counters 1185 */ 1186 ifp->if_collisions += 1187 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 1188 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + 1189 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 1190 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 1191 1192 /* 1193 * then clear the hardware counters. 1194 */ 1195 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 1196 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 1197 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 1198 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 1199 1200 /* 1201 * Go through our Tx list and free mbufs for those 1202 * frames that have been transmitted. 1203 */ 1204 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1205 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1206 1207 #ifdef GEM_DEBUG 1208 if (ifp->if_flags & IFF_DEBUG) { 1209 int i; 1210 printf(" txsoft %p transmit chain:\n", txs); 1211 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1212 printf("descriptor %d: ", i); 1213 printf("gd_flags: 0x%016llx\t", (long long) 1214 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1215 printf("gd_addr: 0x%016llx\n", (long long) 1216 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1217 if (i == txs->txs_lastdesc) 1218 break; 1219 } 1220 } 1221 #endif 1222 1223 /* 1224 * In theory, we could harveast some descriptors before 1225 * the ring is empty, but that's a bit complicated. 1226 * 1227 * GEM_TX_COMPLETION points to the last descriptor 1228 * processed +1. 1229 */ 1230 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); 1231 CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " 1232 "txs->txs_lastdesc = %d, txlast = %d", 1233 txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1234 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1235 if ((txlast >= txs->txs_firstdesc) && 1236 (txlast <= txs->txs_lastdesc)) 1237 break; 1238 } else { 1239 /* Ick -- this command wraps */ 1240 if ((txlast >= txs->txs_firstdesc) || 1241 (txlast <= txs->txs_lastdesc)) 1242 break; 1243 } 1244 1245 CTR0(KTR_GEM, "gem_tint: releasing a desc"); 1246 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1247 1248 sc->sc_txfree += txs->txs_ndescs; 1249 1250 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1251 BUS_DMASYNC_POSTWRITE); 1252 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1253 if (txs->txs_mbuf != NULL) { 1254 m_freem(txs->txs_mbuf); 1255 txs->txs_mbuf = NULL; 1256 } 1257 1258 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1259 1260 ifp->if_opackets++; 1261 progress = 1; 1262 } 1263 1264 CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " 1265 "GEM_TX_DATA_PTR %llx " 1266 "GEM_TX_COMPLETION %x", 1267 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), 1268 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, 1269 GEM_TX_DATA_PTR_HI) << 32) | 1270 bus_space_read_4(sc->sc_bustag, sc->sc_h, 1271 GEM_TX_DATA_PTR_LO), 1272 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); 1273 1274 if (progress) { 1275 if (sc->sc_txfree == GEM_NTXDESC - 1) 1276 sc->sc_txwin = 0; 1277 1278 /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ 1279 ifp->if_flags &= ~IFF_OACTIVE; 1280 gem_start(ifp); 1281 1282 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1283 ifp->if_timer = 0; 1284 } 1285 1286 CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", 1287 device_get_name(sc->sc_dev), ifp->if_timer); 1288 } 1289 1290 #if 0 1291 static void 1292 gem_rint_timeout(arg) 1293 void *arg; 1294 { 1295 1296 gem_rint((struct gem_softc *)arg); 1297 } 1298 #endif 1299 1300 /* 1301 * Receive interrupt. 1302 */ 1303 static void 1304 gem_rint(sc) 1305 struct gem_softc *sc; 1306 { 1307 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1308 bus_space_tag_t t = sc->sc_bustag; 1309 bus_space_handle_t h = sc->sc_h; 1310 struct gem_rxsoft *rxs; 1311 struct mbuf *m; 1312 u_int64_t rxstat; 1313 u_int32_t rxcomp; 1314 int i, len, progress = 0; 1315 1316 callout_stop(&sc->sc_rx_ch); 1317 CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); 1318 1319 /* 1320 * Read the completion register once. This limits 1321 * how long the following loop can execute. 1322 */ 1323 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); 1324 1325 CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", 1326 sc->sc_rxptr, rxcomp); 1327 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1328 for (i = sc->sc_rxptr; i != rxcomp; 1329 i = GEM_NEXTRX(i)) { 1330 rxs = &sc->sc_rxsoft[i]; 1331 1332 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); 1333 1334 if (rxstat & GEM_RD_OWN) { 1335 #if 0 /* XXX: In case of emergency, re-enable this. */ 1336 /* 1337 * The descriptor is still marked as owned, although 1338 * it is supposed to have completed. This has been 1339 * observed on some machines. Just exiting here 1340 * might leave the packet sitting around until another 1341 * one arrives to trigger a new interrupt, which is 1342 * generally undesirable, so set up a timeout. 1343 */ 1344 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1345 gem_rint_timeout, sc); 1346 #endif 1347 break; 1348 } 1349 1350 progress++; 1351 ifp->if_ipackets++; 1352 1353 if (rxstat & GEM_RD_BAD_CRC) { 1354 ifp->if_ierrors++; 1355 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1356 GEM_INIT_RXDESC(sc, i); 1357 continue; 1358 } 1359 1360 #ifdef GEM_DEBUG 1361 if (ifp->if_flags & IFF_DEBUG) { 1362 printf(" rxsoft %p descriptor %d: ", rxs, i); 1363 printf("gd_flags: 0x%016llx\t", (long long) 1364 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 1365 printf("gd_addr: 0x%016llx\n", (long long) 1366 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 1367 } 1368 #endif 1369 1370 /* 1371 * No errors; receive the packet. Note the Gem 1372 * includes the CRC with every packet. 1373 */ 1374 len = GEM_RD_BUFLEN(rxstat); 1375 1376 /* 1377 * Allocate a new mbuf cluster. If that fails, we are 1378 * out of memory, and must drop the packet and recycle 1379 * the buffer that's already attached to this descriptor. 1380 */ 1381 m = rxs->rxs_mbuf; 1382 if (gem_add_rxbuf(sc, i) != 0) { 1383 ifp->if_ierrors++; 1384 GEM_INIT_RXDESC(sc, i); 1385 continue; 1386 } 1387 m->m_data += 2; /* We're already off by two */ 1388 1389 m->m_pkthdr.rcvif = ifp; 1390 m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; 1391 1392 /* Pass it on. */ 1393 (*ifp->if_input)(ifp, m); 1394 } 1395 1396 if (progress) { 1397 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1398 /* Update the receive pointer. */ 1399 if (i == sc->sc_rxptr) { 1400 device_printf(sc->sc_dev, "rint: ring wrap\n"); 1401 } 1402 sc->sc_rxptr = i; 1403 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); 1404 } 1405 1406 CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", 1407 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); 1408 } 1409 1410 1411 /* 1412 * gem_add_rxbuf: 1413 * 1414 * Add a receive buffer to the indicated descriptor. 1415 */ 1416 static int 1417 gem_add_rxbuf(sc, idx) 1418 struct gem_softc *sc; 1419 int idx; 1420 { 1421 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1422 struct mbuf *m; 1423 int error; 1424 1425 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1426 if (m == NULL) 1427 return (ENOBUFS); 1428 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1429 1430 #ifdef GEM_DEBUG 1431 /* bzero the packet to check dma */ 1432 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1433 #endif 1434 1435 if (rxs->rxs_mbuf != NULL) { 1436 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1437 BUS_DMASYNC_POSTREAD); 1438 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1439 } 1440 1441 rxs->rxs_mbuf = m; 1442 1443 error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap, 1444 m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT); 1445 if (error != 0 || rxs->rxs_paddr == 0) { 1446 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1447 "%d\n", idx, error); 1448 panic("gem_add_rxbuf"); /* XXX */ 1449 } 1450 1451 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1452 1453 GEM_INIT_RXDESC(sc, idx); 1454 1455 return (0); 1456 } 1457 1458 1459 static void 1460 gem_eint(sc, status) 1461 struct gem_softc *sc; 1462 u_int status; 1463 { 1464 1465 if ((status & GEM_INTR_MIF) != 0) { 1466 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1467 return; 1468 } 1469 1470 device_printf(sc->sc_dev, "status=%x\n", status); 1471 } 1472 1473 1474 void 1475 gem_intr(v) 1476 void *v; 1477 { 1478 struct gem_softc *sc = (struct gem_softc *)v; 1479 bus_space_tag_t t = sc->sc_bustag; 1480 bus_space_handle_t seb = sc->sc_h; 1481 u_int32_t status; 1482 1483 status = bus_space_read_4(t, seb, GEM_STATUS); 1484 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", 1485 device_get_name(sc->sc_dev), (status>>19), 1486 (u_int)status); 1487 1488 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1489 gem_eint(sc, status); 1490 1491 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1492 gem_tint(sc); 1493 1494 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1495 gem_rint(sc); 1496 1497 /* We should eventually do more than just print out error stats. */ 1498 if (status & GEM_INTR_TX_MAC) { 1499 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); 1500 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 1501 device_printf(sc->sc_dev, "MAC tx fault, status %x\n", 1502 txstat); 1503 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 1504 gem_init(sc); 1505 } 1506 if (status & GEM_INTR_RX_MAC) { 1507 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); 1508 if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 1509 device_printf(sc->sc_dev, "MAC rx fault, status %x\n", 1510 rxstat); 1511 if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0) 1512 gem_init(sc); 1513 } 1514 } 1515 1516 1517 static void 1518 gem_watchdog(ifp) 1519 struct ifnet *ifp; 1520 { 1521 struct gem_softc *sc = ifp->if_softc; 1522 1523 CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1524 "GEM_MAC_RX_CONFIG %x", 1525 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), 1526 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), 1527 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); 1528 CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " 1529 "GEM_MAC_TX_CONFIG %x", 1530 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), 1531 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), 1532 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); 1533 1534 device_printf(sc->sc_dev, "device timeout\n"); 1535 ++ifp->if_oerrors; 1536 1537 /* Try to get more packets going. */ 1538 gem_start(ifp); 1539 } 1540 1541 /* 1542 * Initialize the MII Management Interface 1543 */ 1544 static void 1545 gem_mifinit(sc) 1546 struct gem_softc *sc; 1547 { 1548 bus_space_tag_t t = sc->sc_bustag; 1549 bus_space_handle_t mif = sc->sc_h; 1550 1551 /* Configure the MIF in frame mode */ 1552 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1553 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 1554 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 1555 } 1556 1557 /* 1558 * MII interface 1559 * 1560 * The GEM MII interface supports at least three different operating modes: 1561 * 1562 * Bitbang mode is implemented using data, clock and output enable registers. 1563 * 1564 * Frame mode is implemented by loading a complete frame into the frame 1565 * register and polling the valid bit for completion. 1566 * 1567 * Polling mode uses the frame register but completion is indicated by 1568 * an interrupt. 1569 * 1570 */ 1571 int 1572 gem_mii_readreg(dev, phy, reg) 1573 device_t dev; 1574 int phy, reg; 1575 { 1576 struct gem_softc *sc = device_get_softc(dev); 1577 bus_space_tag_t t = sc->sc_bustag; 1578 bus_space_handle_t mif = sc->sc_h; 1579 int n; 1580 u_int32_t v; 1581 1582 #ifdef GEM_DEBUG_PHY 1583 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1584 #endif 1585 1586 #if 0 1587 /* Select the desired PHY in the MIF configuration register */ 1588 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1589 /* Clear PHY select bit */ 1590 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1591 if (phy == GEM_PHYAD_EXTERNAL) 1592 /* Set PHY select bit to get at external device */ 1593 v |= GEM_MIF_CONFIG_PHY_SEL; 1594 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1595 #endif 1596 1597 /* Construct the frame command */ 1598 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1599 GEM_MIF_FRAME_READ; 1600 1601 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1602 for (n = 0; n < 100; n++) { 1603 DELAY(1); 1604 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1605 if (v & GEM_MIF_FRAME_TA0) 1606 return (v & GEM_MIF_FRAME_DATA); 1607 } 1608 1609 device_printf(sc->sc_dev, "mii_read timeout\n"); 1610 return (0); 1611 } 1612 1613 int 1614 gem_mii_writereg(dev, phy, reg, val) 1615 device_t dev; 1616 int phy, reg, val; 1617 { 1618 struct gem_softc *sc = device_get_softc(dev); 1619 bus_space_tag_t t = sc->sc_bustag; 1620 bus_space_handle_t mif = sc->sc_h; 1621 int n; 1622 u_int32_t v; 1623 1624 #ifdef GEM_DEBUG_PHY 1625 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1626 #endif 1627 1628 #if 0 1629 /* Select the desired PHY in the MIF configuration register */ 1630 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1631 /* Clear PHY select bit */ 1632 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1633 if (phy == GEM_PHYAD_EXTERNAL) 1634 /* Set PHY select bit to get at external device */ 1635 v |= GEM_MIF_CONFIG_PHY_SEL; 1636 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1637 #endif 1638 /* Construct the frame command */ 1639 v = GEM_MIF_FRAME_WRITE | 1640 (phy << GEM_MIF_PHY_SHIFT) | 1641 (reg << GEM_MIF_REG_SHIFT) | 1642 (val & GEM_MIF_FRAME_DATA); 1643 1644 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1645 for (n = 0; n < 100; n++) { 1646 DELAY(1); 1647 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1648 if (v & GEM_MIF_FRAME_TA0) 1649 return (1); 1650 } 1651 1652 device_printf(sc->sc_dev, "mii_write timeout\n"); 1653 return (0); 1654 } 1655 1656 void 1657 gem_mii_statchg(dev) 1658 device_t dev; 1659 { 1660 struct gem_softc *sc = device_get_softc(dev); 1661 #ifdef GEM_DEBUG 1662 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1663 #endif 1664 bus_space_tag_t t = sc->sc_bustag; 1665 bus_space_handle_t mac = sc->sc_h; 1666 u_int32_t v; 1667 1668 #ifdef GEM_DEBUG 1669 if (sc->sc_debug) 1670 printf("gem_mii_statchg: status change: phy = %d\n", 1671 sc->sc_phys[instance]); 1672 #endif 1673 1674 /* Set tx full duplex options */ 1675 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1676 DELAY(10000); /* reg must be cleared and delay before changing. */ 1677 v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| 1678 GEM_MAC_TX_ENABLE; 1679 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { 1680 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; 1681 } 1682 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); 1683 1684 /* XIF Configuration */ 1685 /* We should really calculate all this rather than rely on defaults */ 1686 v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); 1687 v = GEM_MAC_XIF_LINK_LED; 1688 v |= GEM_MAC_XIF_TX_MII_ENA; 1689 1690 /* If an external transceiver is connected, enable its MII drivers */ 1691 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 1692 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 1693 /* External MII needs echo disable if half duplex. */ 1694 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1695 /* turn on full duplex LED */ 1696 v |= GEM_MAC_XIF_FDPLX_LED; 1697 else 1698 /* half duplex -- disable echo */ 1699 v |= GEM_MAC_XIF_ECHO_DISABL; 1700 1701 if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) 1702 v |= GEM_MAC_XIF_GMII_MODE; 1703 else 1704 v &= ~GEM_MAC_XIF_GMII_MODE; 1705 } else { 1706 /* Internal MII needs buf enable */ 1707 v |= GEM_MAC_XIF_MII_BUF_ENA; 1708 } 1709 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 1710 } 1711 1712 int 1713 gem_mediachange(ifp) 1714 struct ifnet *ifp; 1715 { 1716 struct gem_softc *sc = ifp->if_softc; 1717 1718 /* XXX Add support for serial media. */ 1719 1720 return (mii_mediachg(sc->sc_mii)); 1721 } 1722 1723 void 1724 gem_mediastatus(ifp, ifmr) 1725 struct ifnet *ifp; 1726 struct ifmediareq *ifmr; 1727 { 1728 struct gem_softc *sc = ifp->if_softc; 1729 1730 if ((ifp->if_flags & IFF_UP) == 0) 1731 return; 1732 1733 mii_pollstat(sc->sc_mii); 1734 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1735 ifmr->ifm_status = sc->sc_mii->mii_media_status; 1736 } 1737 1738 /* 1739 * Process an ioctl request. 1740 */ 1741 static int 1742 gem_ioctl(ifp, cmd, data) 1743 struct ifnet *ifp; 1744 u_long cmd; 1745 caddr_t data; 1746 { 1747 struct gem_softc *sc = ifp->if_softc; 1748 struct ifreq *ifr = (struct ifreq *)data; 1749 int s, error = 0; 1750 1751 switch (cmd) { 1752 case SIOCSIFADDR: 1753 case SIOCGIFADDR: 1754 case SIOCSIFMTU: 1755 error = ether_ioctl(ifp, cmd, data); 1756 break; 1757 case SIOCSIFFLAGS: 1758 if (ifp->if_flags & IFF_UP) { 1759 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) 1760 gem_setladrf(sc); 1761 else 1762 gem_init(sc); 1763 } else { 1764 if (ifp->if_flags & IFF_RUNNING) 1765 gem_stop(ifp, 0); 1766 } 1767 sc->sc_ifflags = ifp->if_flags; 1768 error = 0; 1769 break; 1770 case SIOCADDMULTI: 1771 case SIOCDELMULTI: 1772 gem_setladrf(sc); 1773 error = 0; 1774 break; 1775 case SIOCGIFMEDIA: 1776 case SIOCSIFMEDIA: 1777 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 1778 break; 1779 default: 1780 error = ENOTTY; 1781 break; 1782 } 1783 1784 /* Try to get things going again */ 1785 if (ifp->if_flags & IFF_UP) 1786 gem_start(ifp); 1787 splx(s); 1788 return (error); 1789 } 1790 1791 /* 1792 * Set up the logical address filter. 1793 */ 1794 static void 1795 gem_setladrf(sc) 1796 struct gem_softc *sc; 1797 { 1798 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1799 struct ifmultiaddr *inm; 1800 struct sockaddr_dl *sdl; 1801 bus_space_tag_t t = sc->sc_bustag; 1802 bus_space_handle_t h = sc->sc_h; 1803 u_char *cp; 1804 u_int32_t crc; 1805 u_int32_t hash[16]; 1806 u_int32_t v; 1807 int len; 1808 int i; 1809 1810 /* Get current RX configuration */ 1811 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1812 1813 /* 1814 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1815 * and hash filter. Depending on the case, the right bit will be 1816 * enabled. 1817 */ 1818 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1819 GEM_MAC_RX_PROMISC_GRP); 1820 1821 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1822 /* Turn on promiscuous mode */ 1823 v |= GEM_MAC_RX_PROMISCUOUS; 1824 goto chipit; 1825 } 1826 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1827 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1828 ifp->if_flags |= IFF_ALLMULTI; 1829 v |= GEM_MAC_RX_PROMISC_GRP; 1830 goto chipit; 1831 } 1832 1833 /* 1834 * Set up multicast address filter by passing all multicast addresses 1835 * through a crc generator, and then using the high order 8 bits as an 1836 * index into the 256 bit logical address filter. The high order 4 1837 * bits selects the word, while the other 4 bits select the bit within 1838 * the word (where bit 0 is the MSB). 1839 */ 1840 1841 /* Clear hash table */ 1842 memset(hash, 0, sizeof(hash)); 1843 1844 TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { 1845 if (inm->ifma_addr->sa_family != AF_LINK) 1846 continue; 1847 sdl = (struct sockaddr_dl *)inm->ifma_addr; 1848 cp = LLADDR(sdl); 1849 crc = 0xffffffff; 1850 for (len = sdl->sdl_alen; --len >= 0;) { 1851 int octet = *cp++; 1852 int i; 1853 1854 #define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ 1855 for (i = 0; i < 8; i++) { 1856 if ((crc & 1) ^ (octet & 1)) { 1857 crc >>= 1; 1858 crc ^= MC_POLY_LE; 1859 } else { 1860 crc >>= 1; 1861 } 1862 octet >>= 1; 1863 } 1864 } 1865 /* Just want the 8 most significant bits. */ 1866 crc >>= 24; 1867 1868 /* Set the corresponding bit in the filter. */ 1869 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1870 } 1871 1872 v |= GEM_MAC_RX_HASH_FILTER; 1873 ifp->if_flags &= ~IFF_ALLMULTI; 1874 1875 /* Now load the hash table into the chip (if we are using it) */ 1876 for (i = 0; i < 16; i++) { 1877 bus_space_write_4(t, h, 1878 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1879 hash[i]); 1880 } 1881 1882 chipit: 1883 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1884 } 1885