/*
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define	GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#if 0
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS	GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXSEGS, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
	printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

	/* Get TX FIFO size; the register counts 64-byte chunks, so / 16 is KB */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	printf(", %uKB TX fifo\n", v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ether_ifdetach(ifp);
	gem_stop(ifp, 1);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 0);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (ifp->if_flags & IFF_UP)
		gem_init(ifp);
}
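
/*
 * gem_cddma_callback:
 *
 *	Callback for bus_dmamap_load() on the control data map; records
 *	the bus address of the control data block in sc_cddma.
 */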
static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count"));
	rxs->rxs_paddr = segs[0].ds_addr;
}

static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, nexttx = GEM_NEXTTX(nexttx)) {
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
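
/*
 * gem_bitwait:
 *
 *	Poll register r until the bits in clr read back as zero and the
 *	bits in set read back as one.  With TRIES (10000) iterations of
 *	DELAY(100) this gives up after roughly one second.  Returns 1 on
 *	success, 0 on timeout.
 */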
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * gem_meminit:
 *
 *	Initialize the transmit and receive descriptor rings.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}
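
/*
 * gem_ringsize() returns the GEM_RING_SZ_* encoding that gem_init()
 * below programs into GEM_TX_CONFIG and GEM_RX_CONFIG; only the listed
 * powers of two (32 through 8192 descriptors) are representable.
 */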
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8.  Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	      GEM_INTR_TX_EMPTY|
	      GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	      GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	      GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	      GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ( (sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11.  Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}
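
/*
 * gem_load_txmbuf:
 *
 *	Load an mbuf chain into the TX DMA map of a free transmit job and
 *	fill in the descriptor ring via gem_txdma_callback().  Returns 0
 *	on success, -1 if no work queue entries or descriptors are
 *	available, or an error from bus_dmamap_load_mbuf().
 */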
static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_mbuf = m0;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

	/* Sync the descriptors we're using. */
	GEM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndescs,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}
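
/*
 * gem_start:
 *
 *	Dequeue packets from the interface send queue, load them into
 *	the TX descriptor ring via gem_load_txmbuf() and kick the
 *	transmitter.
 */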
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx, ofree, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			m_freem(m0);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		/* Kick the transmitter. */
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
	}

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (ntx > 0) {
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
}

#if 0
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
	for (i = sc->sc_rxptr; i != rxcomp;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
1343 */ 1344 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1345 gem_rint_timeout, sc); 1346 #endif 1347 break; 1348 } 1349 1350 progress++; 1351 ifp->if_ipackets++; 1352 1353 if (rxstat & GEM_RD_BAD_CRC) { 1354 ifp->if_ierrors++; 1355 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1356 GEM_INIT_RXDESC(sc, i); 1357 continue; 1358 } 1359 1360 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1361 BUS_DMASYNC_POSTREAD); 1362 #ifdef GEM_DEBUG 1363 if (ifp->if_flags & IFF_DEBUG) { 1364 printf(" rxsoft %p descriptor %d: ", rxs, i); 1365 printf("gd_flags: 0x%016llx\t", (long long) 1366 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 1367 printf("gd_addr: 0x%016llx\n", (long long) 1368 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 1369 } 1370 #endif 1371 1372 /* 1373 * No errors; receive the packet. Note the Gem 1374 * includes the CRC with every packet. 1375 */ 1376 len = GEM_RD_BUFLEN(rxstat); 1377 1378 /* 1379 * Allocate a new mbuf cluster. If that fails, we are 1380 * out of memory, and must drop the packet and recycle 1381 * the buffer that's already attached to this descriptor. 1382 */ 1383 m = rxs->rxs_mbuf; 1384 if (gem_add_rxbuf(sc, i) != 0) { 1385 ifp->if_ierrors++; 1386 GEM_INIT_RXDESC(sc, i); 1387 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1388 BUS_DMASYNC_PREREAD); 1389 continue; 1390 } 1391 m->m_data += 2; /* We're already off by two */ 1392 1393 m->m_pkthdr.rcvif = ifp; 1394 m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; 1395 1396 /* Pass it on. */ 1397 (*ifp->if_input)(ifp, m); 1398 } 1399 1400 if (progress) { 1401 /* Update the receive pointer. */ 1402 if (i == sc->sc_rxptr) { 1403 device_printf(sc->sc_dev, "rint: ring wrap\n"); 1404 } 1405 sc->sc_rxptr = i; 1406 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); 1407 } 1408 1409 CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", 1410 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); 1411 } 1412 1413 1414 /* 1415 * gem_add_rxbuf: 1416 * 1417 * Add a receive buffer to the indicated descriptor. 
1418 */ 1419 static int 1420 gem_add_rxbuf(sc, idx) 1421 struct gem_softc *sc; 1422 int idx; 1423 { 1424 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1425 struct mbuf *m; 1426 int error; 1427 1428 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1429 if (m == NULL) 1430 return (ENOBUFS); 1431 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1432 1433 #ifdef GEM_DEBUG 1434 /* bzero the packet to check dma */ 1435 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1436 #endif 1437 1438 if (rxs->rxs_mbuf != NULL) 1439 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1440 1441 rxs->rxs_mbuf = m; 1442 1443 error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap, 1444 m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT); 1445 if (error != 0 || rxs->rxs_paddr == 0) { 1446 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1447 "%d\n", idx, error); 1448 panic("gem_add_rxbuf"); /* XXX */ 1449 } 1450 1451 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1452 1453 GEM_INIT_RXDESC(sc, idx); 1454 1455 return (0); 1456 } 1457 1458 1459 static void 1460 gem_eint(sc, status) 1461 struct gem_softc *sc; 1462 u_int status; 1463 { 1464 1465 if ((status & GEM_INTR_MIF) != 0) { 1466 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1467 return; 1468 } 1469 1470 device_printf(sc->sc_dev, "status=%x\n", status); 1471 } 1472 1473 1474 void 1475 gem_intr(v) 1476 void *v; 1477 { 1478 struct gem_softc *sc = (struct gem_softc *)v; 1479 bus_space_tag_t t = sc->sc_bustag; 1480 bus_space_handle_t seb = sc->sc_h; 1481 u_int32_t status; 1482 1483 status = bus_space_read_4(t, seb, GEM_STATUS); 1484 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", 1485 device_get_name(sc->sc_dev), (status>>19), 1486 (u_int)status); 1487 1488 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1489 gem_eint(sc, status); 1490 1491 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1492 gem_tint(sc); 1493 1494 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1495 gem_rint(sc); 1496 1497 /* We should eventually do more than just print out error stats. 
void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
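
/*
 * This driver uses frame mode only: gem_mifinit() clears
 * GEM_MIF_CONFIG_BB_ENA to take the MIF out of bitbang mode, and
 * gem_mii_readreg()/gem_mii_writereg() below busy-wait on
 * GEM_MIF_FRAME_TA0 rather than relying on a completion interrupt.
 */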
1569 * 1570 */ 1571 int 1572 gem_mii_readreg(dev, phy, reg) 1573 device_t dev; 1574 int phy, reg; 1575 { 1576 struct gem_softc *sc = device_get_softc(dev); 1577 bus_space_tag_t t = sc->sc_bustag; 1578 bus_space_handle_t mif = sc->sc_h; 1579 int n; 1580 u_int32_t v; 1581 1582 #ifdef GEM_DEBUG_PHY 1583 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1584 #endif 1585 1586 #if 0 1587 /* Select the desired PHY in the MIF configuration register */ 1588 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1589 /* Clear PHY select bit */ 1590 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1591 if (phy == GEM_PHYAD_EXTERNAL) 1592 /* Set PHY select bit to get at external device */ 1593 v |= GEM_MIF_CONFIG_PHY_SEL; 1594 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1595 #endif 1596 1597 /* Construct the frame command */ 1598 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1599 GEM_MIF_FRAME_READ; 1600 1601 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1602 for (n = 0; n < 100; n++) { 1603 DELAY(1); 1604 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1605 if (v & GEM_MIF_FRAME_TA0) 1606 return (v & GEM_MIF_FRAME_DATA); 1607 } 1608 1609 device_printf(sc->sc_dev, "mii_read timeout\n"); 1610 return (0); 1611 } 1612 1613 int 1614 gem_mii_writereg(dev, phy, reg, val) 1615 device_t dev; 1616 int phy, reg, val; 1617 { 1618 struct gem_softc *sc = device_get_softc(dev); 1619 bus_space_tag_t t = sc->sc_bustag; 1620 bus_space_handle_t mif = sc->sc_h; 1621 int n; 1622 u_int32_t v; 1623 1624 #ifdef GEM_DEBUG_PHY 1625 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1626 #endif 1627 1628 #if 0 1629 /* Select the desired PHY in the MIF configuration register */ 1630 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1631 /* Clear PHY select bit */ 1632 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1633 if (phy == GEM_PHYAD_EXTERNAL) 1634 /* Set PHY select bit to get at external device */ 1635 v |= GEM_MIF_CONFIG_PHY_SEL; 1636 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1637 #endif 1638 /* Construct the frame command */ 1639 v = GEM_MIF_FRAME_WRITE | 1640 (phy << GEM_MIF_PHY_SHIFT) | 1641 (reg << GEM_MIF_REG_SHIFT) | 1642 (val & GEM_MIF_FRAME_DATA); 1643 1644 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1645 for (n = 0; n < 100; n++) { 1646 DELAY(1); 1647 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1648 if (v & GEM_MIF_FRAME_TA0) 1649 return (1); 1650 } 1651 1652 device_printf(sc->sc_dev, "mii_write timeout\n"); 1653 return (0); 1654 } 1655 1656 void 1657 gem_mii_statchg(dev) 1658 device_t dev; 1659 { 1660 struct gem_softc *sc = device_get_softc(dev); 1661 #ifdef GEM_DEBUG 1662 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1663 #endif 1664 bus_space_tag_t t = sc->sc_bustag; 1665 bus_space_handle_t mac = sc->sc_h; 1666 u_int32_t v; 1667 1668 #ifdef GEM_DEBUG 1669 if (sc->sc_debug) 1670 printf("gem_mii_statchg: status change: phy = %d\n", 1671 sc->sc_phys[instance]); 1672 #endif 1673 1674 /* Set tx full duplex options */ 1675 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1676 DELAY(10000); /* reg must be cleared and delay before changing. 
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
1793 */ 1794 static void 1795 gem_setladrf(sc) 1796 struct gem_softc *sc; 1797 { 1798 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1799 struct ifmultiaddr *inm; 1800 struct sockaddr_dl *sdl; 1801 bus_space_tag_t t = sc->sc_bustag; 1802 bus_space_handle_t h = sc->sc_h; 1803 u_char *cp; 1804 u_int32_t crc; 1805 u_int32_t hash[16]; 1806 u_int32_t v; 1807 int len; 1808 int i; 1809 1810 /* Get current RX configuration */ 1811 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1812 1813 /* 1814 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1815 * and hash filter. Depending on the case, the right bit will be 1816 * enabled. 1817 */ 1818 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1819 GEM_MAC_RX_PROMISC_GRP); 1820 1821 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1822 /* Turn on promiscuous mode */ 1823 v |= GEM_MAC_RX_PROMISCUOUS; 1824 goto chipit; 1825 } 1826 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1827 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1828 ifp->if_flags |= IFF_ALLMULTI; 1829 v |= GEM_MAC_RX_PROMISC_GRP; 1830 goto chipit; 1831 } 1832 1833 /* 1834 * Set up multicast address filter by passing all multicast addresses 1835 * through a crc generator, and then using the high order 8 bits as an 1836 * index into the 256 bit logical address filter. The high order 4 1837 * bits selects the word, while the other 4 bits select the bit within 1838 * the word (where bit 0 is the MSB). 1839 */ 1840 1841 /* Clear hash table */ 1842 memset(hash, 0, sizeof(hash)); 1843 1844 TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { 1845 if (inm->ifma_addr->sa_family != AF_LINK) 1846 continue; 1847 sdl = (struct sockaddr_dl *)inm->ifma_addr; 1848 cp = LLADDR(sdl); 1849 crc = 0xffffffff; 1850 for (len = sdl->sdl_alen; --len >= 0;) { 1851 int octet = *cp++; 1852 int i; 1853 1854 #define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ 1855 for (i = 0; i < 8; i++) { 1856 if ((crc & 1) ^ (octet & 1)) { 1857 crc >>= 1; 1858 crc ^= MC_POLY_LE; 1859 } else { 1860 crc >>= 1; 1861 } 1862 octet >>= 1; 1863 } 1864 } 1865 /* Just want the 8 most significant bits. */ 1866 crc >>= 24; 1867 1868 /* Set the corresponding bit in the filter. */ 1869 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1870 } 1871 1872 v |= GEM_MAC_RX_HASH_FILTER; 1873 ifp->if_flags &= ~IFF_ALLMULTI; 1874 1875 /* Now load the hash table into the chip (if we are using it) */ 1876 for (i = 0; i < 16; i++) { 1877 bus_space_write_4(t, h, 1878 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1879 hash[i]); 1880 } 1881 1882 chipit: 1883 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1884 } 1885