1 /*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for Sun GEM ethernet controllers. 35 */ 36 37 #if 0 38 #define GEM_DEBUG 39 #endif 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/bus.h> 44 #include <sys/callout.h> 45 #include <sys/endian.h> 46 #include <sys/mbuf.h> 47 #include <sys/malloc.h> 48 #include <sys/kernel.h> 49 #include <sys/module.h> 50 #include <sys/socket.h> 51 #include <sys/sockio.h> 52 53 #include <net/bpf.h> 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_arp.h> 57 #include <net/if_dl.h> 58 #include <net/if_media.h> 59 60 #include <machine/bus.h> 61 62 #include <dev/mii/mii.h> 63 #include <dev/mii/miivar.h> 64 65 #include <dev/gem/if_gemreg.h> 66 #include <dev/gem/if_gemvar.h> 67 68 #define TRIES 10000 69 70 static void gem_start(struct ifnet *); 71 static void gem_stop(struct ifnet *, int); 72 static int gem_ioctl(struct ifnet *, u_long, caddr_t); 73 static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); 74 static void gem_rxdma_callback(void *, bus_dma_segment_t *, int, 75 bus_size_t, int); 76 static void gem_txdma_callback(void *, bus_dma_segment_t *, int, 77 bus_size_t, int); 78 static void gem_tick(void *); 79 static void gem_watchdog(struct ifnet *); 80 static void gem_init(void *); 81 static void gem_init_regs(struct gem_softc *sc); 82 static int gem_ringsize(int sz); 83 static int gem_meminit(struct gem_softc *); 84 static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); 85 static void gem_mifinit(struct gem_softc *); 86 static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, 87 u_int32_t clr, u_int32_t set); 88 static int gem_reset_rx(struct gem_softc *); 89 static int gem_reset_tx(struct gem_softc *); 90 static int gem_disable_rx(struct gem_softc *); 91 static int gem_disable_tx(struct gem_softc *); 92 static void gem_rxdrain(struct gem_softc *); 93 static int gem_add_rxbuf(struct gem_softc *, int); 94 static void gem_setladrf(struct gem_softc *); 95 96 struct mbuf *gem_get(struct gem_softc *, int, int); 97 static void gem_eint(struct gem_softc *, u_int); 98 static void gem_rint(struct gem_softc *); 99 #if 0 100 static void gem_rint_timeout(void *); 101 #endif 102 static void gem_tint(struct gem_softc *); 103 #ifdef notyet 104 static void gem_power(int, void *); 105 #endif 106 107 devclass_t gem_devclass; 108 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 109 MODULE_DEPEND(gem, miibus, 1, 1, 1); 110 111 #ifdef GEM_DEBUG 112 #include <sys/ktr.h> 113 #define KTR_GEM KTR_CT2 114 #endif 115 116 #define GEM_NSEGS GEM_NTXDESC 117 118 /* 119 * gem_attach: 120 * 121 * Attach a Gem interface to the system. 122 */ 123 int 124 gem_attach(sc) 125 struct gem_softc *sc; 126 { 127 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 128 struct mii_softc *child; 129 int i, error; 130 u_int32_t v; 131 132 /* Make sure the chip is stopped. */ 133 ifp->if_softc = sc; 134 gem_reset(sc); 135 136 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 137 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, 138 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); 139 if (error) 140 return (error); 141 142 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 143 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 144 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, 145 &sc->sc_rdmatag); 146 if (error) 147 goto fail_ptag; 148 149 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 150 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 151 GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, 152 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 153 if (error) 154 goto fail_rtag; 155 156 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 157 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 158 sizeof(struct gem_control_data), 1, 159 sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, 160 busdma_lock_mutex, &Giant, &sc->sc_cdmatag); 161 if (error) 162 goto fail_ttag; 163 164 /* 165 * Allocate the control data structures, and create and load the 166 * DMA map for it. 167 */ 168 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 169 (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { 170 device_printf(sc->sc_dev, "unable to allocate control data," 171 " error = %d\n", error); 172 goto fail_ctag; 173 } 174 175 sc->sc_cddma = 0; 176 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 177 sc->sc_control_data, sizeof(struct gem_control_data), 178 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 179 device_printf(sc->sc_dev, "unable to load control data DMA " 180 "map, error = %d\n", error); 181 goto fail_cmem; 182 } 183 184 /* 185 * Initialize the transmit job descriptors. 186 */ 187 STAILQ_INIT(&sc->sc_txfreeq); 188 STAILQ_INIT(&sc->sc_txdirtyq); 189 190 /* 191 * Create the transmit buffer DMA maps. 192 */ 193 error = ENOMEM; 194 for (i = 0; i < GEM_TXQUEUELEN; i++) { 195 struct gem_txsoft *txs; 196 197 txs = &sc->sc_txsoft[i]; 198 txs->txs_mbuf = NULL; 199 txs->txs_ndescs = 0; 200 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 201 &txs->txs_dmamap)) != 0) { 202 device_printf(sc->sc_dev, "unable to create tx DMA map " 203 "%d, error = %d\n", i, error); 204 goto fail_txd; 205 } 206 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 207 } 208 209 /* 210 * Create the receive buffer DMA maps. 211 */ 212 for (i = 0; i < GEM_NRXDESC; i++) { 213 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 214 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 215 device_printf(sc->sc_dev, "unable to create rx DMA map " 216 "%d, error = %d\n", i, error); 217 goto fail_rxd; 218 } 219 sc->sc_rxsoft[i].rxs_mbuf = NULL; 220 } 221 222 223 gem_mifinit(sc); 224 225 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, 226 gem_mediastatus)) != 0) { 227 device_printf(sc->sc_dev, "phy probe failed: %d\n", error); 228 goto fail_rxd; 229 } 230 sc->sc_mii = device_get_softc(sc->sc_miibus); 231 232 /* 233 * From this point forward, the attachment cannot fail. A failure 234 * before this point releases all resources that may have been 235 * allocated. 236 */ 237 238 /* Get RX FIFO size */ 239 sc->sc_rxfifosize = 64 * 240 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); 241 242 /* Get TX FIFO size */ 243 v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); 244 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 245 sc->sc_rxfifosize / 1024, v / 16); 246 247 /* Initialize ifnet structure. */ 248 ifp->if_softc = sc; 249 if_initname(ifp, device_get_name(sc->sc_dev), 250 device_get_unit(sc->sc_dev)); 251 ifp->if_mtu = ETHERMTU; 252 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 253 ifp->if_start = gem_start; 254 ifp->if_ioctl = gem_ioctl; 255 ifp->if_watchdog = gem_watchdog; 256 ifp->if_init = gem_init; 257 ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; 258 /* 259 * Walk along the list of attached MII devices and 260 * establish an `MII instance' to `phy number' 261 * mapping. We'll use this mapping in media change 262 * requests to determine which phy to use to program 263 * the MIF configuration register. 264 */ 265 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; 266 child = LIST_NEXT(child, mii_list)) { 267 /* 268 * Note: we support just two PHYs: the built-in 269 * internal device and an external on the MII 270 * connector. 271 */ 272 if (child->mii_phy > 1 || child->mii_inst > 1) { 273 device_printf(sc->sc_dev, "cannot accomodate " 274 "MII device %s at phy %d, instance %d\n", 275 device_get_name(child->mii_dev), 276 child->mii_phy, child->mii_inst); 277 continue; 278 } 279 280 sc->sc_phys[child->mii_inst] = child->mii_phy; 281 } 282 283 /* 284 * Now select and activate the PHY we will use. 285 * 286 * The order of preference is External (MDI1), 287 * Internal (MDI0), Serial Link (no MII). 288 */ 289 if (sc->sc_phys[1]) { 290 #ifdef GEM_DEBUG 291 printf("using external phy\n"); 292 #endif 293 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 294 } else { 295 #ifdef GEM_DEBUG 296 printf("using internal phy\n"); 297 #endif 298 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 299 } 300 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, 301 sc->sc_mif_config); 302 /* Attach the interface. */ 303 ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr); 304 305 #if notyet 306 /* 307 * Add a suspend hook to make sure we come back up after a 308 * resume. 309 */ 310 sc->sc_powerhook = powerhook_establish(gem_power, sc); 311 if (sc->sc_powerhook == NULL) 312 device_printf(sc->sc_dev, "WARNING: unable to establish power " 313 "hook\n"); 314 #endif 315 316 callout_init(&sc->sc_tick_ch, 0); 317 callout_init(&sc->sc_rx_ch, 0); 318 return (0); 319 320 /* 321 * Free any resources we've allocated during the failed attach 322 * attempt. Do this in reverse order and fall through. 323 */ 324 fail_rxd: 325 for (i = 0; i < GEM_NRXDESC; i++) { 326 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 327 bus_dmamap_destroy(sc->sc_rdmatag, 328 sc->sc_rxsoft[i].rxs_dmamap); 329 } 330 fail_txd: 331 for (i = 0; i < GEM_TXQUEUELEN; i++) { 332 if (sc->sc_txsoft[i].txs_dmamap != NULL) 333 bus_dmamap_destroy(sc->sc_tdmatag, 334 sc->sc_txsoft[i].txs_dmamap); 335 } 336 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 337 fail_cmem: 338 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 339 sc->sc_cddmamap); 340 fail_ctag: 341 bus_dma_tag_destroy(sc->sc_cdmatag); 342 fail_ttag: 343 bus_dma_tag_destroy(sc->sc_tdmatag); 344 fail_rtag: 345 bus_dma_tag_destroy(sc->sc_rdmatag); 346 fail_ptag: 347 bus_dma_tag_destroy(sc->sc_pdmatag); 348 return (error); 349 } 350 351 void 352 gem_detach(sc) 353 struct gem_softc *sc; 354 { 355 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 356 int i; 357 358 ether_ifdetach(ifp); 359 gem_stop(ifp, 1); 360 device_delete_child(sc->sc_dev, sc->sc_miibus); 361 362 for (i = 0; i < GEM_NRXDESC; i++) { 363 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 364 bus_dmamap_destroy(sc->sc_rdmatag, 365 sc->sc_rxsoft[i].rxs_dmamap); 366 } 367 for (i = 0; i < GEM_TXQUEUELEN; i++) { 368 if (sc->sc_txsoft[i].txs_dmamap != NULL) 369 bus_dmamap_destroy(sc->sc_tdmatag, 370 sc->sc_txsoft[i].txs_dmamap); 371 } 372 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 373 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 374 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 375 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 376 sc->sc_cddmamap); 377 bus_dma_tag_destroy(sc->sc_cdmatag); 378 bus_dma_tag_destroy(sc->sc_tdmatag); 379 bus_dma_tag_destroy(sc->sc_rdmatag); 380 bus_dma_tag_destroy(sc->sc_pdmatag); 381 } 382 383 void 384 gem_suspend(sc) 385 struct gem_softc *sc; 386 { 387 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 388 389 gem_stop(ifp, 0); 390 } 391 392 void 393 gem_resume(sc) 394 struct gem_softc *sc; 395 { 396 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 397 398 if (ifp->if_flags & IFF_UP) 399 gem_init(ifp); 400 } 401 402 static void 403 gem_cddma_callback(xsc, segs, nsegs, error) 404 void *xsc; 405 bus_dma_segment_t *segs; 406 int nsegs; 407 int error; 408 { 409 struct gem_softc *sc = (struct gem_softc *)xsc; 410 411 if (error != 0) 412 return; 413 if (nsegs != 1) { 414 /* can't happen... */ 415 panic("gem_cddma_callback: bad control buffer segment count"); 416 } 417 sc->sc_cddma = segs[0].ds_addr; 418 } 419 420 static void 421 gem_rxdma_callback(xsc, segs, nsegs, totsz, error) 422 void *xsc; 423 bus_dma_segment_t *segs; 424 int nsegs; 425 bus_size_t totsz; 426 int error; 427 { 428 struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc; 429 430 if (error != 0) 431 return; 432 KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count")); 433 rxs->rxs_paddr = segs[0].ds_addr; 434 } 435 436 static void 437 gem_txdma_callback(xsc, segs, nsegs, totsz, error) 438 void *xsc; 439 bus_dma_segment_t *segs; 440 int nsegs; 441 bus_size_t totsz; 442 int error; 443 { 444 struct gem_txdma *txd = (struct gem_txdma *)xsc; 445 struct gem_softc *sc = txd->txd_sc; 446 struct gem_txsoft *txs = txd->txd_txs; 447 bus_size_t len = 0; 448 uint64_t flags = 0; 449 int seg, nexttx; 450 451 if (error != 0) 452 return; 453 /* 454 * Ensure we have enough descriptors free to describe 455 * the packet. Note, we always reserve one descriptor 456 * at the end of the ring as a termination point, to 457 * prevent wrap-around. 458 */ 459 if (nsegs > sc->sc_txfree - 1) { 460 txs->txs_ndescs = -1; 461 return; 462 } 463 txs->txs_ndescs = nsegs; 464 465 nexttx = txs->txs_firstdesc; 466 /* 467 * Initialize the transmit descriptors. 468 */ 469 for (seg = 0; seg < nsegs; 470 seg++, nexttx = GEM_NEXTTX(nexttx)) { 471 #ifdef GEM_DEBUG 472 CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " 473 "%lx, addr %#lx (%#lx)", seg, nexttx, 474 segs[seg].ds_len, segs[seg].ds_addr, 475 GEM_DMA_WRITE(sc, segs[seg].ds_addr)); 476 #endif 477 478 if (segs[seg].ds_len == 0) 479 continue; 480 sc->sc_txdescs[nexttx].gd_addr = 481 GEM_DMA_WRITE(sc, segs[seg].ds_addr); 482 KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, 483 ("gem_txdma_callback: segment size too large!")); 484 flags = segs[seg].ds_len & GEM_TD_BUFSIZE; 485 if (len == 0) { 486 #ifdef GEM_DEBUG 487 CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " 488 "tx %d", seg, nexttx); 489 #endif 490 flags |= GEM_TD_START_OF_PACKET; 491 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 492 sc->sc_txwin = 0; 493 flags |= GEM_TD_INTERRUPT_ME; 494 } 495 } 496 if (len + segs[seg].ds_len == totsz) { 497 #ifdef GEM_DEBUG 498 CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " 499 "tx %d", seg, nexttx); 500 #endif 501 flags |= GEM_TD_END_OF_PACKET; 502 } 503 sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); 504 txs->txs_lastdesc = nexttx; 505 len += segs[seg].ds_len; 506 } 507 KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, 508 ("gem_txdma_callback: missed end of packet!")); 509 } 510 511 static void 512 gem_tick(arg) 513 void *arg; 514 { 515 struct gem_softc *sc = arg; 516 int s; 517 518 s = splnet(); 519 mii_tick(sc->sc_mii); 520 splx(s); 521 522 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 523 } 524 525 static int 526 gem_bitwait(sc, r, clr, set) 527 struct gem_softc *sc; 528 bus_addr_t r; 529 u_int32_t clr; 530 u_int32_t set; 531 { 532 int i; 533 u_int32_t reg; 534 535 for (i = TRIES; i--; DELAY(100)) { 536 reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); 537 if ((r & clr) == 0 && (r & set) == set) 538 return (1); 539 } 540 return (0); 541 } 542 543 void 544 gem_reset(sc) 545 struct gem_softc *sc; 546 { 547 bus_space_tag_t t = sc->sc_bustag; 548 bus_space_handle_t h = sc->sc_h; 549 int s; 550 551 s = splnet(); 552 #ifdef GEM_DEBUG 553 CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); 554 #endif 555 gem_reset_rx(sc); 556 gem_reset_tx(sc); 557 558 /* Do a full reset */ 559 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 560 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 561 device_printf(sc->sc_dev, "cannot reset device\n"); 562 splx(s); 563 } 564 565 566 /* 567 * gem_rxdrain: 568 * 569 * Drain the receive queue. 570 */ 571 static void 572 gem_rxdrain(sc) 573 struct gem_softc *sc; 574 { 575 struct gem_rxsoft *rxs; 576 int i; 577 578 for (i = 0; i < GEM_NRXDESC; i++) { 579 rxs = &sc->sc_rxsoft[i]; 580 if (rxs->rxs_mbuf != NULL) { 581 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 582 BUS_DMASYNC_POSTREAD); 583 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 584 m_freem(rxs->rxs_mbuf); 585 rxs->rxs_mbuf = NULL; 586 } 587 } 588 } 589 590 /* 591 * Reset the whole thing. 592 */ 593 static void 594 gem_stop(ifp, disable) 595 struct ifnet *ifp; 596 int disable; 597 { 598 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 599 struct gem_txsoft *txs; 600 601 #ifdef GEM_DEBUG 602 CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); 603 #endif 604 605 callout_stop(&sc->sc_tick_ch); 606 607 /* XXX - Should we reset these instead? */ 608 gem_disable_tx(sc); 609 gem_disable_rx(sc); 610 611 /* 612 * Release any queued transmit buffers. 613 */ 614 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 615 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 616 if (txs->txs_ndescs != 0) { 617 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 618 BUS_DMASYNC_POSTWRITE); 619 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 620 if (txs->txs_mbuf != NULL) { 621 m_freem(txs->txs_mbuf); 622 txs->txs_mbuf = NULL; 623 } 624 } 625 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 626 } 627 628 if (disable) 629 gem_rxdrain(sc); 630 631 /* 632 * Mark the interface down and cancel the watchdog timer. 633 */ 634 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 635 ifp->if_timer = 0; 636 } 637 638 /* 639 * Reset the receiver 640 */ 641 int 642 gem_reset_rx(sc) 643 struct gem_softc *sc; 644 { 645 bus_space_tag_t t = sc->sc_bustag; 646 bus_space_handle_t h = sc->sc_h; 647 648 /* 649 * Resetting while DMA is in progress can cause a bus hang, so we 650 * disable DMA first. 651 */ 652 gem_disable_rx(sc); 653 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 654 /* Wait till it finishes */ 655 if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) 656 device_printf(sc->sc_dev, "cannot disable read dma\n"); 657 658 /* Wait 5ms extra. */ 659 DELAY(5000); 660 661 /* Finally, reset the ERX */ 662 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); 663 /* Wait till it finishes */ 664 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 665 device_printf(sc->sc_dev, "cannot reset receiver\n"); 666 return (1); 667 } 668 return (0); 669 } 670 671 672 /* 673 * Reset the transmitter 674 */ 675 static int 676 gem_reset_tx(sc) 677 struct gem_softc *sc; 678 { 679 bus_space_tag_t t = sc->sc_bustag; 680 bus_space_handle_t h = sc->sc_h; 681 int i; 682 683 /* 684 * Resetting while DMA is in progress can cause a bus hang, so we 685 * disable DMA first. 686 */ 687 gem_disable_tx(sc); 688 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 689 /* Wait till it finishes */ 690 if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) 691 device_printf(sc->sc_dev, "cannot disable read dma\n"); 692 693 /* Wait 5ms extra. */ 694 DELAY(5000); 695 696 /* Finally, reset the ETX */ 697 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); 698 /* Wait till it finishes */ 699 for (i = TRIES; i--; DELAY(100)) 700 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) 701 break; 702 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 703 device_printf(sc->sc_dev, "cannot reset receiver\n"); 704 return (1); 705 } 706 return (0); 707 } 708 709 /* 710 * disable receiver. 711 */ 712 static int 713 gem_disable_rx(sc) 714 struct gem_softc *sc; 715 { 716 bus_space_tag_t t = sc->sc_bustag; 717 bus_space_handle_t h = sc->sc_h; 718 u_int32_t cfg; 719 720 /* Flip the enable bit */ 721 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 722 cfg &= ~GEM_MAC_RX_ENABLE; 723 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 724 725 /* Wait for it to finish */ 726 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 727 } 728 729 /* 730 * disable transmitter. 731 */ 732 static int 733 gem_disable_tx(sc) 734 struct gem_softc *sc; 735 { 736 bus_space_tag_t t = sc->sc_bustag; 737 bus_space_handle_t h = sc->sc_h; 738 u_int32_t cfg; 739 740 /* Flip the enable bit */ 741 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 742 cfg &= ~GEM_MAC_TX_ENABLE; 743 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 744 745 /* Wait for it to finish */ 746 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 747 } 748 749 /* 750 * Initialize interface. 751 */ 752 static int 753 gem_meminit(sc) 754 struct gem_softc *sc; 755 { 756 struct gem_rxsoft *rxs; 757 int i, error; 758 759 /* 760 * Initialize the transmit descriptor ring. 761 */ 762 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 763 for (i = 0; i < GEM_NTXDESC; i++) { 764 sc->sc_txdescs[i].gd_flags = 0; 765 sc->sc_txdescs[i].gd_addr = 0; 766 } 767 sc->sc_txfree = GEM_MAXTXFREE; 768 sc->sc_txnext = 0; 769 sc->sc_txwin = 0; 770 771 /* 772 * Initialize the receive descriptor and receive job 773 * descriptor rings. 774 */ 775 for (i = 0; i < GEM_NRXDESC; i++) { 776 rxs = &sc->sc_rxsoft[i]; 777 if (rxs->rxs_mbuf == NULL) { 778 if ((error = gem_add_rxbuf(sc, i)) != 0) { 779 device_printf(sc->sc_dev, "unable to " 780 "allocate or map rx buffer %d, error = " 781 "%d\n", i, error); 782 /* 783 * XXX Should attempt to run with fewer receive 784 * XXX buffers instead of just failing. 785 */ 786 gem_rxdrain(sc); 787 return (1); 788 } 789 } else 790 GEM_INIT_RXDESC(sc, i); 791 } 792 sc->sc_rxptr = 0; 793 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 794 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 795 796 return (0); 797 } 798 799 static int 800 gem_ringsize(sz) 801 int sz; 802 { 803 int v = 0; 804 805 switch (sz) { 806 case 32: 807 v = GEM_RING_SZ_32; 808 break; 809 case 64: 810 v = GEM_RING_SZ_64; 811 break; 812 case 128: 813 v = GEM_RING_SZ_128; 814 break; 815 case 256: 816 v = GEM_RING_SZ_256; 817 break; 818 case 512: 819 v = GEM_RING_SZ_512; 820 break; 821 case 1024: 822 v = GEM_RING_SZ_1024; 823 break; 824 case 2048: 825 v = GEM_RING_SZ_2048; 826 break; 827 case 4096: 828 v = GEM_RING_SZ_4096; 829 break; 830 case 8192: 831 v = GEM_RING_SZ_8192; 832 break; 833 default: 834 printf("gem: invalid Receive Descriptor ring size\n"); 835 break; 836 } 837 return (v); 838 } 839 840 /* 841 * Initialization of interface; set up initialization block 842 * and transmit/receive descriptor rings. 843 */ 844 static void 845 gem_init(xsc) 846 void *xsc; 847 { 848 struct gem_softc *sc = (struct gem_softc *)xsc; 849 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 850 bus_space_tag_t t = sc->sc_bustag; 851 bus_space_handle_t h = sc->sc_h; 852 int s; 853 u_int32_t v; 854 855 s = splnet(); 856 857 #ifdef GEM_DEBUG 858 CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); 859 #endif 860 /* 861 * Initialization sequence. The numbered steps below correspond 862 * to the sequence outlined in section 6.3.5.1 in the Ethernet 863 * Channel Engine manual (part of the PCIO manual). 864 * See also the STP2002-STQ document from Sun Microsystems. 865 */ 866 867 /* step 1 & 2. Reset the Ethernet Channel */ 868 gem_stop(&sc->sc_arpcom.ac_if, 0); 869 gem_reset(sc); 870 #ifdef GEM_DEBUG 871 CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); 872 #endif 873 874 /* Re-initialize the MIF */ 875 gem_mifinit(sc); 876 877 /* step 3. Setup data structures in host memory */ 878 gem_meminit(sc); 879 880 /* step 4. TX MAC registers & counters */ 881 gem_init_regs(sc); 882 /* XXX: VLAN code from NetBSD temporarily removed. */ 883 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 884 (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); 885 886 /* step 5. RX MAC registers & counters */ 887 gem_setladrf(sc); 888 889 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 890 /* NOTE: we use only 32-bit DMA addresses here. */ 891 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); 892 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 893 894 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 895 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 896 #ifdef GEM_DEBUG 897 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", 898 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 899 #endif 900 901 /* step 8. Global Configuration & Interrupt Mask */ 902 bus_space_write_4(t, h, GEM_INTMASK, 903 ~(GEM_INTR_TX_INTME| 904 GEM_INTR_TX_EMPTY| 905 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| 906 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| 907 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| 908 GEM_INTR_BERR)); 909 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 910 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); 911 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ 912 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ 913 914 /* step 9. ETX Configuration: use mostly default values */ 915 916 /* Enable DMA */ 917 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 918 bus_space_write_4(t, h, GEM_TX_CONFIG, 919 v|GEM_TX_CONFIG_TXDMA_EN| 920 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 921 922 /* step 10. ERX Configuration */ 923 924 /* Encode Receive Descriptor ring size: four possible values */ 925 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 926 927 /* Enable DMA */ 928 bus_space_write_4(t, h, GEM_RX_CONFIG, 929 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 930 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| 931 (0<<GEM_RX_CONFIG_CXM_START_SHFT)); 932 /* 933 * The following value is for an OFF Threshold of about 3/4 full 934 * and an ON Threshold of 1/4 full. 935 */ 936 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 937 (3 * sc->sc_rxfifosize / 256) | 938 ( (sc->sc_rxfifosize / 256) << 12)); 939 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); 940 941 /* step 11. Configure Media */ 942 mii_mediachg(sc->sc_mii); 943 944 /* step 12. RX_MAC Configuration Register */ 945 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 946 v |= GEM_MAC_RX_ENABLE; 947 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 948 949 /* step 14. Issue Transmit Pending command */ 950 951 /* step 15. Give the reciever a swift kick */ 952 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 953 954 /* Start the one second timer. */ 955 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 956 957 ifp->if_flags |= IFF_RUNNING; 958 ifp->if_flags &= ~IFF_OACTIVE; 959 ifp->if_timer = 0; 960 sc->sc_ifflags = ifp->if_flags; 961 splx(s); 962 } 963 964 static int 965 gem_load_txmbuf(sc, m0) 966 struct gem_softc *sc; 967 struct mbuf *m0; 968 { 969 struct gem_txdma txd; 970 struct gem_txsoft *txs; 971 int error; 972 973 /* Get a work queue entry. */ 974 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 975 /* Ran out of descriptors. */ 976 return (-1); 977 } 978 txd.txd_sc = sc; 979 txd.txd_txs = txs; 980 txs->txs_mbuf = m0; 981 txs->txs_firstdesc = sc->sc_txnext; 982 error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, 983 gem_txdma_callback, &txd, BUS_DMA_NOWAIT); 984 if (error != 0) 985 goto fail; 986 if (txs->txs_ndescs == -1) { 987 error = -1; 988 goto fail; 989 } 990 991 /* Sync the DMA map. */ 992 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 993 BUS_DMASYNC_PREWRITE); 994 995 #ifdef GEM_DEBUG 996 CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " 997 "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, 998 txs->txs_ndescs); 999 #endif 1000 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1001 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1002 1003 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1004 sc->sc_txfree -= txs->txs_ndescs; 1005 return (0); 1006 1007 fail: 1008 #ifdef GEM_DEBUG 1009 CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); 1010 #endif 1011 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1012 return (error); 1013 } 1014 1015 static void 1016 gem_init_regs(sc) 1017 struct gem_softc *sc; 1018 { 1019 bus_space_tag_t t = sc->sc_bustag; 1020 bus_space_handle_t h = sc->sc_h; 1021 const u_char *laddr = sc->sc_arpcom.ac_enaddr; 1022 u_int32_t v; 1023 1024 /* These regs are not cleared on reset */ 1025 if (!sc->sc_inited) { 1026 1027 /* Wooo. Magic values. */ 1028 bus_space_write_4(t, h, GEM_MAC_IPG0, 0); 1029 bus_space_write_4(t, h, GEM_MAC_IPG1, 8); 1030 bus_space_write_4(t, h, GEM_MAC_IPG2, 4); 1031 1032 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1033 /* Max frame and max burst size */ 1034 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1035 ETHER_MAX_LEN | (0x2000<<16)); 1036 1037 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); 1038 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); 1039 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1040 /* Dunno.... */ 1041 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 1042 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 1043 ((laddr[5]<<8)|laddr[4])&0x3ff); 1044 1045 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1046 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 1047 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 1048 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 1049 1050 /* MAC control addr set to 01:80:c2:00:00:01 */ 1051 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 1052 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 1053 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 1054 1055 /* MAC filter addr set to 0:0:0:0:0:0 */ 1056 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 1057 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 1058 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 1059 1060 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 1061 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 1062 1063 sc->sc_inited = 1; 1064 } 1065 1066 /* Counters need to be zeroed */ 1067 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 1068 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 1069 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 1070 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 1071 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 1072 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 1073 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 1074 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1075 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1076 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1077 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1078 1079 /* Un-pause stuff */ 1080 #if 0 1081 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1082 #else 1083 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); 1084 #endif 1085 1086 /* 1087 * Set the station address. 1088 */ 1089 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1090 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1091 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1092 1093 /* 1094 * Enable MII outputs. Enable GMII if there is a gigabit PHY. 1095 */ 1096 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); 1097 v = GEM_MAC_XIF_TX_MII_ENA; 1098 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 1099 v |= GEM_MAC_XIF_FDPLX_LED; 1100 if (sc->sc_flags & GEM_GIGABIT) 1101 v |= GEM_MAC_XIF_GMII_MODE; 1102 } 1103 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); 1104 } 1105 1106 static void 1107 gem_start(ifp) 1108 struct ifnet *ifp; 1109 { 1110 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1111 struct mbuf *m0 = NULL; 1112 int firsttx, ntx = 0, ofree, txmfail; 1113 1114 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1115 return; 1116 1117 /* 1118 * Remember the previous number of free descriptors and 1119 * the first descriptor we'll use. 1120 */ 1121 ofree = sc->sc_txfree; 1122 firsttx = sc->sc_txnext; 1123 1124 #ifdef GEM_DEBUG 1125 CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", 1126 device_get_name(sc->sc_dev), ofree, firsttx); 1127 #endif 1128 1129 /* 1130 * Loop through the send queue, setting up transmit descriptors 1131 * until we drain the queue, or use up all available transmit 1132 * descriptors. 1133 */ 1134 txmfail = 0; 1135 do { 1136 /* 1137 * Grab a packet off the queue. 1138 */ 1139 IF_DEQUEUE(&ifp->if_snd, m0); 1140 if (m0 == NULL) 1141 break; 1142 1143 txmfail = gem_load_txmbuf(sc, m0); 1144 if (txmfail > 0) { 1145 /* Drop the mbuf and complain. */ 1146 printf("gem_start: error %d while loading mbuf dma " 1147 "map\n", txmfail); 1148 continue; 1149 } 1150 /* Not enough descriptors. */ 1151 if (txmfail == -1) { 1152 if (sc->sc_txfree == GEM_MAXTXFREE) 1153 panic("gem_start: mbuf chain too long!"); 1154 IF_PREPEND(&ifp->if_snd, m0); 1155 break; 1156 } 1157 1158 ntx++; 1159 /* Kick the transmitter. */ 1160 #ifdef GEM_DEBUG 1161 CTR2(KTR_GEM, "%s: gem_start: kicking tx %d", 1162 device_get_name(sc->sc_dev), sc->sc_txnext); 1163 #endif 1164 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, 1165 sc->sc_txnext); 1166 1167 if (ifp->if_bpf != NULL) 1168 bpf_mtap(ifp->if_bpf, m0); 1169 } while (1); 1170 1171 if (txmfail == -1 || sc->sc_txfree == 0) { 1172 /* No more slots left; notify upper layer. */ 1173 ifp->if_flags |= IFF_OACTIVE; 1174 } 1175 1176 if (ntx > 0) { 1177 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1178 1179 #ifdef GEM_DEBUG 1180 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1181 device_get_name(sc->sc_dev), firsttx); 1182 #endif 1183 1184 /* Set a watchdog timer in case the chip flakes out. */ 1185 ifp->if_timer = 5; 1186 #ifdef GEM_DEBUG 1187 CTR2(KTR_GEM, "%s: gem_start: watchdog %d", 1188 device_get_name(sc->sc_dev), ifp->if_timer); 1189 #endif 1190 } 1191 } 1192 1193 /* 1194 * Transmit interrupt. 1195 */ 1196 static void 1197 gem_tint(sc) 1198 struct gem_softc *sc; 1199 { 1200 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1201 bus_space_tag_t t = sc->sc_bustag; 1202 bus_space_handle_t mac = sc->sc_h; 1203 struct gem_txsoft *txs; 1204 int txlast; 1205 int progress = 0; 1206 1207 1208 #ifdef GEM_DEBUG 1209 CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev)); 1210 #endif 1211 1212 /* 1213 * Unload collision counters 1214 */ 1215 ifp->if_collisions += 1216 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 1217 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + 1218 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 1219 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 1220 1221 /* 1222 * then clear the hardware counters. 1223 */ 1224 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 1225 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 1226 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 1227 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 1228 1229 /* 1230 * Go through our Tx list and free mbufs for those 1231 * frames that have been transmitted. 1232 */ 1233 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1234 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1235 1236 #ifdef GEM_DEBUG 1237 if (ifp->if_flags & IFF_DEBUG) { 1238 int i; 1239 printf(" txsoft %p transmit chain:\n", txs); 1240 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1241 printf("descriptor %d: ", i); 1242 printf("gd_flags: 0x%016llx\t", (long long) 1243 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1244 printf("gd_addr: 0x%016llx\n", (long long) 1245 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1246 if (i == txs->txs_lastdesc) 1247 break; 1248 } 1249 } 1250 #endif 1251 1252 /* 1253 * In theory, we could harveast some descriptors before 1254 * the ring is empty, but that's a bit complicated. 1255 * 1256 * GEM_TX_COMPLETION points to the last descriptor 1257 * processed +1. 1258 */ 1259 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); 1260 #ifdef GEM_DEBUG 1261 CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " 1262 "txs->txs_lastdesc = %d, txlast = %d", 1263 txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1264 #endif 1265 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1266 if ((txlast >= txs->txs_firstdesc) && 1267 (txlast <= txs->txs_lastdesc)) 1268 break; 1269 } else { 1270 /* Ick -- this command wraps */ 1271 if ((txlast >= txs->txs_firstdesc) || 1272 (txlast <= txs->txs_lastdesc)) 1273 break; 1274 } 1275 1276 #ifdef GEM_DEBUG 1277 CTR0(KTR_GEM, "gem_tint: releasing a desc"); 1278 #endif 1279 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1280 1281 sc->sc_txfree += txs->txs_ndescs; 1282 1283 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1284 BUS_DMASYNC_POSTWRITE); 1285 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1286 if (txs->txs_mbuf != NULL) { 1287 m_freem(txs->txs_mbuf); 1288 txs->txs_mbuf = NULL; 1289 } 1290 1291 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1292 1293 ifp->if_opackets++; 1294 progress = 1; 1295 } 1296 1297 #ifdef GEM_DEBUG 1298 CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " 1299 "GEM_TX_DATA_PTR %llx " 1300 "GEM_TX_COMPLETION %x", 1301 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), 1302 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, 1303 GEM_TX_DATA_PTR_HI) << 32) | 1304 bus_space_read_4(sc->sc_bustag, sc->sc_h, 1305 GEM_TX_DATA_PTR_LO), 1306 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); 1307 #endif 1308 1309 if (progress) { 1310 if (sc->sc_txfree == GEM_NTXDESC - 1) 1311 sc->sc_txwin = 0; 1312 1313 /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ 1314 ifp->if_flags &= ~IFF_OACTIVE; 1315 gem_start(ifp); 1316 1317 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1318 ifp->if_timer = 0; 1319 } 1320 1321 #ifdef GEM_DEBUG 1322 CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", 1323 device_get_name(sc->sc_dev), ifp->if_timer); 1324 #endif 1325 } 1326 1327 #if 0 1328 static void 1329 gem_rint_timeout(arg) 1330 void *arg; 1331 { 1332 1333 gem_rint((struct gem_softc *)arg); 1334 } 1335 #endif 1336 1337 /* 1338 * Receive interrupt. 1339 */ 1340 static void 1341 gem_rint(sc) 1342 struct gem_softc *sc; 1343 { 1344 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1345 bus_space_tag_t t = sc->sc_bustag; 1346 bus_space_handle_t h = sc->sc_h; 1347 struct gem_rxsoft *rxs; 1348 struct mbuf *m; 1349 u_int64_t rxstat; 1350 u_int32_t rxcomp; 1351 int i, len, progress = 0; 1352 1353 callout_stop(&sc->sc_rx_ch); 1354 #ifdef GEM_DEBUG 1355 CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); 1356 #endif 1357 1358 /* 1359 * Read the completion register once. This limits 1360 * how long the following loop can execute. 1361 */ 1362 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); 1363 1364 #ifdef GEM_DEBUG 1365 CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", 1366 sc->sc_rxptr, rxcomp); 1367 #endif 1368 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1369 for (i = sc->sc_rxptr; i != rxcomp; 1370 i = GEM_NEXTRX(i)) { 1371 rxs = &sc->sc_rxsoft[i]; 1372 1373 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); 1374 1375 if (rxstat & GEM_RD_OWN) { 1376 #if 0 /* XXX: In case of emergency, re-enable this. */ 1377 /* 1378 * The descriptor is still marked as owned, although 1379 * it is supposed to have completed. This has been 1380 * observed on some machines. Just exiting here 1381 * might leave the packet sitting around until another 1382 * one arrives to trigger a new interrupt, which is 1383 * generally undesirable, so set up a timeout. 1384 */ 1385 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1386 gem_rint_timeout, sc); 1387 #endif 1388 break; 1389 } 1390 1391 progress++; 1392 ifp->if_ipackets++; 1393 1394 if (rxstat & GEM_RD_BAD_CRC) { 1395 ifp->if_ierrors++; 1396 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1397 GEM_INIT_RXDESC(sc, i); 1398 continue; 1399 } 1400 1401 #ifdef GEM_DEBUG 1402 if (ifp->if_flags & IFF_DEBUG) { 1403 printf(" rxsoft %p descriptor %d: ", rxs, i); 1404 printf("gd_flags: 0x%016llx\t", (long long) 1405 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 1406 printf("gd_addr: 0x%016llx\n", (long long) 1407 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 1408 } 1409 #endif 1410 1411 /* 1412 * No errors; receive the packet. Note the Gem 1413 * includes the CRC with every packet. 1414 */ 1415 len = GEM_RD_BUFLEN(rxstat); 1416 1417 /* 1418 * Allocate a new mbuf cluster. If that fails, we are 1419 * out of memory, and must drop the packet and recycle 1420 * the buffer that's already attached to this descriptor. 1421 */ 1422 m = rxs->rxs_mbuf; 1423 if (gem_add_rxbuf(sc, i) != 0) { 1424 ifp->if_ierrors++; 1425 GEM_INIT_RXDESC(sc, i); 1426 continue; 1427 } 1428 m->m_data += 2; /* We're already off by two */ 1429 1430 m->m_pkthdr.rcvif = ifp; 1431 m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; 1432 1433 /* Pass it on. */ 1434 (*ifp->if_input)(ifp, m); 1435 } 1436 1437 if (progress) { 1438 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1439 /* Update the receive pointer. */ 1440 if (i == sc->sc_rxptr) { 1441 device_printf(sc->sc_dev, "rint: ring wrap\n"); 1442 } 1443 sc->sc_rxptr = i; 1444 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); 1445 } 1446 1447 #ifdef GEM_DEBUG 1448 CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", 1449 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); 1450 #endif 1451 } 1452 1453 1454 /* 1455 * gem_add_rxbuf: 1456 * 1457 * Add a receive buffer to the indicated descriptor. 1458 */ 1459 static int 1460 gem_add_rxbuf(sc, idx) 1461 struct gem_softc *sc; 1462 int idx; 1463 { 1464 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1465 struct mbuf *m; 1466 int error; 1467 1468 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1469 if (m == NULL) 1470 return (ENOBUFS); 1471 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1472 1473 #ifdef GEM_DEBUG 1474 /* bzero the packet to check dma */ 1475 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1476 #endif 1477 1478 if (rxs->rxs_mbuf != NULL) { 1479 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1480 BUS_DMASYNC_POSTREAD); 1481 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1482 } 1483 1484 rxs->rxs_mbuf = m; 1485 1486 error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap, 1487 m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT); 1488 if (error != 0 || rxs->rxs_paddr == 0) { 1489 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1490 "%d\n", idx, error); 1491 panic("gem_add_rxbuf"); /* XXX */ 1492 } 1493 1494 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1495 1496 GEM_INIT_RXDESC(sc, idx); 1497 1498 return (0); 1499 } 1500 1501 1502 static void 1503 gem_eint(sc, status) 1504 struct gem_softc *sc; 1505 u_int status; 1506 { 1507 1508 if ((status & GEM_INTR_MIF) != 0) { 1509 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1510 return; 1511 } 1512 1513 device_printf(sc->sc_dev, "status=%x\n", status); 1514 } 1515 1516 1517 void 1518 gem_intr(v) 1519 void *v; 1520 { 1521 struct gem_softc *sc = (struct gem_softc *)v; 1522 bus_space_tag_t t = sc->sc_bustag; 1523 bus_space_handle_t seb = sc->sc_h; 1524 u_int32_t status; 1525 1526 status = bus_space_read_4(t, seb, GEM_STATUS); 1527 #ifdef GEM_DEBUG 1528 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", 1529 device_get_name(sc->sc_dev), (status>>19), 1530 (u_int)status); 1531 #endif 1532 1533 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1534 gem_eint(sc, status); 1535 1536 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1537 gem_tint(sc); 1538 1539 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1540 gem_rint(sc); 1541 1542 /* We should eventually do more than just print out error stats. */ 1543 if (status & GEM_INTR_TX_MAC) { 1544 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); 1545 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 1546 device_printf(sc->sc_dev, "MAC tx fault, status %x\n", 1547 txstat); 1548 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 1549 gem_init(sc); 1550 } 1551 if (status & GEM_INTR_RX_MAC) { 1552 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); 1553 if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 1554 device_printf(sc->sc_dev, "MAC rx fault, status %x\n", 1555 rxstat); 1556 if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0) 1557 gem_init(sc); 1558 } 1559 } 1560 1561 1562 static void 1563 gem_watchdog(ifp) 1564 struct ifnet *ifp; 1565 { 1566 struct gem_softc *sc = ifp->if_softc; 1567 1568 #ifdef GEM_DEBUG 1569 CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1570 "GEM_MAC_RX_CONFIG %x", 1571 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), 1572 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), 1573 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); 1574 CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " 1575 "GEM_MAC_TX_CONFIG %x", 1576 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), 1577 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), 1578 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); 1579 #endif 1580 1581 device_printf(sc->sc_dev, "device timeout\n"); 1582 ++ifp->if_oerrors; 1583 1584 /* Try to get more packets going. */ 1585 gem_start(ifp); 1586 } 1587 1588 /* 1589 * Initialize the MII Management Interface 1590 */ 1591 static void 1592 gem_mifinit(sc) 1593 struct gem_softc *sc; 1594 { 1595 bus_space_tag_t t = sc->sc_bustag; 1596 bus_space_handle_t mif = sc->sc_h; 1597 1598 /* Configure the MIF in frame mode */ 1599 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1600 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 1601 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 1602 } 1603 1604 /* 1605 * MII interface 1606 * 1607 * The GEM MII interface supports at least three different operating modes: 1608 * 1609 * Bitbang mode is implemented using data, clock and output enable registers. 1610 * 1611 * Frame mode is implemented by loading a complete frame into the frame 1612 * register and polling the valid bit for completion. 1613 * 1614 * Polling mode uses the frame register but completion is indicated by 1615 * an interrupt. 1616 * 1617 */ 1618 int 1619 gem_mii_readreg(dev, phy, reg) 1620 device_t dev; 1621 int phy, reg; 1622 { 1623 struct gem_softc *sc = device_get_softc(dev); 1624 bus_space_tag_t t = sc->sc_bustag; 1625 bus_space_handle_t mif = sc->sc_h; 1626 int n; 1627 u_int32_t v; 1628 1629 #ifdef GEM_DEBUG_PHY 1630 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1631 #endif 1632 1633 #if 0 1634 /* Select the desired PHY in the MIF configuration register */ 1635 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1636 /* Clear PHY select bit */ 1637 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1638 if (phy == GEM_PHYAD_EXTERNAL) 1639 /* Set PHY select bit to get at external device */ 1640 v |= GEM_MIF_CONFIG_PHY_SEL; 1641 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1642 #endif 1643 1644 /* Construct the frame command */ 1645 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1646 GEM_MIF_FRAME_READ; 1647 1648 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1649 for (n = 0; n < 100; n++) { 1650 DELAY(1); 1651 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1652 if (v & GEM_MIF_FRAME_TA0) 1653 return (v & GEM_MIF_FRAME_DATA); 1654 } 1655 1656 device_printf(sc->sc_dev, "mii_read timeout\n"); 1657 return (0); 1658 } 1659 1660 int 1661 gem_mii_writereg(dev, phy, reg, val) 1662 device_t dev; 1663 int phy, reg, val; 1664 { 1665 struct gem_softc *sc = device_get_softc(dev); 1666 bus_space_tag_t t = sc->sc_bustag; 1667 bus_space_handle_t mif = sc->sc_h; 1668 int n; 1669 u_int32_t v; 1670 1671 #ifdef GEM_DEBUG_PHY 1672 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1673 #endif 1674 1675 #if 0 1676 /* Select the desired PHY in the MIF configuration register */ 1677 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1678 /* Clear PHY select bit */ 1679 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1680 if (phy == GEM_PHYAD_EXTERNAL) 1681 /* Set PHY select bit to get at external device */ 1682 v |= GEM_MIF_CONFIG_PHY_SEL; 1683 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1684 #endif 1685 /* Construct the frame command */ 1686 v = GEM_MIF_FRAME_WRITE | 1687 (phy << GEM_MIF_PHY_SHIFT) | 1688 (reg << GEM_MIF_REG_SHIFT) | 1689 (val & GEM_MIF_FRAME_DATA); 1690 1691 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1692 for (n = 0; n < 100; n++) { 1693 DELAY(1); 1694 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1695 if (v & GEM_MIF_FRAME_TA0) 1696 return (1); 1697 } 1698 1699 device_printf(sc->sc_dev, "mii_write timeout\n"); 1700 return (0); 1701 } 1702 1703 void 1704 gem_mii_statchg(dev) 1705 device_t dev; 1706 { 1707 struct gem_softc *sc = device_get_softc(dev); 1708 #ifdef GEM_DEBUG 1709 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1710 #endif 1711 bus_space_tag_t t = sc->sc_bustag; 1712 bus_space_handle_t mac = sc->sc_h; 1713 u_int32_t v; 1714 1715 #ifdef GEM_DEBUG 1716 if (sc->sc_debug) 1717 printf("gem_mii_statchg: status change: phy = %d\n", 1718 sc->sc_phys[instance]); 1719 #endif 1720 1721 /* Set tx full duplex options */ 1722 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1723 DELAY(10000); /* reg must be cleared and delay before changing. */ 1724 v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| 1725 GEM_MAC_TX_ENABLE; 1726 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { 1727 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; 1728 } 1729 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); 1730 1731 /* XIF Configuration */ 1732 /* We should really calculate all this rather than rely on defaults */ 1733 v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); 1734 v = GEM_MAC_XIF_LINK_LED; 1735 v |= GEM_MAC_XIF_TX_MII_ENA; 1736 1737 /* If an external transceiver is connected, enable its MII drivers */ 1738 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 1739 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 1740 /* External MII needs echo disable if half duplex. */ 1741 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1742 /* turn on full duplex LED */ 1743 v |= GEM_MAC_XIF_FDPLX_LED; 1744 else 1745 /* half duplex -- disable echo */ 1746 v |= GEM_MAC_XIF_ECHO_DISABL; 1747 1748 if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) 1749 v |= GEM_MAC_XIF_GMII_MODE; 1750 else 1751 v &= ~GEM_MAC_XIF_GMII_MODE; 1752 } else { 1753 /* Internal MII needs buf enable */ 1754 v |= GEM_MAC_XIF_MII_BUF_ENA; 1755 } 1756 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 1757 } 1758 1759 int 1760 gem_mediachange(ifp) 1761 struct ifnet *ifp; 1762 { 1763 struct gem_softc *sc = ifp->if_softc; 1764 1765 /* XXX Add support for serial media. */ 1766 1767 return (mii_mediachg(sc->sc_mii)); 1768 } 1769 1770 void 1771 gem_mediastatus(ifp, ifmr) 1772 struct ifnet *ifp; 1773 struct ifmediareq *ifmr; 1774 { 1775 struct gem_softc *sc = ifp->if_softc; 1776 1777 if ((ifp->if_flags & IFF_UP) == 0) 1778 return; 1779 1780 mii_pollstat(sc->sc_mii); 1781 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1782 ifmr->ifm_status = sc->sc_mii->mii_media_status; 1783 } 1784 1785 /* 1786 * Process an ioctl request. 1787 */ 1788 static int 1789 gem_ioctl(ifp, cmd, data) 1790 struct ifnet *ifp; 1791 u_long cmd; 1792 caddr_t data; 1793 { 1794 struct gem_softc *sc = ifp->if_softc; 1795 struct ifreq *ifr = (struct ifreq *)data; 1796 int s, error = 0; 1797 1798 switch (cmd) { 1799 case SIOCSIFADDR: 1800 case SIOCGIFADDR: 1801 case SIOCSIFMTU: 1802 error = ether_ioctl(ifp, cmd, data); 1803 break; 1804 case SIOCSIFFLAGS: 1805 if (ifp->if_flags & IFF_UP) { 1806 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) 1807 gem_setladrf(sc); 1808 else 1809 gem_init(sc); 1810 } else { 1811 if (ifp->if_flags & IFF_RUNNING) 1812 gem_stop(ifp, 0); 1813 } 1814 sc->sc_ifflags = ifp->if_flags; 1815 error = 0; 1816 break; 1817 case SIOCADDMULTI: 1818 case SIOCDELMULTI: 1819 gem_setladrf(sc); 1820 error = 0; 1821 break; 1822 case SIOCGIFMEDIA: 1823 case SIOCSIFMEDIA: 1824 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 1825 break; 1826 default: 1827 error = ENOTTY; 1828 break; 1829 } 1830 1831 /* Try to get things going again */ 1832 if (ifp->if_flags & IFF_UP) 1833 gem_start(ifp); 1834 splx(s); 1835 return (error); 1836 } 1837 1838 /* 1839 * Set up the logical address filter. 1840 */ 1841 static void 1842 gem_setladrf(sc) 1843 struct gem_softc *sc; 1844 { 1845 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1846 struct ifmultiaddr *inm; 1847 bus_space_tag_t t = sc->sc_bustag; 1848 bus_space_handle_t h = sc->sc_h; 1849 u_int32_t crc; 1850 u_int32_t hash[16]; 1851 u_int32_t v; 1852 int i; 1853 1854 /* Get current RX configuration */ 1855 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1856 1857 /* 1858 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1859 * and hash filter. Depending on the case, the right bit will be 1860 * enabled. 1861 */ 1862 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1863 GEM_MAC_RX_PROMISC_GRP); 1864 1865 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1866 /* Turn on promiscuous mode */ 1867 v |= GEM_MAC_RX_PROMISCUOUS; 1868 goto chipit; 1869 } 1870 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1871 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1872 ifp->if_flags |= IFF_ALLMULTI; 1873 v |= GEM_MAC_RX_PROMISC_GRP; 1874 goto chipit; 1875 } 1876 1877 /* 1878 * Set up multicast address filter by passing all multicast addresses 1879 * through a crc generator, and then using the high order 8 bits as an 1880 * index into the 256 bit logical address filter. The high order 4 1881 * bits selects the word, while the other 4 bits select the bit within 1882 * the word (where bit 0 is the MSB). 1883 */ 1884 1885 /* Clear hash table */ 1886 memset(hash, 0, sizeof(hash)); 1887 1888 TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { 1889 if (inm->ifma_addr->sa_family != AF_LINK) 1890 continue; 1891 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1892 inm->ifma_addr), ETHER_ADDR_LEN); 1893 1894 /* Just want the 8 most significant bits. */ 1895 crc >>= 24; 1896 1897 /* Set the corresponding bit in the filter. */ 1898 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1899 } 1900 1901 v |= GEM_MAC_RX_HASH_FILTER; 1902 ifp->if_flags &= ~IFF_ALLMULTI; 1903 1904 /* Now load the hash table into the chip (if we are using it) */ 1905 for (i = 0; i < 16; i++) { 1906 bus_space_write_4(t, h, 1907 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1908 hash[i]); 1909 } 1910 1911 chipit: 1912 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1913 } 1914