/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Sun GEM ethernet controllers.
 */

#if 0
#define	GEM_DEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#if 0
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &Giant, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);
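
	/*
	 * sc_txfreeq holds job descriptors that are free for use;
	 * gem_load_txmbuf() moves them to sc_txdirtyq, and gem_tint()
	 * (or gem_stop()) returns them to the free list once their
	 * frames have been transmitted or discarded.
	 */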

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size; the register counts 64-byte units, so v / 16 is kB. */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

#if notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ether_ifdetach(ifp);
	gem_stop(ifp, 1);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	gem_stop(ifp, 0);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (ifp->if_flags & IFF_UP)
		gem_init(ifp);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count"));
	rxs->rxs_paddr = segs[0].ds_addr;
}
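
/*
 * gem_txdma_callback:
 *
 *	Fill transmit descriptors for a freshly mapped mbuf chain.  Sets
 *	txs->txs_ndescs to -1 when there are not enough free descriptors
 *	left in the ring, so that gem_load_txmbuf() can fail gracefully.
 */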
static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
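
/*
 * gem_bitwait:
 *
 *	Poll register r until all bits in clr read back as zero and all
 *	bits in set read back as one.  Returns 1 on success and 0 once
 *	the poll times out (TRIES iterations of 100us, about a second).
 */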
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
#endif

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize the descriptor rings.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
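	/*
	 * Per the "max frame and max burst size" write in gem_init_regs(),
	 * the low bits of GEM_MAC_MAC_MAX_FRAME hold the maximum frame
	 * length and the upper 16 bits the maximum burst size (0x2000
	 * here).
	 */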
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}
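
/*
 * gem_load_txmbuf:
 *
 *	Map the mbuf chain m0 into transmit descriptors.  Returns 0 on
 *	success, -1 if no job descriptor or not enough ring slots are
 *	available (the caller should requeue the packet), or a positive
 *	errno from bus_dmamap_load_mbuf() (the caller should drop it).
 */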
static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_mbuf = m0;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo. Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif
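
	/*
	 * Each GEM_MAC_ADDRn write below loads 16 bits of the address;
	 * ADDR0 takes the two least significant octets.
	 */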
	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
	}
}

/*
 * Transmit interrupt.
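 *
 * Reclaim completed descriptors, harvest the MAC collision counters
 * and restart the output queue once descriptors become available.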
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
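		 *
		 * A frame is fully transmitted only once txlast has
		 * advanced past its txs_lastdesc; both checks below stop
		 * the scan while txlast still lies within the range
		 * [txs_firstdesc, txs_lastdesc], the second one handling
		 * the case where that range wraps around the ring.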
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
}

#if 0
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.
			 * Just exiting here might leave the packet sitting
			 * around until another one arrives to trigger a new
			 * interrupt, which is generally undesirable, so set
			 * up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
		/*
		 * The hardware stored the frame at an offset of two bytes
		 * (the FBOFF field written in gem_init()), which leaves
		 * the payload's IP header 32-bit aligned.
		 */
		m->m_data += 2;

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
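 *
 *	A fresh cluster mbuf is allocated first, so on failure the old
 *	buffer stays attached and the caller can simply recycle the
 *	descriptor.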
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);
#endif

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/*
	 * We should eventually do more than just print out error stats.
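	 * Note that a TX underrun, a too-long packet and an RX overflow
	 * are currently handled by reinitializing the whole chip.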
	 */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));
#endif

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
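 *
 * This driver configures the MIF for frame mode (see gem_mifinit());
 * gem_mii_readreg() and gem_mii_writereg() below busy-wait on the
 * GEM_MIF_FRAME_TA0 bit for completion.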
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
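 *
 * As an example of the hashing below: a multicast address whose CRC
 * has 0xa7 as its top byte sets bit (15 - 0x7) = 8 in hash word
 * hash[0xa].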
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}