/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Sun GEM ethernet controllers.
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_start_locked(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static int	gem_watchdog(struct gem_softc *);
static void	gem_init(void *);
static void	gem_init_locked(struct gem_softc *);
static void	gem_init_regs(struct gem_softc *);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
    u_int32_t);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	gem_reset(sc);
	GEM_UNLOCK(sc);

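	/*
	 * Create the DMA tag hierarchy: a parent tag for the device,
	 * out of which the receive buffer, transmit buffer and control
	 * data (descriptor ring) tags below are carved.
	 */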
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

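	/*
	 * gem_cddma_callback() only records an address on success, so
	 * sc_cddma is seeded with 0 and a still-zero value after the
	 * load is treated as a failure even if no error was returned.
	 */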
	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

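	/*
	 * Note: the FIFO size registers appear to count in units of
	 * 64 bytes, hence the scaling by 64 for RX here and the
	 * division by 16 to obtain kB for TX in the printf below.
	 */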
	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_inited = 0;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

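/*
 * gem_txdma_callback:
 *
 *	Invoked by bus_dmamap_load_mbuf() on behalf of gem_load_txmbuf()
 *	to fill in the transmit descriptors for a freshly mapped mbuf
 *	chain.  Sets txs_ndescs to -1 when the ring does not have enough
 *	free descriptors left for the chain.
 */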
static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	     seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

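/*
 * gem_bitwait:
 *
 *	Poll register `r' until the bits in `clr' read back as zero and
 *	the bits in `set' read back as one, pausing 100us between reads
 *	for at most TRIES iterations.  Returns nonzero if the condition
 *	was met, zero on timeout.
 */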
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Stop the interface.  If `disable' is set, also release the receive
 * buffers.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_wdog_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize the transmit and receive descriptor rings.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid descriptor ring size\n");
		break;
	}
	return (v);
}

static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

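	/*
	 * Bits set in the interrupt mask register disable the
	 * corresponding sources, so the complement of the set we are
	 * interested in is written here.
	 */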
	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	      GEM_INTR_TX_EMPTY|
	      GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	      GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	      GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	      GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifflags = ifp->if_flags;
}

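/*
 * gem_load_txmbuf:
 *
 *	Map an mbuf chain and queue it for transmission.  Returns 0 on
 *	success, -1 when the ring is out of descriptors (so the caller
 *	may requeue the mbuf) and a positive errno when the DMA load
 *	itself failed.
 */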
static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = m0;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) |
		    (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

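/*
 * gem_start:
 *
 *	ifnet if_start entry point; wraps gem_start_locked() in the
 *	driver lock.
 */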
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		BPF_MTAP(ifp, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), sc->sc_wdog_timer);
#endif
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	     GEM_TX_DATA_PTR_HI) << 32) |
	     bus_space_read_4(sc->sc_bustag, sc->sc_h,
	     GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		gem_start_locked(ifp);

		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(arg)
	void *arg;
{
	struct gem_softc *sc = (struct gem_softc *)arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
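		/*
		 * gem_init_locked() programs a first-byte offset of 2
		 * into GEM_RX_CONFIG, so the chip deposits the frame
		 * two bytes into the cluster; skip over those bytes,
		 * which conveniently leaves the IP header 32-bit
		 * aligned.
		 */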
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (error != 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		m_freem(m);
		return (ENOBUFS);
	}
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	GEM_LOCK(sc);
	status = bus_space_read_4(t, seb, GEM_STATUS);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);
#endif

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		/*
		 * On some chip revisions GEM_MAC_RX_OVERFLOW interrupts
		 * happen often due to a silicon bug, so handle them
		 * silently.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW)
			gem_init_locked(sc);
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
	}
	GEM_UNLOCK(sc);
}

static int
gem_watchdog(sc)
	struct gem_softc *sc;
{

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	device_printf(sc->sc_dev, "device timeout\n");
	++sc->sc_ifp->if_oerrors;

	/* Try to get more packets going. */
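	/*
	 * gem_init_locked() restarts the tick callout itself, so
	 * EJUSTRETURN tells gem_tick() not to reschedule it a second
	 * time.
	 */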
	gem_init_locked(sc);
	return (EJUSTRETURN);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

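/*
 * gem_mii_statchg:
 *
 *	Callback from the MII layer whenever the negotiated media
 *	changes; reprograms the TX MAC and XIF configuration to match
 *	the new duplex and speed settings.
 */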
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance;
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX Add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
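		/*
		 * If only the promiscuous flag differs from the last
		 * known state, reprogramming the RX filter suffices;
		 * otherwise reinitialize the whole interface.
		 */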
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	/* Try to get things going again */
	GEM_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		gem_start_locked(ifp);
	GEM_UNLOCK(sc);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}