/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Sun GEM ethernet controllers.
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_start_locked(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS	GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(struct gem_softc *sc)
{
	struct ifnet *ifp;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	gem_reset(sc);
	GEM_UNLOCK(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_inited = 0;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	mii_tick(sc->sc_mii);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(struct gem_softc *sc, bus_addr_t r, u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

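/*
 * Note: with TRIES at 10000 and a 100us DELAY() per iteration,
 * gem_bitwait() gives the hardware roughly one second to react
 * before its callers report a reset or disable failure.
 */
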
/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

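/*
 * The two disable routines below share one pattern: clear the MAC
 * enable bit, then poll with gem_bitwait() until the hardware has
 * acknowledged it.  They return nonzero on success, zero on timeout.
 */
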
/*
 * disable receiver.
 */
static int
gem_disable_rx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
gem_ringsize(int sz)
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

static void
gem_init(void *xsc)
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME |
	    GEM_INTR_TX_EMPTY |
	    GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF |
	    GEM_INTR_RX_TAG_ERR | GEM_INTR_PCS |
	    GEM_INTR_MAC_CONTROL | GEM_INTR_MIF |
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v | GEM_TX_CONFIG_TXDMA_EN |
	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN |
	    (0 << GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
}

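/*
 * Load an mbuf chain into the TX ring.  Returns 0 on success, -1 if
 * there are not enough free descriptors (the caller re-queues the
 * packet), or a positive errno from bus_dmamap_load_mbuf() (the
 * caller drops the packet).
 */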
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf *m0)
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = m0;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

static void
gem_init_regs(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) |
		    (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
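	/*
	 * A -1 from gem_load_txmbuf() normally just means the ring is
	 * full; the packet is prepended back onto the send queue and
	 * retried later.  If the ring is completely empty (sc_txfree ==
	 * GEM_MAXTXFREE) and the chain still does not fit, it never
	 * will, which is why that case panics below.
	 */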
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
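		/*
		 * Example (hypothetical sizes): on a 64-descriptor ring,
		 * a job occupying descriptors 62..1 has txs_firstdesc
		 * (62) > txs_lastdesc (1).  Completion values 62, 63, 0
		 * and 1 then mean the job is still in flight, which is
		 * what the wrapped comparison below tests for.
		 */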
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		gem_start_locked(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = (struct gem_softc *)arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.
			 * Just exiting here might leave the packet sitting
			 * around until another one arrives to trigger a new
			 * interrupt, which is generally undesirable, so set
			 * up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
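	/*
	 * sc_rdmatag was created with nsegments = 1 in gem_attach(), so
	 * a successful load always yields exactly one segment.
	 */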
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (error != 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		m_freem(m);
		return (ENOBUFS);
	}
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(void *v)
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	GEM_LOCK(sc);
	status = bus_space_read_4(t, seb, GEM_STATUS);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status >> 19),
	    (u_int)status);
#endif

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		/*
		 * On some chip revisions GEM_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW)
			gem_init_locked(sc);
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
	}
	GEM_UNLOCK(sc);
}

static void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));
#endif

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance;
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT |
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX Add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

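/*
 * Note on the SIOCSIFFLAGS case below: if the only flag that changed
 * is IFF_PROMISC, reprogramming the address filter is sufficient;
 * any other change while the interface is up triggers a full
 * reinitialization.
 */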
/*
 * Process an ioctl request.
 */
static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	/* Try to get things going again */
	GEM_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		gem_start_locked(ifp);
	GEM_UNLOCK(sc);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}