/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum for UDP datagrams, which
 * can yield a checksum of 0x0.  As a safeguard, UDP checksum offload is
 * disabled by default.  It can be reactivated by setting the special
 * link option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)

static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static int	gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))

int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	if (error != 0) {
		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	sc->sc_csum_features = GEM_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static __inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters.
	 */
	ifp->if_collisions +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);

	/*
	 * Then clear the hardware counters.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}

static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}

static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

static int
gem_disable_rx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
	    0));
}

static int
gem_disable_tx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
	    0));
}

static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return (0);
}

static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %d\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}

static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	gem_stop(ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8.  Global Configuration & Interrupt Mask */
	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
#ifdef GEM_DEBUG
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9.  ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = gem_ringsize(GEM_NTXDESC /* XXX */);
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);

	/* Enable DMA. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);

	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));

	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);

	/* step 13.  TX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
	v |= GEM_MAC_TX_ENABLE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, offset, seg;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
		    ((offset + m->m_pkthdr.csum_data) <<
		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= GEM_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		flags |= GEM_TD_INTERRUPT_ME;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}

static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* magic values */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame and max burst size */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* dunno... */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);

		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * RX performance in particular.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	/* Set the station address. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}

static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int ntx;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: kicking TX %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
		GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);

		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}
}

static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int txlast, progress;
#ifdef GEM_DEBUG
	int i;

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		gem_start_locked(ifp);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}
#endif

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and must be incremented in multiples of
		 * 4 (because the DMA engine fetches/updates descriptors
		 * in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += 2;	/* We're already off by two */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
#endif
}

static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
}

void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = GEM_BANK1_READ_4(sc, GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__, (status >> 19),
	    (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 =
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if ((status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (status & GEM_INTR_TX_MAC) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
		 * silicon bug, so handle it silently.  Moreover, it's
		 * likely that the receiver has hung, so we reset it.
1683 */ 1684 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1685 sc->sc_ifp->if_ierrors++; 1686 gem_reset_rxdma(sc); 1687 } else if ((status2 & 1688 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1689 device_printf(sc->sc_dev, 1690 "MAC RX fault, status %x\n", status2); 1691 } 1692 GEM_UNLOCK(sc); 1693 } 1694 1695 static int 1696 gem_watchdog(struct gem_softc *sc) 1697 { 1698 struct ifnet *ifp = sc->sc_ifp; 1699 1700 GEM_LOCK_ASSERT(sc, MA_OWNED); 1701 1702 #ifdef GEM_DEBUG 1703 CTR4(KTR_GEM, 1704 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1705 __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG), 1706 GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS), 1707 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG)); 1708 CTR4(KTR_GEM, 1709 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1710 __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG), 1711 GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS), 1712 GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG)); 1713 #endif 1714 1715 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1716 return (0); 1717 1718 if ((sc->sc_flags & GEM_LINK) != 0) 1719 device_printf(sc->sc_dev, "device timeout\n"); 1720 else if (bootverbose) 1721 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1722 ++ifp->if_oerrors; 1723 1724 /* Try to get more packets going. */ 1725 gem_init_locked(sc); 1726 gem_start_locked(ifp); 1727 return (EJUSTRETURN); 1728 } 1729 1730 static void 1731 gem_mifinit(struct gem_softc *sc) 1732 { 1733 1734 /* Configure the MIF in frame mode. */ 1735 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, 1736 GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1737 } 1738 1739 /* 1740 * MII interface 1741 * 1742 * The GEM MII interface supports at least three different operating modes: 1743 * 1744 * Bitbang mode is implemented using data, clock and output enable registers. 1745 * 1746 * Frame mode is implemented by loading a complete frame into the frame 1747 * register and polling the valid bit for completion. 1748 * 1749 * Polling mode uses the frame register but completion is indicated by 1750 * an interrupt. 1751 * 1752 */ 1753 int 1754 gem_mii_readreg(device_t dev, int phy, int reg) 1755 { 1756 struct gem_softc *sc; 1757 int n; 1758 uint32_t v; 1759 1760 #ifdef GEM_DEBUG_PHY 1761 printf("%s: phy %d reg %d\n", __func__, phy, reg); 1762 #endif 1763 1764 sc = device_get_softc(dev); 1765 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 1766 return (0); 1767 1768 if ((sc->sc_flags & GEM_SERDES) != 0) { 1769 switch (reg) { 1770 case MII_BMCR: 1771 reg = GEM_MII_CONTROL; 1772 break; 1773 case MII_BMSR: 1774 reg = GEM_MII_STATUS; 1775 break; 1776 case MII_PHYIDR1: 1777 case MII_PHYIDR2: 1778 return (0); 1779 case MII_ANAR: 1780 reg = GEM_MII_ANAR; 1781 break; 1782 case MII_ANLPAR: 1783 reg = GEM_MII_ANLPAR; 1784 break; 1785 case MII_EXTSR: 1786 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 1787 default: 1788 device_printf(sc->sc_dev, 1789 "%s: unhandled register %d\n", __func__, reg); 1790 return (0); 1791 } 1792 return (GEM_BANK1_READ_4(sc, reg)); 1793 } 1794 1795 /* Construct the frame command. 
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_BANK1_WRITE_4(sc, reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

#ifdef GEM_DEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
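/*
 * Note on the IFF_LINK0 handling in gem_ioctl() above: toggling the link0
 * link option from userland is meant to switch UDP transmit checksum
 * offloading on and off at runtime, roughly (interface name assumed):
 *
 *	ifconfig gem0 link0	add UDP to the TX checksum offload features
 *	ifconfig gem0 -link0	fall back to TCP-only offloading
 *
 * The updated feature set only reaches if_hwassist while IFCAP_TXCSUM is
 * enabled.
 */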
static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast)
	 * and the hash filter.  The appropriate bit is re-enabled below,
	 * depending on the case.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
	    0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high
	 * order 8 bits as an index into the 256-bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).  A worked example follows this function.
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_BANK1_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}
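/*
 * Worked example for the logical address filter computation in
 * gem_setladrf() above (illustrative only, values chosen arbitrarily):
 * for a multicast address whose little-endian CRC32 has 0xa7 as its most
 * significant byte, crc >> 24 yields 0xa7, so the word index is
 * 0xa7 >> 4 == 10 and the bit index is 15 - (0xa7 & 15) == 8; the loop
 * therefore sets hash[10] |= 1 << 8, i.e. bit 8 of the eleventh 16-bit
 * hash word.
 */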