/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware does not compensate the checksum for UDP datagrams,
 * which can yield 0x0.  As a safeguard, UDP checksum offload is
 * disabled by default.  It can be re-enabled by setting the link0
 * option with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)

static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static int	gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))

int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	if (error != 0) {
		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	sc->sc_csum_features = GEM_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static __inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */
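	/*
	 * Note: the hardware checksum is computed starting at the offset
	 * programmed via GEM_RX_CONFIG_CXM_START_SHFT (ETHER_HDR_LEN +
	 * sizeof(struct ip), see gem_init_locked()), i.e. right after a
	 * standard 20-byte IP header, so any IP options end up in the
	 * hardware sum and are subtracted out again further below.
	 */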

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;		/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision and error counters.
	 */
	ifp->if_collisions +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT);
	v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v;
	ifp->if_oerrors += v;
	ifp->if_ierrors +=
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL);

	/*
	 * Then clear the hardware counters.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
    uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}

static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	/* NOTE: we use only 32-bit DMA addresses here. */
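	/*
	 * Reprogram the ring base, kick register and RX configuration with
	 * the same values gem_init_locked() uses for a full initialization;
	 * only the receiver related registers are touched here.
	 */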
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}

static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
	    0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

static int
gem_disable_rx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
	    0));
}

static int
gem_disable_tx(struct gem_softc *sc)
{

	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
	    0));
}

static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return (0);
}

static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %u\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}

static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	gem_stop(ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8.  Global Configuration & Interrupt Mask */
	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
#ifdef GEM_DEBUG
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9.  ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = gem_ringsize(GEM_NTXDESC /* XXX */);
	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
	    GEM_TX_CONFIG_TXFIFO_TH;
	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);

	/* Enable DMA. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);

	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));

	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);

	/* step 13.  TX_MAC Configuration Register */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
	v |= GEM_MAC_TX_ENABLE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot configure TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, offset, seg;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
		    ((offset + m->m_pkthdr.csum_data) <<
		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= GEM_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		flags |= GEM_TD_INTERRUPT_ME;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
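
	/*
	 * GEM_TD_INTERRUPT_ME above is only requested for roughly every
	 * (GEM_NTXSEGS * 2 / 3)th packet (tracked via sc_txwin) in order
	 * to keep the TX completion interrupt load down; gem_tint() then
	 * reclaims all descriptors completed so far in a single pass.
	 */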

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}

static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* magic values */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);

		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame and max burst size */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* dunno... */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);

		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	/* Set the station address. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}

static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int ntx;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: kicking TX %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
		GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);

		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}
}

static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int txlast, progress;
#ifdef GEM_DEBUG
	int i;

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
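		/*
		 * If the completion index still lies within this packet's
		 * [firstdesc, lastdesc] range (taking ring wrap-around
		 * into account), the packet has not been fully processed
		 * yet and reclaiming stops here.
		 */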
		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		gem_start_locked(ifp);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf("    rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}
#endif

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and must be incremented in multiples of
		 * 4 (because the DMA engine fetches/updates descriptors
		 * in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += 2; /* We're already off by two */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
#endif
}

static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
}

void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = GEM_BANK1_READ_4(sc, GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__, (status >> 19),
	    (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 =
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if ((status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (status & GEM_INTR_TX_MAC) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happen often due to a
		 * silicon bug so handle them silently.  Moreover, it's
		 * likely that the receiver has hung so we reset it.
		 */
		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
			sc->sc_ifp->if_ierrors++;
			gem_reset_rxdma(sc);
		} else if ((status2 &
		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}
	GEM_UNLOCK(sc);
}

static int
gem_watchdog(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM,
	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM,
	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	gem_start_locked(ifp);
	return (EJUSTRETURN);
}

static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
}

/*
 * MII interface
 *
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_BANK1_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_BANK1_WRITE_4(sc, reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

#ifdef GEM_DEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all
	 * multicast) and the hash filter.  Depending on the case, the
	 * right bit will be enabled below.
	 */
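	/*
	 * The hash filter is not only cleared in the RX configuration but
	 * also explicitly disabled (and the disable confirmed) before the
	 * hash table is rewritten further down, presumably so the chip
	 * never matches frames against a half-updated table.
	 */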
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
	    0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_BANK1_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}
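
/*
 * Illustrative sketch of how a single multicast address ends up in the
 * 256-bit logical address filter programmed by gem_setladrf() above,
 * with "enaddr" standing for the six bytes of that address:
 *
 *	crc = ether_crc32_le(enaddr, ETHER_ADDR_LEN) >> 24;
 *	hash[crc >> 4] |= 1 << (15 - (crc & 15));
 *
 * The upper four bits of the truncated CRC select one of the 16 16-bit
 * GEM_MAC_HASH registers and the lower four bits select the bit within
 * that register, with bit 0 corresponding to the MSB.
 */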