/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware does not compensate the checksum for UDP datagrams, which
 * can yield a checksum of 0x0.  As a safeguard, UDP checksum offloading is
 * disabled by default.  It can be reactivated by setting the special link
 * option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)
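/*
 * For example, to re-enable UDP checksum offloading on gem0:
 *	ifconfig gem0 link0
 */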

static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
		    uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static struct mbuf *gem_defrag(struct mbuf *m0, int how, int maxfrags);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static __inline void gem_txcksum(struct gem_softc *sc, struct mbuf *m,
		    uint64_t *cflags);
static int	gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

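	/*
	 * Create a parent DMA tag limited to 32-bit addresses and derive
	 * three child tags from it: one for single-cluster RX buffers, one
	 * for TX mbuf chains of up to GEM_NTXSEGS segments and one for the
	 * page-aligned control data (the descriptor rings).
	 */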
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	if (error != 0) {
		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size. */
	sc->sc_rxfifosize = 64 *
	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size. */
	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	sc->sc_csum_features = GEM_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	for (i = 0; i < GEM_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static __inline void
gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
{
	char *p;
	struct ip *ip;
	struct mbuf *m0;
	uint64_t offset, offset2;

	m0 = m;
	offset = sizeof(struct ip) + ETHER_HDR_LEN;
	for (; m && m->m_len == 0; m = m->m_next)
		;
	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
		    __func__);
		/* Checksum will be corrupted. */
		m = m0;
		goto sendit;
	}
	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
		if (m->m_len != ETHER_HDR_LEN) {
			device_printf(sc->sc_dev,
			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
			/* Checksum will be corrupted. */
			m = m0;
			goto sendit;
		}
		for (m = m->m_next; m && m->m_len == 0; m = m->m_next)
			;
		if (m == NULL) {
			/* Checksum will be corrupted. */
			m = m0;
			goto sendit;
		}
		ip = mtod(m, struct ip *);
	} else {
		p = mtod(m, uint8_t *);
		p += ETHER_HDR_LEN;
		ip = (struct ip *)p;
	}
	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;

sendit:
	offset2 = m->m_pkthdr.csum_data;
	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
	*cflags |= GEM_TD_CXSUM_ENABLE;
}

static __inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;
	uint16_t cksum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

static void
gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct gem_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_tick(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters.
	 */
	ifp->if_collisions +=
	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);

	/*
	 * Then clear the hardware counters.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

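/*
 * Poll register r until all bits in clr are clear and all bits in set are
 * set, giving up after TRIES iterations of DELAY(100) (about one second).
 * Returns 1 on success and 0 on timeout.
 */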
static int
gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_read_4(sc->sc_res[0], r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

static void
gem_reset(struct gem_softc *sc)
{

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset. */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

static void
gem_rxdrain(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static void
gem_stop(struct ifnet *ifp, int disable)
{
	struct gem_softc *sc = ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}

static int
gem_reset_rx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc. in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /* XXX */) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) |
	    GEM_RX_CONFIG_RXDMA_EN);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) |
	    GEM_MAC_RX_ENABLE);
}

static int
gem_reset_tx(struct gem_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

static int
gem_disable_rx(struct gem_softc *sc)
{
	uint32_t cfg;

	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
static int
gem_disable_tx(struct gem_softc *sc)
{
	uint32_t cfg;

	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

static int
gem_meminit(struct gem_softc *sc)
{
	struct gem_rxsoft *rxs;
	int error, i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev,
				    "unable to allocate or map RX buffer %d, "
				    "error = %d\n", i, error);
				/*
				 * XXX we should attempt to run with fewer
				 * receive buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static u_int
gem_ringsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %d\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}

static void
gem_init(void *xsc)
{
	struct gem_softc *sc = xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8.  Global Configuration & Interrupt Mask */
	bus_write_4(sc->sc_res[0], GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
#ifdef GEM_DEBUG
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9.  ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = gem_ringsize(GEM_NTXDESC /* XXX */);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
	    v | GEM_TX_CONFIG_TXDMA_EN |
	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);

	/* Enable DMA. */
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);

	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));

	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
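	/*
	 * Disable the RX MAC and wait for the enable bit to clear before
	 * writing the new configuration, so the setting is not changed
	 * while the MAC is busy receiving.
	 */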
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_STRIP_CRC;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifflags = ifp->if_flags;

	sc->sc_flags &= ~GEM_LINK;
	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * This is a copy of ath_defrag() from ath(4).
 *
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
gem_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;	/* NB: not the first mbuf. */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return (m0);
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return (NULL);
}

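/*
 * Map the mbuf chain *m_head for transmission and fill TX descriptors for
 * it, defragmenting the chain to at most GEM_NTXSEGS mbufs if necessary.
 * Note that *m_head may be freed and set to NULL on failure.
 */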
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct gem_txsoft *txs;
	struct mbuf *m;
	uint64_t cflags, flags;
	int error, nexttx, nsegs, seg;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	flags = cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
		gem_txcksum(sc, *m_head, &cflags);

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR6(KTR_GEM,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOP on the last descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor. */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
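	/*
	 * Request a TX completion interrupt only for roughly every
	 * 2/3 * GEM_NTXSEGS packets (tracked via sc_txwin) rather than
	 * for every packet, keeping the TX interrupt rate down.
	 */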
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		flags |= GEM_TD_INTERRUPT_ME;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}

static void
gem_init_regs(struct gem_softc *sc)
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* magic values */
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);

		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME,
		    ETHER_MIN_LEN);
		/* max frame and max burst size */
		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* dunno... */
		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC address: 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);

		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed. */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	bus_write_4(sc->sc_res[0], GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	/* Set the station address. */
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0,
	    (laddr[4] << 8) | laddr[5]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1,
	    (laddr[2] << 8) | laddr[3]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2,
	    (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG,
	    GEM_MAC_XIF_TX_MII_ENA);
}

static void
gem_start(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int ntx;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: kicking TX %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		bus_write_4(sc->sc_res[0], GEM_TX_KICK, sc->sc_txnext);

		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}
}

static void
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int txlast, progress;
#ifdef GEM_DEBUG
	int i;

	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n",
				    (long long)GEM_DMA_READ(sc,
				    sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__, bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
	    ((long long)bus_read_4(sc->sc_res[0],
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO),
	    bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			gem_start_locked(ifp);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(void *arg)
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

static void
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev,
			    "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

#ifdef GEM_DEBUG
		if ((ifp->if_flags & IFF_DEBUG) != 0) {
			printf(" rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n",
			    (long long)GEM_DMA_READ(sc,
			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
		}
#endif

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and must be incremented in multiples of
		 * 4 (because the DMA engine fetches/updates descriptors
		 * in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
			bus_write_4(sc->sc_res[0], GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += 2;	/* We're already off by two. */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
#endif
}

static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
}

void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = bus_read_4(sc->sc_res[0], GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__, (status >> 19),
	    (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 =
		    bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) |
		    bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to non-PAUSE state\n",
			    __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if ((status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (status & GEM_INTR_TX_MAC) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
		if ((status2 &
		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
		if ((status2 &
		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
		 * silicon bug so handle it silently.  Moreover, it's
		 * likely that the receiver has hung so we reset it.
		 */
		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
			sc->sc_ifp->if_ierrors++;
			gem_reset_rxdma(sc);
		} else if ((status2 &
		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}
	GEM_UNLOCK(sc);
}

static int
gem_watchdog(struct gem_softc *sc)
{

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM,
	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
	    __func__, bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM,
	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
	    __func__, bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
	    bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
	    bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	++sc->sc_ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	return (EJUSTRETURN);
}

static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
	    GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (bus_read_4(sc->sc_res[0], reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANAR:
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0);
			bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val);
			bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		bus_write_4(sc->sc_res[0], reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

#ifdef GEM_DEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as the enabling of the RX and TX MAC goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of the
	 * GEM Gigabit Ethernet ASIC Specification.
	 */

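	/*
	 * The TX and RX MACs are presumably required to be disabled (and
	 * observed to have actually stopped via gem_bitwait()) before their
	 * configuration registers may be rewritten, which is why they are
	 * only re-enabled again at the end of this function once the link
	 * is known to be up.
	 */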
	rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);

	v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast)
	 * and the hash filter.  Depending on the case, the right bit will be
	 * enabled below.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 */
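	/*
	 * For illustration only: a CRC whose top 8 bits are 0xa5 selects
	 * word 0xa (the upper nibble) and bit 5 (the lower nibble) of that
	 * word; since bit 0 is the MSB, the code below sets
	 * hash[10] |= 1 << (15 - 5), i.e. hash[10] |= 0x0400.
	 */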

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}