1 /*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 /* 35 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 36 */ 37 38 #if 0 39 #define GEM_DEBUG 40 #endif 41 42 #if 0 /* XXX: In case of emergency, re-enable this. */ 43 #define GEM_RINT_TIMEOUT 44 #endif 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/bus.h> 49 #include <sys/callout.h> 50 #include <sys/endian.h> 51 #include <sys/mbuf.h> 52 #include <sys/malloc.h> 53 #include <sys/kernel.h> 54 #include <sys/lock.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/socket.h> 58 #include <sys/sockio.h> 59 #include <sys/rman.h> 60 61 #include <net/bpf.h> 62 #include <net/ethernet.h> 63 #include <net/if.h> 64 #include <net/if_arp.h> 65 #include <net/if_dl.h> 66 #include <net/if_media.h> 67 #include <net/if_types.h> 68 #include <net/if_vlan_var.h> 69 70 #include <netinet/in.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/tcp.h> 74 #include <netinet/udp.h> 75 76 #include <machine/bus.h> 77 78 #include <dev/mii/mii.h> 79 #include <dev/mii/miivar.h> 80 81 #include <dev/gem/if_gemreg.h> 82 #include <dev/gem/if_gemvar.h> 83 84 CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192); 85 CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192); 86 87 #define GEM_TRIES 10000 88 89 /* 90 * The hardware supports basic TCP/UDP checksum offloading. However, 91 * the hardware doesn't compensate the checksum for UDP datagram which 92 * can yield to 0x0. As a safe guard, UDP checksum offload is disabled 93 * by default. It can be reactivated by setting special link option 94 * link0 with ifconfig(8). 95 */ 96 #define GEM_CSUM_FEATURES (CSUM_TCP) 97 98 static int gem_add_rxbuf(struct gem_softc *sc, int idx); 99 static int gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, 100 uint32_t clr, uint32_t set); 101 static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, 102 int nsegs, int error); 103 static int gem_disable_rx(struct gem_softc *sc); 104 static int gem_disable_tx(struct gem_softc *sc); 105 static void gem_eint(struct gem_softc *sc, u_int status); 106 static void gem_init(void *xsc); 107 static void gem_init_locked(struct gem_softc *sc); 108 static void gem_init_regs(struct gem_softc *sc); 109 static int gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 110 static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); 111 static int gem_meminit(struct gem_softc *sc); 112 static void gem_mifinit(struct gem_softc *sc); 113 static void gem_reset(struct gem_softc *sc); 114 static int gem_reset_rx(struct gem_softc *sc); 115 static void gem_reset_rxdma(struct gem_softc *sc); 116 static int gem_reset_tx(struct gem_softc *sc); 117 static u_int gem_ringsize(u_int sz); 118 static void gem_rint(struct gem_softc *sc); 119 #ifdef GEM_RINT_TIMEOUT 120 static void gem_rint_timeout(void *arg); 121 #endif 122 static inline void gem_rxcksum(struct mbuf *m, uint64_t flags); 123 static void gem_rxdrain(struct gem_softc *sc); 124 static void gem_setladrf(struct gem_softc *sc); 125 static void gem_start(struct ifnet *ifp); 126 static void gem_start_locked(struct ifnet *ifp); 127 static void gem_stop(struct ifnet *ifp, int disable); 128 static void gem_tick(void *arg); 129 static void gem_tint(struct gem_softc *sc); 130 static inline void gem_txkick(struct gem_softc *sc); 131 static int gem_watchdog(struct gem_softc *sc); 132 133 devclass_t gem_devclass; 134 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 135 MODULE_DEPEND(gem, miibus, 1, 1, 1); 136 137 #ifdef GEM_DEBUG 138 #include <sys/ktr.h> 139 #define KTR_GEM KTR_CT2 140 #endif 141 142 #define GEM_BANK1_BITWAIT(sc, r, clr, set) \ 143 gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set)) 144 #define GEM_BANK2_BITWAIT(sc, r, clr, set) \ 145 gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set)) 146 147 int 148 gem_attach(struct gem_softc *sc) 149 { 150 struct gem_txsoft *txs; 151 struct ifnet *ifp; 152 int error, i; 153 uint32_t v; 154 155 if (bootverbose) 156 device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags); 157 158 /* Set up ifnet structure. */ 159 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 160 if (ifp == NULL) 161 return (ENOSPC); 162 sc->sc_csum_features = GEM_CSUM_FEATURES; 163 ifp->if_softc = sc; 164 if_initname(ifp, device_get_name(sc->sc_dev), 165 device_get_unit(sc->sc_dev)); 166 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 167 ifp->if_start = gem_start; 168 ifp->if_ioctl = gem_ioctl; 169 ifp->if_init = gem_init; 170 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN); 171 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN; 172 IFQ_SET_READY(&ifp->if_snd); 173 174 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 175 #ifdef GEM_RINT_TIMEOUT 176 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 177 #endif 178 179 /* Make sure the chip is stopped. */ 180 gem_reset(sc); 181 182 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 183 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 184 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 185 NULL, &sc->sc_pdmatag); 186 if (error != 0) 187 goto fail_ifnet; 188 189 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 190 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 191 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 192 if (error != 0) 193 goto fail_ptag; 194 195 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 196 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 197 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 198 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 199 if (error != 0) 200 goto fail_rtag; 201 202 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 203 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 204 sizeof(struct gem_control_data), 1, 205 sizeof(struct gem_control_data), 0, 206 NULL, NULL, &sc->sc_cdmatag); 207 if (error != 0) 208 goto fail_ttag; 209 210 /* 211 * Allocate the control data structures, create and load the 212 * DMA map for it. 213 */ 214 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 215 (void **)&sc->sc_control_data, 216 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 217 &sc->sc_cddmamap)) != 0) { 218 device_printf(sc->sc_dev, 219 "unable to allocate control data, error = %d\n", error); 220 goto fail_ctag; 221 } 222 223 sc->sc_cddma = 0; 224 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 225 sc->sc_control_data, sizeof(struct gem_control_data), 226 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 227 device_printf(sc->sc_dev, 228 "unable to load control data DMA map, error = %d\n", 229 error); 230 goto fail_cmem; 231 } 232 233 /* 234 * Initialize the transmit job descriptors. 235 */ 236 STAILQ_INIT(&sc->sc_txfreeq); 237 STAILQ_INIT(&sc->sc_txdirtyq); 238 239 /* 240 * Create the transmit buffer DMA maps. 241 */ 242 error = ENOMEM; 243 for (i = 0; i < GEM_TXQUEUELEN; i++) { 244 txs = &sc->sc_txsoft[i]; 245 txs->txs_mbuf = NULL; 246 txs->txs_ndescs = 0; 247 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 248 &txs->txs_dmamap)) != 0) { 249 device_printf(sc->sc_dev, 250 "unable to create TX DMA map %d, error = %d\n", 251 i, error); 252 goto fail_txd; 253 } 254 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 255 } 256 257 /* 258 * Create the receive buffer DMA maps. 259 */ 260 for (i = 0; i < GEM_NRXDESC; i++) { 261 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 262 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 263 device_printf(sc->sc_dev, 264 "unable to create RX DMA map %d, error = %d\n", 265 i, error); 266 goto fail_rxd; 267 } 268 sc->sc_rxsoft[i].rxs_mbuf = NULL; 269 } 270 271 /* Bad things will happen when touching this register on ERI. */ 272 if (sc->sc_variant != GEM_SUN_ERI) 273 GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 274 GEM_MII_DATAPATH_MII); 275 276 gem_mifinit(sc); 277 278 /* 279 * Look for an external PHY. 280 */ 281 error = ENXIO; 282 v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG); 283 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 284 v |= GEM_MIF_CONFIG_PHY_SEL; 285 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v); 286 switch (sc->sc_variant) { 287 case GEM_SUN_ERI: 288 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 289 break; 290 default: 291 sc->sc_phyad = -1; 292 break; 293 } 294 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 295 gem_mediachange, gem_mediastatus); 296 } 297 298 /* 299 * Fall back on an internal PHY if no external PHY was found. 300 */ 301 if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) { 302 v &= ~GEM_MIF_CONFIG_PHY_SEL; 303 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v); 304 switch (sc->sc_variant) { 305 case GEM_SUN_ERI: 306 case GEM_APPLE_K2_GMAC: 307 sc->sc_phyad = GEM_PHYAD_INTERNAL; 308 break; 309 case GEM_APPLE_GMAC: 310 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 311 break; 312 default: 313 sc->sc_phyad = -1; 314 break; 315 } 316 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 317 gem_mediachange, gem_mediastatus); 318 } 319 320 /* 321 * Try the external PCS SERDES if we didn't find any PHYs. 322 */ 323 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 324 GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 325 GEM_MII_DATAPATH_SERDES); 326 GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 327 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 328 GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 329 sc->sc_flags |= GEM_SERDES; 330 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 331 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 332 gem_mediachange, gem_mediastatus); 333 } 334 335 if (error != 0) { 336 device_printf(sc->sc_dev, "PHY probe failed: %d\n", error); 337 goto fail_rxd; 338 } 339 sc->sc_mii = device_get_softc(sc->sc_miibus); 340 341 /* 342 * From this point forward, the attachment cannot fail. A failure 343 * before this point releases all resources that may have been 344 * allocated. 345 */ 346 347 /* Get RX FIFO size. */ 348 sc->sc_rxfifosize = 64 * 349 GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE); 350 351 /* Get TX FIFO size. */ 352 v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE); 353 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 354 sc->sc_rxfifosize / 1024, v / 16); 355 356 /* Attach the interface. */ 357 ether_ifattach(ifp, sc->sc_enaddr); 358 359 /* 360 * Tell the upper layer(s) we support long frames/checksum offloads. 361 */ 362 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 363 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 364 ifp->if_hwassist |= sc->sc_csum_features; 365 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 366 367 return (0); 368 369 /* 370 * Free any resources we've allocated during the failed attach 371 * attempt. Do this in reverse order and fall through. 372 */ 373 fail_rxd: 374 for (i = 0; i < GEM_NRXDESC; i++) 375 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 376 bus_dmamap_destroy(sc->sc_rdmatag, 377 sc->sc_rxsoft[i].rxs_dmamap); 378 fail_txd: 379 for (i = 0; i < GEM_TXQUEUELEN; i++) 380 if (sc->sc_txsoft[i].txs_dmamap != NULL) 381 bus_dmamap_destroy(sc->sc_tdmatag, 382 sc->sc_txsoft[i].txs_dmamap); 383 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 384 fail_cmem: 385 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 386 sc->sc_cddmamap); 387 fail_ctag: 388 bus_dma_tag_destroy(sc->sc_cdmatag); 389 fail_ttag: 390 bus_dma_tag_destroy(sc->sc_tdmatag); 391 fail_rtag: 392 bus_dma_tag_destroy(sc->sc_rdmatag); 393 fail_ptag: 394 bus_dma_tag_destroy(sc->sc_pdmatag); 395 fail_ifnet: 396 if_free(ifp); 397 return (error); 398 } 399 400 void 401 gem_detach(struct gem_softc *sc) 402 { 403 struct ifnet *ifp = sc->sc_ifp; 404 int i; 405 406 ether_ifdetach(ifp); 407 GEM_LOCK(sc); 408 gem_stop(ifp, 1); 409 GEM_UNLOCK(sc); 410 callout_drain(&sc->sc_tick_ch); 411 #ifdef GEM_RINT_TIMEOUT 412 callout_drain(&sc->sc_rx_ch); 413 #endif 414 if_free(ifp); 415 device_delete_child(sc->sc_dev, sc->sc_miibus); 416 417 for (i = 0; i < GEM_NRXDESC; i++) 418 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 419 bus_dmamap_destroy(sc->sc_rdmatag, 420 sc->sc_rxsoft[i].rxs_dmamap); 421 for (i = 0; i < GEM_TXQUEUELEN; i++) 422 if (sc->sc_txsoft[i].txs_dmamap != NULL) 423 bus_dmamap_destroy(sc->sc_tdmatag, 424 sc->sc_txsoft[i].txs_dmamap); 425 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 426 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 427 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 428 sc->sc_cddmamap); 429 bus_dma_tag_destroy(sc->sc_cdmatag); 430 bus_dma_tag_destroy(sc->sc_tdmatag); 431 bus_dma_tag_destroy(sc->sc_rdmatag); 432 bus_dma_tag_destroy(sc->sc_pdmatag); 433 } 434 435 void 436 gem_suspend(struct gem_softc *sc) 437 { 438 struct ifnet *ifp = sc->sc_ifp; 439 440 GEM_LOCK(sc); 441 gem_stop(ifp, 0); 442 GEM_UNLOCK(sc); 443 } 444 445 void 446 gem_resume(struct gem_softc *sc) 447 { 448 struct ifnet *ifp = sc->sc_ifp; 449 450 GEM_LOCK(sc); 451 /* 452 * On resume all registers have to be initialized again like 453 * after power-on. 454 */ 455 sc->sc_flags &= ~GEM_INITED; 456 if (ifp->if_flags & IFF_UP) 457 gem_init_locked(sc); 458 GEM_UNLOCK(sc); 459 } 460 461 static inline void 462 gem_rxcksum(struct mbuf *m, uint64_t flags) 463 { 464 struct ether_header *eh; 465 struct ip *ip; 466 struct udphdr *uh; 467 uint16_t *opts; 468 int32_t hlen, len, pktlen; 469 uint32_t temp32; 470 uint16_t cksum; 471 472 pktlen = m->m_pkthdr.len; 473 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 474 return; 475 eh = mtod(m, struct ether_header *); 476 if (eh->ether_type != htons(ETHERTYPE_IP)) 477 return; 478 ip = (struct ip *)(eh + 1); 479 if (ip->ip_v != IPVERSION) 480 return; 481 482 hlen = ip->ip_hl << 2; 483 pktlen -= sizeof(struct ether_header); 484 if (hlen < sizeof(struct ip)) 485 return; 486 if (ntohs(ip->ip_len) < hlen) 487 return; 488 if (ntohs(ip->ip_len) != pktlen) 489 return; 490 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 491 return; /* Cannot handle fragmented packet. */ 492 493 switch (ip->ip_p) { 494 case IPPROTO_TCP: 495 if (pktlen < (hlen + sizeof(struct tcphdr))) 496 return; 497 break; 498 case IPPROTO_UDP: 499 if (pktlen < (hlen + sizeof(struct udphdr))) 500 return; 501 uh = (struct udphdr *)((uint8_t *)ip + hlen); 502 if (uh->uh_sum == 0) 503 return; /* no checksum */ 504 break; 505 default: 506 return; 507 } 508 509 cksum = ~(flags & GEM_RD_CHECKSUM); 510 /* checksum fixup for IP options */ 511 len = hlen - sizeof(struct ip); 512 if (len > 0) { 513 opts = (uint16_t *)(ip + 1); 514 for (; len > 0; len -= sizeof(uint16_t), opts++) { 515 temp32 = cksum - *opts; 516 temp32 = (temp32 >> 16) + (temp32 & 65535); 517 cksum = temp32 & 65535; 518 } 519 } 520 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 521 m->m_pkthdr.csum_data = cksum; 522 } 523 524 static void 525 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 526 { 527 struct gem_softc *sc = xsc; 528 529 if (error != 0) 530 return; 531 if (nsegs != 1) 532 panic("%s: bad control buffer segment count", __func__); 533 sc->sc_cddma = segs[0].ds_addr; 534 } 535 536 static void 537 gem_tick(void *arg) 538 { 539 struct gem_softc *sc = arg; 540 struct ifnet *ifp = sc->sc_ifp; 541 uint32_t v; 542 543 GEM_LOCK_ASSERT(sc, MA_OWNED); 544 545 /* 546 * Unload collision and error counters. 547 */ 548 ifp->if_collisions += 549 GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) + 550 GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT); 551 v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) + 552 GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT); 553 ifp->if_collisions += v; 554 ifp->if_oerrors += v; 555 ifp->if_ierrors += 556 GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) + 557 GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) + 558 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) + 559 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL); 560 561 /* 562 * Then clear the hardware counters. 563 */ 564 GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 565 GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 566 GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 567 GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 568 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 569 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 570 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 571 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 572 573 mii_tick(sc->sc_mii); 574 575 if (gem_watchdog(sc) == EJUSTRETURN) 576 return; 577 578 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 579 } 580 581 static int 582 gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr, 583 uint32_t set) 584 { 585 int i; 586 uint32_t reg; 587 588 for (i = GEM_TRIES; i--; DELAY(100)) { 589 reg = GEM_BANKN_READ_M(bank, 4, sc, r); 590 if ((reg & clr) == 0 && (reg & set) == set) 591 return (1); 592 } 593 return (0); 594 } 595 596 static void 597 gem_reset(struct gem_softc *sc) 598 { 599 600 #ifdef GEM_DEBUG 601 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 602 #endif 603 gem_reset_rx(sc); 604 gem_reset_tx(sc); 605 606 /* Do a full reset. */ 607 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 608 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 609 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 610 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 611 device_printf(sc->sc_dev, "cannot reset device\n"); 612 } 613 614 static void 615 gem_rxdrain(struct gem_softc *sc) 616 { 617 struct gem_rxsoft *rxs; 618 int i; 619 620 for (i = 0; i < GEM_NRXDESC; i++) { 621 rxs = &sc->sc_rxsoft[i]; 622 if (rxs->rxs_mbuf != NULL) { 623 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 624 BUS_DMASYNC_POSTREAD); 625 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 626 m_freem(rxs->rxs_mbuf); 627 rxs->rxs_mbuf = NULL; 628 } 629 } 630 } 631 632 static void 633 gem_stop(struct ifnet *ifp, int disable) 634 { 635 struct gem_softc *sc = ifp->if_softc; 636 struct gem_txsoft *txs; 637 638 #ifdef GEM_DEBUG 639 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 640 #endif 641 642 callout_stop(&sc->sc_tick_ch); 643 #ifdef GEM_RINT_TIMEOUT 644 callout_stop(&sc->sc_rx_ch); 645 #endif 646 647 gem_reset_tx(sc); 648 gem_reset_rx(sc); 649 650 /* 651 * Release any queued transmit buffers. 652 */ 653 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 654 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 655 if (txs->txs_ndescs != 0) { 656 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 657 BUS_DMASYNC_POSTWRITE); 658 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 659 if (txs->txs_mbuf != NULL) { 660 m_freem(txs->txs_mbuf); 661 txs->txs_mbuf = NULL; 662 } 663 } 664 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 665 } 666 667 if (disable) 668 gem_rxdrain(sc); 669 670 /* 671 * Mark the interface down and cancel the watchdog timer. 672 */ 673 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 674 sc->sc_flags &= ~GEM_LINK; 675 sc->sc_wdog_timer = 0; 676 } 677 678 static int 679 gem_reset_rx(struct gem_softc *sc) 680 { 681 682 /* 683 * Resetting while DMA is in progress can cause a bus hang, so we 684 * disable DMA first. 685 */ 686 gem_disable_rx(sc); 687 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0); 688 GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4, 689 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 690 if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 691 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 692 693 /* Finally, reset the ERX. */ 694 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX); 695 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 696 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 697 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 698 0)) { 699 device_printf(sc->sc_dev, "cannot reset receiver\n"); 700 return (1); 701 } 702 return (0); 703 } 704 705 /* 706 * Reset the receiver DMA engine. 707 * 708 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 709 * etc in order to reset the receiver DMA engine only and not do a full 710 * reset which amongst others also downs the link and clears the FIFOs. 711 */ 712 static void 713 gem_reset_rxdma(struct gem_softc *sc) 714 { 715 int i; 716 717 if (gem_reset_rx(sc) != 0) 718 return (gem_init_locked(sc)); 719 for (i = 0; i < GEM_NRXDESC; i++) 720 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 721 GEM_UPDATE_RXDESC(sc, i); 722 sc->sc_rxptr = 0; 723 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 724 725 /* NOTE: we use only 32-bit DMA addresses here. */ 726 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 727 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 728 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 729 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 730 gem_ringsize(GEM_NRXDESC /* XXX */) | 731 ((ETHER_HDR_LEN + sizeof(struct ip)) << 732 GEM_RX_CONFIG_CXM_START_SHFT) | 733 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 734 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT)); 735 /* Adjust for the SBus clock probably isn't worth the fuzz. */ 736 GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING, 737 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 738 GEM_RX_BLANKING_TIME_SHIFT) | 6); 739 GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 740 (3 * sc->sc_rxfifosize / 256) | 741 ((sc->sc_rxfifosize / 256) << 12)); 742 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 743 GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); 744 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK, 745 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 746 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 747 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE); 748 } 749 750 static int 751 gem_reset_tx(struct gem_softc *sc) 752 { 753 754 /* 755 * Resetting while DMA is in progress can cause a bus hang, so we 756 * disable DMA first. 757 */ 758 gem_disable_tx(sc); 759 GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0); 760 GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4, 761 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 762 if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 763 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 764 765 /* Finally, reset the ETX. */ 766 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX); 767 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 768 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 769 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 770 0)) { 771 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 772 return (1); 773 } 774 return (0); 775 } 776 777 static int 778 gem_disable_rx(struct gem_softc *sc) 779 { 780 781 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 782 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE); 783 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 784 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 785 return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 786 0)); 787 } 788 789 static int 790 gem_disable_tx(struct gem_softc *sc) 791 { 792 793 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 794 GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE); 795 GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 796 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 797 return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 798 0)); 799 } 800 801 static int 802 gem_meminit(struct gem_softc *sc) 803 { 804 struct gem_rxsoft *rxs; 805 int error, i; 806 807 GEM_LOCK_ASSERT(sc, MA_OWNED); 808 809 /* 810 * Initialize the transmit descriptor ring. 811 */ 812 for (i = 0; i < GEM_NTXDESC; i++) { 813 sc->sc_txdescs[i].gd_flags = 0; 814 sc->sc_txdescs[i].gd_addr = 0; 815 } 816 sc->sc_txfree = GEM_MAXTXFREE; 817 sc->sc_txnext = 0; 818 sc->sc_txwin = 0; 819 820 /* 821 * Initialize the receive descriptor and receive job 822 * descriptor rings. 823 */ 824 for (i = 0; i < GEM_NRXDESC; i++) { 825 rxs = &sc->sc_rxsoft[i]; 826 if (rxs->rxs_mbuf == NULL) { 827 if ((error = gem_add_rxbuf(sc, i)) != 0) { 828 device_printf(sc->sc_dev, 829 "unable to allocate or map RX buffer %d, " 830 "error = %d\n", i, error); 831 /* 832 * XXX we should attempt to run with fewer 833 * receive buffers instead of just failing. 834 */ 835 gem_rxdrain(sc); 836 return (1); 837 } 838 } else 839 GEM_INIT_RXDESC(sc, i); 840 } 841 sc->sc_rxptr = 0; 842 843 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 844 845 return (0); 846 } 847 848 static u_int 849 gem_ringsize(u_int sz) 850 { 851 852 switch (sz) { 853 case 32: 854 return (GEM_RING_SZ_32); 855 case 64: 856 return (GEM_RING_SZ_64); 857 case 128: 858 return (GEM_RING_SZ_128); 859 case 256: 860 return (GEM_RING_SZ_256); 861 case 512: 862 return (GEM_RING_SZ_512); 863 case 1024: 864 return (GEM_RING_SZ_1024); 865 case 2048: 866 return (GEM_RING_SZ_2048); 867 case 4096: 868 return (GEM_RING_SZ_4096); 869 case 8192: 870 return (GEM_RING_SZ_8192); 871 default: 872 printf("%s: invalid ring size %d\n", __func__, sz); 873 return (GEM_RING_SZ_32); 874 } 875 } 876 877 static void 878 gem_init(void *xsc) 879 { 880 struct gem_softc *sc = xsc; 881 882 GEM_LOCK(sc); 883 gem_init_locked(sc); 884 GEM_UNLOCK(sc); 885 } 886 887 /* 888 * Initialization of interface; set up initialization block 889 * and transmit/receive descriptor rings. 890 */ 891 static void 892 gem_init_locked(struct gem_softc *sc) 893 { 894 struct ifnet *ifp = sc->sc_ifp; 895 uint32_t v; 896 897 GEM_LOCK_ASSERT(sc, MA_OWNED); 898 899 #ifdef GEM_DEBUG 900 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 901 __func__); 902 #endif 903 /* 904 * Initialization sequence. The numbered steps below correspond 905 * to the sequence outlined in section 6.3.5.1 in the Ethernet 906 * Channel Engine manual (part of the PCIO manual). 907 * See also the STP2002-STQ document from Sun Microsystems. 908 */ 909 910 /* step 1 & 2. Reset the Ethernet Channel. */ 911 gem_stop(ifp, 0); 912 gem_reset(sc); 913 #ifdef GEM_DEBUG 914 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 915 __func__); 916 #endif 917 918 /* Re-initialize the MIF. */ 919 gem_mifinit(sc); 920 921 /* step 3. Setup data structures in host memory. */ 922 if (gem_meminit(sc) != 0) 923 return; 924 925 /* step 4. TX MAC registers & counters */ 926 gem_init_regs(sc); 927 928 /* step 5. RX MAC registers & counters */ 929 gem_setladrf(sc); 930 931 /* step 6 & 7. Program Descriptor Ring Base Addresses. */ 932 /* NOTE: we use only 32-bit DMA addresses here. */ 933 GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0); 934 GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 935 936 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 937 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 938 #ifdef GEM_DEBUG 939 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", 940 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 941 #endif 942 943 /* step 8. Global Configuration & Interrupt Mask */ 944 945 /* 946 * Set the internal arbitration to "infinite" bursts of the 947 * maximum length of 31 * 64 bytes so DMA transfers aren't 948 * split up in cache line size chunks. This greatly improves 949 * RX performance. 950 * Enable silicon bug workarounds for the Apple variants. 951 */ 952 GEM_BANK1_WRITE_4(sc, GEM_CONFIG, 953 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 954 ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF : 955 GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ? 956 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 957 958 GEM_BANK1_WRITE_4(sc, GEM_INTMASK, 959 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 960 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 961 GEM_INTR_BERR 962 #ifdef GEM_DEBUG 963 | GEM_INTR_PCS | GEM_INTR_MIF 964 #endif 965 )); 966 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK, 967 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 968 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK, 969 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 970 GEM_MAC_TX_PEAK_EXP); 971 #ifdef GEM_DEBUG 972 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 973 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 974 #else 975 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 976 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 977 #endif 978 979 /* step 9. ETX Configuration: use mostly default values. */ 980 981 /* Enable DMA. */ 982 v = gem_ringsize(GEM_NTXDESC); 983 /* Set TX FIFO threshold and enable DMA. */ 984 v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) & 985 GEM_TX_CONFIG_TXFIFO_TH; 986 GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); 987 988 /* step 10. ERX Configuration */ 989 990 /* Encode Receive Descriptor ring size. */ 991 v = gem_ringsize(GEM_NRXDESC /* XXX */); 992 /* RX TCP/UDP checksum offset */ 993 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 994 GEM_RX_CONFIG_CXM_START_SHFT); 995 /* Set RX FIFO threshold, set first byte offset and enable DMA. */ 996 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 997 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 998 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) | 999 GEM_RX_CONFIG_RXDMA_EN); 1000 1001 /* Adjust for the SBus clock probably isn't worth the fuzz. */ 1002 GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING, 1003 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 1004 GEM_RX_BLANKING_TIME_SHIFT) | 6); 1005 1006 /* 1007 * The following value is for an OFF Threshold of about 3/4 full 1008 * and an ON Threshold of 1/4 full. 1009 */ 1010 GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 1011 (3 * sc->sc_rxfifosize / 256) | 1012 ((sc->sc_rxfifosize / 256) << 12)); 1013 1014 /* step 11. Configure Media. */ 1015 1016 /* step 12. RX_MAC Configuration Register */ 1017 v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG); 1018 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC; 1019 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0); 1020 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 1021 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1022 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1023 device_printf(sc->sc_dev, "cannot configure RX MAC\n"); 1024 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 1025 1026 /* step 13. TX_MAC Configuration Register */ 1027 v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG); 1028 v |= GEM_MAC_TX_ENABLE; 1029 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0); 1030 GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 1031 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1032 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 1033 device_printf(sc->sc_dev, "cannot configure TX MAC\n"); 1034 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v); 1035 1036 /* step 14. Issue Transmit Pending command. */ 1037 1038 /* step 15. Give the reciever a swift kick. */ 1039 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 1040 1041 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1042 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1043 1044 mii_mediachg(sc->sc_mii); 1045 1046 /* Start the one second timer. */ 1047 sc->sc_wdog_timer = 0; 1048 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1049 } 1050 1051 static int 1052 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 1053 { 1054 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1055 struct gem_txsoft *txs; 1056 struct ip *ip; 1057 struct mbuf *m; 1058 uint64_t cflags, flags; 1059 int error, nexttx, nsegs, offset, seg; 1060 1061 GEM_LOCK_ASSERT(sc, MA_OWNED); 1062 1063 /* Get a work queue entry. */ 1064 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1065 /* Ran out of descriptors. */ 1066 return (ENOBUFS); 1067 } 1068 1069 cflags = 0; 1070 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { 1071 if (M_WRITABLE(*m_head) == 0) { 1072 m = m_dup(*m_head, M_DONTWAIT); 1073 m_freem(*m_head); 1074 *m_head = m; 1075 if (m == NULL) 1076 return (ENOBUFS); 1077 } 1078 offset = sizeof(struct ether_header); 1079 m = m_pullup(*m_head, offset + sizeof(struct ip)); 1080 if (m == NULL) { 1081 *m_head = NULL; 1082 return (ENOBUFS); 1083 } 1084 ip = (struct ip *)(mtod(m, caddr_t) + offset); 1085 offset += (ip->ip_hl << 2); 1086 cflags = offset << GEM_TD_CXSUM_STARTSHFT | 1087 ((offset + m->m_pkthdr.csum_data) << 1088 GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE; 1089 *m_head = m; 1090 } 1091 1092 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1093 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1094 if (error == EFBIG) { 1095 m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS); 1096 if (m == NULL) { 1097 m_freem(*m_head); 1098 *m_head = NULL; 1099 return (ENOBUFS); 1100 } 1101 *m_head = m; 1102 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1103 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1104 BUS_DMA_NOWAIT); 1105 if (error != 0) { 1106 m_freem(*m_head); 1107 *m_head = NULL; 1108 return (error); 1109 } 1110 } else if (error != 0) 1111 return (error); 1112 /* If nsegs is wrong then the stack is corrupt. */ 1113 KASSERT(nsegs <= GEM_NTXSEGS, 1114 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1115 if (nsegs == 0) { 1116 m_freem(*m_head); 1117 *m_head = NULL; 1118 return (EIO); 1119 } 1120 1121 /* 1122 * Ensure we have enough descriptors free to describe 1123 * the packet. Note, we always reserve one descriptor 1124 * at the end of the ring as a termination point, in 1125 * order to prevent wrap-around. 1126 */ 1127 if (nsegs > sc->sc_txfree - 1) { 1128 txs->txs_ndescs = 0; 1129 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1130 return (ENOBUFS); 1131 } 1132 1133 txs->txs_ndescs = nsegs; 1134 txs->txs_firstdesc = sc->sc_txnext; 1135 nexttx = txs->txs_firstdesc; 1136 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1137 #ifdef GEM_DEBUG 1138 CTR6(KTR_GEM, 1139 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1140 __func__, seg, nexttx, txsegs[seg].ds_len, 1141 txsegs[seg].ds_addr, 1142 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr)); 1143 #endif 1144 sc->sc_txdescs[nexttx].gd_addr = 1145 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr); 1146 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1147 ("%s: segment size too large!", __func__)); 1148 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1149 sc->sc_txdescs[nexttx].gd_flags = 1150 GEM_DMA_WRITE(sc, flags | cflags); 1151 txs->txs_lastdesc = nexttx; 1152 } 1153 1154 /* Set EOP on the last descriptor. */ 1155 #ifdef GEM_DEBUG 1156 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 1157 __func__, seg, nexttx); 1158 #endif 1159 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1160 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET); 1161 1162 /* Lastly set SOP on the first descriptor. */ 1163 #ifdef GEM_DEBUG 1164 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 1165 __func__, seg, nexttx); 1166 #endif 1167 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1168 sc->sc_txwin = 0; 1169 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1170 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME | 1171 GEM_TD_START_OF_PACKET); 1172 } else 1173 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1174 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET); 1175 1176 /* Sync the DMA map. */ 1177 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1178 BUS_DMASYNC_PREWRITE); 1179 1180 #ifdef GEM_DEBUG 1181 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1182 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1183 txs->txs_ndescs); 1184 #endif 1185 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1186 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1187 txs->txs_mbuf = *m_head; 1188 1189 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1190 sc->sc_txfree -= txs->txs_ndescs; 1191 1192 return (0); 1193 } 1194 1195 static void 1196 gem_init_regs(struct gem_softc *sc) 1197 { 1198 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1199 1200 GEM_LOCK_ASSERT(sc, MA_OWNED); 1201 1202 /* These registers are not cleared on reset. */ 1203 if ((sc->sc_flags & GEM_INITED) == 0) { 1204 /* magic values */ 1205 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0); 1206 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8); 1207 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4); 1208 1209 /* min frame length */ 1210 GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1211 /* max frame length and max burst size */ 1212 GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME, 1213 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1214 1215 /* more magic values */ 1216 GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7); 1217 GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4); 1218 GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1219 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088); 1220 1221 /* random number seed */ 1222 GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED, 1223 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1224 1225 /* secondary MAC address: 0:0:0:0:0:0 */ 1226 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0); 1227 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0); 1228 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0); 1229 1230 /* MAC control address: 01:80:c2:00:00:01 */ 1231 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001); 1232 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200); 1233 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180); 1234 1235 /* MAC filter address: 0:0:0:0:0:0 */ 1236 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0); 1237 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0); 1238 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0); 1239 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0); 1240 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0); 1241 1242 sc->sc_flags |= GEM_INITED; 1243 } 1244 1245 /* Counters need to be zeroed. */ 1246 GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 1247 GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 1248 GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 1249 GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 1250 GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0); 1251 GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0); 1252 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0); 1253 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 1254 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 1255 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 1256 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 1257 1258 /* Set XOFF PAUSE time. */ 1259 GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1260 1261 /* Set the station address. */ 1262 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1263 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1264 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1265 1266 /* Enable MII outputs. */ 1267 GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); 1268 } 1269 1270 static void 1271 gem_start(struct ifnet *ifp) 1272 { 1273 struct gem_softc *sc = ifp->if_softc; 1274 1275 GEM_LOCK(sc); 1276 gem_start_locked(ifp); 1277 GEM_UNLOCK(sc); 1278 } 1279 1280 static inline void 1281 gem_txkick(struct gem_softc *sc) 1282 { 1283 1284 /* 1285 * Update the TX kick register. This register has to point to the 1286 * descriptor after the last valid one and for optimum performance 1287 * should be incremented in multiples of 4 (the DMA engine fetches/ 1288 * updates descriptors in batches of 4). 1289 */ 1290 #ifdef GEM_DEBUG 1291 CTR3(KTR_GEM, "%s: %s: kicking TX %d", 1292 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1293 #endif 1294 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1295 GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext); 1296 } 1297 1298 static void 1299 gem_start_locked(struct ifnet *ifp) 1300 { 1301 struct gem_softc *sc = ifp->if_softc; 1302 struct mbuf *m; 1303 int kicked, ntx; 1304 1305 GEM_LOCK_ASSERT(sc, MA_OWNED); 1306 1307 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1308 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1309 return; 1310 1311 #ifdef GEM_DEBUG 1312 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1313 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1314 sc->sc_txnext); 1315 #endif 1316 ntx = 0; 1317 kicked = 0; 1318 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1319 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1320 if (m == NULL) 1321 break; 1322 if (gem_load_txmbuf(sc, &m) != 0) { 1323 if (m == NULL) 1324 break; 1325 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1326 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1327 break; 1328 } 1329 if ((sc->sc_txnext % 4) == 0) { 1330 gem_txkick(sc); 1331 kicked = 1; 1332 } else 1333 kicked = 0; 1334 ntx++; 1335 BPF_MTAP(ifp, m); 1336 } 1337 1338 if (ntx > 0) { 1339 if (kicked == 0) 1340 gem_txkick(sc); 1341 #ifdef GEM_DEBUG 1342 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1343 device_get_name(sc->sc_dev), sc->sc_txnext); 1344 #endif 1345 1346 /* Set a watchdog timer in case the chip flakes out. */ 1347 sc->sc_wdog_timer = 5; 1348 #ifdef GEM_DEBUG 1349 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1350 device_get_name(sc->sc_dev), __func__, 1351 sc->sc_wdog_timer); 1352 #endif 1353 } 1354 } 1355 1356 static void 1357 gem_tint(struct gem_softc *sc) 1358 { 1359 struct ifnet *ifp = sc->sc_ifp; 1360 struct gem_txsoft *txs; 1361 int progress; 1362 uint32_t txlast; 1363 #ifdef GEM_DEBUG 1364 int i; 1365 1366 GEM_LOCK_ASSERT(sc, MA_OWNED); 1367 1368 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1369 #endif 1370 1371 /* 1372 * Go through our TX list and free mbufs for those 1373 * frames that have been transmitted. 1374 */ 1375 progress = 0; 1376 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1377 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1378 #ifdef GEM_DEBUG 1379 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1380 printf(" txsoft %p transmit chain:\n", txs); 1381 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1382 printf("descriptor %d: ", i); 1383 printf("gd_flags: 0x%016llx\t", 1384 (long long)GEM_DMA_READ(sc, 1385 sc->sc_txdescs[i].gd_flags)); 1386 printf("gd_addr: 0x%016llx\n", 1387 (long long)GEM_DMA_READ(sc, 1388 sc->sc_txdescs[i].gd_addr)); 1389 if (i == txs->txs_lastdesc) 1390 break; 1391 } 1392 } 1393 #endif 1394 1395 /* 1396 * In theory, we could harvest some descriptors before 1397 * the ring is empty, but that's a bit complicated. 1398 * 1399 * GEM_TX_COMPLETION points to the last descriptor 1400 * processed + 1. 1401 */ 1402 txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION); 1403 #ifdef GEM_DEBUG 1404 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1405 "txs->txs_lastdesc = %d, txlast = %d", 1406 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1407 #endif 1408 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1409 if ((txlast >= txs->txs_firstdesc) && 1410 (txlast <= txs->txs_lastdesc)) 1411 break; 1412 } else { 1413 /* Ick -- this command wraps. */ 1414 if ((txlast >= txs->txs_firstdesc) || 1415 (txlast <= txs->txs_lastdesc)) 1416 break; 1417 } 1418 1419 #ifdef GEM_DEBUG 1420 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 1421 #endif 1422 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1423 1424 sc->sc_txfree += txs->txs_ndescs; 1425 1426 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1427 BUS_DMASYNC_POSTWRITE); 1428 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1429 if (txs->txs_mbuf != NULL) { 1430 m_freem(txs->txs_mbuf); 1431 txs->txs_mbuf = NULL; 1432 } 1433 1434 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1435 1436 ifp->if_opackets++; 1437 progress = 1; 1438 } 1439 1440 #ifdef GEM_DEBUG 1441 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 1442 "GEM_TX_COMPLETION %x", 1443 __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE), 1444 ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) | 1445 GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO), 1446 GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION)); 1447 #endif 1448 1449 if (progress) { 1450 if (sc->sc_txfree == GEM_NTXDESC - 1) 1451 sc->sc_txwin = 0; 1452 1453 /* 1454 * We freed some descriptors, so reset IFF_DRV_OACTIVE 1455 * and restart. 1456 */ 1457 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1458 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1459 sc->sc_wdog_timer = 0; 1460 gem_start_locked(ifp); 1461 } 1462 1463 #ifdef GEM_DEBUG 1464 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1465 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1466 #endif 1467 } 1468 1469 #ifdef GEM_RINT_TIMEOUT 1470 static void 1471 gem_rint_timeout(void *arg) 1472 { 1473 struct gem_softc *sc = arg; 1474 1475 GEM_LOCK_ASSERT(sc, MA_OWNED); 1476 1477 gem_rint(sc); 1478 } 1479 #endif 1480 1481 static void 1482 gem_rint(struct gem_softc *sc) 1483 { 1484 struct ifnet *ifp = sc->sc_ifp; 1485 struct mbuf *m; 1486 uint64_t rxstat; 1487 uint32_t rxcomp; 1488 1489 GEM_LOCK_ASSERT(sc, MA_OWNED); 1490 1491 #ifdef GEM_RINT_TIMEOUT 1492 callout_stop(&sc->sc_rx_ch); 1493 #endif 1494 #ifdef GEM_DEBUG 1495 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1496 #endif 1497 1498 /* 1499 * Read the completion register once. This limits 1500 * how long the following loop can execute. 1501 */ 1502 rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION); 1503 #ifdef GEM_DEBUG 1504 CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d", 1505 __func__, sc->sc_rxptr, rxcomp); 1506 #endif 1507 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1508 for (; sc->sc_rxptr != rxcomp;) { 1509 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1510 rxstat = GEM_DMA_READ(sc, 1511 sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1512 1513 if (rxstat & GEM_RD_OWN) { 1514 #ifdef GEM_RINT_TIMEOUT 1515 /* 1516 * The descriptor is still marked as owned, although 1517 * it is supposed to have completed. This has been 1518 * observed on some machines. Just exiting here 1519 * might leave the packet sitting around until another 1520 * one arrives to trigger a new interrupt, which is 1521 * generally undesirable, so set up a timeout. 1522 */ 1523 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1524 gem_rint_timeout, sc); 1525 #endif 1526 m = NULL; 1527 goto kickit; 1528 } 1529 1530 if (rxstat & GEM_RD_BAD_CRC) { 1531 ifp->if_ierrors++; 1532 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1533 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1534 m = NULL; 1535 goto kickit; 1536 } 1537 1538 #ifdef GEM_DEBUG 1539 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1540 printf(" rxsoft %p descriptor %d: ", 1541 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1542 printf("gd_flags: 0x%016llx\t", 1543 (long long)GEM_DMA_READ(sc, 1544 sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 1545 printf("gd_addr: 0x%016llx\n", 1546 (long long)GEM_DMA_READ(sc, 1547 sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 1548 } 1549 #endif 1550 1551 /* 1552 * Allocate a new mbuf cluster. If that fails, we are 1553 * out of memory, and must drop the packet and recycle 1554 * the buffer that's already attached to this descriptor. 1555 */ 1556 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1557 ifp->if_ierrors++; 1558 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1559 m = NULL; 1560 } 1561 1562 kickit: 1563 /* 1564 * Update the RX kick register. This register has to point 1565 * to the descriptor after the last valid one (before the 1566 * current batch) and for optimum performance should be 1567 * incremented in multiples of 4 (the DMA engine fetches/ 1568 * updates descriptors in batches of 4). 1569 */ 1570 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 1571 if ((sc->sc_rxptr % 4) == 0) { 1572 GEM_CDSYNC(sc, 1573 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1574 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, 1575 (sc->sc_rxptr + GEM_NRXDESC - 4) & 1576 GEM_NRXDESC_MASK); 1577 } 1578 1579 if (m == NULL) { 1580 if (rxstat & GEM_RD_OWN) 1581 break; 1582 continue; 1583 } 1584 1585 ifp->if_ipackets++; 1586 m->m_data += ETHER_ALIGN; /* first byte offset */ 1587 m->m_pkthdr.rcvif = ifp; 1588 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 1589 1590 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1591 gem_rxcksum(m, rxstat); 1592 1593 /* Pass it on. */ 1594 GEM_UNLOCK(sc); 1595 (*ifp->if_input)(ifp, m); 1596 GEM_LOCK(sc); 1597 } 1598 1599 #ifdef GEM_DEBUG 1600 CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__, 1601 sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION)); 1602 #endif 1603 } 1604 1605 static int 1606 gem_add_rxbuf(struct gem_softc *sc, int idx) 1607 { 1608 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1609 struct mbuf *m; 1610 bus_dma_segment_t segs[1]; 1611 int error, nsegs; 1612 1613 GEM_LOCK_ASSERT(sc, MA_OWNED); 1614 1615 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1616 if (m == NULL) 1617 return (ENOBUFS); 1618 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1619 1620 #ifdef GEM_DEBUG 1621 /* Bzero the packet to check DMA. */ 1622 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1623 #endif 1624 1625 if (rxs->rxs_mbuf != NULL) { 1626 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1627 BUS_DMASYNC_POSTREAD); 1628 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1629 } 1630 1631 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1632 m, segs, &nsegs, BUS_DMA_NOWAIT); 1633 if (error != 0) { 1634 device_printf(sc->sc_dev, 1635 "cannot load RS DMA map %d, error = %d\n", idx, error); 1636 m_freem(m); 1637 return (error); 1638 } 1639 /* If nsegs is wrong then the stack is corrupt. */ 1640 KASSERT(nsegs == 1, 1641 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1642 rxs->rxs_mbuf = m; 1643 rxs->rxs_paddr = segs[0].ds_addr; 1644 1645 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1646 BUS_DMASYNC_PREREAD); 1647 1648 GEM_INIT_RXDESC(sc, idx); 1649 1650 return (0); 1651 } 1652 1653 static void 1654 gem_eint(struct gem_softc *sc, u_int status) 1655 { 1656 1657 sc->sc_ifp->if_ierrors++; 1658 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1659 gem_reset_rxdma(sc); 1660 return; 1661 } 1662 1663 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 1664 if ((status & GEM_INTR_BERR) != 0) { 1665 if ((sc->sc_flags & GEM_PCI) != 0) 1666 printf(", PCI bus error 0x%x\n", 1667 GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS)); 1668 else 1669 printf(", SBus error 0x%x\n", 1670 GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS)); 1671 } 1672 } 1673 1674 void 1675 gem_intr(void *v) 1676 { 1677 struct gem_softc *sc = v; 1678 uint32_t status, status2; 1679 1680 GEM_LOCK(sc); 1681 status = GEM_BANK1_READ_4(sc, GEM_STATUS); 1682 1683 #ifdef GEM_DEBUG 1684 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 1685 device_get_name(sc->sc_dev), __func__, 1686 (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status); 1687 1688 /* 1689 * PCS interrupts must be cleared, otherwise no traffic is passed! 1690 */ 1691 if ((status & GEM_INTR_PCS) != 0) { 1692 status2 = 1693 GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) | 1694 GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS); 1695 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1696 device_printf(sc->sc_dev, 1697 "%s: PCS link status changed\n", __func__); 1698 } 1699 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1700 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS); 1701 if ((status2 & GEM_MAC_PAUSED) != 0) 1702 device_printf(sc->sc_dev, 1703 "%s: PAUSE received (PAUSE time %d slots)\n", 1704 __func__, GEM_MAC_PAUSE_TIME(status2)); 1705 if ((status2 & GEM_MAC_PAUSE) != 0) 1706 device_printf(sc->sc_dev, 1707 "%s: transited to PAUSE state\n", __func__); 1708 if ((status2 & GEM_MAC_RESUME) != 0) 1709 device_printf(sc->sc_dev, 1710 "%s: transited to non-PAUSE state\n", __func__); 1711 } 1712 if ((status & GEM_INTR_MIF) != 0) 1713 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1714 #endif 1715 1716 if (__predict_false(status & 1717 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1718 gem_eint(sc, status); 1719 1720 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1721 gem_rint(sc); 1722 1723 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1724 gem_tint(sc); 1725 1726 if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) { 1727 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS); 1728 if ((status2 & 1729 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 1730 GEM_MAC_TX_PEAK_EXP)) != 0) 1731 device_printf(sc->sc_dev, 1732 "MAC TX fault, status %x\n", status2); 1733 if ((status2 & 1734 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) { 1735 sc->sc_ifp->if_oerrors++; 1736 gem_init_locked(sc); 1737 } 1738 } 1739 if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) { 1740 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS); 1741 /* 1742 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 1743 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 1744 * silicon bug so handle them silently. Moreover, it's 1745 * likely that the receiver has hung so we reset it. 1746 */ 1747 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1748 sc->sc_ifp->if_ierrors++; 1749 gem_reset_rxdma(sc); 1750 } else if ((status2 & 1751 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1752 device_printf(sc->sc_dev, 1753 "MAC RX fault, status %x\n", status2); 1754 } 1755 GEM_UNLOCK(sc); 1756 } 1757 1758 static int 1759 gem_watchdog(struct gem_softc *sc) 1760 { 1761 struct ifnet *ifp = sc->sc_ifp; 1762 1763 GEM_LOCK_ASSERT(sc, MA_OWNED); 1764 1765 #ifdef GEM_DEBUG 1766 CTR4(KTR_GEM, 1767 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1768 __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG), 1769 GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS), 1770 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG)); 1771 CTR4(KTR_GEM, 1772 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1773 __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG), 1774 GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS), 1775 GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG)); 1776 #endif 1777 1778 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1779 return (0); 1780 1781 if ((sc->sc_flags & GEM_LINK) != 0) 1782 device_printf(sc->sc_dev, "device timeout\n"); 1783 else if (bootverbose) 1784 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1785 ++ifp->if_oerrors; 1786 1787 /* Try to get more packets going. */ 1788 gem_init_locked(sc); 1789 gem_start_locked(ifp); 1790 return (EJUSTRETURN); 1791 } 1792 1793 static void 1794 gem_mifinit(struct gem_softc *sc) 1795 { 1796 1797 /* Configure the MIF in frame mode. */ 1798 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, 1799 GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1800 } 1801 1802 /* 1803 * MII interface 1804 * 1805 * The MII interface supports at least three different operating modes: 1806 * 1807 * Bitbang mode is implemented using data, clock and output enable registers. 1808 * 1809 * Frame mode is implemented by loading a complete frame into the frame 1810 * register and polling the valid bit for completion. 1811 * 1812 * Polling mode uses the frame register but completion is indicated by 1813 * an interrupt. 1814 * 1815 */ 1816 int 1817 gem_mii_readreg(device_t dev, int phy, int reg) 1818 { 1819 struct gem_softc *sc; 1820 int n; 1821 uint32_t v; 1822 1823 #ifdef GEM_DEBUG_PHY 1824 printf("%s: phy %d reg %d\n", __func__, phy, reg); 1825 #endif 1826 1827 sc = device_get_softc(dev); 1828 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 1829 return (0); 1830 1831 if ((sc->sc_flags & GEM_SERDES) != 0) { 1832 switch (reg) { 1833 case MII_BMCR: 1834 reg = GEM_MII_CONTROL; 1835 break; 1836 case MII_BMSR: 1837 reg = GEM_MII_STATUS; 1838 break; 1839 case MII_PHYIDR1: 1840 case MII_PHYIDR2: 1841 return (0); 1842 case MII_ANAR: 1843 reg = GEM_MII_ANAR; 1844 break; 1845 case MII_ANLPAR: 1846 reg = GEM_MII_ANLPAR; 1847 break; 1848 case MII_EXTSR: 1849 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 1850 default: 1851 device_printf(sc->sc_dev, 1852 "%s: unhandled register %d\n", __func__, reg); 1853 return (0); 1854 } 1855 return (GEM_BANK1_READ_4(sc, reg)); 1856 } 1857 1858 /* Construct the frame command. */ 1859 v = GEM_MIF_FRAME_READ | 1860 (phy << GEM_MIF_PHY_SHIFT) | 1861 (reg << GEM_MIF_REG_SHIFT); 1862 1863 GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v); 1864 GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4, 1865 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1866 for (n = 0; n < 100; n++) { 1867 DELAY(1); 1868 v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME); 1869 if (v & GEM_MIF_FRAME_TA0) 1870 return (v & GEM_MIF_FRAME_DATA); 1871 } 1872 1873 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1874 return (0); 1875 } 1876 1877 int 1878 gem_mii_writereg(device_t dev, int phy, int reg, int val) 1879 { 1880 struct gem_softc *sc; 1881 int n; 1882 uint32_t v; 1883 1884 #ifdef GEM_DEBUG_PHY 1885 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 1886 #endif 1887 1888 sc = device_get_softc(dev); 1889 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 1890 return (0); 1891 1892 if ((sc->sc_flags & GEM_SERDES) != 0) { 1893 switch (reg) { 1894 case MII_BMSR: 1895 reg = GEM_MII_STATUS; 1896 break; 1897 case MII_BMCR: 1898 reg = GEM_MII_CONTROL; 1899 if ((val & GEM_MII_CONTROL_RESET) == 0) 1900 break; 1901 GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val); 1902 GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4, 1903 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1904 if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL, 1905 GEM_MII_CONTROL_RESET, 0)) 1906 device_printf(sc->sc_dev, 1907 "cannot reset PCS\n"); 1908 /* FALLTHROUGH */ 1909 case MII_ANAR: 1910 GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0); 1911 GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4, 1912 BUS_SPACE_BARRIER_WRITE); 1913 GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val); 1914 GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 1915 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 1916 GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 1917 GEM_MII_CONFIG_ENABLE); 1918 return (0); 1919 case MII_ANLPAR: 1920 reg = GEM_MII_ANLPAR; 1921 break; 1922 default: 1923 device_printf(sc->sc_dev, 1924 "%s: unhandled register %d\n", __func__, reg); 1925 return (0); 1926 } 1927 GEM_BANK1_WRITE_4(sc, reg, val); 1928 return (0); 1929 } 1930 1931 /* Construct the frame command. */ 1932 v = GEM_MIF_FRAME_WRITE | 1933 (phy << GEM_MIF_PHY_SHIFT) | 1934 (reg << GEM_MIF_REG_SHIFT) | 1935 (val & GEM_MIF_FRAME_DATA); 1936 1937 GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v); 1938 GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4, 1939 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1940 for (n = 0; n < 100; n++) { 1941 DELAY(1); 1942 v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME); 1943 if (v & GEM_MIF_FRAME_TA0) 1944 return (1); 1945 } 1946 1947 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1948 return (0); 1949 } 1950 1951 void 1952 gem_mii_statchg(device_t dev) 1953 { 1954 struct gem_softc *sc; 1955 int gigabit; 1956 uint32_t rxcfg, txcfg, v; 1957 1958 sc = device_get_softc(dev); 1959 1960 GEM_LOCK_ASSERT(sc, MA_OWNED); 1961 1962 #ifdef GEM_DEBUG 1963 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0) 1964 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n", 1965 __func__, sc->sc_phyad); 1966 #endif 1967 1968 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 1969 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 1970 sc->sc_flags |= GEM_LINK; 1971 else 1972 sc->sc_flags &= ~GEM_LINK; 1973 1974 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 1975 case IFM_1000_SX: 1976 case IFM_1000_LX: 1977 case IFM_1000_CX: 1978 case IFM_1000_T: 1979 gigabit = 1; 1980 break; 1981 default: 1982 gigabit = 0; 1983 } 1984 1985 /* 1986 * The configuration done here corresponds to the steps F) and 1987 * G) and as far as enabling of RX and TX MAC goes also step H) 1988 * of the initialization sequence outlined in section 3.2.1 of 1989 * the GEM Gigabit Ethernet ASIC Specification. 1990 */ 1991 1992 rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG); 1993 rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE); 1994 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 1995 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1996 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 1997 else if (gigabit != 0) { 1998 rxcfg |= GEM_MAC_RX_CARR_EXTEND; 1999 txcfg |= GEM_MAC_TX_CARR_EXTEND; 2000 } 2001 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0); 2002 GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 2003 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2004 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 2005 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 2006 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg); 2007 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0); 2008 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 2009 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2010 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 2011 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 2012 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg); 2013 2014 v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) & 2015 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 2016 #ifdef notyet 2017 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2018 IFM_ETH_RXPAUSE) != 0) 2019 v |= GEM_MAC_CC_RX_PAUSE; 2020 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2021 IFM_ETH_TXPAUSE) != 0) 2022 v |= GEM_MAC_CC_TX_PAUSE; 2023 #endif 2024 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v); 2025 2026 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 2027 gigabit != 0) 2028 GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME, 2029 GEM_MAC_SLOT_TIME_CARR_EXTEND); 2030 else 2031 GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME, 2032 GEM_MAC_SLOT_TIME_NORMAL); 2033 2034 /* XIF Configuration */ 2035 v = GEM_MAC_XIF_LINK_LED; 2036 v |= GEM_MAC_XIF_TX_MII_ENA; 2037 if ((sc->sc_flags & GEM_SERDES) == 0) { 2038 if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & 2039 GEM_MIF_CONFIG_PHY_SEL) != 0) { 2040 /* External MII needs echo disable if half duplex. */ 2041 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2042 IFM_FDX) == 0) 2043 v |= GEM_MAC_XIF_ECHO_DISABL; 2044 } else 2045 /* 2046 * Internal MII needs buffer enable. 2047 * XXX buffer enable makes only sense for an 2048 * external PHY. 2049 */ 2050 v |= GEM_MAC_XIF_MII_BUF_ENA; 2051 } 2052 if (gigabit != 0) 2053 v |= GEM_MAC_XIF_GMII_MODE; 2054 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2055 v |= GEM_MAC_XIF_FDPLX_LED; 2056 GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v); 2057 2058 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2059 (sc->sc_flags & GEM_LINK) != 0) { 2060 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 2061 txcfg | GEM_MAC_TX_ENABLE); 2062 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 2063 rxcfg | GEM_MAC_RX_ENABLE); 2064 } 2065 } 2066 2067 int 2068 gem_mediachange(struct ifnet *ifp) 2069 { 2070 struct gem_softc *sc = ifp->if_softc; 2071 int error; 2072 2073 /* XXX add support for serial media. */ 2074 2075 GEM_LOCK(sc); 2076 error = mii_mediachg(sc->sc_mii); 2077 GEM_UNLOCK(sc); 2078 return (error); 2079 } 2080 2081 void 2082 gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2083 { 2084 struct gem_softc *sc = ifp->if_softc; 2085 2086 GEM_LOCK(sc); 2087 if ((ifp->if_flags & IFF_UP) == 0) { 2088 GEM_UNLOCK(sc); 2089 return; 2090 } 2091 2092 mii_pollstat(sc->sc_mii); 2093 ifmr->ifm_active = sc->sc_mii->mii_media_active; 2094 ifmr->ifm_status = sc->sc_mii->mii_media_status; 2095 GEM_UNLOCK(sc); 2096 } 2097 2098 static int 2099 gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2100 { 2101 struct gem_softc *sc = ifp->if_softc; 2102 struct ifreq *ifr = (struct ifreq *)data; 2103 int error; 2104 2105 error = 0; 2106 switch (cmd) { 2107 case SIOCSIFFLAGS: 2108 GEM_LOCK(sc); 2109 if ((ifp->if_flags & IFF_UP) != 0) { 2110 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2111 ((ifp->if_flags ^ sc->sc_ifflags) & 2112 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 2113 gem_setladrf(sc); 2114 else 2115 gem_init_locked(sc); 2116 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2117 gem_stop(ifp, 0); 2118 if ((ifp->if_flags & IFF_LINK0) != 0) 2119 sc->sc_csum_features |= CSUM_UDP; 2120 else 2121 sc->sc_csum_features &= ~CSUM_UDP; 2122 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2123 ifp->if_hwassist = sc->sc_csum_features; 2124 sc->sc_ifflags = ifp->if_flags; 2125 GEM_UNLOCK(sc); 2126 break; 2127 case SIOCADDMULTI: 2128 case SIOCDELMULTI: 2129 GEM_LOCK(sc); 2130 gem_setladrf(sc); 2131 GEM_UNLOCK(sc); 2132 break; 2133 case SIOCGIFMEDIA: 2134 case SIOCSIFMEDIA: 2135 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 2136 break; 2137 case SIOCSIFCAP: 2138 GEM_LOCK(sc); 2139 ifp->if_capenable = ifr->ifr_reqcap; 2140 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2141 ifp->if_hwassist = sc->sc_csum_features; 2142 else 2143 ifp->if_hwassist = 0; 2144 GEM_UNLOCK(sc); 2145 break; 2146 default: 2147 error = ether_ioctl(ifp, cmd, data); 2148 break; 2149 } 2150 2151 return (error); 2152 } 2153 2154 static void 2155 gem_setladrf(struct gem_softc *sc) 2156 { 2157 struct ifnet *ifp = sc->sc_ifp; 2158 struct ifmultiaddr *inm; 2159 int i; 2160 uint32_t hash[16]; 2161 uint32_t crc, v; 2162 2163 GEM_LOCK_ASSERT(sc, MA_OWNED); 2164 2165 /* Get the current RX configuration. */ 2166 v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG); 2167 2168 /* 2169 * Turn off promiscuous mode, promiscuous group mode (all multicast), 2170 * and hash filter. Depending on the case, the right bit will be 2171 * enabled. 2172 */ 2173 v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER | 2174 GEM_MAC_RX_PROMISC_GRP); 2175 2176 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 2177 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 2178 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2179 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 2180 0)) 2181 device_printf(sc->sc_dev, "cannot disable RX hash filter\n"); 2182 2183 if ((ifp->if_flags & IFF_PROMISC) != 0) { 2184 v |= GEM_MAC_RX_PROMISCUOUS; 2185 goto chipit; 2186 } 2187 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 2188 v |= GEM_MAC_RX_PROMISC_GRP; 2189 goto chipit; 2190 } 2191 2192 /* 2193 * Set up multicast address filter by passing all multicast 2194 * addresses through a crc generator, and then using the high 2195 * order 8 bits as an index into the 256 bit logical address 2196 * filter. The high order 4 bits selects the word, while the 2197 * other 4 bits select the bit within the word (where bit 0 2198 * is the MSB). 2199 */ 2200 2201 /* Clear the hash table. */ 2202 memset(hash, 0, sizeof(hash)); 2203 2204 if_maddr_rlock(ifp); 2205 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { 2206 if (inm->ifma_addr->sa_family != AF_LINK) 2207 continue; 2208 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 2209 inm->ifma_addr), ETHER_ADDR_LEN); 2210 2211 /* We just want the 8 most significant bits. */ 2212 crc >>= 24; 2213 2214 /* Set the corresponding bit in the filter. */ 2215 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2216 } 2217 if_maddr_runlock(ifp); 2218 2219 v |= GEM_MAC_RX_HASH_FILTER; 2220 2221 /* Now load the hash table into the chip (if we are using it). */ 2222 for (i = 0; i < 16; i++) 2223 GEM_BANK1_WRITE_4(sc, 2224 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0), 2225 hash[i]); 2226 2227 chipit: 2228 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 2229 } 2230