/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	GEM_TRIES	10000

/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum for UDP datagrams,
 * which can yield 0x0.  As a safeguard, UDP checksum offload is
 * disabled by default.  It can be reactivated by setting the special
 * link option link0 with ifconfig(8).
95 */ 96 #define GEM_CSUM_FEATURES (CSUM_TCP) 97 98 static int gem_add_rxbuf(struct gem_softc *sc, int idx); 99 static int gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, 100 uint32_t clr, uint32_t set); 101 static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, 102 int nsegs, int error); 103 static int gem_disable_rx(struct gem_softc *sc); 104 static int gem_disable_tx(struct gem_softc *sc); 105 static void gem_eint(struct gem_softc *sc, u_int status); 106 static void gem_init(void *xsc); 107 static void gem_init_locked(struct gem_softc *sc); 108 static void gem_init_regs(struct gem_softc *sc); 109 static int gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 110 static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); 111 static int gem_meminit(struct gem_softc *sc); 112 static void gem_mifinit(struct gem_softc *sc); 113 static void gem_reset(struct gem_softc *sc); 114 static int gem_reset_rx(struct gem_softc *sc); 115 static void gem_reset_rxdma(struct gem_softc *sc); 116 static int gem_reset_tx(struct gem_softc *sc); 117 static u_int gem_ringsize(u_int sz); 118 static void gem_rint(struct gem_softc *sc); 119 #ifdef GEM_RINT_TIMEOUT 120 static void gem_rint_timeout(void *arg); 121 #endif 122 static inline void gem_rxcksum(struct mbuf *m, uint64_t flags); 123 static void gem_rxdrain(struct gem_softc *sc); 124 static void gem_setladrf(struct gem_softc *sc); 125 static void gem_start(struct ifnet *ifp); 126 static void gem_start_locked(struct ifnet *ifp); 127 static void gem_stop(struct ifnet *ifp, int disable); 128 static void gem_tick(void *arg); 129 static void gem_tint(struct gem_softc *sc); 130 static inline void gem_txkick(struct gem_softc *sc); 131 static int gem_watchdog(struct gem_softc *sc); 132 133 devclass_t gem_devclass; 134 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 135 MODULE_DEPEND(gem, miibus, 1, 1, 1); 136 137 #ifdef GEM_DEBUG 138 #include <sys/ktr.h> 139 #define KTR_GEM KTR_SPARE2 140 #endif 141 142 #define GEM_BANK1_BITWAIT(sc, r, clr, set) \ 143 gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set)) 144 #define GEM_BANK2_BITWAIT(sc, r, clr, set) \ 145 gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set)) 146 147 int 148 gem_attach(struct gem_softc *sc) 149 { 150 struct gem_txsoft *txs; 151 struct ifnet *ifp; 152 int error, i, phy; 153 uint32_t v; 154 155 if (bootverbose) 156 device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags); 157 158 /* Set up ifnet structure. */ 159 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 160 if (ifp == NULL) 161 return (ENOSPC); 162 sc->sc_csum_features = GEM_CSUM_FEATURES; 163 ifp->if_softc = sc; 164 if_initname(ifp, device_get_name(sc->sc_dev), 165 device_get_unit(sc->sc_dev)); 166 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 167 ifp->if_start = gem_start; 168 ifp->if_ioctl = gem_ioctl; 169 ifp->if_init = gem_init; 170 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN); 171 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN; 172 IFQ_SET_READY(&ifp->if_snd); 173 174 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 175 #ifdef GEM_RINT_TIMEOUT 176 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 177 #endif 178 179 /* Make sure the chip is stopped. 
*/ 180 gem_reset(sc); 181 182 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 183 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 184 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 185 NULL, &sc->sc_pdmatag); 186 if (error != 0) 187 goto fail_ifnet; 188 189 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 190 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 191 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 192 if (error != 0) 193 goto fail_ptag; 194 195 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 196 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 197 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 198 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 199 if (error != 0) 200 goto fail_rtag; 201 202 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 203 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 204 sizeof(struct gem_control_data), 1, 205 sizeof(struct gem_control_data), 0, 206 NULL, NULL, &sc->sc_cdmatag); 207 if (error != 0) 208 goto fail_ttag; 209 210 /* 211 * Allocate the control data structures, create and load the 212 * DMA map for it. 213 */ 214 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 215 (void **)&sc->sc_control_data, 216 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 217 &sc->sc_cddmamap)) != 0) { 218 device_printf(sc->sc_dev, 219 "unable to allocate control data, error = %d\n", error); 220 goto fail_ctag; 221 } 222 223 sc->sc_cddma = 0; 224 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 225 sc->sc_control_data, sizeof(struct gem_control_data), 226 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 227 device_printf(sc->sc_dev, 228 "unable to load control data DMA map, error = %d\n", 229 error); 230 goto fail_cmem; 231 } 232 233 /* 234 * Initialize the transmit job descriptors. 235 */ 236 STAILQ_INIT(&sc->sc_txfreeq); 237 STAILQ_INIT(&sc->sc_txdirtyq); 238 239 /* 240 * Create the transmit buffer DMA maps. 241 */ 242 error = ENOMEM; 243 for (i = 0; i < GEM_TXQUEUELEN; i++) { 244 txs = &sc->sc_txsoft[i]; 245 txs->txs_mbuf = NULL; 246 txs->txs_ndescs = 0; 247 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 248 &txs->txs_dmamap)) != 0) { 249 device_printf(sc->sc_dev, 250 "unable to create TX DMA map %d, error = %d\n", 251 i, error); 252 goto fail_txd; 253 } 254 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 255 } 256 257 /* 258 * Create the receive buffer DMA maps. 259 */ 260 for (i = 0; i < GEM_NRXDESC; i++) { 261 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 262 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 263 device_printf(sc->sc_dev, 264 "unable to create RX DMA map %d, error = %d\n", 265 i, error); 266 goto fail_rxd; 267 } 268 sc->sc_rxsoft[i].rxs_mbuf = NULL; 269 } 270 271 /* Bypass probing PHYs if we already know for sure to use a SERDES. */ 272 if ((sc->sc_flags & GEM_SERDES) != 0) 273 goto serdes; 274 275 /* Bad things will happen when touching this register on ERI. */ 276 if (sc->sc_variant != GEM_SUN_ERI) { 277 GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 278 GEM_MII_DATAPATH_MII); 279 GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 280 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 281 } 282 283 gem_mifinit(sc); 284 285 /* 286 * Look for an external PHY. 
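 * GEM_MIF_CONFIG_MDI1 being set indicates a transceiver attached to the
 * external MDIO bus; GEM_MIF_CONFIG_MDI0 indicates one on the internal
 * bus that is used as a fallback below.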
287 */ 288 error = ENXIO; 289 v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG); 290 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 291 v |= GEM_MIF_CONFIG_PHY_SEL; 292 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v); 293 GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4, 294 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 295 switch (sc->sc_variant) { 296 case GEM_SUN_ERI: 297 phy = GEM_PHYAD_EXTERNAL; 298 break; 299 default: 300 phy = MII_PHY_ANY; 301 break; 302 } 303 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 304 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy, 305 MII_OFFSET_ANY, MIIF_DOPAUSE); 306 } 307 308 /* 309 * Fall back on an internal PHY if no external PHY was found. 310 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be 311 * trusted when the firmware has powered down the chip. 312 */ 313 if (error != 0 && 314 ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) { 315 v &= ~GEM_MIF_CONFIG_PHY_SEL; 316 GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v); 317 GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4, 318 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 319 switch (sc->sc_variant) { 320 case GEM_SUN_ERI: 321 case GEM_APPLE_K2_GMAC: 322 phy = GEM_PHYAD_INTERNAL; 323 break; 324 case GEM_APPLE_GMAC: 325 phy = GEM_PHYAD_EXTERNAL; 326 break; 327 default: 328 phy = MII_PHY_ANY; 329 break; 330 } 331 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 332 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy, 333 MII_OFFSET_ANY, MIIF_DOPAUSE); 334 } 335 336 /* 337 * Try the external PCS SERDES if we didn't find any PHYs. 338 */ 339 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 340 serdes: 341 GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 342 GEM_MII_DATAPATH_SERDES); 343 GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 344 BUS_SPACE_BARRIER_WRITE); 345 GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 346 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 347 GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, 348 BUS_SPACE_BARRIER_WRITE); 349 GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 350 GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4, 351 BUS_SPACE_BARRIER_WRITE); 352 sc->sc_flags |= GEM_SERDES; 353 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 354 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, 355 GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); 356 } 357 if (error != 0) { 358 device_printf(sc->sc_dev, "attaching PHYs failed\n"); 359 goto fail_rxd; 360 } 361 sc->sc_mii = device_get_softc(sc->sc_miibus); 362 363 /* 364 * From this point forward, the attachment cannot fail. A failure 365 * before this point releases all resources that may have been 366 * allocated. 367 */ 368 369 /* Get RX FIFO size. */ 370 sc->sc_rxfifosize = 64 * 371 GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE); 372 373 /* Get TX FIFO size. */ 374 v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE); 375 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 376 sc->sc_rxfifosize / 1024, v / 16); 377 378 /* Attach the interface. */ 379 ether_ifattach(ifp, sc->sc_enaddr); 380 381 /* 382 * Tell the upper layer(s) we support long frames/checksum offloads. 383 */ 384 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 385 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 386 ifp->if_hwassist |= sc->sc_csum_features; 387 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 388 389 return (0); 390 391 /* 392 * Free any resources we've allocated during the failed attach 393 * attempt. Do this in reverse order and fall through. 
394 */ 395 fail_rxd: 396 for (i = 0; i < GEM_NRXDESC; i++) 397 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 398 bus_dmamap_destroy(sc->sc_rdmatag, 399 sc->sc_rxsoft[i].rxs_dmamap); 400 fail_txd: 401 for (i = 0; i < GEM_TXQUEUELEN; i++) 402 if (sc->sc_txsoft[i].txs_dmamap != NULL) 403 bus_dmamap_destroy(sc->sc_tdmatag, 404 sc->sc_txsoft[i].txs_dmamap); 405 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 406 fail_cmem: 407 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 408 sc->sc_cddmamap); 409 fail_ctag: 410 bus_dma_tag_destroy(sc->sc_cdmatag); 411 fail_ttag: 412 bus_dma_tag_destroy(sc->sc_tdmatag); 413 fail_rtag: 414 bus_dma_tag_destroy(sc->sc_rdmatag); 415 fail_ptag: 416 bus_dma_tag_destroy(sc->sc_pdmatag); 417 fail_ifnet: 418 if_free(ifp); 419 return (error); 420 } 421 422 void 423 gem_detach(struct gem_softc *sc) 424 { 425 struct ifnet *ifp = sc->sc_ifp; 426 int i; 427 428 ether_ifdetach(ifp); 429 GEM_LOCK(sc); 430 gem_stop(ifp, 1); 431 GEM_UNLOCK(sc); 432 callout_drain(&sc->sc_tick_ch); 433 #ifdef GEM_RINT_TIMEOUT 434 callout_drain(&sc->sc_rx_ch); 435 #endif 436 if_free(ifp); 437 device_delete_child(sc->sc_dev, sc->sc_miibus); 438 439 for (i = 0; i < GEM_NRXDESC; i++) 440 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 441 bus_dmamap_destroy(sc->sc_rdmatag, 442 sc->sc_rxsoft[i].rxs_dmamap); 443 for (i = 0; i < GEM_TXQUEUELEN; i++) 444 if (sc->sc_txsoft[i].txs_dmamap != NULL) 445 bus_dmamap_destroy(sc->sc_tdmatag, 446 sc->sc_txsoft[i].txs_dmamap); 447 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 448 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 449 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 450 sc->sc_cddmamap); 451 bus_dma_tag_destroy(sc->sc_cdmatag); 452 bus_dma_tag_destroy(sc->sc_tdmatag); 453 bus_dma_tag_destroy(sc->sc_rdmatag); 454 bus_dma_tag_destroy(sc->sc_pdmatag); 455 } 456 457 void 458 gem_suspend(struct gem_softc *sc) 459 { 460 struct ifnet *ifp = sc->sc_ifp; 461 462 GEM_LOCK(sc); 463 gem_stop(ifp, 0); 464 GEM_UNLOCK(sc); 465 } 466 467 void 468 gem_resume(struct gem_softc *sc) 469 { 470 struct ifnet *ifp = sc->sc_ifp; 471 472 GEM_LOCK(sc); 473 /* 474 * On resume all registers have to be initialized again like 475 * after power-on. 476 */ 477 sc->sc_flags &= ~GEM_INITED; 478 if (ifp->if_flags & IFF_UP) 479 gem_init_locked(sc); 480 GEM_UNLOCK(sc); 481 } 482 483 static inline void 484 gem_rxcksum(struct mbuf *m, uint64_t flags) 485 { 486 struct ether_header *eh; 487 struct ip *ip; 488 struct udphdr *uh; 489 uint16_t *opts; 490 int32_t hlen, len, pktlen; 491 uint32_t temp32; 492 uint16_t cksum; 493 494 pktlen = m->m_pkthdr.len; 495 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 496 return; 497 eh = mtod(m, struct ether_header *); 498 if (eh->ether_type != htons(ETHERTYPE_IP)) 499 return; 500 ip = (struct ip *)(eh + 1); 501 if (ip->ip_v != IPVERSION) 502 return; 503 504 hlen = ip->ip_hl << 2; 505 pktlen -= sizeof(struct ether_header); 506 if (hlen < sizeof(struct ip)) 507 return; 508 if (ntohs(ip->ip_len) < hlen) 509 return; 510 if (ntohs(ip->ip_len) != pktlen) 511 return; 512 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 513 return; /* Cannot handle fragmented packet. 
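 * (The hardware reports a raw sum computed from the checksum start
 * offset programmed in GEM_RX_CONFIG, so fragments cannot be
 * validated here.)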
*/ 514 515 switch (ip->ip_p) { 516 case IPPROTO_TCP: 517 if (pktlen < (hlen + sizeof(struct tcphdr))) 518 return; 519 break; 520 case IPPROTO_UDP: 521 if (pktlen < (hlen + sizeof(struct udphdr))) 522 return; 523 uh = (struct udphdr *)((uint8_t *)ip + hlen); 524 if (uh->uh_sum == 0) 525 return; /* no checksum */ 526 break; 527 default: 528 return; 529 } 530 531 cksum = ~(flags & GEM_RD_CHECKSUM); 532 /* checksum fixup for IP options */ 533 len = hlen - sizeof(struct ip); 534 if (len > 0) { 535 opts = (uint16_t *)(ip + 1); 536 for (; len > 0; len -= sizeof(uint16_t), opts++) { 537 temp32 = cksum - *opts; 538 temp32 = (temp32 >> 16) + (temp32 & 65535); 539 cksum = temp32 & 65535; 540 } 541 } 542 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 543 m->m_pkthdr.csum_data = cksum; 544 } 545 546 static void 547 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 548 { 549 struct gem_softc *sc = xsc; 550 551 if (error != 0) 552 return; 553 if (nsegs != 1) 554 panic("%s: bad control buffer segment count", __func__); 555 sc->sc_cddma = segs[0].ds_addr; 556 } 557 558 static void 559 gem_tick(void *arg) 560 { 561 struct gem_softc *sc = arg; 562 struct ifnet *ifp = sc->sc_ifp; 563 uint32_t v; 564 565 GEM_LOCK_ASSERT(sc, MA_OWNED); 566 567 /* 568 * Unload collision and error counters. 569 */ 570 ifp->if_collisions += 571 GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) + 572 GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT); 573 v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) + 574 GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT); 575 ifp->if_collisions += v; 576 ifp->if_oerrors += v; 577 ifp->if_ierrors += 578 GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) + 579 GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) + 580 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) + 581 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL); 582 583 /* 584 * Then clear the hardware counters. 585 */ 586 GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 587 GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 588 GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 589 GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 590 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 591 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 592 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 593 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 594 595 mii_tick(sc->sc_mii); 596 597 if (gem_watchdog(sc) == EJUSTRETURN) 598 return; 599 600 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 601 } 602 603 static int 604 gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr, 605 uint32_t set) 606 { 607 int i; 608 uint32_t reg; 609 610 for (i = GEM_TRIES; i--; DELAY(100)) { 611 reg = GEM_BANKN_READ_M(bank, 4, sc, r); 612 if ((reg & clr) == 0 && (reg & set) == set) 613 return (1); 614 } 615 return (0); 616 } 617 618 static void 619 gem_reset(struct gem_softc *sc) 620 { 621 622 #ifdef GEM_DEBUG 623 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 624 #endif 625 gem_reset_rx(sc); 626 gem_reset_tx(sc); 627 628 /* Do a full reset. 
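 * of both the receiver and the transmitter; the individual resets above
 * have already quiesced the respective DMA engines.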
*/ 629 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 630 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 631 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 632 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 633 device_printf(sc->sc_dev, "cannot reset device\n"); 634 } 635 636 static void 637 gem_rxdrain(struct gem_softc *sc) 638 { 639 struct gem_rxsoft *rxs; 640 int i; 641 642 for (i = 0; i < GEM_NRXDESC; i++) { 643 rxs = &sc->sc_rxsoft[i]; 644 if (rxs->rxs_mbuf != NULL) { 645 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 646 BUS_DMASYNC_POSTREAD); 647 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 648 m_freem(rxs->rxs_mbuf); 649 rxs->rxs_mbuf = NULL; 650 } 651 } 652 } 653 654 static void 655 gem_stop(struct ifnet *ifp, int disable) 656 { 657 struct gem_softc *sc = ifp->if_softc; 658 struct gem_txsoft *txs; 659 660 #ifdef GEM_DEBUG 661 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 662 #endif 663 664 callout_stop(&sc->sc_tick_ch); 665 #ifdef GEM_RINT_TIMEOUT 666 callout_stop(&sc->sc_rx_ch); 667 #endif 668 669 gem_reset_tx(sc); 670 gem_reset_rx(sc); 671 672 /* 673 * Release any queued transmit buffers. 674 */ 675 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 676 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 677 if (txs->txs_ndescs != 0) { 678 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 679 BUS_DMASYNC_POSTWRITE); 680 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 681 if (txs->txs_mbuf != NULL) { 682 m_freem(txs->txs_mbuf); 683 txs->txs_mbuf = NULL; 684 } 685 } 686 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 687 } 688 689 if (disable) 690 gem_rxdrain(sc); 691 692 /* 693 * Mark the interface down and cancel the watchdog timer. 694 */ 695 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 696 sc->sc_flags &= ~GEM_LINK; 697 sc->sc_wdog_timer = 0; 698 } 699 700 static int 701 gem_reset_rx(struct gem_softc *sc) 702 { 703 704 /* 705 * Resetting while DMA is in progress can cause a bus hang, so we 706 * disable DMA first. 707 */ 708 gem_disable_rx(sc); 709 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0); 710 GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4, 711 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 712 if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 713 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 714 715 /* Wait 5ms extra. */ 716 DELAY(5000); 717 718 /* Finally, reset the ERX. */ 719 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX); 720 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 721 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 722 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 723 0)) { 724 device_printf(sc->sc_dev, "cannot reset receiver\n"); 725 return (1); 726 } 727 return (0); 728 } 729 730 /* 731 * Reset the receiver DMA engine. 732 * 733 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 734 * etc in order to reset the receiver DMA engine only and not do a full 735 * reset which amongst others also downs the link and clears the FIFOs. 736 */ 737 static void 738 gem_reset_rxdma(struct gem_softc *sc) 739 { 740 int i; 741 742 if (gem_reset_rx(sc) != 0) { 743 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 744 return (gem_init_locked(sc)); 745 } 746 for (i = 0; i < GEM_NRXDESC; i++) 747 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 748 GEM_UPDATE_RXDESC(sc, i); 749 sc->sc_rxptr = 0; 750 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 751 752 /* NOTE: we use only 32-bit DMA addresses here. 
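 * (The parent DMA tag created in gem_attach() is limited to
 * BUS_SPACE_MAXADDR_32BIT, so the ring base always fits in 32 bits.)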
*/ 753 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 754 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 755 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 756 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 757 gem_ringsize(GEM_NRXDESC /* XXX */) | 758 ((ETHER_HDR_LEN + sizeof(struct ip)) << 759 GEM_RX_CONFIG_CXM_START_SHFT) | 760 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 761 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT)); 762 /* Adjust for the SBus clock probably isn't worth the fuzz. */ 763 GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING, 764 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 765 GEM_RX_BLANKING_TIME_SHIFT) | 6); 766 GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 767 (3 * sc->sc_rxfifosize / 256) | 768 ((sc->sc_rxfifosize / 256) << 12)); 769 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 770 GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); 771 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK, 772 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 773 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 774 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE); 775 } 776 777 static int 778 gem_reset_tx(struct gem_softc *sc) 779 { 780 781 /* 782 * Resetting while DMA is in progress can cause a bus hang, so we 783 * disable DMA first. 784 */ 785 gem_disable_tx(sc); 786 GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0); 787 GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4, 788 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 789 if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 790 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 791 792 /* Wait 5ms extra. */ 793 DELAY(5000); 794 795 /* Finally, reset the ETX. */ 796 GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX); 797 GEM_BANK2_BARRIER(sc, GEM_RESET, 4, 798 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 799 if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 800 0)) { 801 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 802 return (1); 803 } 804 return (0); 805 } 806 807 static int 808 gem_disable_rx(struct gem_softc *sc) 809 { 810 811 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 812 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE); 813 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 814 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 815 return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 816 0)); 817 } 818 819 static int 820 gem_disable_tx(struct gem_softc *sc) 821 { 822 823 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 824 GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE); 825 GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 826 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 827 return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 828 0)); 829 } 830 831 static int 832 gem_meminit(struct gem_softc *sc) 833 { 834 struct gem_rxsoft *rxs; 835 int error, i; 836 837 GEM_LOCK_ASSERT(sc, MA_OWNED); 838 839 /* 840 * Initialize the transmit descriptor ring. 841 */ 842 for (i = 0; i < GEM_NTXDESC; i++) { 843 sc->sc_txdescs[i].gd_flags = 0; 844 sc->sc_txdescs[i].gd_addr = 0; 845 } 846 sc->sc_txfree = GEM_MAXTXFREE; 847 sc->sc_txnext = 0; 848 sc->sc_txwin = 0; 849 850 /* 851 * Initialize the receive descriptor and receive job 852 * descriptor rings. 
853 */ 854 for (i = 0; i < GEM_NRXDESC; i++) { 855 rxs = &sc->sc_rxsoft[i]; 856 if (rxs->rxs_mbuf == NULL) { 857 if ((error = gem_add_rxbuf(sc, i)) != 0) { 858 device_printf(sc->sc_dev, 859 "unable to allocate or map RX buffer %d, " 860 "error = %d\n", i, error); 861 /* 862 * XXX we should attempt to run with fewer 863 * receive buffers instead of just failing. 864 */ 865 gem_rxdrain(sc); 866 return (1); 867 } 868 } else 869 GEM_INIT_RXDESC(sc, i); 870 } 871 sc->sc_rxptr = 0; 872 873 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 874 875 return (0); 876 } 877 878 static u_int 879 gem_ringsize(u_int sz) 880 { 881 882 switch (sz) { 883 case 32: 884 return (GEM_RING_SZ_32); 885 case 64: 886 return (GEM_RING_SZ_64); 887 case 128: 888 return (GEM_RING_SZ_128); 889 case 256: 890 return (GEM_RING_SZ_256); 891 case 512: 892 return (GEM_RING_SZ_512); 893 case 1024: 894 return (GEM_RING_SZ_1024); 895 case 2048: 896 return (GEM_RING_SZ_2048); 897 case 4096: 898 return (GEM_RING_SZ_4096); 899 case 8192: 900 return (GEM_RING_SZ_8192); 901 default: 902 printf("%s: invalid ring size %d\n", __func__, sz); 903 return (GEM_RING_SZ_32); 904 } 905 } 906 907 static void 908 gem_init(void *xsc) 909 { 910 struct gem_softc *sc = xsc; 911 912 GEM_LOCK(sc); 913 gem_init_locked(sc); 914 GEM_UNLOCK(sc); 915 } 916 917 /* 918 * Initialization of interface; set up initialization block 919 * and transmit/receive descriptor rings. 920 */ 921 static void 922 gem_init_locked(struct gem_softc *sc) 923 { 924 struct ifnet *ifp = sc->sc_ifp; 925 uint32_t v; 926 927 GEM_LOCK_ASSERT(sc, MA_OWNED); 928 929 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 930 return; 931 932 #ifdef GEM_DEBUG 933 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 934 __func__); 935 #endif 936 /* 937 * Initialization sequence. The numbered steps below correspond 938 * to the sequence outlined in section 6.3.5.1 in the Ethernet 939 * Channel Engine manual (part of the PCIO manual). 940 * See also the STP2002-STQ document from Sun Microsystems. 941 */ 942 943 /* step 1 & 2. Reset the Ethernet Channel. */ 944 gem_stop(ifp, 0); 945 gem_reset(sc); 946 #ifdef GEM_DEBUG 947 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 948 __func__); 949 #endif 950 951 if ((sc->sc_flags & GEM_SERDES) == 0) 952 /* Re-initialize the MIF. */ 953 gem_mifinit(sc); 954 955 /* step 3. Setup data structures in host memory. */ 956 if (gem_meminit(sc) != 0) 957 return; 958 959 /* step 4. TX MAC registers & counters */ 960 gem_init_regs(sc); 961 962 /* step 5. RX MAC registers & counters */ 963 gem_setladrf(sc); 964 965 /* step 6 & 7. Program Descriptor Ring Base Addresses. */ 966 /* NOTE: we use only 32-bit DMA addresses here. */ 967 GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0); 968 GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 969 970 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 971 GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 972 #ifdef GEM_DEBUG 973 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", 974 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 975 #endif 976 977 /* step 8. Global Configuration & Interrupt Mask */ 978 979 /* 980 * Set the internal arbitration to "infinite" bursts of the 981 * maximum length of 31 * 64 bytes so DMA transfers aren't 982 * split up in cache line size chunks. This greatly improves 983 * RX performance. 984 * Enable silicon bug workarounds for the Apple variants. 
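 * Infinite bursts are requested for the PCI variants only; the SBus
 * front-end falls back to 64-byte bursts.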
985 */ 986 GEM_BANK1_WRITE_4(sc, GEM_CONFIG, 987 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 988 ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF : 989 GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ? 990 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 991 992 GEM_BANK1_WRITE_4(sc, GEM_INTMASK, 993 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 994 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 995 GEM_INTR_BERR 996 #ifdef GEM_DEBUG 997 | GEM_INTR_PCS | GEM_INTR_MIF 998 #endif 999 )); 1000 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK, 1001 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 1002 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK, 1003 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 1004 GEM_MAC_TX_PEAK_EXP); 1005 #ifdef GEM_DEBUG 1006 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 1007 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 1008 #else 1009 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 1010 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 1011 #endif 1012 1013 /* step 9. ETX Configuration: use mostly default values. */ 1014 1015 /* Enable DMA. */ 1016 v = gem_ringsize(GEM_NTXDESC); 1017 /* Set TX FIFO threshold and enable DMA. */ 1018 v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) & 1019 GEM_TX_CONFIG_TXFIFO_TH; 1020 GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); 1021 1022 /* step 10. ERX Configuration */ 1023 1024 /* Encode Receive Descriptor ring size. */ 1025 v = gem_ringsize(GEM_NRXDESC /* XXX */); 1026 /* RX TCP/UDP checksum offset */ 1027 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 1028 GEM_RX_CONFIG_CXM_START_SHFT); 1029 /* Set RX FIFO threshold, set first byte offset and enable DMA. */ 1030 GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 1031 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 1032 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) | 1033 GEM_RX_CONFIG_RXDMA_EN); 1034 1035 /* Adjust for the SBus clock probably isn't worth the fuzz. */ 1036 GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING, 1037 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 1038 GEM_RX_BLANKING_TIME_SHIFT) | 6); 1039 1040 /* 1041 * The following value is for an OFF Threshold of about 3/4 full 1042 * and an ON Threshold of 1/4 full. 1043 */ 1044 GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 1045 (3 * sc->sc_rxfifosize / 256) | 1046 ((sc->sc_rxfifosize / 256) << 12)); 1047 1048 /* step 11. Configure Media. */ 1049 1050 /* step 12. RX_MAC Configuration Register */ 1051 v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG); 1052 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC; 1053 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0); 1054 GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 1055 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1056 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1057 device_printf(sc->sc_dev, "cannot configure RX MAC\n"); 1058 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 1059 1060 /* step 13. TX_MAC Configuration Register */ 1061 v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG); 1062 v |= GEM_MAC_TX_ENABLE; 1063 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0); 1064 GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 1065 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1066 if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 1067 device_printf(sc->sc_dev, "cannot configure TX MAC\n"); 1068 GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v); 1069 1070 /* step 14. Issue Transmit Pending command. */ 1071 1072 /* step 15. Give the receiver a swift kick. 
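 * The RX kick register has to point to the descriptor after the last
 * valid one; it is primed with GEM_NRXDESC - 4 here and then kept a
 * full batch of four behind the software pointer in gem_rint().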
*/ 1073 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 1074 1075 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1076 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1077 1078 mii_mediachg(sc->sc_mii); 1079 1080 /* Start the one second timer. */ 1081 sc->sc_wdog_timer = 0; 1082 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1083 } 1084 1085 static int 1086 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 1087 { 1088 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1089 struct gem_txsoft *txs; 1090 struct ip *ip; 1091 struct mbuf *m; 1092 uint64_t cflags, flags; 1093 int error, nexttx, nsegs, offset, seg; 1094 1095 GEM_LOCK_ASSERT(sc, MA_OWNED); 1096 1097 /* Get a work queue entry. */ 1098 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1099 /* Ran out of descriptors. */ 1100 return (ENOBUFS); 1101 } 1102 1103 cflags = 0; 1104 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { 1105 if (M_WRITABLE(*m_head) == 0) { 1106 m = m_dup(*m_head, M_DONTWAIT); 1107 m_freem(*m_head); 1108 *m_head = m; 1109 if (m == NULL) 1110 return (ENOBUFS); 1111 } 1112 offset = sizeof(struct ether_header); 1113 m = m_pullup(*m_head, offset + sizeof(struct ip)); 1114 if (m == NULL) { 1115 *m_head = NULL; 1116 return (ENOBUFS); 1117 } 1118 ip = (struct ip *)(mtod(m, caddr_t) + offset); 1119 offset += (ip->ip_hl << 2); 1120 cflags = offset << GEM_TD_CXSUM_STARTSHFT | 1121 ((offset + m->m_pkthdr.csum_data) << 1122 GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE; 1123 *m_head = m; 1124 } 1125 1126 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1127 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1128 if (error == EFBIG) { 1129 m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS); 1130 if (m == NULL) { 1131 m_freem(*m_head); 1132 *m_head = NULL; 1133 return (ENOBUFS); 1134 } 1135 *m_head = m; 1136 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1137 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1138 BUS_DMA_NOWAIT); 1139 if (error != 0) { 1140 m_freem(*m_head); 1141 *m_head = NULL; 1142 return (error); 1143 } 1144 } else if (error != 0) 1145 return (error); 1146 /* If nsegs is wrong then the stack is corrupt. */ 1147 KASSERT(nsegs <= GEM_NTXSEGS, 1148 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1149 if (nsegs == 0) { 1150 m_freem(*m_head); 1151 *m_head = NULL; 1152 return (EIO); 1153 } 1154 1155 /* 1156 * Ensure we have enough descriptors free to describe 1157 * the packet. Note, we always reserve one descriptor 1158 * at the end of the ring as a termination point, in 1159 * order to prevent wrap-around. 1160 */ 1161 if (nsegs > sc->sc_txfree - 1) { 1162 txs->txs_ndescs = 0; 1163 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1164 return (ENOBUFS); 1165 } 1166 1167 txs->txs_ndescs = nsegs; 1168 txs->txs_firstdesc = sc->sc_txnext; 1169 nexttx = txs->txs_firstdesc; 1170 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1171 #ifdef GEM_DEBUG 1172 CTR6(KTR_GEM, 1173 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1174 __func__, seg, nexttx, txsegs[seg].ds_len, 1175 txsegs[seg].ds_addr, 1176 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr)); 1177 #endif 1178 sc->sc_txdescs[nexttx].gd_addr = 1179 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr); 1180 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1181 ("%s: segment size too large!", __func__)); 1182 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1183 sc->sc_txdescs[nexttx].gd_flags = 1184 GEM_DMA_WRITE(sc, flags | cflags); 1185 txs->txs_lastdesc = nexttx; 1186 } 1187 1188 /* Set EOP on the last descriptor. 
*/ 1189 #ifdef GEM_DEBUG 1190 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 1191 __func__, seg, nexttx); 1192 #endif 1193 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1194 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET); 1195 1196 /* Lastly set SOP on the first descriptor. */ 1197 #ifdef GEM_DEBUG 1198 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 1199 __func__, seg, nexttx); 1200 #endif 1201 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1202 sc->sc_txwin = 0; 1203 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1204 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME | 1205 GEM_TD_START_OF_PACKET); 1206 } else 1207 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1208 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET); 1209 1210 /* Sync the DMA map. */ 1211 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1212 BUS_DMASYNC_PREWRITE); 1213 1214 #ifdef GEM_DEBUG 1215 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1216 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1217 txs->txs_ndescs); 1218 #endif 1219 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1220 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1221 txs->txs_mbuf = *m_head; 1222 1223 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1224 sc->sc_txfree -= txs->txs_ndescs; 1225 1226 return (0); 1227 } 1228 1229 static void 1230 gem_init_regs(struct gem_softc *sc) 1231 { 1232 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1233 1234 GEM_LOCK_ASSERT(sc, MA_OWNED); 1235 1236 /* These registers are not cleared on reset. */ 1237 if ((sc->sc_flags & GEM_INITED) == 0) { 1238 /* magic values */ 1239 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0); 1240 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8); 1241 GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4); 1242 1243 /* min frame length */ 1244 GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1245 /* max frame length and max burst size */ 1246 GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME, 1247 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1248 1249 /* more magic values */ 1250 GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7); 1251 GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4); 1252 GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1253 GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808); 1254 1255 /* random number seed */ 1256 GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED, 1257 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1258 1259 /* secondary MAC address: 0:0:0:0:0:0 */ 1260 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0); 1261 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0); 1262 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0); 1263 1264 /* MAC control address: 01:80:c2:00:00:01 */ 1265 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001); 1266 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200); 1267 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180); 1268 1269 /* MAC filter address: 0:0:0:0:0:0 */ 1270 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0); 1271 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0); 1272 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0); 1273 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0); 1274 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0); 1275 1276 sc->sc_flags |= GEM_INITED; 1277 } 1278 1279 /* Counters need to be zeroed. 
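 * gem_tick() accumulates them into the interface statistics and clears
 * them again once a second, so start out from a known state here.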
*/ 1280 GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 1281 GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 1282 GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 1283 GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 1284 GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0); 1285 GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0); 1286 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0); 1287 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 1288 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 1289 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 1290 GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 1291 1292 /* Set XOFF PAUSE time. */ 1293 GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1294 1295 /* Set the station address. */ 1296 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1297 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1298 GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1299 1300 /* Enable MII outputs. */ 1301 GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); 1302 } 1303 1304 static void 1305 gem_start(struct ifnet *ifp) 1306 { 1307 struct gem_softc *sc = ifp->if_softc; 1308 1309 GEM_LOCK(sc); 1310 gem_start_locked(ifp); 1311 GEM_UNLOCK(sc); 1312 } 1313 1314 static inline void 1315 gem_txkick(struct gem_softc *sc) 1316 { 1317 1318 /* 1319 * Update the TX kick register. This register has to point to the 1320 * descriptor after the last valid one and for optimum performance 1321 * should be incremented in multiples of 4 (the DMA engine fetches/ 1322 * updates descriptors in batches of 4). 1323 */ 1324 #ifdef GEM_DEBUG 1325 CTR3(KTR_GEM, "%s: %s: kicking TX %d", 1326 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1327 #endif 1328 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1329 GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext); 1330 } 1331 1332 static void 1333 gem_start_locked(struct ifnet *ifp) 1334 { 1335 struct gem_softc *sc = ifp->if_softc; 1336 struct mbuf *m; 1337 int kicked, ntx; 1338 1339 GEM_LOCK_ASSERT(sc, MA_OWNED); 1340 1341 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1342 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1343 return; 1344 1345 #ifdef GEM_DEBUG 1346 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1347 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1348 sc->sc_txnext); 1349 #endif 1350 ntx = 0; 1351 kicked = 0; 1352 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1353 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1354 if (m == NULL) 1355 break; 1356 if (gem_load_txmbuf(sc, &m) != 0) { 1357 if (m == NULL) 1358 break; 1359 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1360 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1361 break; 1362 } 1363 if ((sc->sc_txnext % 4) == 0) { 1364 gem_txkick(sc); 1365 kicked = 1; 1366 } else 1367 kicked = 0; 1368 ntx++; 1369 BPF_MTAP(ifp, m); 1370 } 1371 1372 if (ntx > 0) { 1373 if (kicked == 0) 1374 gem_txkick(sc); 1375 #ifdef GEM_DEBUG 1376 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1377 device_get_name(sc->sc_dev), sc->sc_txnext); 1378 #endif 1379 1380 /* Set a watchdog timer in case the chip flakes out. 
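 * gem_watchdog() decrements this once a second via gem_tick() and
 * forces a full reinitialization if it expires before the TX dirty
 * queue has drained.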
*/ 1381 sc->sc_wdog_timer = 5; 1382 #ifdef GEM_DEBUG 1383 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1384 device_get_name(sc->sc_dev), __func__, 1385 sc->sc_wdog_timer); 1386 #endif 1387 } 1388 } 1389 1390 static void 1391 gem_tint(struct gem_softc *sc) 1392 { 1393 struct ifnet *ifp = sc->sc_ifp; 1394 struct gem_txsoft *txs; 1395 int progress; 1396 uint32_t txlast; 1397 #ifdef GEM_DEBUG 1398 int i; 1399 1400 GEM_LOCK_ASSERT(sc, MA_OWNED); 1401 1402 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1403 #endif 1404 1405 /* 1406 * Go through our TX list and free mbufs for those 1407 * frames that have been transmitted. 1408 */ 1409 progress = 0; 1410 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1411 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1412 #ifdef GEM_DEBUG 1413 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1414 printf(" txsoft %p transmit chain:\n", txs); 1415 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1416 printf("descriptor %d: ", i); 1417 printf("gd_flags: 0x%016llx\t", 1418 (long long)GEM_DMA_READ(sc, 1419 sc->sc_txdescs[i].gd_flags)); 1420 printf("gd_addr: 0x%016llx\n", 1421 (long long)GEM_DMA_READ(sc, 1422 sc->sc_txdescs[i].gd_addr)); 1423 if (i == txs->txs_lastdesc) 1424 break; 1425 } 1426 } 1427 #endif 1428 1429 /* 1430 * In theory, we could harvest some descriptors before 1431 * the ring is empty, but that's a bit complicated. 1432 * 1433 * GEM_TX_COMPLETION points to the last descriptor 1434 * processed + 1. 1435 */ 1436 txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION); 1437 #ifdef GEM_DEBUG 1438 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1439 "txs->txs_lastdesc = %d, txlast = %d", 1440 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1441 #endif 1442 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1443 if ((txlast >= txs->txs_firstdesc) && 1444 (txlast <= txs->txs_lastdesc)) 1445 break; 1446 } else { 1447 /* Ick -- this command wraps. */ 1448 if ((txlast >= txs->txs_firstdesc) || 1449 (txlast <= txs->txs_lastdesc)) 1450 break; 1451 } 1452 1453 #ifdef GEM_DEBUG 1454 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 1455 #endif 1456 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1457 1458 sc->sc_txfree += txs->txs_ndescs; 1459 1460 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1461 BUS_DMASYNC_POSTWRITE); 1462 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1463 if (txs->txs_mbuf != NULL) { 1464 m_freem(txs->txs_mbuf); 1465 txs->txs_mbuf = NULL; 1466 } 1467 1468 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1469 1470 ifp->if_opackets++; 1471 progress = 1; 1472 } 1473 1474 #ifdef GEM_DEBUG 1475 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 1476 "GEM_TX_COMPLETION %x", 1477 __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE), 1478 ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) | 1479 GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO), 1480 GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION)); 1481 #endif 1482 1483 if (progress) { 1484 if (sc->sc_txfree == GEM_NTXDESC - 1) 1485 sc->sc_txwin = 0; 1486 1487 /* 1488 * We freed some descriptors, so reset IFF_DRV_OACTIVE 1489 * and restart. 
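 * The watchdog is disarmed only once the TX dirty queue has drained
 * completely.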
1490 */ 1491 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1492 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1493 sc->sc_wdog_timer = 0; 1494 gem_start_locked(ifp); 1495 } 1496 1497 #ifdef GEM_DEBUG 1498 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1499 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1500 #endif 1501 } 1502 1503 #ifdef GEM_RINT_TIMEOUT 1504 static void 1505 gem_rint_timeout(void *arg) 1506 { 1507 struct gem_softc *sc = arg; 1508 1509 GEM_LOCK_ASSERT(sc, MA_OWNED); 1510 1511 gem_rint(sc); 1512 } 1513 #endif 1514 1515 static void 1516 gem_rint(struct gem_softc *sc) 1517 { 1518 struct ifnet *ifp = sc->sc_ifp; 1519 struct mbuf *m; 1520 uint64_t rxstat; 1521 uint32_t rxcomp; 1522 1523 GEM_LOCK_ASSERT(sc, MA_OWNED); 1524 1525 #ifdef GEM_RINT_TIMEOUT 1526 callout_stop(&sc->sc_rx_ch); 1527 #endif 1528 #ifdef GEM_DEBUG 1529 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1530 #endif 1531 1532 /* 1533 * Read the completion register once. This limits 1534 * how long the following loop can execute. 1535 */ 1536 rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION); 1537 #ifdef GEM_DEBUG 1538 CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d", 1539 __func__, sc->sc_rxptr, rxcomp); 1540 #endif 1541 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1542 for (; sc->sc_rxptr != rxcomp;) { 1543 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1544 rxstat = GEM_DMA_READ(sc, 1545 sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1546 1547 if (rxstat & GEM_RD_OWN) { 1548 #ifdef GEM_RINT_TIMEOUT 1549 /* 1550 * The descriptor is still marked as owned, although 1551 * it is supposed to have completed. This has been 1552 * observed on some machines. Just exiting here 1553 * might leave the packet sitting around until another 1554 * one arrives to trigger a new interrupt, which is 1555 * generally undesirable, so set up a timeout. 1556 */ 1557 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1558 gem_rint_timeout, sc); 1559 #endif 1560 m = NULL; 1561 goto kickit; 1562 } 1563 1564 if (rxstat & GEM_RD_BAD_CRC) { 1565 ifp->if_ierrors++; 1566 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1567 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1568 m = NULL; 1569 goto kickit; 1570 } 1571 1572 #ifdef GEM_DEBUG 1573 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1574 printf(" rxsoft %p descriptor %d: ", 1575 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1576 printf("gd_flags: 0x%016llx\t", 1577 (long long)GEM_DMA_READ(sc, 1578 sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 1579 printf("gd_addr: 0x%016llx\n", 1580 (long long)GEM_DMA_READ(sc, 1581 sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 1582 } 1583 #endif 1584 1585 /* 1586 * Allocate a new mbuf cluster. If that fails, we are 1587 * out of memory, and must drop the packet and recycle 1588 * the buffer that's already attached to this descriptor. 1589 */ 1590 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1591 ifp->if_ierrors++; 1592 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1593 m = NULL; 1594 } 1595 1596 kickit: 1597 /* 1598 * Update the RX kick register. This register has to point 1599 * to the descriptor after the last valid one (before the 1600 * current batch) and for optimum performance should be 1601 * incremented in multiples of 4 (the DMA engine fetches/ 1602 * updates descriptors in batches of 4). 
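 * The write is simply skipped while sc_rxptr is not a multiple of four;
 * the descriptors processed so far are handed back on a subsequent
 * iteration.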
1603 */ 1604 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 1605 if ((sc->sc_rxptr % 4) == 0) { 1606 GEM_CDSYNC(sc, 1607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1608 GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, 1609 (sc->sc_rxptr + GEM_NRXDESC - 4) & 1610 GEM_NRXDESC_MASK); 1611 } 1612 1613 if (m == NULL) { 1614 if (rxstat & GEM_RD_OWN) 1615 break; 1616 continue; 1617 } 1618 1619 ifp->if_ipackets++; 1620 m->m_data += ETHER_ALIGN; /* first byte offset */ 1621 m->m_pkthdr.rcvif = ifp; 1622 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 1623 1624 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1625 gem_rxcksum(m, rxstat); 1626 1627 /* Pass it on. */ 1628 GEM_UNLOCK(sc); 1629 (*ifp->if_input)(ifp, m); 1630 GEM_LOCK(sc); 1631 } 1632 1633 #ifdef GEM_DEBUG 1634 CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__, 1635 sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION)); 1636 #endif 1637 } 1638 1639 static int 1640 gem_add_rxbuf(struct gem_softc *sc, int idx) 1641 { 1642 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1643 struct mbuf *m; 1644 bus_dma_segment_t segs[1]; 1645 int error, nsegs; 1646 1647 GEM_LOCK_ASSERT(sc, MA_OWNED); 1648 1649 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1650 if (m == NULL) 1651 return (ENOBUFS); 1652 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1653 1654 #ifdef GEM_DEBUG 1655 /* Bzero the packet to check DMA. */ 1656 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1657 #endif 1658 1659 if (rxs->rxs_mbuf != NULL) { 1660 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1661 BUS_DMASYNC_POSTREAD); 1662 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1663 } 1664 1665 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1666 m, segs, &nsegs, BUS_DMA_NOWAIT); 1667 if (error != 0) { 1668 device_printf(sc->sc_dev, 1669 "cannot load RS DMA map %d, error = %d\n", idx, error); 1670 m_freem(m); 1671 return (error); 1672 } 1673 /* If nsegs is wrong then the stack is corrupt. */ 1674 KASSERT(nsegs == 1, 1675 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1676 rxs->rxs_mbuf = m; 1677 rxs->rxs_paddr = segs[0].ds_addr; 1678 1679 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1680 BUS_DMASYNC_PREREAD); 1681 1682 GEM_INIT_RXDESC(sc, idx); 1683 1684 return (0); 1685 } 1686 1687 static void 1688 gem_eint(struct gem_softc *sc, u_int status) 1689 { 1690 1691 sc->sc_ifp->if_ierrors++; 1692 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1693 gem_reset_rxdma(sc); 1694 return; 1695 } 1696 1697 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 1698 if ((status & GEM_INTR_BERR) != 0) { 1699 if ((sc->sc_flags & GEM_PCI) != 0) 1700 printf(", PCI bus error 0x%x\n", 1701 GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS)); 1702 else 1703 printf(", SBus error 0x%x\n", 1704 GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS)); 1705 } 1706 } 1707 1708 void 1709 gem_intr(void *v) 1710 { 1711 struct gem_softc *sc = v; 1712 uint32_t status, status2; 1713 1714 GEM_LOCK(sc); 1715 status = GEM_BANK1_READ_4(sc, GEM_STATUS); 1716 1717 #ifdef GEM_DEBUG 1718 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 1719 device_get_name(sc->sc_dev), __func__, 1720 (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status); 1721 1722 /* 1723 * PCS interrupts must be cleared, otherwise no traffic is passed! 
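 * (Reading GEM_MII_INTERRUP_STATUS below appears to be what clears
 * them; the only event reported here is a PCS link status change.)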
1724 */ 1725 if ((status & GEM_INTR_PCS) != 0) { 1726 status2 = 1727 GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) | 1728 GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS); 1729 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1730 device_printf(sc->sc_dev, 1731 "%s: PCS link status changed\n", __func__); 1732 } 1733 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1734 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS); 1735 if ((status2 & GEM_MAC_PAUSED) != 0) 1736 device_printf(sc->sc_dev, 1737 "%s: PAUSE received (PAUSE time %d slots)\n", 1738 __func__, GEM_MAC_PAUSE_TIME(status2)); 1739 if ((status2 & GEM_MAC_PAUSE) != 0) 1740 device_printf(sc->sc_dev, 1741 "%s: transited to PAUSE state\n", __func__); 1742 if ((status2 & GEM_MAC_RESUME) != 0) 1743 device_printf(sc->sc_dev, 1744 "%s: transited to non-PAUSE state\n", __func__); 1745 } 1746 if ((status & GEM_INTR_MIF) != 0) 1747 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1748 #endif 1749 1750 if (__predict_false(status & 1751 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1752 gem_eint(sc, status); 1753 1754 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1755 gem_rint(sc); 1756 1757 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1758 gem_tint(sc); 1759 1760 if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) { 1761 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS); 1762 if ((status2 & 1763 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 1764 GEM_MAC_TX_PEAK_EXP)) != 0) 1765 device_printf(sc->sc_dev, 1766 "MAC TX fault, status %x\n", status2); 1767 if ((status2 & 1768 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) { 1769 sc->sc_ifp->if_oerrors++; 1770 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1771 gem_init_locked(sc); 1772 } 1773 } 1774 if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) { 1775 status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS); 1776 /* 1777 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 1778 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 1779 * silicon bug so handle them silently. Moreover, it's 1780 * likely that the receiver has hung so we reset it. 1781 */ 1782 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1783 sc->sc_ifp->if_ierrors++; 1784 gem_reset_rxdma(sc); 1785 } else if ((status2 & 1786 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1787 device_printf(sc->sc_dev, 1788 "MAC RX fault, status %x\n", status2); 1789 } 1790 GEM_UNLOCK(sc); 1791 } 1792 1793 static int 1794 gem_watchdog(struct gem_softc *sc) 1795 { 1796 struct ifnet *ifp = sc->sc_ifp; 1797 1798 GEM_LOCK_ASSERT(sc, MA_OWNED); 1799 1800 #ifdef GEM_DEBUG 1801 CTR4(KTR_GEM, 1802 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1803 __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG), 1804 GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS), 1805 GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG)); 1806 CTR4(KTR_GEM, 1807 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1808 __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG), 1809 GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS), 1810 GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG)); 1811 #endif 1812 1813 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1814 return (0); 1815 1816 if ((sc->sc_flags & GEM_LINK) != 0) 1817 device_printf(sc->sc_dev, "device timeout\n"); 1818 else if (bootverbose) 1819 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1820 ++ifp->if_oerrors; 1821 1822 /* Try to get more packets going. 
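 * Clearing IFF_DRV_RUNNING first is required, as gem_init_locked()
 * returns early while that flag is still set.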
	 */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	gem_init_locked(sc);
	gem_start_locked(ifp);
	return (EJUSTRETURN);
}

static void
gem_mifinit(struct gem_softc *sc)
{

	/* Configure the MIF in frame mode. */
	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
	GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * MII interface
 *
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_BANK1_READ_4(sc, reg));
	}

	/* Construct the frame command. */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (GEM_BANK1_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			if ((val & GEM_MII_CONTROL_RESET) == 0)
				break;
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
			    GEM_MII_CONTROL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
			GEM_BANK1_BARRIER(sc, GEM_MII_ANAR, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
			    BUS_SPACE_BARRIER_WRITE);
			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		GEM_BANK1_WRITE_4(sc, reg, val);
		GEM_BANK1_BARRIER(sc, reg, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

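/*
 * Summary derived from the code below: gem_mii_statchg() is the MII
 * status change callback.  It is called with the driver lock held (see
 * the GEM_LOCK_ASSERT()), caches the link state in sc_flags and then
 * reprograms the TX/RX MAC, pause, slot time and XIF configuration to
 * match the currently negotiated media before re-enabling the MACs.
 */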
void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling of the RX and TX MAC goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);

	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
			/* External MII needs echo disable if half duplex. */
			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
			    IFM_FDX) == 0)
				v |= GEM_MAC_XIF_ECHO_DISABL;
		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}
int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

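/*
 * Worked example for the hash computation in gem_setladrf() below, using
 * a made-up CRC value purely for illustration: if ether_crc32_le() yielded
 * 0xb71c32e9 for some group address, then crc >> 24 == 0xb7, so that
 * address selects word 0xb7 >> 4 == 11 and bit 15 - (0xb7 & 15) == 8 of
 * the 256-bit logical address filter, i.e. hash[11] |= 1 << 8.
 */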
static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast)
	 * and the hash filter.  Depending on the case, the right bit will be
	 * enabled below.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
	    0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	if_maddr_runlock(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		GEM_BANK1_WRITE_4(sc,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

chipit:
	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
}