/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading. However,
 * the hardware does not compensate the checksum of UDP datagrams, which
 * can yield a checksum of 0x0. As a safeguard, UDP checksum offload is
 * disabled by default. It can be reactivated by setting the special link
 * option link0 with ifconfig(8).
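 *
 * For example (using a hypothetical interface name of gem0), running
 *	ifconfig gem0 link0
 * re-enables UDP transmit checksum offload (provided IFCAP_TXCSUM is
 * enabled), and
 *	ifconfig gem0 -link0
 * disables it again; see the SIOCSIFFLAGS case in gem_ioctl() below.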
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)

static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
		    uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static __inline void gem_txcksum(struct gem_softc *sc, struct mbuf *m,
    uint64_t *cflags);
static int	gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

int
gem_attach(struct gem_softc *sc)
{
	struct gem_txsoft *txs;
	struct ifnet *ifp;
	int error, i;
	uint32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped.
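	 * gem_reset() below asserts GEM_RESET_RX and GEM_RESET_TX, so the
	 * chip is quiescent before any DMA resources are set up.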
*/ 161 ifp->if_softc = sc; 162 gem_reset(sc); 163 164 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 165 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 166 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 167 NULL, &sc->sc_pdmatag); 168 if (error) 169 goto fail_ifnet; 170 171 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 172 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 173 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 174 if (error) 175 goto fail_ptag; 176 177 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 178 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 179 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 180 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 181 if (error) 182 goto fail_rtag; 183 184 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 185 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 186 sizeof(struct gem_control_data), 1, 187 sizeof(struct gem_control_data), 0, 188 NULL, NULL, &sc->sc_cdmatag); 189 if (error) 190 goto fail_ttag; 191 192 /* 193 * Allocate the control data structures, create and load the 194 * DMA map for it. 195 */ 196 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 197 (void **)&sc->sc_control_data, 198 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 199 &sc->sc_cddmamap))) { 200 device_printf(sc->sc_dev, 201 "unable to allocate control data, error = %d\n", error); 202 goto fail_ctag; 203 } 204 205 sc->sc_cddma = 0; 206 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 207 sc->sc_control_data, sizeof(struct gem_control_data), 208 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 209 device_printf(sc->sc_dev, 210 "unable to load control data DMA map, error = %d\n", 211 error); 212 goto fail_cmem; 213 } 214 215 /* 216 * Initialize the transmit job descriptors. 217 */ 218 STAILQ_INIT(&sc->sc_txfreeq); 219 STAILQ_INIT(&sc->sc_txdirtyq); 220 221 /* 222 * Create the transmit buffer DMA maps. 223 */ 224 error = ENOMEM; 225 for (i = 0; i < GEM_TXQUEUELEN; i++) { 226 txs = &sc->sc_txsoft[i]; 227 txs->txs_mbuf = NULL; 228 txs->txs_ndescs = 0; 229 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 230 &txs->txs_dmamap)) != 0) { 231 device_printf(sc->sc_dev, 232 "unable to create TX DMA map %d, error = %d\n", 233 i, error); 234 goto fail_txd; 235 } 236 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 237 } 238 239 /* 240 * Create the receive buffer DMA maps. 241 */ 242 for (i = 0; i < GEM_NRXDESC; i++) { 243 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 244 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 245 device_printf(sc->sc_dev, 246 "unable to create RX DMA map %d, error = %d\n", 247 i, error); 248 goto fail_rxd; 249 } 250 sc->sc_rxsoft[i].rxs_mbuf = NULL; 251 } 252 253 /* Bad things will happen when touching this register on ERI. */ 254 if (sc->sc_variant != GEM_SUN_ERI) 255 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 256 GEM_MII_DATAPATH_MII); 257 258 gem_mifinit(sc); 259 260 /* 261 * Look for an external PHY. 262 */ 263 error = ENXIO; 264 v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG); 265 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 266 v |= GEM_MIF_CONFIG_PHY_SEL; 267 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 268 switch (sc->sc_variant) { 269 case GEM_SUN_ERI: 270 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 271 break; 272 default: 273 sc->sc_phyad = -1; 274 break; 275 } 276 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 277 gem_mediachange, gem_mediastatus); 278 } 279 280 /* 281 * Fall back on an internal PHY if no external PHY was found. 
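	 * (The probing above uses GEM_MIF_CONFIG_MDI1 to detect a PHY on
	 * the external MDIO bus and GEM_MIF_CONFIG_MDI0 to detect one on
	 * the internal bus; GEM_MIF_CONFIG_PHY_SEL switches the MIF
	 * between the two.)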
282 */ 283 if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) { 284 v &= ~GEM_MIF_CONFIG_PHY_SEL; 285 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 286 switch (sc->sc_variant) { 287 case GEM_SUN_ERI: 288 case GEM_APPLE_K2_GMAC: 289 sc->sc_phyad = GEM_PHYAD_INTERNAL; 290 break; 291 case GEM_APPLE_GMAC: 292 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 293 break; 294 default: 295 sc->sc_phyad = -1; 296 break; 297 } 298 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 299 gem_mediachange, gem_mediastatus); 300 } 301 302 /* 303 * Try the external PCS SERDES if we didn't find any PHYs. 304 */ 305 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 306 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 307 GEM_MII_DATAPATH_SERDES); 308 bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL, 309 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 310 bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 311 GEM_MII_CONFIG_ENABLE); 312 sc->sc_flags |= GEM_SERDES; 313 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 314 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 315 gem_mediachange, gem_mediastatus); 316 } 317 318 if (error != 0) { 319 device_printf(sc->sc_dev, "PHY probe failed: %d\n", error); 320 goto fail_rxd; 321 } 322 sc->sc_mii = device_get_softc(sc->sc_miibus); 323 324 /* 325 * From this point forward, the attachment cannot fail. A failure 326 * before this point releases all resources that may have been 327 * allocated. 328 */ 329 330 /* Get RX FIFO size */ 331 sc->sc_rxfifosize = 64 * 332 bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE); 333 334 /* Get TX FIFO size */ 335 v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE); 336 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 337 sc->sc_rxfifosize / 1024, v / 16); 338 339 sc->sc_csum_features = GEM_CSUM_FEATURES; 340 /* Initialize ifnet structure. */ 341 ifp->if_softc = sc; 342 if_initname(ifp, device_get_name(sc->sc_dev), 343 device_get_unit(sc->sc_dev)); 344 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 345 ifp->if_start = gem_start; 346 ifp->if_ioctl = gem_ioctl; 347 ifp->if_init = gem_init; 348 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN); 349 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN; 350 IFQ_SET_READY(&ifp->if_snd); 351 352 /* Attach the interface. */ 353 ether_ifattach(ifp, sc->sc_enaddr); 354 355 /* 356 * Tell the upper layer(s) we support long frames/checksum offloads. 357 */ 358 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 359 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 360 ifp->if_hwassist |= sc->sc_csum_features; 361 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 362 363 return (0); 364 365 /* 366 * Free any resources we've allocated during the failed attach 367 * attempt. Do this in reverse order and fall through. 
368 */ 369 fail_rxd: 370 for (i = 0; i < GEM_NRXDESC; i++) 371 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 372 bus_dmamap_destroy(sc->sc_rdmatag, 373 sc->sc_rxsoft[i].rxs_dmamap); 374 fail_txd: 375 for (i = 0; i < GEM_TXQUEUELEN; i++) 376 if (sc->sc_txsoft[i].txs_dmamap != NULL) 377 bus_dmamap_destroy(sc->sc_tdmatag, 378 sc->sc_txsoft[i].txs_dmamap); 379 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 380 fail_cmem: 381 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 382 sc->sc_cddmamap); 383 fail_ctag: 384 bus_dma_tag_destroy(sc->sc_cdmatag); 385 fail_ttag: 386 bus_dma_tag_destroy(sc->sc_tdmatag); 387 fail_rtag: 388 bus_dma_tag_destroy(sc->sc_rdmatag); 389 fail_ptag: 390 bus_dma_tag_destroy(sc->sc_pdmatag); 391 fail_ifnet: 392 if_free(ifp); 393 return (error); 394 } 395 396 void 397 gem_detach(struct gem_softc *sc) 398 { 399 struct ifnet *ifp = sc->sc_ifp; 400 int i; 401 402 GEM_LOCK(sc); 403 gem_stop(ifp, 1); 404 GEM_UNLOCK(sc); 405 callout_drain(&sc->sc_tick_ch); 406 #ifdef GEM_RINT_TIMEOUT 407 callout_drain(&sc->sc_rx_ch); 408 #endif 409 ether_ifdetach(ifp); 410 if_free(ifp); 411 device_delete_child(sc->sc_dev, sc->sc_miibus); 412 413 for (i = 0; i < GEM_NRXDESC; i++) 414 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 415 bus_dmamap_destroy(sc->sc_rdmatag, 416 sc->sc_rxsoft[i].rxs_dmamap); 417 for (i = 0; i < GEM_TXQUEUELEN; i++) 418 if (sc->sc_txsoft[i].txs_dmamap != NULL) 419 bus_dmamap_destroy(sc->sc_tdmatag, 420 sc->sc_txsoft[i].txs_dmamap); 421 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 422 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 423 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 424 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 425 sc->sc_cddmamap); 426 bus_dma_tag_destroy(sc->sc_cdmatag); 427 bus_dma_tag_destroy(sc->sc_tdmatag); 428 bus_dma_tag_destroy(sc->sc_rdmatag); 429 bus_dma_tag_destroy(sc->sc_pdmatag); 430 } 431 432 void 433 gem_suspend(struct gem_softc *sc) 434 { 435 struct ifnet *ifp = sc->sc_ifp; 436 437 GEM_LOCK(sc); 438 gem_stop(ifp, 0); 439 GEM_UNLOCK(sc); 440 } 441 442 void 443 gem_resume(struct gem_softc *sc) 444 { 445 struct ifnet *ifp = sc->sc_ifp; 446 447 GEM_LOCK(sc); 448 /* 449 * On resume all registers have to be initialized again like 450 * after power-on. 451 */ 452 sc->sc_flags &= ~GEM_INITED; 453 if (ifp->if_flags & IFF_UP) 454 gem_init_locked(sc); 455 GEM_UNLOCK(sc); 456 } 457 458 static __inline void 459 gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags) 460 { 461 char *p; 462 struct ip *ip; 463 struct mbuf *m0; 464 uint64_t offset, offset2; 465 466 m0 = m; 467 offset = sizeof(struct ip) + ETHER_HDR_LEN; 468 for(; m && m->m_len == 0; m = m->m_next) 469 ; 470 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 471 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n", 472 __func__); 473 /* Checksum will be corrupted. */ 474 m = m0; 475 goto sendit; 476 } 477 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) { 478 if (m->m_len != ETHER_HDR_LEN) { 479 device_printf(sc->sc_dev, 480 "%s: m_len != ETHER_HDR_LEN\n", __func__); 481 /* Checksum will be corrupted. */ 482 m = m0; 483 goto sendit; 484 } 485 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 486 ; 487 if (m == NULL) { 488 /* Checksum will be corrupted. 
*/ 489 m = m0; 490 goto sendit; 491 } 492 ip = mtod(m, struct ip *); 493 } else { 494 p = mtod(m, uint8_t *); 495 p += ETHER_HDR_LEN; 496 ip = (struct ip *)p; 497 } 498 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 499 500 sendit: 501 offset2 = m->m_pkthdr.csum_data; 502 *cflags = offset << GEM_TD_CXSUM_STARTSHFT; 503 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT); 504 *cflags |= GEM_TD_CXSUM_ENABLE; 505 } 506 507 static __inline void 508 gem_rxcksum(struct mbuf *m, uint64_t flags) 509 { 510 struct ether_header *eh; 511 struct ip *ip; 512 struct udphdr *uh; 513 uint16_t *opts; 514 int32_t hlen, len, pktlen; 515 uint32_t temp32; 516 uint16_t cksum; 517 518 pktlen = m->m_pkthdr.len; 519 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 520 return; 521 eh = mtod(m, struct ether_header *); 522 if (eh->ether_type != htons(ETHERTYPE_IP)) 523 return; 524 ip = (struct ip *)(eh + 1); 525 if (ip->ip_v != IPVERSION) 526 return; 527 528 hlen = ip->ip_hl << 2; 529 pktlen -= sizeof(struct ether_header); 530 if (hlen < sizeof(struct ip)) 531 return; 532 if (ntohs(ip->ip_len) < hlen) 533 return; 534 if (ntohs(ip->ip_len) != pktlen) 535 return; 536 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 537 return; /* Cannot handle fragmented packet. */ 538 539 switch (ip->ip_p) { 540 case IPPROTO_TCP: 541 if (pktlen < (hlen + sizeof(struct tcphdr))) 542 return; 543 break; 544 case IPPROTO_UDP: 545 if (pktlen < (hlen + sizeof(struct udphdr))) 546 return; 547 uh = (struct udphdr *)((uint8_t *)ip + hlen); 548 if (uh->uh_sum == 0) 549 return; /* no checksum */ 550 break; 551 default: 552 return; 553 } 554 555 cksum = ~(flags & GEM_RD_CHECKSUM); 556 /* checksum fixup for IP options */ 557 len = hlen - sizeof(struct ip); 558 if (len > 0) { 559 opts = (uint16_t *)(ip + 1); 560 for (; len > 0; len -= sizeof(uint16_t), opts++) { 561 temp32 = cksum - *opts; 562 temp32 = (temp32 >> 16) + (temp32 & 65535); 563 cksum = temp32 & 65535; 564 } 565 } 566 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 567 m->m_pkthdr.csum_data = cksum; 568 } 569 570 static void 571 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 572 { 573 struct gem_softc *sc = xsc; 574 575 if (error != 0) 576 return; 577 if (nsegs != 1) 578 panic("%s: bad control buffer segment count", __func__); 579 sc->sc_cddma = segs[0].ds_addr; 580 } 581 582 static void 583 gem_tick(void *arg) 584 { 585 struct gem_softc *sc = arg; 586 struct ifnet *ifp; 587 588 GEM_LOCK_ASSERT(sc, MA_OWNED); 589 590 ifp = sc->sc_ifp; 591 /* 592 * Unload collision counters. 593 */ 594 ifp->if_collisions += 595 bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) + 596 bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) + 597 bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) + 598 bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT); 599 600 /* 601 * then clear the hardware counters. 
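	 * The counters are cleared after every read, so each tick only
	 * adds the last second's worth of collisions to if_collisions.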
602 */ 603 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 604 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 605 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 606 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 607 608 mii_tick(sc->sc_mii); 609 610 if (gem_watchdog(sc) == EJUSTRETURN) 611 return; 612 613 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 614 } 615 616 static int 617 gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 618 { 619 int i; 620 uint32_t reg; 621 622 for (i = TRIES; i--; DELAY(100)) { 623 reg = bus_read_4(sc->sc_res[0], r); 624 if ((reg & clr) == 0 && (reg & set) == set) 625 return (1); 626 } 627 return (0); 628 } 629 630 static void 631 gem_reset(sc) 632 struct gem_softc *sc; 633 { 634 635 #ifdef GEM_DEBUG 636 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 637 #endif 638 gem_reset_rx(sc); 639 gem_reset_tx(sc); 640 641 /* Do a full reset. */ 642 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 643 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 644 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 645 device_printf(sc->sc_dev, "cannot reset device\n"); 646 } 647 648 static void 649 gem_rxdrain(struct gem_softc *sc) 650 { 651 struct gem_rxsoft *rxs; 652 int i; 653 654 for (i = 0; i < GEM_NRXDESC; i++) { 655 rxs = &sc->sc_rxsoft[i]; 656 if (rxs->rxs_mbuf != NULL) { 657 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 658 BUS_DMASYNC_POSTREAD); 659 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 660 m_freem(rxs->rxs_mbuf); 661 rxs->rxs_mbuf = NULL; 662 } 663 } 664 } 665 666 static void 667 gem_stop(struct ifnet *ifp, int disable) 668 { 669 struct gem_softc *sc = ifp->if_softc; 670 struct gem_txsoft *txs; 671 672 #ifdef GEM_DEBUG 673 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 674 #endif 675 676 callout_stop(&sc->sc_tick_ch); 677 #ifdef GEM_RINT_TIMEOUT 678 callout_stop(&sc->sc_rx_ch); 679 #endif 680 681 /* XXX should we reset these instead? */ 682 gem_disable_tx(sc); 683 gem_disable_rx(sc); 684 685 /* 686 * Release any queued transmit buffers. 687 */ 688 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 689 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 690 if (txs->txs_ndescs != 0) { 691 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 692 BUS_DMASYNC_POSTWRITE); 693 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 694 if (txs->txs_mbuf != NULL) { 695 m_freem(txs->txs_mbuf); 696 txs->txs_mbuf = NULL; 697 } 698 } 699 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 700 } 701 702 if (disable) 703 gem_rxdrain(sc); 704 705 /* 706 * Mark the interface down and cancel the watchdog timer. 707 */ 708 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 709 sc->sc_flags &= ~GEM_LINK; 710 sc->sc_wdog_timer = 0; 711 } 712 713 static int 714 gem_reset_rx(struct gem_softc *sc) 715 { 716 717 /* 718 * Resetting while DMA is in progress can cause a bus hang, so we 719 * disable DMA first. 
720 */ 721 gem_disable_rx(sc); 722 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0); 723 bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 724 if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 725 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 726 727 /* Finally, reset the ERX */ 728 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX); 729 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 730 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 731 device_printf(sc->sc_dev, "cannot reset receiver\n"); 732 return (1); 733 } 734 return (0); 735 } 736 737 /* 738 * Reset the receiver DMA engine. 739 * 740 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 741 * etc in order to reset the receiver DMA engine only and not do a full 742 * reset which amongst others also downs the link and clears the FIFOs. 743 */ 744 static void 745 gem_reset_rxdma(struct gem_softc *sc) 746 { 747 int i; 748 749 if (gem_reset_rx(sc) != 0) 750 return (gem_init_locked(sc)); 751 for (i = 0; i < GEM_NRXDESC; i++) 752 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 753 GEM_UPDATE_RXDESC(sc, i); 754 sc->sc_rxptr = 0; 755 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 756 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 757 758 /* NOTE: we use only 32-bit DMA addresses here. */ 759 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0); 760 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 761 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4); 762 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 763 gem_ringsize(GEM_NRXDESC /* XXX */) | 764 ((ETHER_HDR_LEN + sizeof(struct ip)) << 765 GEM_RX_CONFIG_CXM_START_SHFT) | 766 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 767 (2 << GEM_RX_CONFIG_FBOFF_SHFT)); 768 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, 769 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 770 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH, 771 (3 * sc->sc_rxfifosize / 256) | 772 ((sc->sc_rxfifosize / 256) << 12)); 773 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 774 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | 775 GEM_RX_CONFIG_RXDMA_EN); 776 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK, 777 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 778 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 779 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | 780 GEM_MAC_RX_ENABLE); 781 } 782 783 static int 784 gem_reset_tx(struct gem_softc *sc) 785 { 786 787 /* 788 * Resetting while DMA is in progress can cause a bus hang, so we 789 * disable DMA first. 
790 */ 791 gem_disable_tx(sc); 792 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0); 793 bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 794 if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 795 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 796 797 /* Finally, reset the ETX */ 798 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX); 799 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 800 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 801 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 802 return (1); 803 } 804 return (0); 805 } 806 807 static int 808 gem_disable_rx(struct gem_softc *sc) 809 { 810 uint32_t cfg; 811 812 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 813 cfg &= ~GEM_MAC_RX_ENABLE; 814 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg); 815 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 816 BUS_SPACE_BARRIER_WRITE); 817 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 818 } 819 820 /* 821 * disable transmitter. 822 */ 823 static int 824 gem_disable_tx(struct gem_softc *sc) 825 { 826 uint32_t cfg; 827 828 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG); 829 cfg &= ~GEM_MAC_TX_ENABLE; 830 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg); 831 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4, 832 BUS_SPACE_BARRIER_WRITE); 833 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 834 } 835 836 static int 837 gem_meminit(sc) 838 struct gem_softc *sc; 839 { 840 struct gem_rxsoft *rxs; 841 int error, i; 842 843 /* 844 * Initialize the transmit descriptor ring. 845 */ 846 for (i = 0; i < GEM_NTXDESC; i++) { 847 sc->sc_txdescs[i].gd_flags = 0; 848 sc->sc_txdescs[i].gd_addr = 0; 849 } 850 sc->sc_txfree = GEM_MAXTXFREE; 851 sc->sc_txnext = 0; 852 sc->sc_txwin = 0; 853 854 /* 855 * Initialize the receive descriptor and receive job 856 * descriptor rings. 857 */ 858 for (i = 0; i < GEM_NRXDESC; i++) { 859 rxs = &sc->sc_rxsoft[i]; 860 if (rxs->rxs_mbuf == NULL) { 861 if ((error = gem_add_rxbuf(sc, i)) != 0) { 862 device_printf(sc->sc_dev, 863 "unable to allocate or map RX buffer %d, " 864 "error = %d\n", i, error); 865 /* 866 * XXX we should attempt to run with fewer 867 * receive buffers instead of just failing. 868 */ 869 gem_rxdrain(sc); 870 return (1); 871 } 872 } else 873 GEM_INIT_RXDESC(sc, i); 874 } 875 sc->sc_rxptr = 0; 876 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 877 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 878 879 return (0); 880 } 881 882 static u_int 883 gem_ringsize(u_int sz) 884 { 885 886 switch (sz) { 887 case 32: 888 return (GEM_RING_SZ_32); 889 case 64: 890 return (GEM_RING_SZ_64); 891 case 128: 892 return (GEM_RING_SZ_128); 893 case 256: 894 return (GEM_RING_SZ_256); 895 case 512: 896 return (GEM_RING_SZ_512); 897 case 1024: 898 return (GEM_RING_SZ_1024); 899 case 2048: 900 return (GEM_RING_SZ_2048); 901 case 4096: 902 return (GEM_RING_SZ_4096); 903 case 8192: 904 return (GEM_RING_SZ_8192); 905 default: 906 printf("%s: invalid ring size %d\n", __func__, sz); 907 return (GEM_RING_SZ_32); 908 } 909 } 910 911 static void 912 gem_init(void *xsc) 913 { 914 struct gem_softc *sc = xsc; 915 916 GEM_LOCK(sc); 917 gem_init_locked(sc); 918 GEM_UNLOCK(sc); 919 } 920 921 /* 922 * Initialization of interface; set up initialization block 923 * and transmit/receive descriptor rings. 
924 */ 925 static void 926 gem_init_locked(struct gem_softc *sc) 927 { 928 struct ifnet *ifp = sc->sc_ifp; 929 uint32_t v; 930 931 GEM_LOCK_ASSERT(sc, MA_OWNED); 932 933 #ifdef GEM_DEBUG 934 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 935 __func__); 936 #endif 937 /* 938 * Initialization sequence. The numbered steps below correspond 939 * to the sequence outlined in section 6.3.5.1 in the Ethernet 940 * Channel Engine manual (part of the PCIO manual). 941 * See also the STP2002-STQ document from Sun Microsystems. 942 */ 943 944 /* step 1 & 2. Reset the Ethernet Channel. */ 945 gem_stop(sc->sc_ifp, 0); 946 gem_reset(sc); 947 #ifdef GEM_DEBUG 948 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 949 __func__); 950 #endif 951 952 /* Re-initialize the MIF. */ 953 gem_mifinit(sc); 954 955 /* step 3. Setup data structures in host memory. */ 956 if (gem_meminit(sc) != 0) 957 return; 958 959 /* step 4. TX MAC registers & counters */ 960 gem_init_regs(sc); 961 962 /* step 5. RX MAC registers & counters */ 963 gem_setladrf(sc); 964 965 /* step 6 & 7. Program Descriptor Ring Base Addresses. */ 966 /* NOTE: we use only 32-bit DMA addresses here. */ 967 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0); 968 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 969 970 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0); 971 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 972 #ifdef GEM_DEBUG 973 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", 974 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 975 #endif 976 977 /* step 8. Global Configuration & Interrupt Mask */ 978 bus_write_4(sc->sc_res[0], GEM_INTMASK, 979 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 980 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 981 GEM_INTR_BERR 982 #ifdef GEM_DEBUG 983 | GEM_INTR_PCS | GEM_INTR_MIF 984 #endif 985 )); 986 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK, 987 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 988 bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK, 989 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP); 990 #ifdef GEM_DEBUG 991 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 992 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 993 #else 994 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 995 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 996 #endif 997 998 /* step 9. ETX Configuration: use mostly default values. */ 999 1000 /* Enable DMA. */ 1001 v = gem_ringsize(GEM_NTXDESC /* XXX */); 1002 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 1003 v | GEM_TX_CONFIG_TXDMA_EN | 1004 ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH)); 1005 1006 /* step 10. ERX Configuration */ 1007 1008 /* Encode Receive Descriptor ring size. */ 1009 v = gem_ringsize(GEM_NRXDESC /* XXX */); 1010 /* RX TCP/UDP checksum offset */ 1011 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 1012 GEM_RX_CONFIG_CXM_START_SHFT); 1013 1014 /* Enable DMA. */ 1015 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 1016 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 1017 (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN); 1018 1019 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, 1020 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 1021 1022 /* 1023 * The following value is for an OFF Threshold of about 3/4 full 1024 * and an ON Threshold of 1/4 full. 1025 */ 1026 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH, 1027 (3 * sc->sc_rxfifosize / 256) | 1028 ((sc->sc_rxfifosize / 256) << 12)); 1029 1030 /* step 11. Configure Media. */ 1031 1032 /* step 12. 
RX_MAC Configuration Register */ 1033 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 1034 v |= GEM_MAC_RX_STRIP_CRC; 1035 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0); 1036 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 1037 BUS_SPACE_BARRIER_WRITE); 1038 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1039 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 1040 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v); 1041 1042 /* step 14. Issue Transmit Pending command. */ 1043 1044 /* step 15. Give the reciever a swift kick. */ 1045 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4); 1046 1047 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1048 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1049 sc->sc_ifflags = ifp->if_flags; 1050 1051 sc->sc_flags &= ~GEM_LINK; 1052 mii_mediachg(sc->sc_mii); 1053 1054 /* Start the one second timer. */ 1055 sc->sc_wdog_timer = 0; 1056 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1057 } 1058 1059 static int 1060 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 1061 { 1062 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1063 struct gem_txsoft *txs; 1064 struct mbuf *m; 1065 uint64_t cflags, flags; 1066 int error, nexttx, nsegs, seg; 1067 1068 /* Get a work queue entry. */ 1069 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1070 /* Ran out of descriptors. */ 1071 return (ENOBUFS); 1072 } 1073 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1074 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1075 if (error == EFBIG) { 1076 m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS); 1077 if (m == NULL) { 1078 m_freem(*m_head); 1079 *m_head = NULL; 1080 return (ENOBUFS); 1081 } 1082 *m_head = m; 1083 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1084 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1085 BUS_DMA_NOWAIT); 1086 if (error != 0) { 1087 m_freem(*m_head); 1088 *m_head = NULL; 1089 return (error); 1090 } 1091 } else if (error != 0) 1092 return (error); 1093 if (nsegs == 0) { 1094 m_freem(*m_head); 1095 *m_head = NULL; 1096 return (EIO); 1097 } 1098 1099 /* 1100 * Ensure we have enough descriptors free to describe 1101 * the packet. Note, we always reserve one descriptor 1102 * at the end of the ring as a termination point, in 1103 * order to prevent wrap-around. 1104 */ 1105 if (nsegs > sc->sc_txfree - 1) { 1106 txs->txs_ndescs = 0; 1107 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1108 return (ENOBUFS); 1109 } 1110 1111 flags = cflags = 0; 1112 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 1113 gem_txcksum(sc, *m_head, &cflags); 1114 1115 txs->txs_ndescs = nsegs; 1116 txs->txs_firstdesc = sc->sc_txnext; 1117 nexttx = txs->txs_firstdesc; 1118 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1119 #ifdef GEM_DEBUG 1120 CTR6(KTR_GEM, 1121 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1122 __func__, seg, nexttx, txsegs[seg].ds_len, 1123 txsegs[seg].ds_addr, 1124 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr)); 1125 #endif 1126 sc->sc_txdescs[nexttx].gd_addr = 1127 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr); 1128 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1129 ("%s: segment size too large!", __func__)); 1130 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1131 sc->sc_txdescs[nexttx].gd_flags = 1132 GEM_DMA_WRITE(sc, flags | cflags); 1133 txs->txs_lastdesc = nexttx; 1134 } 1135 1136 /* Set EOP on the last descriptor. 
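	 * SOP (GEM_TD_START_OF_PACKET) is set on the first descriptor just
	 * below.  GEM_TD_INTERRUPT_ME is also requested there, but only
	 * for roughly every (2/3 * GEM_NTXSEGS)th packet (tracked via
	 * sc_txwin), so TX completion interrupts are throttled rather
	 * than taken per packet.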
*/ 1137 #ifdef GEM_DEBUG 1138 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 1139 __func__, seg, nexttx); 1140 #endif 1141 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1142 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET); 1143 1144 /* Lastly set SOP on the first descriptor. */ 1145 #ifdef GEM_DEBUG 1146 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 1147 __func__, seg, nexttx); 1148 #endif 1149 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1150 sc->sc_txwin = 0; 1151 flags |= GEM_TD_INTERRUPT_ME; 1152 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1153 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME | 1154 GEM_TD_START_OF_PACKET); 1155 } else 1156 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1157 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET); 1158 1159 /* Sync the DMA map. */ 1160 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1161 BUS_DMASYNC_PREWRITE); 1162 1163 #ifdef GEM_DEBUG 1164 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1165 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1166 txs->txs_ndescs); 1167 #endif 1168 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1169 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1170 txs->txs_mbuf = *m_head; 1171 1172 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1173 sc->sc_txfree -= txs->txs_ndescs; 1174 1175 return (0); 1176 } 1177 1178 static void 1179 gem_init_regs(struct gem_softc *sc) 1180 { 1181 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1182 1183 /* These registers are not cleared on reset. */ 1184 if ((sc->sc_flags & GEM_INITED) == 0) { 1185 /* magic values */ 1186 bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0); 1187 bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8); 1188 bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4); 1189 1190 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, 1191 ETHER_MIN_LEN); 1192 /* max frame and max burst size */ 1193 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME, 1194 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1195 1196 bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7); 1197 bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4); 1198 bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10); 1199 /* dunno... */ 1200 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088); 1201 bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED, 1202 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1203 1204 /* secondary MAC address: 0:0:0:0:0:0 */ 1205 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0); 1206 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0); 1207 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0); 1208 1209 /* MAC control address: 01:80:c2:00:00:01 */ 1210 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001); 1211 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200); 1212 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180); 1213 1214 /* MAC filter address: 0:0:0:0:0:0 */ 1215 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0); 1216 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0); 1217 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0); 1218 1219 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0); 1220 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0); 1221 1222 sc->sc_flags |= GEM_INITED; 1223 } 1224 1225 /* Counters need to be zeroed. 
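	 * The first four are the collision counters that gem_tick()
	 * accumulates into if_collisions once a second, so start them
	 * from a known state.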
*/ 1226 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 1227 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 1228 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 1229 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 1230 bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0); 1231 bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0); 1232 bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0); 1233 bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0); 1234 bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0); 1235 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0); 1236 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0); 1237 1238 /* Set XOFF PAUSE time. */ 1239 bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1240 1241 /* 1242 * Set the internal arbitration to "infinite" bursts of the 1243 * maximum length of 31 * 64 bytes so DMA transfers aren't 1244 * split up in cache line size chunks. This greatly improves 1245 * especially RX performance. 1246 * Enable silicon bug workarounds for the Apple variants. 1247 */ 1248 bus_write_4(sc->sc_res[0], GEM_CONFIG, 1249 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 1250 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? 1251 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 1252 1253 /* Set the station address. */ 1254 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, 1255 (laddr[4] << 8) | laddr[5]); 1256 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, 1257 (laddr[2] << 8) | laddr[3]); 1258 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, 1259 (laddr[0] << 8) | laddr[1]); 1260 1261 /* Enable MII outputs. */ 1262 bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, 1263 GEM_MAC_XIF_TX_MII_ENA); 1264 } 1265 1266 static void 1267 gem_start(struct ifnet *ifp) 1268 { 1269 struct gem_softc *sc = ifp->if_softc; 1270 1271 GEM_LOCK(sc); 1272 gem_start_locked(ifp); 1273 GEM_UNLOCK(sc); 1274 } 1275 1276 static void 1277 gem_start_locked(struct ifnet *ifp) 1278 { 1279 struct gem_softc *sc = ifp->if_softc; 1280 struct mbuf *m; 1281 int ntx; 1282 1283 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1284 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1285 return; 1286 1287 #ifdef GEM_DEBUG 1288 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1289 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1290 sc->sc_txnext); 1291 #endif 1292 ntx = 0; 1293 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1294 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1295 if (m == NULL) 1296 break; 1297 if (gem_load_txmbuf(sc, &m) != 0) { 1298 if (m == NULL) 1299 break; 1300 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1301 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1302 break; 1303 } 1304 ntx++; 1305 /* Kick the transmitter. */ 1306 #ifdef GEM_DEBUG 1307 CTR3(KTR_GEM, "%s: %s: kicking TX %d", 1308 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1309 #endif 1310 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1311 bus_write_4(sc->sc_res[0], GEM_TX_KICK, sc->sc_txnext); 1312 1313 BPF_MTAP(ifp, m); 1314 } 1315 1316 if (ntx > 0) { 1317 #ifdef GEM_DEBUG 1318 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1319 device_get_name(sc->sc_dev), sc->sc_txnext); 1320 #endif 1321 1322 /* Set a watchdog timer in case the chip flakes out. 
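	 * The timer is decremented once a second by gem_watchdog() (via
	 * gem_tick()), and gem_tint() clears or rearms it as descriptors
	 * complete, so the 5 below amounts to a transmit timeout of about
	 * five seconds before the chip is reinitialized.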
*/ 1323 sc->sc_wdog_timer = 5; 1324 #ifdef GEM_DEBUG 1325 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1326 device_get_name(sc->sc_dev), __func__, 1327 sc->sc_wdog_timer); 1328 #endif 1329 } 1330 } 1331 1332 static void 1333 gem_tint(struct gem_softc *sc) 1334 { 1335 struct ifnet *ifp = sc->sc_ifp; 1336 struct gem_txsoft *txs; 1337 int txlast, progress; 1338 #ifdef GEM_DEBUG 1339 int i; 1340 1341 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1342 #endif 1343 1344 /* 1345 * Go through our TX list and free mbufs for those 1346 * frames that have been transmitted. 1347 */ 1348 progress = 0; 1349 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1350 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1351 1352 #ifdef GEM_DEBUG 1353 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1354 printf(" txsoft %p transmit chain:\n", txs); 1355 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1356 printf("descriptor %d: ", i); 1357 printf("gd_flags: 0x%016llx\t", 1358 (long long)GEM_DMA_READ(sc, 1359 sc->sc_txdescs[i].gd_flags)); 1360 printf("gd_addr: 0x%016llx\n", 1361 (long long)GEM_DMA_READ(sc, 1362 sc->sc_txdescs[i].gd_addr)); 1363 if (i == txs->txs_lastdesc) 1364 break; 1365 } 1366 } 1367 #endif 1368 1369 /* 1370 * In theory, we could harvest some descriptors before 1371 * the ring is empty, but that's a bit complicated. 1372 * 1373 * GEM_TX_COMPLETION points to the last descriptor 1374 * processed + 1. 1375 */ 1376 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION); 1377 #ifdef GEM_DEBUG 1378 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1379 "txs->txs_lastdesc = %d, txlast = %d", 1380 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1381 #endif 1382 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1383 if ((txlast >= txs->txs_firstdesc) && 1384 (txlast <= txs->txs_lastdesc)) 1385 break; 1386 } else { 1387 /* Ick -- this command wraps. */ 1388 if ((txlast >= txs->txs_firstdesc) || 1389 (txlast <= txs->txs_lastdesc)) 1390 break; 1391 } 1392 1393 #ifdef GEM_DEBUG 1394 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 1395 #endif 1396 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1397 1398 sc->sc_txfree += txs->txs_ndescs; 1399 1400 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1401 BUS_DMASYNC_POSTWRITE); 1402 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1403 if (txs->txs_mbuf != NULL) { 1404 m_freem(txs->txs_mbuf); 1405 txs->txs_mbuf = NULL; 1406 } 1407 1408 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1409 1410 ifp->if_opackets++; 1411 progress = 1; 1412 } 1413 1414 #ifdef GEM_DEBUG 1415 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 1416 "GEM_TX_COMPLETION %x", 1417 __func__, bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE), 1418 ((long long)bus_read_4(sc->sc_res[0], 1419 GEM_TX_DATA_PTR_HI) << 32) | 1420 bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO), 1421 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION)); 1422 #endif 1423 1424 if (progress) { 1425 if (sc->sc_txfree == GEM_NTXDESC - 1) 1426 sc->sc_txwin = 0; 1427 1428 /* 1429 * We freed some descriptors, so reset IFF_DRV_OACTIVE 1430 * and restart. 1431 */ 1432 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1433 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 
0 : 5; 1434 1435 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1436 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1437 gem_start_locked(ifp); 1438 } 1439 1440 #ifdef GEM_DEBUG 1441 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1442 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1443 #endif 1444 } 1445 1446 #ifdef GEM_RINT_TIMEOUT 1447 static void 1448 gem_rint_timeout(void *arg) 1449 { 1450 struct gem_softc *sc = arg; 1451 1452 GEM_LOCK_ASSERT(sc, MA_OWNED); 1453 gem_rint(sc); 1454 } 1455 #endif 1456 1457 static void 1458 gem_rint(struct gem_softc *sc) 1459 { 1460 struct ifnet *ifp = sc->sc_ifp; 1461 struct mbuf *m; 1462 uint64_t rxstat; 1463 uint32_t rxcomp; 1464 1465 #ifdef GEM_RINT_TIMEOUT 1466 callout_stop(&sc->sc_rx_ch); 1467 #endif 1468 #ifdef GEM_DEBUG 1469 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1470 #endif 1471 1472 /* 1473 * Read the completion register once. This limits 1474 * how long the following loop can execute. 1475 */ 1476 rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION); 1477 1478 #ifdef GEM_DEBUG 1479 CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d", 1480 __func__, sc->sc_rxptr, rxcomp); 1481 #endif 1482 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1483 for (; sc->sc_rxptr != rxcomp;) { 1484 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1485 rxstat = GEM_DMA_READ(sc, 1486 sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1487 1488 if (rxstat & GEM_RD_OWN) { 1489 #ifdef GEM_RINT_TIMEOUT 1490 /* 1491 * The descriptor is still marked as owned, although 1492 * it is supposed to have completed. This has been 1493 * observed on some machines. Just exiting here 1494 * might leave the packet sitting around until another 1495 * one arrives to trigger a new interrupt, which is 1496 * generally undesirable, so set up a timeout. 1497 */ 1498 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1499 gem_rint_timeout, sc); 1500 #endif 1501 m = NULL; 1502 goto kickit; 1503 } 1504 1505 if (rxstat & GEM_RD_BAD_CRC) { 1506 ifp->if_ierrors++; 1507 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1508 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1509 m = NULL; 1510 goto kickit; 1511 } 1512 1513 #ifdef GEM_DEBUG 1514 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1515 printf(" rxsoft %p descriptor %d: ", 1516 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1517 printf("gd_flags: 0x%016llx\t", 1518 (long long)GEM_DMA_READ(sc, 1519 sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 1520 printf("gd_addr: 0x%016llx\n", 1521 (long long)GEM_DMA_READ(sc, 1522 sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 1523 } 1524 #endif 1525 1526 /* 1527 * Allocate a new mbuf cluster. If that fails, we are 1528 * out of memory, and must drop the packet and recycle 1529 * the buffer that's already attached to this descriptor. 1530 */ 1531 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1532 ifp->if_ierrors++; 1533 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1534 m = NULL; 1535 } 1536 1537 kickit: 1538 /* 1539 * Update the RX kick register. This register has to point 1540 * to the descriptor after the last valid one (before the 1541 * current batch) and must be incremented in multiples of 1542 * 4 (because the DMA engine fetches/updates descriptors 1543 * in batches of 4). 
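	 * As an illustration (assuming, hypothetically, a 256-entry ring
	 * with GEM_NRXDESC_MASK == 255): once sc_rxptr has advanced to 8,
	 * the code below writes (8 + 256 - 4) & 255 == 4 to GEM_RX_KICK,
	 * i.e. the slot four descriptors behind the software read pointer.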
1544 */ 1545 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 1546 if ((sc->sc_rxptr % 4) == 0) { 1547 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1548 bus_write_4(sc->sc_res[0], GEM_RX_KICK, 1549 (sc->sc_rxptr + GEM_NRXDESC - 4) & 1550 GEM_NRXDESC_MASK); 1551 } 1552 1553 if (m == NULL) { 1554 if (rxstat & GEM_RD_OWN) 1555 break; 1556 continue; 1557 } 1558 1559 ifp->if_ipackets++; 1560 m->m_data += 2; /* We're already off by two */ 1561 m->m_pkthdr.rcvif = ifp; 1562 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 1563 1564 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1565 gem_rxcksum(m, rxstat); 1566 1567 /* Pass it on. */ 1568 GEM_UNLOCK(sc); 1569 (*ifp->if_input)(ifp, m); 1570 GEM_LOCK(sc); 1571 } 1572 1573 #ifdef GEM_DEBUG 1574 CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__, 1575 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION)); 1576 #endif 1577 } 1578 1579 static int 1580 gem_add_rxbuf(struct gem_softc *sc, int idx) 1581 { 1582 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1583 struct mbuf *m; 1584 bus_dma_segment_t segs[1]; 1585 int error, nsegs; 1586 1587 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1588 if (m == NULL) 1589 return (ENOBUFS); 1590 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1591 1592 #ifdef GEM_DEBUG 1593 /* Bzero the packet to check DMA. */ 1594 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1595 #endif 1596 1597 if (rxs->rxs_mbuf != NULL) { 1598 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1599 BUS_DMASYNC_POSTREAD); 1600 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1601 } 1602 1603 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1604 m, segs, &nsegs, BUS_DMA_NOWAIT); 1605 KASSERT(nsegs == 1, ("Too many segments returned!")); 1606 if (error != 0) { 1607 device_printf(sc->sc_dev, 1608 "cannot load RS DMA map %d, error = %d\n", idx, error); 1609 m_freem(m); 1610 return (error); 1611 } 1612 /* If nsegs is wrong then the stack is corrupt. */ 1613 rxs->rxs_mbuf = m; 1614 rxs->rxs_paddr = segs[0].ds_addr; 1615 1616 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1617 BUS_DMASYNC_PREREAD); 1618 1619 GEM_INIT_RXDESC(sc, idx); 1620 1621 return (0); 1622 } 1623 1624 static void 1625 gem_eint(struct gem_softc *sc, u_int status) 1626 { 1627 1628 sc->sc_ifp->if_ierrors++; 1629 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1630 gem_reset_rxdma(sc); 1631 return; 1632 } 1633 1634 device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status); 1635 } 1636 1637 void 1638 gem_intr(void *v) 1639 { 1640 struct gem_softc *sc = v; 1641 uint32_t status, status2; 1642 1643 GEM_LOCK(sc); 1644 status = bus_read_4(sc->sc_res[0], GEM_STATUS); 1645 1646 #ifdef GEM_DEBUG 1647 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 1648 device_get_name(sc->sc_dev), __func__, (status >> 19), 1649 (u_int)status); 1650 1651 /* 1652 * PCS interrupts must be cleared, otherwise no traffic is passed! 
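	 * (Which is also why GEM_INTR_PCS and GEM_INTR_MIF are only
	 * unmasked in gem_init_locked() when GEM_DEBUG is defined.)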
1653 */ 1654 if ((status & GEM_INTR_PCS) != 0) { 1655 status2 = 1656 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) | 1657 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS); 1658 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1659 device_printf(sc->sc_dev, 1660 "%s: PCS link status changed\n", __func__); 1661 } 1662 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1663 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS); 1664 if ((status2 & GEM_MAC_PAUSED) != 0) 1665 device_printf(sc->sc_dev, 1666 "%s: PAUSE received (PAUSE time %d slots)\n", 1667 __func__, GEM_MAC_PAUSE_TIME(status2)); 1668 if ((status2 & GEM_MAC_PAUSE) != 0) 1669 device_printf(sc->sc_dev, 1670 "%s: transited to PAUSE state\n", __func__); 1671 if ((status2 & GEM_MAC_RESUME) != 0) 1672 device_printf(sc->sc_dev, 1673 "%s: transited to non-PAUSE state\n", __func__); 1674 } 1675 if ((status & GEM_INTR_MIF) != 0) 1676 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1677 #endif 1678 1679 if ((status & 1680 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1681 gem_eint(sc, status); 1682 1683 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1684 gem_rint(sc); 1685 1686 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1687 gem_tint(sc); 1688 1689 if (status & GEM_INTR_TX_MAC) { 1690 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS); 1691 if ((status2 & 1692 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0) 1693 device_printf(sc->sc_dev, 1694 "MAC TX fault, status %x\n", status2); 1695 if ((status2 & 1696 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) 1697 gem_init_locked(sc); 1698 } 1699 if (status & GEM_INTR_RX_MAC) { 1700 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS); 1701 /* 1702 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 1703 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 1704 * silicon bug so handle them silently. Moreover, it's 1705 * likely that the receiver has hung so we reset it. 1706 */ 1707 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1708 sc->sc_ifp->if_ierrors++; 1709 gem_reset_rxdma(sc); 1710 } else if ((status2 & 1711 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1712 device_printf(sc->sc_dev, 1713 "MAC RX fault, status %x\n", status2); 1714 } 1715 GEM_UNLOCK(sc); 1716 } 1717 1718 static int 1719 gem_watchdog(struct gem_softc *sc) 1720 { 1721 1722 GEM_LOCK_ASSERT(sc, MA_OWNED); 1723 1724 #ifdef GEM_DEBUG 1725 CTR4(KTR_GEM, 1726 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1727 __func__, bus_read_4(sc->sc_res[0], GEM_RX_CONFIG), 1728 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS), 1729 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG)); 1730 CTR4(KTR_GEM, 1731 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1732 __func__, bus_read_4(sc->sc_res[0], GEM_TX_CONFIG), 1733 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS), 1734 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG)); 1735 #endif 1736 1737 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1738 return (0); 1739 1740 if ((sc->sc_flags & GEM_LINK) != 0) 1741 device_printf(sc->sc_dev, "device timeout\n"); 1742 else if (bootverbose) 1743 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1744 ++sc->sc_ifp->if_oerrors; 1745 1746 /* Try to get more packets going. 
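	 * Returning EJUSTRETURN tells gem_tick() not to rearm its callout;
	 * gem_init_locked() has already restarted the one second timer.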
*/ 1747 gem_init_locked(sc); 1748 return (EJUSTRETURN); 1749 } 1750 1751 static void 1752 gem_mifinit(struct gem_softc *sc) 1753 { 1754 1755 /* Configure the MIF in frame mode */ 1756 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0], 1757 GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1758 } 1759 1760 /* 1761 * MII interface 1762 * 1763 * The GEM MII interface supports at least three different operating modes: 1764 * 1765 * Bitbang mode is implemented using data, clock and output enable registers. 1766 * 1767 * Frame mode is implemented by loading a complete frame into the frame 1768 * register and polling the valid bit for completion. 1769 * 1770 * Polling mode uses the frame register but completion is indicated by 1771 * an interrupt. 1772 * 1773 */ 1774 int 1775 gem_mii_readreg(device_t dev, int phy, int reg) 1776 { 1777 struct gem_softc *sc; 1778 int n; 1779 uint32_t v; 1780 1781 #ifdef GEM_DEBUG_PHY 1782 printf("%s: phy %d reg %d\n", __func__, phy, reg); 1783 #endif 1784 1785 sc = device_get_softc(dev); 1786 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 1787 return (0); 1788 1789 if ((sc->sc_flags & GEM_SERDES) != 0) { 1790 switch (reg) { 1791 case MII_BMCR: 1792 reg = GEM_MII_CONTROL; 1793 break; 1794 case MII_BMSR: 1795 reg = GEM_MII_STATUS; 1796 break; 1797 case MII_PHYIDR1: 1798 case MII_PHYIDR2: 1799 return (0); 1800 case MII_ANAR: 1801 reg = GEM_MII_ANAR; 1802 break; 1803 case MII_ANLPAR: 1804 reg = GEM_MII_ANLPAR; 1805 break; 1806 case MII_EXTSR: 1807 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 1808 default: 1809 device_printf(sc->sc_dev, 1810 "%s: unhandled register %d\n", __func__, reg); 1811 return (0); 1812 } 1813 return (bus_read_4(sc->sc_res[0], reg)); 1814 } 1815 1816 /* Construct the frame command. */ 1817 v = GEM_MIF_FRAME_READ | 1818 (phy << GEM_MIF_PHY_SHIFT) | 1819 (reg << GEM_MIF_REG_SHIFT); 1820 1821 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v); 1822 for (n = 0; n < 100; n++) { 1823 DELAY(1); 1824 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME); 1825 if (v & GEM_MIF_FRAME_TA0) 1826 return (v & GEM_MIF_FRAME_DATA); 1827 } 1828 1829 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1830 return (0); 1831 } 1832 1833 int 1834 gem_mii_writereg(device_t dev, int phy, int reg, int val) 1835 { 1836 struct gem_softc *sc; 1837 int n; 1838 uint32_t v; 1839 1840 #ifdef GEM_DEBUG_PHY 1841 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 1842 #endif 1843 1844 sc = device_get_softc(dev); 1845 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 1846 return (0); 1847 1848 if ((sc->sc_flags & GEM_SERDES) != 0) { 1849 switch (reg) { 1850 case MII_BMCR: 1851 reg = GEM_MII_CONTROL; 1852 break; 1853 case MII_BMSR: 1854 reg = GEM_MII_STATUS; 1855 break; 1856 case MII_ANAR: 1857 bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0); 1858 bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4, 1859 BUS_SPACE_BARRIER_WRITE); 1860 bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val); 1861 bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL, 1862 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 1863 bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 1864 GEM_MII_CONFIG_ENABLE); 1865 return (0); 1866 case MII_ANLPAR: 1867 reg = GEM_MII_ANLPAR; 1868 break; 1869 default: 1870 device_printf(sc->sc_dev, 1871 "%s: unhandled register %d\n", __func__, reg); 1872 return (0); 1873 } 1874 bus_write_4(sc->sc_res[0], reg, val); 1875 return (0); 1876 } 1877 1878 /* Construct the frame command. 
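	 * The MIF frame register packs the opcode, PHY address and
	 * register number into the upper bits and the 16-bit value into
	 * GEM_MIF_FRAME_DATA; completion is signalled by the chip setting
	 * GEM_MIF_FRAME_TA0, which the loop below polls for up to ~100us.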
*/ 1879 v = GEM_MIF_FRAME_WRITE | 1880 (phy << GEM_MIF_PHY_SHIFT) | 1881 (reg << GEM_MIF_REG_SHIFT) | 1882 (val & GEM_MIF_FRAME_DATA); 1883 1884 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v); 1885 for (n = 0; n < 100; n++) { 1886 DELAY(1); 1887 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME); 1888 if (v & GEM_MIF_FRAME_TA0) 1889 return (1); 1890 } 1891 1892 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1893 return (0); 1894 } 1895 1896 void 1897 gem_mii_statchg(device_t dev) 1898 { 1899 struct gem_softc *sc; 1900 int gigabit; 1901 uint32_t rxcfg, txcfg, v; 1902 1903 sc = device_get_softc(dev); 1904 1905 #ifdef GEM_DEBUG 1906 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0) 1907 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n", 1908 __func__, sc->sc_phyad); 1909 #endif 1910 1911 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 1912 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 1913 sc->sc_flags |= GEM_LINK; 1914 else 1915 sc->sc_flags &= ~GEM_LINK; 1916 1917 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 1918 case IFM_1000_SX: 1919 case IFM_1000_LX: 1920 case IFM_1000_CX: 1921 case IFM_1000_T: 1922 gigabit = 1; 1923 break; 1924 default: 1925 gigabit = 0; 1926 } 1927 1928 /* 1929 * The configuration done here corresponds to the steps F) and 1930 * G) and as far as enabling of RX and TX MAC goes also step H) 1931 * of the initialization sequence outlined in section 3.2.1 of 1932 * the GEM Gigabit Ethernet ASIC Specification. 1933 */ 1934 1935 rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 1936 rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE); 1937 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 1938 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1939 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 1940 else if (gigabit != 0) { 1941 rxcfg |= GEM_MAC_RX_CARR_EXTEND; 1942 txcfg |= GEM_MAC_TX_CARR_EXTEND; 1943 } 1944 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0); 1945 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4, 1946 BUS_SPACE_BARRIER_WRITE); 1947 if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 1948 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 1949 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg); 1950 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0); 1951 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 1952 BUS_SPACE_BARRIER_WRITE); 1953 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1954 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 1955 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg); 1956 1957 v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) & 1958 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 1959 #ifdef notyet 1960 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 1961 IFM_ETH_RXPAUSE) != 0) 1962 v |= GEM_MAC_CC_RX_PAUSE; 1963 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 1964 IFM_ETH_TXPAUSE) != 0) 1965 v |= GEM_MAC_CC_TX_PAUSE; 1966 #endif 1967 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v); 1968 1969 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 1970 gigabit != 0) 1971 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME, 1972 GEM_MAC_SLOT_TIME_CARR_EXTEND); 1973 else 1974 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME, 1975 GEM_MAC_SLOT_TIME_NORMAL); 1976 1977 /* XIF Configuration */ 1978 v = GEM_MAC_XIF_LINK_LED; 1979 v |= GEM_MAC_XIF_TX_MII_ENA; 1980 if ((sc->sc_flags & GEM_SERDES) == 0) { 1981 if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) & 1982 GEM_MIF_CONFIG_PHY_SEL) != 0 && 1983 
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}
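
/*
 * Minimal, compiled-out sketch restating the logical address filter
 * calculation performed by gem_setladrf() above.  The helper name
 * gem_hash_example() and the #if 0 guard are illustrative assumptions
 * only and not part of the driver: the 8 most significant bits of the
 * little-endian CRC of a multicast address select one of 16 hash words
 * (high-order 4 bits) and one bit within that word, counted from the
 * MSB (low-order 4 bits).  gem_setladrf() then sets
 * hash[word] |= 1 << bit for each such address.
 */
#if 0
static void
gem_hash_example(const uint8_t addr[ETHER_ADDR_LEN], u_int *word, u_int *bit)
{
	uint32_t crc;

	/* Little-endian CRC32 over the 6-byte Ethernet address. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/* Keep the 8 most significant bits of the CRC. */
	crc >>= 24;

	/* The high-order 4 bits select the 16-bit hash word... */
	*word = crc >> 4;
	/* ...the low-order 4 bits select the bit, counted from the MSB. */
	*bit = 15 - (crc & 15);
}
#endif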