/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading. However,
 * the hardware does not compensate the checksum for UDP datagrams, which
 * can yield a checksum of 0x0. As a safeguard, UDP checksum offload is
 * disabled by default. It can be reactivated by setting the special link
 * option link0 with ifconfig(8).
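 *
 * (A note on re-enabling it: IFF_LINK0 is evaluated in gem_ioctl(), which
 * adds CSUM_UDP to sc_csum_features when the flag is set; so something
 * like "ifconfig gem0 link0" would turn UDP TX checksum offload back on.
 * The interface name here is only an example.)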
95 */ 96 #define GEM_CSUM_FEATURES (CSUM_TCP) 97 98 static int gem_add_rxbuf(struct gem_softc *sc, int idx); 99 static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, 100 uint32_t set); 101 static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, 102 int nsegs, int error); 103 static int gem_disable_rx(struct gem_softc *sc); 104 static int gem_disable_tx(struct gem_softc *sc); 105 static void gem_eint(struct gem_softc *sc, u_int status); 106 static void gem_init(void *xsc); 107 static void gem_init_locked(struct gem_softc *sc); 108 static void gem_init_regs(struct gem_softc *sc); 109 static int gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 110 static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); 111 static int gem_meminit(struct gem_softc *sc); 112 static void gem_mifinit(struct gem_softc *sc); 113 static void gem_reset(struct gem_softc *sc); 114 static int gem_reset_rx(struct gem_softc *sc); 115 static void gem_reset_rxdma(struct gem_softc *sc); 116 static int gem_reset_tx(struct gem_softc *sc); 117 static u_int gem_ringsize(u_int sz); 118 static void gem_rint(struct gem_softc *sc); 119 #ifdef GEM_RINT_TIMEOUT 120 static void gem_rint_timeout(void *arg); 121 #endif 122 static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags); 123 static void gem_rxdrain(struct gem_softc *sc); 124 static void gem_setladrf(struct gem_softc *sc); 125 static void gem_start(struct ifnet *ifp); 126 static void gem_start_locked(struct ifnet *ifp); 127 static void gem_stop(struct ifnet *ifp, int disable); 128 static void gem_tick(void *arg); 129 static void gem_tint(struct gem_softc *sc); 130 static __inline void gem_txcksum(struct gem_softc *sc, struct mbuf *m, 131 uint64_t *cflags); 132 static int gem_watchdog(struct gem_softc *sc); 133 134 devclass_t gem_devclass; 135 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 136 MODULE_DEPEND(gem, miibus, 1, 1, 1); 137 138 #ifdef GEM_DEBUG 139 #include <sys/ktr.h> 140 #define KTR_GEM KTR_CT2 141 #endif 142 143 int 144 gem_attach(struct gem_softc *sc) 145 { 146 struct gem_txsoft *txs; 147 struct ifnet *ifp; 148 int error, i; 149 uint32_t v; 150 151 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 152 if (ifp == NULL) 153 return (ENOSPC); 154 155 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 156 #ifdef GEM_RINT_TIMEOUT 157 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 158 #endif 159 160 /* Make sure the chip is stopped. 
*/ 161 ifp->if_softc = sc; 162 gem_reset(sc); 163 164 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 165 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 166 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 167 NULL, &sc->sc_pdmatag); 168 if (error) 169 goto fail_ifnet; 170 171 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 172 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 173 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 174 if (error) 175 goto fail_ptag; 176 177 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 178 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 179 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 180 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 181 if (error) 182 goto fail_rtag; 183 184 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 185 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 186 sizeof(struct gem_control_data), 1, 187 sizeof(struct gem_control_data), 0, 188 NULL, NULL, &sc->sc_cdmatag); 189 if (error) 190 goto fail_ttag; 191 192 /* 193 * Allocate the control data structures, create and load the 194 * DMA map for it. 195 */ 196 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 197 (void **)&sc->sc_control_data, 198 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 199 &sc->sc_cddmamap))) { 200 device_printf(sc->sc_dev, 201 "unable to allocate control data, error = %d\n", error); 202 goto fail_ctag; 203 } 204 205 sc->sc_cddma = 0; 206 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 207 sc->sc_control_data, sizeof(struct gem_control_data), 208 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 209 device_printf(sc->sc_dev, 210 "unable to load control data DMA map, error = %d\n", 211 error); 212 goto fail_cmem; 213 } 214 215 /* 216 * Initialize the transmit job descriptors. 217 */ 218 STAILQ_INIT(&sc->sc_txfreeq); 219 STAILQ_INIT(&sc->sc_txdirtyq); 220 221 /* 222 * Create the transmit buffer DMA maps. 223 */ 224 error = ENOMEM; 225 for (i = 0; i < GEM_TXQUEUELEN; i++) { 226 txs = &sc->sc_txsoft[i]; 227 txs->txs_mbuf = NULL; 228 txs->txs_ndescs = 0; 229 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 230 &txs->txs_dmamap)) != 0) { 231 device_printf(sc->sc_dev, 232 "unable to create TX DMA map %d, error = %d\n", 233 i, error); 234 goto fail_txd; 235 } 236 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 237 } 238 239 /* 240 * Create the receive buffer DMA maps. 241 */ 242 for (i = 0; i < GEM_NRXDESC; i++) { 243 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 244 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 245 device_printf(sc->sc_dev, 246 "unable to create RX DMA map %d, error = %d\n", 247 i, error); 248 goto fail_rxd; 249 } 250 sc->sc_rxsoft[i].rxs_mbuf = NULL; 251 } 252 253 /* Bad things will happen when touching this register on ERI. */ 254 if (sc->sc_variant != GEM_SUN_ERI) 255 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 256 GEM_MII_DATAPATH_MII); 257 258 gem_mifinit(sc); 259 260 /* 261 * Look for an external PHY. 262 */ 263 error = ENXIO; 264 v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG); 265 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 266 v |= GEM_MIF_CONFIG_PHY_SEL; 267 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 268 switch (sc->sc_variant) { 269 case GEM_SUN_ERI: 270 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 271 break; 272 default: 273 sc->sc_phyad = -1; 274 break; 275 } 276 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 277 gem_mediachange, gem_mediastatus); 278 } 279 280 /* 281 * Fall back on an internal PHY if no external PHY was found. 
282 */ 283 if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) { 284 v &= ~GEM_MIF_CONFIG_PHY_SEL; 285 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 286 switch (sc->sc_variant) { 287 case GEM_SUN_ERI: 288 case GEM_APPLE_K2_GMAC: 289 sc->sc_phyad = GEM_PHYAD_INTERNAL; 290 break; 291 case GEM_APPLE_GMAC: 292 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 293 break; 294 default: 295 sc->sc_phyad = -1; 296 break; 297 } 298 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 299 gem_mediachange, gem_mediastatus); 300 } 301 302 /* 303 * Try the external PCS SERDES if we didn't find any PHYs. 304 */ 305 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 306 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 307 GEM_MII_DATAPATH_SERDES); 308 bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL, 309 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 310 bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 311 GEM_MII_CONFIG_ENABLE); 312 sc->sc_flags |= GEM_SERDES; 313 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 314 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 315 gem_mediachange, gem_mediastatus); 316 } 317 318 if (error != 0) { 319 device_printf(sc->sc_dev, "PHY probe failed: %d\n", error); 320 goto fail_rxd; 321 } 322 sc->sc_mii = device_get_softc(sc->sc_miibus); 323 324 /* 325 * From this point forward, the attachment cannot fail. A failure 326 * before this point releases all resources that may have been 327 * allocated. 328 */ 329 330 /* Get RX FIFO size. */ 331 sc->sc_rxfifosize = 64 * 332 bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE); 333 334 /* Get TX FIFO size. */ 335 v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE); 336 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 337 sc->sc_rxfifosize / 1024, v / 16); 338 339 sc->sc_csum_features = GEM_CSUM_FEATURES; 340 /* Initialize ifnet structure. */ 341 ifp->if_softc = sc; 342 if_initname(ifp, device_get_name(sc->sc_dev), 343 device_get_unit(sc->sc_dev)); 344 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 345 ifp->if_start = gem_start; 346 ifp->if_ioctl = gem_ioctl; 347 ifp->if_init = gem_init; 348 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN); 349 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN; 350 IFQ_SET_READY(&ifp->if_snd); 351 352 /* Attach the interface. */ 353 ether_ifattach(ifp, sc->sc_enaddr); 354 355 /* 356 * Tell the upper layer(s) we support long frames/checksum offloads. 357 */ 358 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 359 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 360 ifp->if_hwassist |= sc->sc_csum_features; 361 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 362 363 return (0); 364 365 /* 366 * Free any resources we've allocated during the failed attach 367 * attempt. Do this in reverse order and fall through. 
368 */ 369 fail_rxd: 370 for (i = 0; i < GEM_NRXDESC; i++) 371 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 372 bus_dmamap_destroy(sc->sc_rdmatag, 373 sc->sc_rxsoft[i].rxs_dmamap); 374 fail_txd: 375 for (i = 0; i < GEM_TXQUEUELEN; i++) 376 if (sc->sc_txsoft[i].txs_dmamap != NULL) 377 bus_dmamap_destroy(sc->sc_tdmatag, 378 sc->sc_txsoft[i].txs_dmamap); 379 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 380 fail_cmem: 381 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 382 sc->sc_cddmamap); 383 fail_ctag: 384 bus_dma_tag_destroy(sc->sc_cdmatag); 385 fail_ttag: 386 bus_dma_tag_destroy(sc->sc_tdmatag); 387 fail_rtag: 388 bus_dma_tag_destroy(sc->sc_rdmatag); 389 fail_ptag: 390 bus_dma_tag_destroy(sc->sc_pdmatag); 391 fail_ifnet: 392 if_free(ifp); 393 return (error); 394 } 395 396 void 397 gem_detach(struct gem_softc *sc) 398 { 399 struct ifnet *ifp = sc->sc_ifp; 400 int i; 401 402 GEM_LOCK(sc); 403 gem_stop(ifp, 1); 404 GEM_UNLOCK(sc); 405 callout_drain(&sc->sc_tick_ch); 406 #ifdef GEM_RINT_TIMEOUT 407 callout_drain(&sc->sc_rx_ch); 408 #endif 409 ether_ifdetach(ifp); 410 if_free(ifp); 411 device_delete_child(sc->sc_dev, sc->sc_miibus); 412 413 for (i = 0; i < GEM_NRXDESC; i++) 414 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 415 bus_dmamap_destroy(sc->sc_rdmatag, 416 sc->sc_rxsoft[i].rxs_dmamap); 417 for (i = 0; i < GEM_TXQUEUELEN; i++) 418 if (sc->sc_txsoft[i].txs_dmamap != NULL) 419 bus_dmamap_destroy(sc->sc_tdmatag, 420 sc->sc_txsoft[i].txs_dmamap); 421 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 422 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 423 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 424 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 425 sc->sc_cddmamap); 426 bus_dma_tag_destroy(sc->sc_cdmatag); 427 bus_dma_tag_destroy(sc->sc_tdmatag); 428 bus_dma_tag_destroy(sc->sc_rdmatag); 429 bus_dma_tag_destroy(sc->sc_pdmatag); 430 } 431 432 void 433 gem_suspend(struct gem_softc *sc) 434 { 435 struct ifnet *ifp = sc->sc_ifp; 436 437 GEM_LOCK(sc); 438 gem_stop(ifp, 0); 439 GEM_UNLOCK(sc); 440 } 441 442 void 443 gem_resume(struct gem_softc *sc) 444 { 445 struct ifnet *ifp = sc->sc_ifp; 446 447 GEM_LOCK(sc); 448 /* 449 * On resume all registers have to be initialized again like 450 * after power-on. 451 */ 452 sc->sc_flags &= ~GEM_INITED; 453 if (ifp->if_flags & IFF_UP) 454 gem_init_locked(sc); 455 GEM_UNLOCK(sc); 456 } 457 458 static __inline void 459 gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags) 460 { 461 char *p; 462 struct ip *ip; 463 struct mbuf *m0; 464 uint64_t offset, offset2; 465 466 m0 = m; 467 offset = sizeof(struct ip) + ETHER_HDR_LEN; 468 for(; m && m->m_len == 0; m = m->m_next) 469 ; 470 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 471 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n", 472 __func__); 473 /* Checksum will be corrupted. */ 474 m = m0; 475 goto sendit; 476 } 477 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) { 478 if (m->m_len != ETHER_HDR_LEN) { 479 device_printf(sc->sc_dev, 480 "%s: m_len != ETHER_HDR_LEN\n", __func__); 481 /* Checksum will be corrupted. */ 482 m = m0; 483 goto sendit; 484 } 485 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 486 ; 487 if (m == NULL) { 488 /* Checksum will be corrupted. 
*/ 489 m = m0; 490 goto sendit; 491 } 492 ip = mtod(m, struct ip *); 493 } else { 494 p = mtod(m, uint8_t *); 495 p += ETHER_HDR_LEN; 496 ip = (struct ip *)p; 497 } 498 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 499 500 sendit: 501 offset2 = m->m_pkthdr.csum_data; 502 *cflags = offset << GEM_TD_CXSUM_STARTSHFT; 503 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT); 504 *cflags |= GEM_TD_CXSUM_ENABLE; 505 } 506 507 static __inline void 508 gem_rxcksum(struct mbuf *m, uint64_t flags) 509 { 510 struct ether_header *eh; 511 struct ip *ip; 512 struct udphdr *uh; 513 uint16_t *opts; 514 int32_t hlen, len, pktlen; 515 uint32_t temp32; 516 uint16_t cksum; 517 518 pktlen = m->m_pkthdr.len; 519 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 520 return; 521 eh = mtod(m, struct ether_header *); 522 if (eh->ether_type != htons(ETHERTYPE_IP)) 523 return; 524 ip = (struct ip *)(eh + 1); 525 if (ip->ip_v != IPVERSION) 526 return; 527 528 hlen = ip->ip_hl << 2; 529 pktlen -= sizeof(struct ether_header); 530 if (hlen < sizeof(struct ip)) 531 return; 532 if (ntohs(ip->ip_len) < hlen) 533 return; 534 if (ntohs(ip->ip_len) != pktlen) 535 return; 536 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 537 return; /* Cannot handle fragmented packet. */ 538 539 switch (ip->ip_p) { 540 case IPPROTO_TCP: 541 if (pktlen < (hlen + sizeof(struct tcphdr))) 542 return; 543 break; 544 case IPPROTO_UDP: 545 if (pktlen < (hlen + sizeof(struct udphdr))) 546 return; 547 uh = (struct udphdr *)((uint8_t *)ip + hlen); 548 if (uh->uh_sum == 0) 549 return; /* no checksum */ 550 break; 551 default: 552 return; 553 } 554 555 cksum = ~(flags & GEM_RD_CHECKSUM); 556 /* checksum fixup for IP options */ 557 len = hlen - sizeof(struct ip); 558 if (len > 0) { 559 opts = (uint16_t *)(ip + 1); 560 for (; len > 0; len -= sizeof(uint16_t), opts++) { 561 temp32 = cksum - *opts; 562 temp32 = (temp32 >> 16) + (temp32 & 65535); 563 cksum = temp32 & 65535; 564 } 565 } 566 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 567 m->m_pkthdr.csum_data = cksum; 568 } 569 570 static void 571 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 572 { 573 struct gem_softc *sc = xsc; 574 575 if (error != 0) 576 return; 577 if (nsegs != 1) 578 panic("%s: bad control buffer segment count", __func__); 579 sc->sc_cddma = segs[0].ds_addr; 580 } 581 582 static void 583 gem_tick(void *arg) 584 { 585 struct gem_softc *sc = arg; 586 struct ifnet *ifp; 587 588 GEM_LOCK_ASSERT(sc, MA_OWNED); 589 590 ifp = sc->sc_ifp; 591 /* 592 * Unload collision counters. 593 */ 594 ifp->if_collisions += 595 bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) + 596 bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) + 597 bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) + 598 bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT); 599 600 /* 601 * Then clear the hardware counters. 
602 */ 603 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 604 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 605 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 606 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 607 608 mii_tick(sc->sc_mii); 609 610 if (gem_watchdog(sc) == EJUSTRETURN) 611 return; 612 613 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 614 } 615 616 static int 617 gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 618 { 619 int i; 620 uint32_t reg; 621 622 for (i = TRIES; i--; DELAY(100)) { 623 reg = bus_read_4(sc->sc_res[0], r); 624 if ((reg & clr) == 0 && (reg & set) == set) 625 return (1); 626 } 627 return (0); 628 } 629 630 static void 631 gem_reset(sc) 632 struct gem_softc *sc; 633 { 634 635 #ifdef GEM_DEBUG 636 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 637 #endif 638 gem_reset_rx(sc); 639 gem_reset_tx(sc); 640 641 /* Do a full reset. */ 642 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 643 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 644 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 645 device_printf(sc->sc_dev, "cannot reset device\n"); 646 } 647 648 static void 649 gem_rxdrain(struct gem_softc *sc) 650 { 651 struct gem_rxsoft *rxs; 652 int i; 653 654 for (i = 0; i < GEM_NRXDESC; i++) { 655 rxs = &sc->sc_rxsoft[i]; 656 if (rxs->rxs_mbuf != NULL) { 657 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 658 BUS_DMASYNC_POSTREAD); 659 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 660 m_freem(rxs->rxs_mbuf); 661 rxs->rxs_mbuf = NULL; 662 } 663 } 664 } 665 666 static void 667 gem_stop(struct ifnet *ifp, int disable) 668 { 669 struct gem_softc *sc = ifp->if_softc; 670 struct gem_txsoft *txs; 671 672 #ifdef GEM_DEBUG 673 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 674 #endif 675 676 callout_stop(&sc->sc_tick_ch); 677 #ifdef GEM_RINT_TIMEOUT 678 callout_stop(&sc->sc_rx_ch); 679 #endif 680 681 /* XXX should we reset these instead? */ 682 gem_disable_tx(sc); 683 gem_disable_rx(sc); 684 685 /* 686 * Release any queued transmit buffers. 687 */ 688 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 689 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 690 if (txs->txs_ndescs != 0) { 691 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 692 BUS_DMASYNC_POSTWRITE); 693 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 694 if (txs->txs_mbuf != NULL) { 695 m_freem(txs->txs_mbuf); 696 txs->txs_mbuf = NULL; 697 } 698 } 699 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 700 } 701 702 if (disable) 703 gem_rxdrain(sc); 704 705 /* 706 * Mark the interface down and cancel the watchdog timer. 707 */ 708 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 709 sc->sc_flags &= ~GEM_LINK; 710 sc->sc_wdog_timer = 0; 711 } 712 713 static int 714 gem_reset_rx(struct gem_softc *sc) 715 { 716 717 /* 718 * Resetting while DMA is in progress can cause a bus hang, so we 719 * disable DMA first. 720 */ 721 gem_disable_rx(sc); 722 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0); 723 bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 724 if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 725 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 726 727 /* Finally, reset the ERX. 
*/ 728 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX); 729 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 730 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 731 device_printf(sc->sc_dev, "cannot reset receiver\n"); 732 return (1); 733 } 734 return (0); 735 } 736 737 /* 738 * Reset the receiver DMA engine. 739 * 740 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 741 * etc in order to reset the receiver DMA engine only and not do a full 742 * reset which amongst others also downs the link and clears the FIFOs. 743 */ 744 static void 745 gem_reset_rxdma(struct gem_softc *sc) 746 { 747 int i; 748 749 if (gem_reset_rx(sc) != 0) 750 return (gem_init_locked(sc)); 751 for (i = 0; i < GEM_NRXDESC; i++) 752 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 753 GEM_UPDATE_RXDESC(sc, i); 754 sc->sc_rxptr = 0; 755 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 756 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 757 758 /* NOTE: we use only 32-bit DMA addresses here. */ 759 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0); 760 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 761 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4); 762 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 763 gem_ringsize(GEM_NRXDESC /* XXX */) | 764 ((ETHER_HDR_LEN + sizeof(struct ip)) << 765 GEM_RX_CONFIG_CXM_START_SHFT) | 766 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 767 (2 << GEM_RX_CONFIG_FBOFF_SHFT)); 768 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, 769 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 770 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH, 771 (3 * sc->sc_rxfifosize / 256) | 772 ((sc->sc_rxfifosize / 256) << 12)); 773 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 774 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | 775 GEM_RX_CONFIG_RXDMA_EN); 776 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK, 777 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 778 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 779 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | 780 GEM_MAC_RX_ENABLE); 781 } 782 783 static int 784 gem_reset_tx(struct gem_softc *sc) 785 { 786 787 /* 788 * Resetting while DMA is in progress can cause a bus hang, so we 789 * disable DMA first. 790 */ 791 gem_disable_tx(sc); 792 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0); 793 bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 794 if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 795 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 796 797 /* Finally, reset the ETX. 
*/ 798 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX); 799 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 800 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 801 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 802 return (1); 803 } 804 return (0); 805 } 806 807 static int 808 gem_disable_rx(struct gem_softc *sc) 809 { 810 uint32_t cfg; 811 812 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 813 cfg &= ~GEM_MAC_RX_ENABLE; 814 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg); 815 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 816 BUS_SPACE_BARRIER_WRITE); 817 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 818 } 819 820 static int 821 gem_disable_tx(struct gem_softc *sc) 822 { 823 uint32_t cfg; 824 825 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG); 826 cfg &= ~GEM_MAC_TX_ENABLE; 827 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg); 828 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4, 829 BUS_SPACE_BARRIER_WRITE); 830 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 831 } 832 833 static int 834 gem_meminit(sc) 835 struct gem_softc *sc; 836 { 837 struct gem_rxsoft *rxs; 838 int error, i; 839 840 /* 841 * Initialize the transmit descriptor ring. 842 */ 843 for (i = 0; i < GEM_NTXDESC; i++) { 844 sc->sc_txdescs[i].gd_flags = 0; 845 sc->sc_txdescs[i].gd_addr = 0; 846 } 847 sc->sc_txfree = GEM_MAXTXFREE; 848 sc->sc_txnext = 0; 849 sc->sc_txwin = 0; 850 851 /* 852 * Initialize the receive descriptor and receive job 853 * descriptor rings. 854 */ 855 for (i = 0; i < GEM_NRXDESC; i++) { 856 rxs = &sc->sc_rxsoft[i]; 857 if (rxs->rxs_mbuf == NULL) { 858 if ((error = gem_add_rxbuf(sc, i)) != 0) { 859 device_printf(sc->sc_dev, 860 "unable to allocate or map RX buffer %d, " 861 "error = %d\n", i, error); 862 /* 863 * XXX we should attempt to run with fewer 864 * receive buffers instead of just failing. 865 */ 866 gem_rxdrain(sc); 867 return (1); 868 } 869 } else 870 GEM_INIT_RXDESC(sc, i); 871 } 872 sc->sc_rxptr = 0; 873 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 874 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 875 876 return (0); 877 } 878 879 static u_int 880 gem_ringsize(u_int sz) 881 { 882 883 switch (sz) { 884 case 32: 885 return (GEM_RING_SZ_32); 886 case 64: 887 return (GEM_RING_SZ_64); 888 case 128: 889 return (GEM_RING_SZ_128); 890 case 256: 891 return (GEM_RING_SZ_256); 892 case 512: 893 return (GEM_RING_SZ_512); 894 case 1024: 895 return (GEM_RING_SZ_1024); 896 case 2048: 897 return (GEM_RING_SZ_2048); 898 case 4096: 899 return (GEM_RING_SZ_4096); 900 case 8192: 901 return (GEM_RING_SZ_8192); 902 default: 903 printf("%s: invalid ring size %d\n", __func__, sz); 904 return (GEM_RING_SZ_32); 905 } 906 } 907 908 static void 909 gem_init(void *xsc) 910 { 911 struct gem_softc *sc = xsc; 912 913 GEM_LOCK(sc); 914 gem_init_locked(sc); 915 GEM_UNLOCK(sc); 916 } 917 918 /* 919 * Initialization of interface; set up initialization block 920 * and transmit/receive descriptor rings. 921 */ 922 static void 923 gem_init_locked(struct gem_softc *sc) 924 { 925 struct ifnet *ifp = sc->sc_ifp; 926 uint32_t v; 927 928 GEM_LOCK_ASSERT(sc, MA_OWNED); 929 930 #ifdef GEM_DEBUG 931 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 932 __func__); 933 #endif 934 /* 935 * Initialization sequence. The numbered steps below correspond 936 * to the sequence outlined in section 6.3.5.1 in the Ethernet 937 * Channel Engine manual (part of the PCIO manual). 938 * See also the STP2002-STQ document from Sun Microsystems. 
 */

	/* step 1 & 2. Reset the Ethernet Channel. */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	/* Re-initialize the MIF. */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory. */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses. */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_write_4(sc->sc_res[0], GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
#ifdef GEM_DEBUG
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9. ETX Configuration: use mostly default values. */

	/* Enable DMA. */
	v = gem_ringsize(GEM_NTXDESC /* XXX */);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
	    v | GEM_TX_CONFIG_TXDMA_EN |
	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /* XXX */);
	/* RX TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);

	/* Enable DMA. */
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);

	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));

	/* step 11. Configure Media. */

	/* step 12. RX_MAC Configuration Register */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_STRIP_CRC;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command. */

	/* step 15. Give the receiver a swift kick.
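	 *
	 * (The kick value is GEM_NRXDESC - 4 rather than the full ring size:
	 * the RX DMA engine fetches and updates descriptors in batches of
	 * four, see the comment above the kick-register update in
	 * gem_rint().)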
*/ 1042 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4); 1043 1044 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1045 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1046 sc->sc_ifflags = ifp->if_flags; 1047 1048 sc->sc_flags &= ~GEM_LINK; 1049 mii_mediachg(sc->sc_mii); 1050 1051 /* Start the one second timer. */ 1052 sc->sc_wdog_timer = 0; 1053 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1054 } 1055 1056 static int 1057 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 1058 { 1059 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1060 struct gem_txsoft *txs; 1061 struct mbuf *m; 1062 uint64_t cflags, flags; 1063 int error, nexttx, nsegs, seg; 1064 1065 /* Get a work queue entry. */ 1066 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1067 /* Ran out of descriptors. */ 1068 return (ENOBUFS); 1069 } 1070 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1071 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1072 if (error == EFBIG) { 1073 m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS); 1074 if (m == NULL) { 1075 m_freem(*m_head); 1076 *m_head = NULL; 1077 return (ENOBUFS); 1078 } 1079 *m_head = m; 1080 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1081 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1082 BUS_DMA_NOWAIT); 1083 if (error != 0) { 1084 m_freem(*m_head); 1085 *m_head = NULL; 1086 return (error); 1087 } 1088 } else if (error != 0) 1089 return (error); 1090 /* If nsegs is wrong then the stack is corrupt. */ 1091 KASSERT(nsegs <= GEM_NTXSEGS, 1092 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1093 if (nsegs == 0) { 1094 m_freem(*m_head); 1095 *m_head = NULL; 1096 return (EIO); 1097 } 1098 1099 /* 1100 * Ensure we have enough descriptors free to describe 1101 * the packet. Note, we always reserve one descriptor 1102 * at the end of the ring as a termination point, in 1103 * order to prevent wrap-around. 1104 */ 1105 if (nsegs > sc->sc_txfree - 1) { 1106 txs->txs_ndescs = 0; 1107 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1108 return (ENOBUFS); 1109 } 1110 1111 flags = cflags = 0; 1112 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 1113 gem_txcksum(sc, *m_head, &cflags); 1114 1115 txs->txs_ndescs = nsegs; 1116 txs->txs_firstdesc = sc->sc_txnext; 1117 nexttx = txs->txs_firstdesc; 1118 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1119 #ifdef GEM_DEBUG 1120 CTR6(KTR_GEM, 1121 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1122 __func__, seg, nexttx, txsegs[seg].ds_len, 1123 txsegs[seg].ds_addr, 1124 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr)); 1125 #endif 1126 sc->sc_txdescs[nexttx].gd_addr = 1127 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr); 1128 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1129 ("%s: segment size too large!", __func__)); 1130 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1131 sc->sc_txdescs[nexttx].gd_flags = 1132 GEM_DMA_WRITE(sc, flags | cflags); 1133 txs->txs_lastdesc = nexttx; 1134 } 1135 1136 /* Set EOP on the last descriptor. */ 1137 #ifdef GEM_DEBUG 1138 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 1139 __func__, seg, nexttx); 1140 #endif 1141 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1142 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET); 1143 1144 /* Lastly set SOP on the first descriptor. 
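	 *
	 * (GEM_TD_INTERRUPT_ME is only requested once sc_txwin exceeds 2/3 of
	 * GEM_NTXSEGS, so TX-done interrupts are batched over several packets
	 * rather than raised per packet; gem_tint() reclaims all completed
	 * descriptors whenever it does run.)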
*/ 1145 #ifdef GEM_DEBUG 1146 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 1147 __func__, seg, nexttx); 1148 #endif 1149 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1150 sc->sc_txwin = 0; 1151 flags |= GEM_TD_INTERRUPT_ME; 1152 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1153 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME | 1154 GEM_TD_START_OF_PACKET); 1155 } else 1156 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1157 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET); 1158 1159 /* Sync the DMA map. */ 1160 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1161 BUS_DMASYNC_PREWRITE); 1162 1163 #ifdef GEM_DEBUG 1164 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1165 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1166 txs->txs_ndescs); 1167 #endif 1168 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1169 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1170 txs->txs_mbuf = *m_head; 1171 1172 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1173 sc->sc_txfree -= txs->txs_ndescs; 1174 1175 return (0); 1176 } 1177 1178 static void 1179 gem_init_regs(struct gem_softc *sc) 1180 { 1181 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1182 1183 /* These registers are not cleared on reset. */ 1184 if ((sc->sc_flags & GEM_INITED) == 0) { 1185 /* magic values */ 1186 bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0); 1187 bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8); 1188 bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4); 1189 1190 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, 1191 ETHER_MIN_LEN); 1192 /* max frame and max burst size */ 1193 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME, 1194 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1195 1196 bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7); 1197 bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4); 1198 bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10); 1199 /* dunno... */ 1200 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088); 1201 bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED, 1202 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1203 1204 /* secondary MAC address: 0:0:0:0:0:0 */ 1205 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0); 1206 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0); 1207 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0); 1208 1209 /* MAC control address: 01:80:c2:00:00:01 */ 1210 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001); 1211 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200); 1212 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180); 1213 1214 /* MAC filter address: 0:0:0:0:0:0 */ 1215 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0); 1216 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0); 1217 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0); 1218 1219 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0); 1220 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0); 1221 1222 sc->sc_flags |= GEM_INITED; 1223 } 1224 1225 /* Counters need to be zeroed. 
*/ 1226 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 1227 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 1228 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 1229 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 1230 bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0); 1231 bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0); 1232 bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0); 1233 bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0); 1234 bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0); 1235 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0); 1236 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0); 1237 1238 /* Set XOFF PAUSE time. */ 1239 bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1240 1241 /* 1242 * Set the internal arbitration to "infinite" bursts of the 1243 * maximum length of 31 * 64 bytes so DMA transfers aren't 1244 * split up in cache line size chunks. This greatly improves 1245 * especially RX performance. 1246 * Enable silicon bug workarounds for the Apple variants. 1247 */ 1248 bus_write_4(sc->sc_res[0], GEM_CONFIG, 1249 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 1250 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? 1251 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 1252 1253 /* Set the station address. */ 1254 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, 1255 (laddr[4] << 8) | laddr[5]); 1256 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, 1257 (laddr[2] << 8) | laddr[3]); 1258 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, 1259 (laddr[0] << 8) | laddr[1]); 1260 1261 /* Enable MII outputs. */ 1262 bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, 1263 GEM_MAC_XIF_TX_MII_ENA); 1264 } 1265 1266 static void 1267 gem_start(struct ifnet *ifp) 1268 { 1269 struct gem_softc *sc = ifp->if_softc; 1270 1271 GEM_LOCK(sc); 1272 gem_start_locked(ifp); 1273 GEM_UNLOCK(sc); 1274 } 1275 1276 static void 1277 gem_start_locked(struct ifnet *ifp) 1278 { 1279 struct gem_softc *sc = ifp->if_softc; 1280 struct mbuf *m; 1281 int ntx; 1282 1283 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1284 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1285 return; 1286 1287 #ifdef GEM_DEBUG 1288 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1289 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1290 sc->sc_txnext); 1291 #endif 1292 ntx = 0; 1293 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1294 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1295 if (m == NULL) 1296 break; 1297 if (gem_load_txmbuf(sc, &m) != 0) { 1298 if (m == NULL) 1299 break; 1300 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1301 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1302 break; 1303 } 1304 ntx++; 1305 /* Kick the transmitter. */ 1306 #ifdef GEM_DEBUG 1307 CTR3(KTR_GEM, "%s: %s: kicking TX %d", 1308 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1309 #endif 1310 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1311 bus_write_4(sc->sc_res[0], GEM_TX_KICK, sc->sc_txnext); 1312 1313 BPF_MTAP(ifp, m); 1314 } 1315 1316 if (ntx > 0) { 1317 #ifdef GEM_DEBUG 1318 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1319 device_get_name(sc->sc_dev), sc->sc_txnext); 1320 #endif 1321 1322 /* Set a watchdog timer in case the chip flakes out. 
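		 *
		 * (sc_wdog_timer is decremented once per second by
		 * gem_tick()/gem_watchdog(); if it reaches zero before
		 * gem_tint() clears it, i.e. after roughly five seconds
		 * without TX progress, gem_watchdog() logs a timeout and
		 * reinitializes the chip.)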
*/ 1323 sc->sc_wdog_timer = 5; 1324 #ifdef GEM_DEBUG 1325 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1326 device_get_name(sc->sc_dev), __func__, 1327 sc->sc_wdog_timer); 1328 #endif 1329 } 1330 } 1331 1332 static void 1333 gem_tint(struct gem_softc *sc) 1334 { 1335 struct ifnet *ifp = sc->sc_ifp; 1336 struct gem_txsoft *txs; 1337 int txlast, progress; 1338 #ifdef GEM_DEBUG 1339 int i; 1340 1341 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1342 #endif 1343 1344 /* 1345 * Go through our TX list and free mbufs for those 1346 * frames that have been transmitted. 1347 */ 1348 progress = 0; 1349 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1350 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1351 1352 #ifdef GEM_DEBUG 1353 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1354 printf(" txsoft %p transmit chain:\n", txs); 1355 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1356 printf("descriptor %d: ", i); 1357 printf("gd_flags: 0x%016llx\t", 1358 (long long)GEM_DMA_READ(sc, 1359 sc->sc_txdescs[i].gd_flags)); 1360 printf("gd_addr: 0x%016llx\n", 1361 (long long)GEM_DMA_READ(sc, 1362 sc->sc_txdescs[i].gd_addr)); 1363 if (i == txs->txs_lastdesc) 1364 break; 1365 } 1366 } 1367 #endif 1368 1369 /* 1370 * In theory, we could harvest some descriptors before 1371 * the ring is empty, but that's a bit complicated. 1372 * 1373 * GEM_TX_COMPLETION points to the last descriptor 1374 * processed + 1. 1375 */ 1376 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION); 1377 #ifdef GEM_DEBUG 1378 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1379 "txs->txs_lastdesc = %d, txlast = %d", 1380 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1381 #endif 1382 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1383 if ((txlast >= txs->txs_firstdesc) && 1384 (txlast <= txs->txs_lastdesc)) 1385 break; 1386 } else { 1387 /* Ick -- this command wraps. */ 1388 if ((txlast >= txs->txs_firstdesc) || 1389 (txlast <= txs->txs_lastdesc)) 1390 break; 1391 } 1392 1393 #ifdef GEM_DEBUG 1394 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 1395 #endif 1396 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1397 1398 sc->sc_txfree += txs->txs_ndescs; 1399 1400 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1401 BUS_DMASYNC_POSTWRITE); 1402 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1403 if (txs->txs_mbuf != NULL) { 1404 m_freem(txs->txs_mbuf); 1405 txs->txs_mbuf = NULL; 1406 } 1407 1408 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1409 1410 ifp->if_opackets++; 1411 progress = 1; 1412 } 1413 1414 #ifdef GEM_DEBUG 1415 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 1416 "GEM_TX_COMPLETION %x", 1417 __func__, bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE), 1418 ((long long)bus_read_4(sc->sc_res[0], 1419 GEM_TX_DATA_PTR_HI) << 32) | 1420 bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO), 1421 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION)); 1422 #endif 1423 1424 if (progress) { 1425 if (sc->sc_txfree == GEM_NTXDESC - 1) 1426 sc->sc_txwin = 0; 1427 1428 /* 1429 * We freed some descriptors, so reset IFF_DRV_OACTIVE 1430 * and restart. 1431 */ 1432 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1433 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 
0 : 5; 1434 1435 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1436 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1437 gem_start_locked(ifp); 1438 } 1439 1440 #ifdef GEM_DEBUG 1441 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1442 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1443 #endif 1444 } 1445 1446 #ifdef GEM_RINT_TIMEOUT 1447 static void 1448 gem_rint_timeout(void *arg) 1449 { 1450 struct gem_softc *sc = arg; 1451 1452 GEM_LOCK_ASSERT(sc, MA_OWNED); 1453 gem_rint(sc); 1454 } 1455 #endif 1456 1457 static void 1458 gem_rint(struct gem_softc *sc) 1459 { 1460 struct ifnet *ifp = sc->sc_ifp; 1461 struct mbuf *m; 1462 uint64_t rxstat; 1463 uint32_t rxcomp; 1464 1465 #ifdef GEM_RINT_TIMEOUT 1466 callout_stop(&sc->sc_rx_ch); 1467 #endif 1468 #ifdef GEM_DEBUG 1469 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1470 #endif 1471 1472 /* 1473 * Read the completion register once. This limits 1474 * how long the following loop can execute. 1475 */ 1476 rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION); 1477 1478 #ifdef GEM_DEBUG 1479 CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d", 1480 __func__, sc->sc_rxptr, rxcomp); 1481 #endif 1482 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1483 for (; sc->sc_rxptr != rxcomp;) { 1484 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1485 rxstat = GEM_DMA_READ(sc, 1486 sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1487 1488 if (rxstat & GEM_RD_OWN) { 1489 #ifdef GEM_RINT_TIMEOUT 1490 /* 1491 * The descriptor is still marked as owned, although 1492 * it is supposed to have completed. This has been 1493 * observed on some machines. Just exiting here 1494 * might leave the packet sitting around until another 1495 * one arrives to trigger a new interrupt, which is 1496 * generally undesirable, so set up a timeout. 1497 */ 1498 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1499 gem_rint_timeout, sc); 1500 #endif 1501 m = NULL; 1502 goto kickit; 1503 } 1504 1505 if (rxstat & GEM_RD_BAD_CRC) { 1506 ifp->if_ierrors++; 1507 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1508 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1509 m = NULL; 1510 goto kickit; 1511 } 1512 1513 #ifdef GEM_DEBUG 1514 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1515 printf(" rxsoft %p descriptor %d: ", 1516 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1517 printf("gd_flags: 0x%016llx\t", 1518 (long long)GEM_DMA_READ(sc, 1519 sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 1520 printf("gd_addr: 0x%016llx\n", 1521 (long long)GEM_DMA_READ(sc, 1522 sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 1523 } 1524 #endif 1525 1526 /* 1527 * Allocate a new mbuf cluster. If that fails, we are 1528 * out of memory, and must drop the packet and recycle 1529 * the buffer that's already attached to this descriptor. 1530 */ 1531 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1532 ifp->if_ierrors++; 1533 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1534 m = NULL; 1535 } 1536 1537 kickit: 1538 /* 1539 * Update the RX kick register. This register has to point 1540 * to the descriptor after the last valid one (before the 1541 * current batch) and must be incremented in multiples of 1542 * 4 (because the DMA engine fetches/updates descriptors 1543 * in batches of 4). 
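		 *
		 * (Worked example, assuming a hypothetical ring of 256
		 * descriptors so that GEM_NRXDESC_MASK is 255: once sc_rxptr
		 * has advanced to 8, the value written below is
		 * (8 + 256 - 4) & 255 == 4, i.e. the chip is kicked four
		 * descriptors behind the software read pointer.)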
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
			bus_write_4(sc->sc_res[0], GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += 2; /* We're already off by two */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
#endif
}

static int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* Bzero the packet to check DMA. */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
	    BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(struct gem_softc *sc, u_int status)
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
}

void
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = bus_read_4(sc->sc_res[0], GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__, (status >> 19),
	    (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
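	 *
	 * (The register is read twice below and the results OR'd together;
	 * reading it is what acknowledges the interrupt, and the second read
	 * is presumably there so a link-status change racing the first read
	 * is not lost.)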
1654 */ 1655 if ((status & GEM_INTR_PCS) != 0) { 1656 status2 = 1657 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) | 1658 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS); 1659 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1660 device_printf(sc->sc_dev, 1661 "%s: PCS link status changed\n", __func__); 1662 } 1663 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1664 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS); 1665 if ((status2 & GEM_MAC_PAUSED) != 0) 1666 device_printf(sc->sc_dev, 1667 "%s: PAUSE received (PAUSE time %d slots)\n", 1668 __func__, GEM_MAC_PAUSE_TIME(status2)); 1669 if ((status2 & GEM_MAC_PAUSE) != 0) 1670 device_printf(sc->sc_dev, 1671 "%s: transited to PAUSE state\n", __func__); 1672 if ((status2 & GEM_MAC_RESUME) != 0) 1673 device_printf(sc->sc_dev, 1674 "%s: transited to non-PAUSE state\n", __func__); 1675 } 1676 if ((status & GEM_INTR_MIF) != 0) 1677 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1678 #endif 1679 1680 if ((status & 1681 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1682 gem_eint(sc, status); 1683 1684 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1685 gem_rint(sc); 1686 1687 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1688 gem_tint(sc); 1689 1690 if (status & GEM_INTR_TX_MAC) { 1691 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS); 1692 if ((status2 & 1693 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0) 1694 device_printf(sc->sc_dev, 1695 "MAC TX fault, status %x\n", status2); 1696 if ((status2 & 1697 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) 1698 gem_init_locked(sc); 1699 } 1700 if (status & GEM_INTR_RX_MAC) { 1701 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS); 1702 /* 1703 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 1704 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 1705 * silicon bug so handle them silently. Moreover, it's 1706 * likely that the receiver has hung so we reset it. 1707 */ 1708 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1709 sc->sc_ifp->if_ierrors++; 1710 gem_reset_rxdma(sc); 1711 } else if ((status2 & 1712 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1713 device_printf(sc->sc_dev, 1714 "MAC RX fault, status %x\n", status2); 1715 } 1716 GEM_UNLOCK(sc); 1717 } 1718 1719 static int 1720 gem_watchdog(struct gem_softc *sc) 1721 { 1722 1723 GEM_LOCK_ASSERT(sc, MA_OWNED); 1724 1725 #ifdef GEM_DEBUG 1726 CTR4(KTR_GEM, 1727 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1728 __func__, bus_read_4(sc->sc_res[0], GEM_RX_CONFIG), 1729 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS), 1730 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG)); 1731 CTR4(KTR_GEM, 1732 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1733 __func__, bus_read_4(sc->sc_res[0], GEM_TX_CONFIG), 1734 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS), 1735 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG)); 1736 #endif 1737 1738 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1739 return (0); 1740 1741 if ((sc->sc_flags & GEM_LINK) != 0) 1742 device_printf(sc->sc_dev, "device timeout\n"); 1743 else if (bootverbose) 1744 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1745 ++sc->sc_ifp->if_oerrors; 1746 1747 /* Try to get more packets going. */ 1748 gem_init_locked(sc); 1749 return (EJUSTRETURN); 1750 } 1751 1752 static void 1753 gem_mifinit(struct gem_softc *sc) 1754 { 1755 1756 /* Configure the MIF in frame mode. 
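	 *
	 * (Clearing GEM_MIF_CONFIG_BB_ENA selects frame mode, which is what
	 * gem_mii_readreg()/gem_mii_writereg() below rely on: a complete MIF
	 * frame is written to GEM_MIF_FRAME and GEM_MIF_FRAME_TA0 is polled
	 * for completion, as described in the "MII interface" comment that
	 * follows.)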
	 */
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
	    GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
gem_mii_readreg(device_t dev, int phy, int reg)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (bus_read_4(sc->sc_res[0], reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

int
gem_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct gem_softc *sc;
	int n;
	uint32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANAR:
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0);
			bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val);
			bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		bus_write_4(sc->sc_res[0], reg, val);
		return (0);
	}

	/* Construct the frame command.
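	 *
	 * (For example, writing the illustrative value 0x1000 to register 0
	 * of PHY 1 would build v = GEM_MIF_FRAME_WRITE |
	 * (1 << GEM_MIF_PHY_SHIFT) | (0 << GEM_MIF_REG_SHIFT) | 0x1000;
	 * the phy/reg/val numbers are made up for illustration only.)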
*/ 1880 v = GEM_MIF_FRAME_WRITE | 1881 (phy << GEM_MIF_PHY_SHIFT) | 1882 (reg << GEM_MIF_REG_SHIFT) | 1883 (val & GEM_MIF_FRAME_DATA); 1884 1885 bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v); 1886 for (n = 0; n < 100; n++) { 1887 DELAY(1); 1888 v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME); 1889 if (v & GEM_MIF_FRAME_TA0) 1890 return (1); 1891 } 1892 1893 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1894 return (0); 1895 } 1896 1897 void 1898 gem_mii_statchg(device_t dev) 1899 { 1900 struct gem_softc *sc; 1901 int gigabit; 1902 uint32_t rxcfg, txcfg, v; 1903 1904 sc = device_get_softc(dev); 1905 1906 #ifdef GEM_DEBUG 1907 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0) 1908 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n", 1909 __func__, sc->sc_phyad); 1910 #endif 1911 1912 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 1913 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 1914 sc->sc_flags |= GEM_LINK; 1915 else 1916 sc->sc_flags &= ~GEM_LINK; 1917 1918 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 1919 case IFM_1000_SX: 1920 case IFM_1000_LX: 1921 case IFM_1000_CX: 1922 case IFM_1000_T: 1923 gigabit = 1; 1924 break; 1925 default: 1926 gigabit = 0; 1927 } 1928 1929 /* 1930 * The configuration done here corresponds to the steps F) and 1931 * G) and as far as enabling of RX and TX MAC goes also step H) 1932 * of the initialization sequence outlined in section 3.2.1 of 1933 * the GEM Gigabit Ethernet ASIC Specification. 1934 */ 1935 1936 rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 1937 rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE); 1938 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 1939 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1940 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 1941 else if (gigabit != 0) { 1942 rxcfg |= GEM_MAC_RX_CARR_EXTEND; 1943 txcfg |= GEM_MAC_TX_CARR_EXTEND; 1944 } 1945 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0); 1946 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4, 1947 BUS_SPACE_BARRIER_WRITE); 1948 if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 1949 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 1950 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg); 1951 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0); 1952 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 1953 BUS_SPACE_BARRIER_WRITE); 1954 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1955 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 1956 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg); 1957 1958 v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) & 1959 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 1960 #ifdef notyet 1961 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 1962 IFM_ETH_RXPAUSE) != 0) 1963 v |= GEM_MAC_CC_RX_PAUSE; 1964 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 1965 IFM_ETH_TXPAUSE) != 0) 1966 v |= GEM_MAC_CC_TX_PAUSE; 1967 #endif 1968 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v); 1969 1970 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 1971 gigabit != 0) 1972 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME, 1973 GEM_MAC_SLOT_TIME_CARR_EXTEND); 1974 else 1975 bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME, 1976 GEM_MAC_SLOT_TIME_NORMAL); 1977 1978 /* XIF Configuration */ 1979 v = GEM_MAC_XIF_LINK_LED; 1980 v |= GEM_MAC_XIF_TX_MII_ENA; 1981 if ((sc->sc_flags & GEM_SERDES) == 0) { 1982 if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) & 1983 GEM_MIF_CONFIG_PHY_SEL) != 0 && 1984 
void
gem_mii_statchg(device_t dev)
{
	struct gem_softc *sc;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);

#ifdef GEM_DEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling of the RX and TX MACs goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of the
	 * GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);

	v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

static int
gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			gem_stop(ifp, 0);
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
gem_setladrf(struct gem_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	int i;
	uint32_t hash[16];
	uint32_t crc, v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast)
	 * and the hash filter.  Depending on the case, the right bit will be
	 * enabled below.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 */
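	/*
	 * Worked example (illustrative only): if ether_crc32_le() yields a
	 * CRC whose top 8 bits are 0xa7, then crc >> 4 == 0xa selects
	 * hash[10] and crc & 15 == 7 selects bit (15 - 7) == 8, so the
	 * loop below ends up doing hash[10] |= 0x0100.
	 */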

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);

 chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}