1 /*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 35 */ 36 37 #if 0 38 #define GEM_DEBUG 39 #endif 40 41 #if 0 /* XXX: In case of emergency, re-enable this. */ 42 #define GEM_RINT_TIMEOUT 43 #endif 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/bus.h> 48 #include <sys/callout.h> 49 #include <sys/endian.h> 50 #include <sys/mbuf.h> 51 #include <sys/malloc.h> 52 #include <sys/kernel.h> 53 #include <sys/lock.h> 54 #include <sys/module.h> 55 #include <sys/mutex.h> 56 #include <sys/socket.h> 57 #include <sys/sockio.h> 58 #include <sys/rman.h> 59 60 #include <net/bpf.h> 61 #include <net/ethernet.h> 62 #include <net/if.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_types.h> 67 #include <net/if_vlan_var.h> 68 69 #include <netinet/in.h> 70 #include <netinet/in_systm.h> 71 #include <netinet/ip.h> 72 #include <netinet/tcp.h> 73 #include <netinet/udp.h> 74 75 #include <machine/bus.h> 76 77 #include <dev/mii/mii.h> 78 #include <dev/mii/miivar.h> 79 80 #include <dev/gem/if_gemreg.h> 81 #include <dev/gem/if_gemvar.h> 82 83 CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192); 84 CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192); 85 86 #define TRIES 10000 87 88 /* 89 * The GEM hardware support basic TCP/UDP checksum offloading. However, 90 * the hardware doesn't compensate the checksum for UDP datagram which 91 * can yield to 0x0. As a safe guard, UDP checksum offload is disabled 92 * by default. It can be reactivated by setting special link option 93 * link0 with ifconfig(8). 
94 */ 95 #define GEM_CSUM_FEATURES (CSUM_TCP) 96 97 static void gem_start(struct ifnet *); 98 static void gem_start_locked(struct ifnet *); 99 static void gem_stop(struct ifnet *, int); 100 static int gem_ioctl(struct ifnet *, u_long, caddr_t); 101 static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); 102 static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *); 103 static __inline void gem_rxcksum(struct mbuf *, uint64_t); 104 static void gem_tick(void *); 105 static int gem_watchdog(struct gem_softc *); 106 static void gem_init(void *); 107 static void gem_init_locked(struct gem_softc *); 108 static void gem_init_regs(struct gem_softc *); 109 static u_int gem_ringsize(u_int); 110 static int gem_meminit(struct gem_softc *); 111 static struct mbuf *gem_defrag(struct mbuf *, int, int); 112 static int gem_load_txmbuf(struct gem_softc *, struct mbuf **); 113 static void gem_mifinit(struct gem_softc *); 114 static int gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t, 115 u_int32_t); 116 static void gem_reset(struct gem_softc *); 117 static int gem_reset_rx(struct gem_softc *); 118 static void gem_reset_rxdma(struct gem_softc *sc); 119 static int gem_reset_tx(struct gem_softc *); 120 static int gem_disable_rx(struct gem_softc *); 121 static int gem_disable_tx(struct gem_softc *); 122 static void gem_rxdrain(struct gem_softc *); 123 static int gem_add_rxbuf(struct gem_softc *, int); 124 static void gem_setladrf(struct gem_softc *); 125 126 struct mbuf *gem_get(struct gem_softc *, int, int); 127 static void gem_eint(struct gem_softc *, u_int); 128 static void gem_rint(struct gem_softc *); 129 #ifdef GEM_RINT_TIMEOUT 130 static void gem_rint_timeout(void *); 131 #endif 132 static void gem_tint(struct gem_softc *); 133 134 devclass_t gem_devclass; 135 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 136 MODULE_DEPEND(gem, miibus, 1, 1, 1); 137 138 #ifdef GEM_DEBUG 139 #include <sys/ktr.h> 140 #define KTR_GEM KTR_CT2 141 #endif 142 143 #define GEM_NSEGS GEM_NTXDESC 144 145 /* 146 * gem_attach: 147 * 148 * Attach a Gem interface to the system. 149 */ 150 int 151 gem_attach(sc) 152 struct gem_softc *sc; 153 { 154 struct ifnet *ifp; 155 int i, error; 156 u_int32_t v; 157 158 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 159 if (ifp == NULL) 160 return (ENOSPC); 161 162 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 163 #ifdef GEM_RINT_TIMEOUT 164 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 165 #endif 166 167 /* Make sure the chip is stopped. 
*/ 168 ifp->if_softc = sc; 169 gem_reset(sc); 170 171 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 172 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 173 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 174 &sc->sc_pdmatag); 175 if (error) 176 goto fail_ifnet; 177 178 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 179 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 180 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 181 if (error) 182 goto fail_ptag; 183 184 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 185 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 186 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 187 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 188 if (error) 189 goto fail_rtag; 190 191 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 192 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 193 sizeof(struct gem_control_data), 1, 194 sizeof(struct gem_control_data), 0, 195 NULL, NULL, &sc->sc_cdmatag); 196 if (error) 197 goto fail_ttag; 198 199 /* 200 * Allocate the control data structures, and create and load the 201 * DMA map for it. 202 */ 203 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 204 (void **)&sc->sc_control_data, 205 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 206 &sc->sc_cddmamap))) { 207 device_printf(sc->sc_dev, "unable to allocate control data," 208 " error = %d\n", error); 209 goto fail_ctag; 210 } 211 212 sc->sc_cddma = 0; 213 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 214 sc->sc_control_data, sizeof(struct gem_control_data), 215 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 216 device_printf(sc->sc_dev, "unable to load control data DMA " 217 "map, error = %d\n", error); 218 goto fail_cmem; 219 } 220 221 /* 222 * Initialize the transmit job descriptors. 223 */ 224 STAILQ_INIT(&sc->sc_txfreeq); 225 STAILQ_INIT(&sc->sc_txdirtyq); 226 227 /* 228 * Create the transmit buffer DMA maps. 229 */ 230 error = ENOMEM; 231 for (i = 0; i < GEM_TXQUEUELEN; i++) { 232 struct gem_txsoft *txs; 233 234 txs = &sc->sc_txsoft[i]; 235 txs->txs_mbuf = NULL; 236 txs->txs_ndescs = 0; 237 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 238 &txs->txs_dmamap)) != 0) { 239 device_printf(sc->sc_dev, "unable to create tx DMA map " 240 "%d, error = %d\n", i, error); 241 goto fail_txd; 242 } 243 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 244 } 245 246 /* 247 * Create the receive buffer DMA maps. 248 */ 249 for (i = 0; i < GEM_NRXDESC; i++) { 250 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 251 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 252 device_printf(sc->sc_dev, "unable to create rx DMA map " 253 "%d, error = %d\n", i, error); 254 goto fail_rxd; 255 } 256 sc->sc_rxsoft[i].rxs_mbuf = NULL; 257 } 258 259 /* Bad things will happen when touching this register on ERI. */ 260 if (sc->sc_variant != GEM_SUN_ERI) 261 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 262 GEM_MII_DATAPATH_MII); 263 264 gem_mifinit(sc); 265 266 /* 267 * Look for an external PHY. 
268 */ 269 error = ENXIO; 270 v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG); 271 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 272 v |= GEM_MIF_CONFIG_PHY_SEL; 273 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 274 switch (sc->sc_variant) { 275 case GEM_SUN_ERI: 276 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 277 break; 278 default: 279 sc->sc_phyad = -1; 280 break; 281 } 282 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 283 gem_mediachange, gem_mediastatus); 284 } 285 286 /* 287 * Fall back on an internal PHY if no external PHY was found. 288 */ 289 if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) { 290 v &= ~GEM_MIF_CONFIG_PHY_SEL; 291 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v); 292 switch (sc->sc_variant) { 293 case GEM_SUN_ERI: 294 case GEM_APPLE_K2_GMAC: 295 sc->sc_phyad = GEM_PHYAD_INTERNAL; 296 break; 297 case GEM_APPLE_GMAC: 298 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 299 break; 300 default: 301 sc->sc_phyad = -1; 302 break; 303 } 304 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 305 gem_mediachange, gem_mediastatus); 306 } 307 308 /* 309 * Try the external PCS SERDES if we didn't find any PHYs. 310 */ 311 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 312 bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE, 313 GEM_MII_DATAPATH_SERDES); 314 bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL, 315 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 316 bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 317 GEM_MII_CONFIG_ENABLE); 318 sc->sc_flags |= GEM_SERDES; 319 sc->sc_phyad = GEM_PHYAD_EXTERNAL; 320 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 321 gem_mediachange, gem_mediastatus); 322 } 323 324 if (error != 0) { 325 device_printf(sc->sc_dev, "PHY probe failed: %d\n", error); 326 goto fail_rxd; 327 } 328 sc->sc_mii = device_get_softc(sc->sc_miibus); 329 330 /* 331 * From this point forward, the attachment cannot fail. A failure 332 * before this point releases all resources that may have been 333 * allocated. 334 */ 335 336 /* Get RX FIFO size */ 337 sc->sc_rxfifosize = 64 * 338 bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE); 339 340 /* Get TX FIFO size */ 341 v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE); 342 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 343 sc->sc_rxfifosize / 1024, v / 16); 344 345 sc->sc_csum_features = GEM_CSUM_FEATURES; 346 /* Initialize ifnet structure. */ 347 ifp->if_softc = sc; 348 if_initname(ifp, device_get_name(sc->sc_dev), 349 device_get_unit(sc->sc_dev)); 350 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 351 ifp->if_start = gem_start; 352 ifp->if_ioctl = gem_ioctl; 353 ifp->if_init = gem_init; 354 IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN); 355 ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN; 356 IFQ_SET_READY(&ifp->if_snd); 357 358 /* Attach the interface. */ 359 ether_ifattach(ifp, sc->sc_enaddr); 360 361 /* 362 * Tell the upper layer(s) we support long frames/checksum offloads. 363 */ 364 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 365 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 366 ifp->if_hwassist |= sc->sc_csum_features; 367 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; 368 369 return (0); 370 371 /* 372 * Free any resources we've allocated during the failed attach 373 * attempt. Do this in reverse order and fall through. 
374 */ 375 fail_rxd: 376 for (i = 0; i < GEM_NRXDESC; i++) { 377 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 378 bus_dmamap_destroy(sc->sc_rdmatag, 379 sc->sc_rxsoft[i].rxs_dmamap); 380 } 381 fail_txd: 382 for (i = 0; i < GEM_TXQUEUELEN; i++) { 383 if (sc->sc_txsoft[i].txs_dmamap != NULL) 384 bus_dmamap_destroy(sc->sc_tdmatag, 385 sc->sc_txsoft[i].txs_dmamap); 386 } 387 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 388 fail_cmem: 389 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 390 sc->sc_cddmamap); 391 fail_ctag: 392 bus_dma_tag_destroy(sc->sc_cdmatag); 393 fail_ttag: 394 bus_dma_tag_destroy(sc->sc_tdmatag); 395 fail_rtag: 396 bus_dma_tag_destroy(sc->sc_rdmatag); 397 fail_ptag: 398 bus_dma_tag_destroy(sc->sc_pdmatag); 399 fail_ifnet: 400 if_free(ifp); 401 return (error); 402 } 403 404 void 405 gem_detach(sc) 406 struct gem_softc *sc; 407 { 408 struct ifnet *ifp = sc->sc_ifp; 409 int i; 410 411 GEM_LOCK(sc); 412 gem_stop(ifp, 1); 413 GEM_UNLOCK(sc); 414 callout_drain(&sc->sc_tick_ch); 415 #ifdef GEM_RINT_TIMEOUT 416 callout_drain(&sc->sc_rx_ch); 417 #endif 418 ether_ifdetach(ifp); 419 if_free(ifp); 420 device_delete_child(sc->sc_dev, sc->sc_miibus); 421 422 for (i = 0; i < GEM_NRXDESC; i++) { 423 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 424 bus_dmamap_destroy(sc->sc_rdmatag, 425 sc->sc_rxsoft[i].rxs_dmamap); 426 } 427 for (i = 0; i < GEM_TXQUEUELEN; i++) { 428 if (sc->sc_txsoft[i].txs_dmamap != NULL) 429 bus_dmamap_destroy(sc->sc_tdmatag, 430 sc->sc_txsoft[i].txs_dmamap); 431 } 432 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 433 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 434 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 435 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 436 sc->sc_cddmamap); 437 bus_dma_tag_destroy(sc->sc_cdmatag); 438 bus_dma_tag_destroy(sc->sc_tdmatag); 439 bus_dma_tag_destroy(sc->sc_rdmatag); 440 bus_dma_tag_destroy(sc->sc_pdmatag); 441 } 442 443 void 444 gem_suspend(sc) 445 struct gem_softc *sc; 446 { 447 struct ifnet *ifp = sc->sc_ifp; 448 449 GEM_LOCK(sc); 450 gem_stop(ifp, 0); 451 GEM_UNLOCK(sc); 452 } 453 454 void 455 gem_resume(sc) 456 struct gem_softc *sc; 457 { 458 struct ifnet *ifp = sc->sc_ifp; 459 460 GEM_LOCK(sc); 461 /* 462 * On resume all registers have to be initialized again like 463 * after power-on. 
464 */ 465 sc->sc_flags &= ~GEM_INITED; 466 if (ifp->if_flags & IFF_UP) 467 gem_init_locked(sc); 468 GEM_UNLOCK(sc); 469 } 470 471 static __inline void 472 gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags) 473 { 474 struct ip *ip; 475 uint64_t offset, offset2; 476 char *p; 477 478 offset = sizeof(struct ip) + ETHER_HDR_LEN; 479 for(; m && m->m_len == 0; m = m->m_next) 480 ; 481 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 482 device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n", 483 __func__); 484 /* checksum will be corrupted */ 485 goto sendit; 486 } 487 if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) { 488 if (m->m_len != ETHER_HDR_LEN) { 489 device_printf(sc->sc_dev, 490 "%s: m_len != ETHER_HDR_LEN\n", __func__); 491 /* checksum will be corrupted */ 492 goto sendit; 493 } 494 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 495 ; 496 if (m == NULL) { 497 /* checksum will be corrupted */ 498 goto sendit; 499 } 500 ip = mtod(m, struct ip *); 501 } else { 502 p = mtod(m, uint8_t *); 503 p += ETHER_HDR_LEN; 504 ip = (struct ip *)p; 505 } 506 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 507 508 sendit: 509 offset2 = m->m_pkthdr.csum_data; 510 *cflags = offset << GEM_TD_CXSUM_STARTSHFT; 511 *cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT); 512 *cflags |= GEM_TD_CXSUM_ENABLE; 513 } 514 515 static __inline void 516 gem_rxcksum(struct mbuf *m, uint64_t flags) 517 { 518 struct ether_header *eh; 519 struct ip *ip; 520 struct udphdr *uh; 521 int32_t hlen, len, pktlen; 522 uint16_t cksum, *opts; 523 uint32_t temp32; 524 525 pktlen = m->m_pkthdr.len; 526 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 527 return; 528 eh = mtod(m, struct ether_header *); 529 if (eh->ether_type != htons(ETHERTYPE_IP)) 530 return; 531 ip = (struct ip *)(eh + 1); 532 if (ip->ip_v != IPVERSION) 533 return; 534 535 hlen = ip->ip_hl << 2; 536 pktlen -= sizeof(struct ether_header); 537 if (hlen < sizeof(struct ip)) 538 return; 539 if (ntohs(ip->ip_len) < hlen) 540 return; 541 if (ntohs(ip->ip_len) != pktlen) 542 return; 543 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 544 return; /* can't handle fragmented packet */ 545 546 switch (ip->ip_p) { 547 case IPPROTO_TCP: 548 if (pktlen < (hlen + sizeof(struct tcphdr))) 549 return; 550 break; 551 case IPPROTO_UDP: 552 if (pktlen < (hlen + sizeof(struct udphdr))) 553 return; 554 uh = (struct udphdr *)((uint8_t *)ip + hlen); 555 if (uh->uh_sum == 0) 556 return; /* no checksum */ 557 break; 558 default: 559 return; 560 } 561 562 cksum = ~(flags & GEM_RD_CHECKSUM); 563 /* checksum fixup for IP options */ 564 len = hlen - sizeof(struct ip); 565 if (len > 0) { 566 opts = (uint16_t *)(ip + 1); 567 for (; len > 0; len -= sizeof(uint16_t), opts++) { 568 temp32 = cksum - *opts; 569 temp32 = (temp32 >> 16) + (temp32 & 65535); 570 cksum = temp32 & 65535; 571 } 572 } 573 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 574 m->m_pkthdr.csum_data = cksum; 575 } 576 577 static void 578 gem_cddma_callback(xsc, segs, nsegs, error) 579 void *xsc; 580 bus_dma_segment_t *segs; 581 int nsegs; 582 int error; 583 { 584 struct gem_softc *sc = (struct gem_softc *)xsc; 585 586 if (error != 0) 587 return; 588 if (nsegs != 1) { 589 /* can't happen... 
*/ 590 panic("%s: bad control buffer segment count", __func__); 591 } 592 sc->sc_cddma = segs[0].ds_addr; 593 } 594 595 static void 596 gem_tick(arg) 597 void *arg; 598 { 599 struct gem_softc *sc = arg; 600 struct ifnet *ifp; 601 602 GEM_LOCK_ASSERT(sc, MA_OWNED); 603 604 ifp = sc->sc_ifp; 605 /* 606 * Unload collision counters 607 */ 608 ifp->if_collisions += 609 bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) + 610 bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) + 611 bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) + 612 bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT); 613 614 /* 615 * then clear the hardware counters. 616 */ 617 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 618 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 619 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 620 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 621 622 mii_tick(sc->sc_mii); 623 624 if (gem_watchdog(sc) == EJUSTRETURN) 625 return; 626 627 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 628 } 629 630 static int 631 gem_bitwait(sc, r, clr, set) 632 struct gem_softc *sc; 633 bus_addr_t r; 634 u_int32_t clr; 635 u_int32_t set; 636 { 637 int i; 638 u_int32_t reg; 639 640 for (i = TRIES; i--; DELAY(100)) { 641 reg = bus_read_4(sc->sc_res[0], r); 642 if ((reg & clr) == 0 && (reg & set) == set) 643 return (1); 644 } 645 return (0); 646 } 647 648 static void 649 gem_reset(sc) 650 struct gem_softc *sc; 651 { 652 653 #ifdef GEM_DEBUG 654 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 655 #endif 656 gem_reset_rx(sc); 657 gem_reset_tx(sc); 658 659 /* Do a full reset */ 660 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 661 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 662 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 663 device_printf(sc->sc_dev, "cannot reset device\n"); 664 } 665 666 /* 667 * gem_rxdrain: 668 * 669 * Drain the receive queue. 670 */ 671 static void 672 gem_rxdrain(sc) 673 struct gem_softc *sc; 674 { 675 struct gem_rxsoft *rxs; 676 int i; 677 678 for (i = 0; i < GEM_NRXDESC; i++) { 679 rxs = &sc->sc_rxsoft[i]; 680 if (rxs->rxs_mbuf != NULL) { 681 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 682 BUS_DMASYNC_POSTREAD); 683 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 684 m_freem(rxs->rxs_mbuf); 685 rxs->rxs_mbuf = NULL; 686 } 687 } 688 } 689 690 /* 691 * Reset the whole thing. 692 */ 693 static void 694 gem_stop(ifp, disable) 695 struct ifnet *ifp; 696 int disable; 697 { 698 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 699 struct gem_txsoft *txs; 700 701 #ifdef GEM_DEBUG 702 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 703 #endif 704 705 callout_stop(&sc->sc_tick_ch); 706 #ifdef GEM_RINT_TIMEOUT 707 callout_stop(&sc->sc_rx_ch); 708 #endif 709 710 /* XXX - Should we reset these instead? */ 711 gem_disable_tx(sc); 712 gem_disable_rx(sc); 713 714 /* 715 * Release any queued transmit buffers. 716 */ 717 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 718 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 719 if (txs->txs_ndescs != 0) { 720 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 721 BUS_DMASYNC_POSTWRITE); 722 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 723 if (txs->txs_mbuf != NULL) { 724 m_freem(txs->txs_mbuf); 725 txs->txs_mbuf = NULL; 726 } 727 } 728 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 729 } 730 731 if (disable) 732 gem_rxdrain(sc); 733 734 /* 735 * Mark the interface down and cancel the watchdog timer. 
736 */ 737 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 738 sc->sc_flags &= ~GEM_LINK; 739 sc->sc_wdog_timer = 0; 740 } 741 742 /* 743 * Reset the receiver 744 */ 745 static int 746 gem_reset_rx(sc) 747 struct gem_softc *sc; 748 { 749 750 /* 751 * Resetting while DMA is in progress can cause a bus hang, so we 752 * disable DMA first. 753 */ 754 gem_disable_rx(sc); 755 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0); 756 bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 757 if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 758 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 759 760 /* Finally, reset the ERX */ 761 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX); 762 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 763 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 764 device_printf(sc->sc_dev, "cannot reset receiver\n"); 765 return (1); 766 } 767 return (0); 768 } 769 770 /* 771 * Reset the receiver DMA engine. 772 * 773 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 774 * etc in order to reset the receiver DMA engine only and not do a full 775 * reset which amongst others also downs the link and clears the FIFOs. 776 */ 777 static void 778 gem_reset_rxdma(struct gem_softc *sc) 779 { 780 int i; 781 782 if (gem_reset_rx(sc) != 0) 783 return (gem_init_locked(sc)); 784 for (i = 0; i < GEM_NRXDESC; i++) 785 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 786 GEM_UPDATE_RXDESC(sc, i); 787 sc->sc_rxptr = 0; 788 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 789 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 790 791 /* NOTE: we use only 32-bit DMA addresses here. */ 792 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0); 793 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 794 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4); 795 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 796 gem_ringsize(GEM_NRXDESC /*XXX*/) | 797 ((ETHER_HDR_LEN + sizeof(struct ip)) << 798 GEM_RX_CONFIG_CXM_START_SHFT) | 799 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 800 (2 << GEM_RX_CONFIG_FBOFF_SHFT)); 801 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, 802 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 803 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH, 804 (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12)); 805 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 806 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); 807 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK, 808 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 809 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 810 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE); 811 } 812 813 /* 814 * Reset the transmitter 815 */ 816 static int 817 gem_reset_tx(sc) 818 struct gem_softc *sc; 819 { 820 821 /* 822 * Resetting while DMA is in progress can cause a bus hang, so we 823 * disable DMA first. 
824 */ 825 gem_disable_tx(sc); 826 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0); 827 bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 828 if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 829 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 830 831 /* Finally, reset the ETX */ 832 bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX); 833 bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 834 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) { 835 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 836 return (1); 837 } 838 return (0); 839 } 840 841 /* 842 * disable receiver. 843 */ 844 static int 845 gem_disable_rx(sc) 846 struct gem_softc *sc; 847 { 848 u_int32_t cfg; 849 850 /* Flip the enable bit */ 851 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 852 cfg &= ~GEM_MAC_RX_ENABLE; 853 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg); 854 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 855 BUS_SPACE_BARRIER_WRITE); 856 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 857 } 858 859 /* 860 * disable transmitter. 861 */ 862 static int 863 gem_disable_tx(sc) 864 struct gem_softc *sc; 865 { 866 u_int32_t cfg; 867 868 /* Flip the enable bit */ 869 cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG); 870 cfg &= ~GEM_MAC_TX_ENABLE; 871 bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg); 872 bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4, 873 BUS_SPACE_BARRIER_WRITE); 874 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 875 } 876 877 /* 878 * Initialize interface. 879 */ 880 static int 881 gem_meminit(sc) 882 struct gem_softc *sc; 883 { 884 struct gem_rxsoft *rxs; 885 int i, error; 886 887 /* 888 * Initialize the transmit descriptor ring. 889 */ 890 for (i = 0; i < GEM_NTXDESC; i++) { 891 sc->sc_txdescs[i].gd_flags = 0; 892 sc->sc_txdescs[i].gd_addr = 0; 893 } 894 sc->sc_txfree = GEM_MAXTXFREE; 895 sc->sc_txnext = 0; 896 sc->sc_txwin = 0; 897 898 /* 899 * Initialize the receive descriptor and receive job 900 * descriptor rings. 901 */ 902 for (i = 0; i < GEM_NRXDESC; i++) { 903 rxs = &sc->sc_rxsoft[i]; 904 if (rxs->rxs_mbuf == NULL) { 905 if ((error = gem_add_rxbuf(sc, i)) != 0) { 906 device_printf(sc->sc_dev, "unable to " 907 "allocate or map rx buffer %d, error = " 908 "%d\n", i, error); 909 /* 910 * XXX Should attempt to run with fewer receive 911 * XXX buffers instead of just failing. 
912 */ 913 gem_rxdrain(sc); 914 return (1); 915 } 916 } else 917 GEM_INIT_RXDESC(sc, i); 918 } 919 sc->sc_rxptr = 0; 920 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 921 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 922 923 return (0); 924 } 925 926 static u_int 927 gem_ringsize(sz) 928 u_int sz; 929 { 930 931 switch (sz) { 932 case 32: 933 return (GEM_RING_SZ_32); 934 case 64: 935 return (GEM_RING_SZ_64); 936 case 128: 937 return (GEM_RING_SZ_128); 938 case 256: 939 return (GEM_RING_SZ_256); 940 case 512: 941 return (GEM_RING_SZ_512); 942 case 1024: 943 return (GEM_RING_SZ_1024); 944 case 2048: 945 return (GEM_RING_SZ_2048); 946 case 4096: 947 return (GEM_RING_SZ_4096); 948 case 8192: 949 return (GEM_RING_SZ_8192); 950 default: 951 printf("%s: invalid ring size %d\n", __func__, sz); 952 return (GEM_RING_SZ_32); 953 } 954 } 955 956 static void 957 gem_init(xsc) 958 void *xsc; 959 { 960 struct gem_softc *sc = (struct gem_softc *)xsc; 961 962 GEM_LOCK(sc); 963 gem_init_locked(sc); 964 GEM_UNLOCK(sc); 965 } 966 967 /* 968 * Initialization of interface; set up initialization block 969 * and transmit/receive descriptor rings. 970 */ 971 static void 972 gem_init_locked(sc) 973 struct gem_softc *sc; 974 { 975 struct ifnet *ifp = sc->sc_ifp; 976 u_int32_t v; 977 978 GEM_LOCK_ASSERT(sc, MA_OWNED); 979 980 #ifdef GEM_DEBUG 981 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 982 __func__); 983 #endif 984 /* 985 * Initialization sequence. The numbered steps below correspond 986 * to the sequence outlined in section 6.3.5.1 in the Ethernet 987 * Channel Engine manual (part of the PCIO manual). 988 * See also the STP2002-STQ document from Sun Microsystems. 989 */ 990 991 /* step 1 & 2. Reset the Ethernet Channel */ 992 gem_stop(sc->sc_ifp, 0); 993 gem_reset(sc); 994 #ifdef GEM_DEBUG 995 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 996 __func__); 997 #endif 998 999 /* Re-initialize the MIF */ 1000 gem_mifinit(sc); 1001 1002 /* step 3. Setup data structures in host memory */ 1003 if (gem_meminit(sc) != 0) 1004 return; 1005 1006 /* step 4. TX MAC registers & counters */ 1007 gem_init_regs(sc); 1008 1009 /* step 5. RX MAC registers & counters */ 1010 gem_setladrf(sc); 1011 1012 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 1013 /* NOTE: we use only 32-bit DMA addresses here. */ 1014 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0); 1015 bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 1016 1017 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0); 1018 bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 1019 #ifdef GEM_DEBUG 1020 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", 1021 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 1022 #endif 1023 1024 /* step 8. 
Global Configuration & Interrupt Mask */ 1025 bus_write_4(sc->sc_res[0], GEM_INTMASK, 1026 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 1027 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 1028 GEM_INTR_BERR 1029 #ifdef GEM_DEBUG 1030 | GEM_INTR_PCS | GEM_INTR_MIF 1031 #endif 1032 )); 1033 bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK, 1034 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 1035 bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK, 1036 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP); 1037 #ifdef GEM_DEBUG 1038 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 1039 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 1040 #else 1041 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 1042 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 1043 #endif 1044 1045 /* step 9. ETX Configuration: use mostly default values */ 1046 1047 /* Enable DMA */ 1048 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 1049 bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 1050 v|GEM_TX_CONFIG_TXDMA_EN| 1051 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 1052 1053 /* step 10. ERX Configuration */ 1054 1055 /* Encode Receive Descriptor ring size. */ 1056 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 1057 /* Rx TCP/UDP checksum offset */ 1058 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 1059 GEM_RX_CONFIG_CXM_START_SHFT); 1060 1061 /* Enable DMA */ 1062 bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 1063 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 1064 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN); 1065 1066 bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, 1067 (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6); 1068 1069 /* 1070 * The following value is for an OFF Threshold of about 3/4 full 1071 * and an ON Threshold of 1/4 full. 1072 */ 1073 bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH, 1074 (3 * sc->sc_rxfifosize / 256) | 1075 ( (sc->sc_rxfifosize / 256) << 12)); 1076 1077 /* step 11. Configure Media */ 1078 1079 /* step 12. RX_MAC Configuration Register */ 1080 v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG); 1081 v |= GEM_MAC_RX_STRIP_CRC; 1082 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0); 1083 bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4, 1084 BUS_SPACE_BARRIER_WRITE); 1085 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 1086 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 1087 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v); 1088 1089 /* step 14. Issue Transmit Pending command */ 1090 1091 /* step 15. Give the reciever a swift kick */ 1092 bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4); 1093 1094 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1095 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1096 sc->sc_ifflags = ifp->if_flags; 1097 1098 sc->sc_flags &= ~GEM_LINK; 1099 mii_mediachg(sc->sc_mii); 1100 1101 /* Start the one second timer. */ 1102 sc->sc_wdog_timer = 0; 1103 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1104 } 1105 1106 /* 1107 * It's copy of ath_defrag(ath(4)). 1108 * 1109 * Defragment an mbuf chain, returning at most maxfrags separate 1110 * mbufs+clusters. If this is not possible NULL is returned and 1111 * the original mbuf chain is left in it's present (potentially 1112 * modified) state. We use two techniques: collapsing consecutive 1113 * mbufs and replacing consecutive mbufs by a cluster. 1114 */ 1115 static struct mbuf * 1116 gem_defrag(m0, how, maxfrags) 1117 struct mbuf *m0; 1118 int how; 1119 int maxfrags; 1120 { 1121 struct mbuf *m, *n, *n2, **prev; 1122 u_int curfrags; 1123 1124 /* 1125 * Calculate the current number of frags. 
1126 */ 1127 curfrags = 0; 1128 for (m = m0; m != NULL; m = m->m_next) 1129 curfrags++; 1130 /* 1131 * First, try to collapse mbufs. Note that we always collapse 1132 * towards the front so we don't need to deal with moving the 1133 * pkthdr. This may be suboptimal if the first mbuf has much 1134 * less data than the following. 1135 */ 1136 m = m0; 1137 again: 1138 for (;;) { 1139 n = m->m_next; 1140 if (n == NULL) 1141 break; 1142 if ((m->m_flags & M_RDONLY) == 0 && 1143 n->m_len < M_TRAILINGSPACE(m)) { 1144 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 1145 n->m_len); 1146 m->m_len += n->m_len; 1147 m->m_next = n->m_next; 1148 m_free(n); 1149 if (--curfrags <= maxfrags) 1150 return (m0); 1151 } else 1152 m = n; 1153 } 1154 KASSERT(maxfrags > 1, 1155 ("maxfrags %u, but normal collapse failed", maxfrags)); 1156 /* 1157 * Collapse consecutive mbufs to a cluster. 1158 */ 1159 prev = &m0->m_next; /* NB: not the first mbuf */ 1160 while ((n = *prev) != NULL) { 1161 if ((n2 = n->m_next) != NULL && 1162 n->m_len + n2->m_len < MCLBYTES) { 1163 m = m_getcl(how, MT_DATA, 0); 1164 if (m == NULL) 1165 goto bad; 1166 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 1167 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 1168 n2->m_len); 1169 m->m_len = n->m_len + n2->m_len; 1170 m->m_next = n2->m_next; 1171 *prev = m; 1172 m_free(n); 1173 m_free(n2); 1174 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 1175 return m0; 1176 /* 1177 * Still not there, try the normal collapse 1178 * again before we allocate another cluster. 1179 */ 1180 goto again; 1181 } 1182 prev = &n->m_next; 1183 } 1184 /* 1185 * No place where we can collapse to a cluster; punt. 1186 * This can occur if, for example, you request 2 frags 1187 * but the packet requires that both be clusters (we 1188 * never reallocate the first mbuf to avoid moving the 1189 * packet header). 1190 */ 1191 bad: 1192 return (NULL); 1193 } 1194 1195 static int 1196 gem_load_txmbuf(sc, m_head) 1197 struct gem_softc *sc; 1198 struct mbuf **m_head; 1199 { 1200 struct gem_txsoft *txs; 1201 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1202 struct mbuf *m; 1203 uint64_t flags, cflags; 1204 int error, nexttx, nsegs, seg; 1205 1206 /* Get a work queue entry. */ 1207 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1208 /* Ran out of descriptors. */ 1209 return (ENOBUFS); 1210 } 1211 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1212 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1213 if (error == EFBIG) { 1214 m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS); 1215 if (m == NULL) { 1216 m_freem(*m_head); 1217 *m_head = NULL; 1218 return (ENOBUFS); 1219 } 1220 *m_head = m; 1221 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1222 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1223 if (error != 0) { 1224 m_freem(*m_head); 1225 *m_head = NULL; 1226 return (error); 1227 } 1228 } else if (error != 0) 1229 return (error); 1230 if (nsegs == 0) { 1231 m_freem(*m_head); 1232 *m_head = NULL; 1233 return (EIO); 1234 } 1235 1236 /* 1237 * Ensure we have enough descriptors free to describe 1238 * the packet. Note, we always reserve one descriptor 1239 * at the end of the ring as a termination point, to 1240 * prevent wrap-around. 
1241 */ 1242 if (nsegs > sc->sc_txfree - 1) { 1243 txs->txs_ndescs = 0; 1244 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1245 return (ENOBUFS); 1246 } 1247 1248 flags = cflags = 0; 1249 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 1250 gem_txcksum(sc, *m_head, &cflags); 1251 1252 txs->txs_ndescs = nsegs; 1253 txs->txs_firstdesc = sc->sc_txnext; 1254 nexttx = txs->txs_firstdesc; 1255 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1256 #ifdef GEM_DEBUG 1257 CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len " 1258 "%lx, addr %#lx (%#lx)", __func__, seg, nexttx, 1259 txsegs[seg].ds_len, txsegs[seg].ds_addr, 1260 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr)); 1261 #endif 1262 sc->sc_txdescs[nexttx].gd_addr = 1263 GEM_DMA_WRITE(sc, txsegs[seg].ds_addr); 1264 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1265 ("%s: segment size too large!", __func__)); 1266 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1267 sc->sc_txdescs[nexttx].gd_flags = 1268 GEM_DMA_WRITE(sc, flags | cflags); 1269 txs->txs_lastdesc = nexttx; 1270 } 1271 1272 /* set EOP on the last descriptor */ 1273 #ifdef GEM_DEBUG 1274 CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg, 1275 nexttx); 1276 #endif 1277 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1278 GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET); 1279 1280 /* Lastly set SOP on the first descriptor */ 1281 #ifdef GEM_DEBUG 1282 CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg, 1283 nexttx); 1284 #endif 1285 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1286 sc->sc_txwin = 0; 1287 flags |= GEM_TD_INTERRUPT_ME; 1288 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1289 GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME | 1290 GEM_TD_START_OF_PACKET); 1291 } else 1292 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1293 GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET); 1294 1295 /* Sync the DMA map. */ 1296 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); 1297 1298 #ifdef GEM_DEBUG 1299 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1300 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs); 1301 #endif 1302 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1303 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1304 txs->txs_mbuf = *m_head; 1305 1306 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1307 sc->sc_txfree -= txs->txs_ndescs; 1308 1309 return (0); 1310 } 1311 1312 static void 1313 gem_init_regs(sc) 1314 struct gem_softc *sc; 1315 { 1316 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1317 1318 /* These regs are not cleared on reset */ 1319 if ((sc->sc_flags & GEM_INITED) == 0) { 1320 /* Wooo. Magic values. */ 1321 bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0); 1322 bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8); 1323 bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4); 1324 1325 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1326 /* Max frame and max burst size */ 1327 bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME, 1328 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1329 1330 bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7); 1331 bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4); 1332 bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10); 1333 /* Dunno.... 
*/ 1334 bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088); 1335 bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED, 1336 ((laddr[5]<<8)|laddr[4])&0x3ff); 1337 1338 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1339 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0); 1340 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0); 1341 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0); 1342 1343 /* MAC control addr set to 01:80:c2:00:00:01 */ 1344 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001); 1345 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200); 1346 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180); 1347 1348 /* MAC filter addr set to 0:0:0:0:0:0 */ 1349 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0); 1350 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0); 1351 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0); 1352 1353 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0); 1354 bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0); 1355 1356 sc->sc_flags |= GEM_INITED; 1357 } 1358 1359 /* Counters need to be zeroed */ 1360 bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0); 1361 bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0); 1362 bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0); 1363 bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0); 1364 bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0); 1365 bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0); 1366 bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0); 1367 bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0); 1368 bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0); 1369 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0); 1370 bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0); 1371 1372 /* Set XOFF PAUSE time. */ 1373 bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1374 1375 /* 1376 * Set the internal arbitration to "infinite" bursts of the 1377 * maximum length of 31 * 64 bytes so DMA transfers aren't 1378 * split up in cache line size chunks. This greatly improves 1379 * especially RX performance. 1380 * Enable silicon bug workarounds for the Apple variants. 1381 */ 1382 bus_write_4(sc->sc_res[0], GEM_CONFIG, 1383 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 1384 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? 1385 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 1386 1387 /* 1388 * Set the station address. 1389 */ 1390 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1391 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1392 bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1393 1394 /* Enable MII outputs. 
*/ 1395 bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); 1396 } 1397 1398 static void 1399 gem_start(ifp) 1400 struct ifnet *ifp; 1401 { 1402 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1403 1404 GEM_LOCK(sc); 1405 gem_start_locked(ifp); 1406 GEM_UNLOCK(sc); 1407 } 1408 1409 static void 1410 gem_start_locked(ifp) 1411 struct ifnet *ifp; 1412 { 1413 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1414 struct mbuf *m; 1415 int ntx = 0; 1416 1417 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1418 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1419 return; 1420 1421 #ifdef GEM_DEBUG 1422 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1423 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1424 sc->sc_txnext); 1425 #endif 1426 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1427 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1428 if (m == NULL) 1429 break; 1430 if (gem_load_txmbuf(sc, &m) != 0) { 1431 if (m == NULL) 1432 break; 1433 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1434 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1435 break; 1436 } 1437 ntx++; 1438 /* Kick the transmitter. */ 1439 #ifdef GEM_DEBUG 1440 CTR3(KTR_GEM, "%s: %s: kicking tx %d", 1441 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1442 #endif 1443 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1444 bus_write_4(sc->sc_res[0], GEM_TX_KICK, 1445 sc->sc_txnext); 1446 1447 BPF_MTAP(ifp, m); 1448 } 1449 1450 if (ntx > 0) { 1451 #ifdef GEM_DEBUG 1452 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1453 device_get_name(sc->sc_dev), sc->sc_txnext); 1454 #endif 1455 1456 /* Set a watchdog timer in case the chip flakes out. */ 1457 sc->sc_wdog_timer = 5; 1458 #ifdef GEM_DEBUG 1459 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1460 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1461 #endif 1462 } 1463 } 1464 1465 /* 1466 * Transmit interrupt. 1467 */ 1468 static void 1469 gem_tint(sc) 1470 struct gem_softc *sc; 1471 { 1472 struct ifnet *ifp = sc->sc_ifp; 1473 struct gem_txsoft *txs; 1474 int txlast; 1475 int progress = 0; 1476 1477 #ifdef GEM_DEBUG 1478 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1479 #endif 1480 1481 /* 1482 * Go through our Tx list and free mbufs for those 1483 * frames that have been transmitted. 1484 */ 1485 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1486 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1487 1488 #ifdef GEM_DEBUG 1489 if (ifp->if_flags & IFF_DEBUG) { 1490 int i; 1491 printf(" txsoft %p transmit chain:\n", txs); 1492 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1493 printf("descriptor %d: ", i); 1494 printf("gd_flags: 0x%016llx\t", (long long) 1495 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1496 printf("gd_addr: 0x%016llx\n", (long long) 1497 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1498 if (i == txs->txs_lastdesc) 1499 break; 1500 } 1501 } 1502 #endif 1503 1504 /* 1505 * In theory, we could harvest some descriptors before 1506 * the ring is empty, but that's a bit complicated. 1507 * 1508 * GEM_TX_COMPLETION points to the last descriptor 1509 * processed +1. 
1510 */ 1511 txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION); 1512 #ifdef GEM_DEBUG 1513 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1514 "txs->txs_lastdesc = %d, txlast = %d", 1515 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1516 #endif 1517 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1518 if ((txlast >= txs->txs_firstdesc) && 1519 (txlast <= txs->txs_lastdesc)) 1520 break; 1521 } else { 1522 /* Ick -- this command wraps */ 1523 if ((txlast >= txs->txs_firstdesc) || 1524 (txlast <= txs->txs_lastdesc)) 1525 break; 1526 } 1527 1528 #ifdef GEM_DEBUG 1529 CTR1(KTR_GEM, "%s: releasing a desc", __func__); 1530 #endif 1531 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1532 1533 sc->sc_txfree += txs->txs_ndescs; 1534 1535 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1536 BUS_DMASYNC_POSTWRITE); 1537 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1538 if (txs->txs_mbuf != NULL) { 1539 m_freem(txs->txs_mbuf); 1540 txs->txs_mbuf = NULL; 1541 } 1542 1543 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1544 1545 ifp->if_opackets++; 1546 progress = 1; 1547 } 1548 1549 #ifdef GEM_DEBUG 1550 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x " 1551 "GEM_TX_DATA_PTR %llx " 1552 "GEM_TX_COMPLETION %x", 1553 __func__, 1554 bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE), 1555 ((long long) bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_HI) << 32) | 1556 bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO), 1557 bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION)); 1558 #endif 1559 1560 if (progress) { 1561 if (sc->sc_txfree == GEM_NTXDESC - 1) 1562 sc->sc_txwin = 0; 1563 1564 /* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */ 1565 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1566 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5; 1567 1568 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1569 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1570 gem_start_locked(ifp); 1571 } 1572 1573 #ifdef GEM_DEBUG 1574 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1575 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1576 #endif 1577 } 1578 1579 #ifdef GEM_RINT_TIMEOUT 1580 static void 1581 gem_rint_timeout(arg) 1582 void *arg; 1583 { 1584 struct gem_softc *sc = (struct gem_softc *)arg; 1585 1586 GEM_LOCK_ASSERT(sc, MA_OWNED); 1587 gem_rint(sc); 1588 } 1589 #endif 1590 1591 /* 1592 * Receive interrupt. 1593 */ 1594 static void 1595 gem_rint(sc) 1596 struct gem_softc *sc; 1597 { 1598 struct ifnet *ifp = sc->sc_ifp; 1599 struct mbuf *m; 1600 u_int64_t rxstat; 1601 u_int32_t rxcomp; 1602 1603 #ifdef GEM_RINT_TIMEOUT 1604 callout_stop(&sc->sc_rx_ch); 1605 #endif 1606 #ifdef GEM_DEBUG 1607 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1608 #endif 1609 1610 /* 1611 * Read the completion register once. This limits 1612 * how long the following loop can execute. 1613 */ 1614 rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION); 1615 1616 #ifdef GEM_DEBUG 1617 CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d", 1618 __func__, sc->sc_rxptr, rxcomp); 1619 #endif 1620 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1621 for (; sc->sc_rxptr != rxcomp;) { 1622 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1623 rxstat = GEM_DMA_READ(sc, 1624 sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1625 1626 if (rxstat & GEM_RD_OWN) { 1627 #ifdef GEM_RINT_TIMEOUT 1628 /* 1629 * The descriptor is still marked as owned, although 1630 * it is supposed to have completed. This has been 1631 * observed on some machines. 
Just exiting here 1632 * might leave the packet sitting around until another 1633 * one arrives to trigger a new interrupt, which is 1634 * generally undesirable, so set up a timeout. 1635 */ 1636 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1637 gem_rint_timeout, sc); 1638 #endif 1639 m = NULL; 1640 goto kickit; 1641 } 1642 1643 if (rxstat & GEM_RD_BAD_CRC) { 1644 ifp->if_ierrors++; 1645 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1646 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1647 m = NULL; 1648 goto kickit; 1649 } 1650 1651 #ifdef GEM_DEBUG 1652 if (ifp->if_flags & IFF_DEBUG) { 1653 printf(" rxsoft %p descriptor %d: ", 1654 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1655 printf("gd_flags: 0x%016llx\t", (long long) 1656 GEM_DMA_READ(sc, sc->sc_rxdescs[ 1657 sc->sc_rxptr].gd_flags)); 1658 printf("gd_addr: 0x%016llx\n", (long long) 1659 GEM_DMA_READ(sc, sc->sc_rxdescs[ 1660 sc->sc_rxptr].gd_addr)); 1661 } 1662 #endif 1663 1664 /* 1665 * Allocate a new mbuf cluster. If that fails, we are 1666 * out of memory, and must drop the packet and recycle 1667 * the buffer that's already attached to this descriptor. 1668 */ 1669 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1670 ifp->if_ierrors++; 1671 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1672 m = NULL; 1673 } 1674 1675 kickit: 1676 /* 1677 * Update the RX kick register. This register has to point 1678 * to the descriptor after the last valid one (before the 1679 * current batch) and must be incremented in multiples of 1680 * 4 (because the DMA engine fetches/updates descriptors 1681 * in batches of 4). 1682 */ 1683 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 1684 if ((sc->sc_rxptr % 4) == 0) { 1685 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1686 bus_write_4(sc->sc_res[0], GEM_RX_KICK, 1687 (sc->sc_rxptr + GEM_NRXDESC - 4) & 1688 GEM_NRXDESC_MASK); 1689 } 1690 1691 if (m == NULL) { 1692 if (rxstat & GEM_RD_OWN) 1693 break; 1694 continue; 1695 } 1696 1697 ifp->if_ipackets++; 1698 m->m_data += 2; /* We're already off by two */ 1699 m->m_pkthdr.rcvif = ifp; 1700 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 1701 1702 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1703 gem_rxcksum(m, rxstat); 1704 1705 /* Pass it on. */ 1706 GEM_UNLOCK(sc); 1707 (*ifp->if_input)(ifp, m); 1708 GEM_LOCK(sc); 1709 } 1710 1711 #ifdef GEM_DEBUG 1712 CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__, 1713 sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION)); 1714 #endif 1715 } 1716 1717 /* 1718 * gem_add_rxbuf: 1719 * 1720 * Add a receive buffer to the indicated descriptor. 1721 */ 1722 static int 1723 gem_add_rxbuf(sc, idx) 1724 struct gem_softc *sc; 1725 int idx; 1726 { 1727 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1728 struct mbuf *m; 1729 bus_dma_segment_t segs[1]; 1730 int error, nsegs; 1731 1732 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1733 if (m == NULL) 1734 return (ENOBUFS); 1735 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1736 1737 #ifdef GEM_DEBUG 1738 /* bzero the packet to check dma */ 1739 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1740 #endif 1741 1742 if (rxs->rxs_mbuf != NULL) { 1743 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1744 BUS_DMASYNC_POSTREAD); 1745 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1746 } 1747 1748 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1749 m, segs, &nsegs, BUS_DMA_NOWAIT); 1750 /* If nsegs is wrong then the stack is corrupt. 
*/ 1751 KASSERT(nsegs == 1, ("Too many segments returned!")); 1752 if (error != 0) { 1753 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1754 "%d\n", idx, error); 1755 m_freem(m); 1756 return (error); 1757 } 1758 rxs->rxs_mbuf = m; 1759 rxs->rxs_paddr = segs[0].ds_addr; 1760 1761 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1762 1763 GEM_INIT_RXDESC(sc, idx); 1764 1765 return (0); 1766 } 1767 1768 static void 1769 gem_eint(sc, status) 1770 struct gem_softc *sc; 1771 u_int status; 1772 { 1773 1774 sc->sc_ifp->if_ierrors++; 1775 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1776 gem_reset_rxdma(sc); 1777 return; 1778 } 1779 1780 device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status); 1781 } 1782 1783 void 1784 gem_intr(v) 1785 void *v; 1786 { 1787 struct gem_softc *sc = (struct gem_softc *)v; 1788 uint32_t status, status2; 1789 1790 GEM_LOCK(sc); 1791 status = bus_read_4(sc->sc_res[0], GEM_STATUS); 1792 1793 #ifdef GEM_DEBUG 1794 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 1795 device_get_name(sc->sc_dev), __func__, (status>>19), 1796 (u_int)status); 1797 1798 /* 1799 * PCS interrupts must be cleared, otherwise no traffic is passed! 1800 */ 1801 if ((status & GEM_INTR_PCS) != 0) { 1802 status2 = bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) | 1803 bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS); 1804 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1805 device_printf(sc->sc_dev, 1806 "%s: PCS link status changed\n", __func__); 1807 } 1808 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1809 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS); 1810 if ((status2 & GEM_MAC_PAUSED) != 0) 1811 device_printf(sc->sc_dev, 1812 "%s: PAUSE received (PAUSE time %d slots)\n", 1813 __func__, GEM_MAC_PAUSE_TIME(status2)); 1814 if ((status2 & GEM_MAC_PAUSE) != 0) 1815 device_printf(sc->sc_dev, 1816 "%s: transited to PAUSE state\n", __func__); 1817 if ((status2 & GEM_MAC_RESUME) != 0) 1818 device_printf(sc->sc_dev, 1819 "%s: transited to non-PAUSE state\n", __func__); 1820 } 1821 if ((status & GEM_INTR_MIF) != 0) 1822 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1823 #endif 1824 1825 if ((status & 1826 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1827 gem_eint(sc, status); 1828 1829 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1830 gem_rint(sc); 1831 1832 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1833 gem_tint(sc); 1834 1835 if (status & GEM_INTR_TX_MAC) { 1836 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS); 1837 if (status2 & ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) 1838 device_printf(sc->sc_dev, "MAC tx fault, status %x\n", 1839 status2); 1840 if (status2 & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 1841 gem_init_locked(sc); 1842 } 1843 if (status & GEM_INTR_RX_MAC) { 1844 status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS); 1845 /* 1846 * At least with GEM_SUN_GEM and some GEM_SUN_ERI 1847 * revisions GEM_MAC_RX_OVERFLOW happen often due to a 1848 * silicon bug so handle them silently. Moreover, it's 1849 * likely that the receiver has hung so we reset it. 
1850 */ 1851 if (status2 & GEM_MAC_RX_OVERFLOW) { 1852 sc->sc_ifp->if_ierrors++; 1853 gem_reset_rxdma(sc); 1854 } else if (status2 & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 1855 device_printf(sc->sc_dev, "MAC rx fault, status %x\n", 1856 status2); 1857 } 1858 GEM_UNLOCK(sc); 1859 } 1860 1861 static int 1862 gem_watchdog(sc) 1863 struct gem_softc *sc; 1864 { 1865 1866 GEM_LOCK_ASSERT(sc, MA_OWNED); 1867 1868 #ifdef GEM_DEBUG 1869 CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1870 "GEM_MAC_RX_CONFIG %x", __func__, 1871 bus_read_4(sc->sc_res[0], GEM_RX_CONFIG), 1872 bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS), 1873 bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG)); 1874 CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " 1875 "GEM_MAC_TX_CONFIG %x", __func__, 1876 bus_read_4(sc->sc_res[0], GEM_TX_CONFIG), 1877 bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS), 1878 bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG)); 1879 #endif 1880 1881 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1882 return (0); 1883 1884 if ((sc->sc_flags & GEM_LINK) != 0) 1885 device_printf(sc->sc_dev, "device timeout\n"); 1886 else if (bootverbose) 1887 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1888 ++sc->sc_ifp->if_oerrors; 1889 1890 /* Try to get more packets going. */ 1891 gem_init_locked(sc); 1892 return (EJUSTRETURN); 1893 } 1894 1895 /* 1896 * Initialize the MII Management Interface 1897 */ 1898 static void 1899 gem_mifinit(sc) 1900 struct gem_softc *sc; 1901 { 1902 1903 /* Configure the MIF in frame mode */ 1904 bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0], 1905 GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1906 } 1907 1908 /* 1909 * MII interface 1910 * 1911 * The GEM MII interface supports at least three different operating modes: 1912 * 1913 * Bitbang mode is implemented using data, clock and output enable registers. 1914 * 1915 * Frame mode is implemented by loading a complete frame into the frame 1916 * register and polling the valid bit for completion. 1917 * 1918 * Polling mode uses the frame register but completion is indicated by 1919 * an interrupt. 
 *
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (bus_read_4(sc->sc_res[0], reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANAR:
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0);
			bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val);
			bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		bus_write_4(sc->sc_res[0], reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
	int gigabit;
	uint32_t rxcfg, txcfg, v;

#ifdef GEM_DEBUG
	if ((sc->sc_ifflags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling of the RX and TX MACs goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of the
	 * GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);

	v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
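			 * The echo disable above presumably keeps the MAC
			 * from seeing its own half-duplex transmissions
			 * reflected back from the external MII as receive
			 * traffic.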
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX Add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				gem_stop(ifp, 0);
		}
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast)
	 * and the hash filter.  Depending on the case, the right bit will be
	 * enabled.
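	 * The hash filter is disabled first and gem_bitwait() below checks
	 * that the hardware has actually cleared GEM_MAC_RX_HASH_FILTER
	 * before the hash table registers are rewritten.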
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 * A worked example of this mapping follows the function.
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++) {
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}
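
/*
 * Illustrative example of the hash mapping used in gem_setladrf() above:
 * if ether_crc32_le() returns a CRC whose 8 most significant bits are
 * 0xa5, then after "crc >>= 24" we have crc == 0xa5, the selected filter
 * word is crc >> 4 == 10 and the selected bit is 15 - (crc & 15) == 10,
 * i.e. hash[10] |= 1 << 10.
 */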