/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware does not compensate the checksum for UDP datagrams, which
 * can yield a result of 0x0.  As a safeguard, UDP checksum offload is
 * disabled by default.  It can be re-enabled by setting the special link
 * option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)

static void	gem_start(struct ifnet *);
static void	gem_start_locked(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
static __inline void gem_rxcksum(struct mbuf *, uint64_t);
static void	gem_tick(void *);
static int	gem_watchdog(struct gem_softc *);
static void	gem_init(void *);
static void	gem_init_locked(struct gem_softc *);
static void	gem_init_regs(struct gem_softc *);
static u_int	gem_ringsize(u_int);
static int	gem_meminit(struct gem_softc *);
static struct mbuf *gem_defrag(struct mbuf *, int, int);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf **);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
    u_int32_t);
static void	gem_reset(struct gem_softc *);
static int	gem_reset_rx(struct gem_softc *);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS	GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp;
	int i, error;
	u_int32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Bad things will happen when touching this register on ERI. */
	if (sc->sc_variant != GEM_SUN_ERI)
		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_MII);

	gem_mifinit(sc);

	/*
	 * Look for an external PHY.
	 */
	error = ENXIO;
	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
		v |= GEM_MIF_CONFIG_PHY_SEL;
		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Fall back on an internal PHY if no external PHY was found.
	 */
	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
		v &= ~GEM_MIF_CONFIG_PHY_SEL;
		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
		switch (sc->sc_variant) {
		case GEM_SUN_ERI:
		case GEM_APPLE_K2_GMAC:
			sc->sc_phyad = GEM_PHYAD_INTERNAL;
			break;
		case GEM_APPLE_GMAC:
			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
			break;
		default:
			sc->sc_phyad = -1;
			break;
		}
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	/*
	 * Try the external PCS SERDES if we didn't find any PHYs.
	 */
	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
		    GEM_MII_DATAPATH_SERDES);
		bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
		bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
		    GEM_MII_CONFIG_ENABLE);
		sc->sc_flags |= GEM_SERDES;
		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
		    gem_mediachange, gem_mediastatus);
	}

	if (error != 0) {
		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	sc->sc_csum_features = GEM_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~GEM_INITED;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static __inline void
gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
{
	struct mbuf *m0;
	struct ip *ip;
	uint64_t offset, offset2;
	char *p;

	m0 = m;
	offset = sizeof(struct ip) + ETHER_HDR_LEN;
	for(; m && m->m_len == 0; m = m->m_next)
		;
	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
		    __func__);
		/* checksum will be corrupted */
		m = m0;
		goto sendit;
	}
	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
		if (m->m_len != ETHER_HDR_LEN) {
			device_printf(sc->sc_dev,
			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
			/* checksum will be corrupted */
			m = m0;
			goto sendit;
		}
		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
			;
		if (m == NULL) {
			/* checksum will be corrupted */
			m = m0;
			goto sendit;
		}
		ip = mtod(m, struct ip *);
	} else {
		p = mtod(m, uint8_t *);
		p += ETHER_HDR_LEN;
		ip = (struct ip *)p;
	}
	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;

 sendit:
	offset2 = m->m_pkthdr.csum_data;
	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
	*cflags |= GEM_TD_CXSUM_ENABLE;
}

static __inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	uint16_t cksum, *opts;
	uint32_t temp32;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("%s: bad control buffer segment count", __func__);
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);

	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_read_4(sc->sc_res[0], r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

static void
gem_reset(sc)
	struct gem_softc *sc;
{

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~GEM_LINK;
	sc->sc_wdog_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the receiver DMA engine.
 *
 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc in order to reset the receiver DMA engine only and not do a full
 * reset which amongst others also downs the link and clears the FIFOs.
 */
static void
gem_reset_rxdma(struct gem_softc *sc)
{
	int i;

	if (gem_reset_rx(sc) != 0)
		return (gem_init_locked(sc));
	for (i = 0; i < GEM_NRXDESC; i++)
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
			GEM_UPDATE_RXDESC(sc, i);
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    gem_ringsize(GEM_NRXDESC /*XXX*/) |
	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT) |
	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12));
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static u_int
gem_ringsize(sz)
	u_int sz;
{

	switch (sz) {
	case 32:
		return (GEM_RING_SZ_32);
	case 64:
		return (GEM_RING_SZ_64);
	case 128:
		return (GEM_RING_SZ_128);
	case 256:
		return (GEM_RING_SZ_256);
	case 512:
		return (GEM_RING_SZ_512);
	case 1024:
		return (GEM_RING_SZ_1024);
	case 2048:
		return (GEM_RING_SZ_2048);
	case 4096:
		return (GEM_RING_SZ_4096);
	case 8192:
		return (GEM_RING_SZ_8192);
	default:
		printf("%s: invalid ring size %d\n", __func__, sz);
		return (GEM_RING_SZ_32);
	}
}

static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	if (gem_meminit(sc) != 0)
		return;

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_write_4(sc->sc_res[0], GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
	    GEM_INTR_BERR
#ifdef GEM_DEBUG
	    | GEM_INTR_PCS | GEM_INTR_MIF
#endif
	    ));
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK,
	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
#ifdef GEM_DEBUG
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
#else
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
#endif

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
	    v | GEM_TX_CONFIG_TXDMA_EN |
	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size. */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
	/* Rx TCP/UDP checksum offset */
	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
	    GEM_RX_CONFIG_CXM_START_SHFT);

	/* Enable DMA */
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);

	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));

	/* step 11. Configure Media */

	/* step 12. RX_MAC Configuration Register */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_STRIP_CRC;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifflags = ifp->if_flags;

	sc->sc_flags &= ~GEM_LINK;
	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * This is a copy of ath_defrag() from ath(4).
 *
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
gem_defrag(m0, how, maxfrags)
	struct mbuf *m0;
	int how;
	int maxfrags;
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return (NULL);
}

static int
gem_load_txmbuf(sc, m_head)
	struct gem_softc *sc;
	struct mbuf **m_head;
{
	struct gem_txsoft *txs;
	bus_dma_segment_t txsegs[GEM_NTXSEGS];
	struct mbuf *m;
	uint64_t flags, cflags;
	int error, nexttx, nsegs, seg;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
		    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	flags = cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
		gem_txcksum(sc, *m_head, &cflags);

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
		    txsegs[seg].ds_len, txsegs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("%s: segment size too large!", __func__));
		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
		sc->sc_txdescs[nexttx].gd_flags =
		    GEM_DMA_WRITE(sc, flags | cflags);
		txs->txs_lastdesc = nexttx;
	}

	/* set EOP on the last descriptor */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
	    nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);

	/* Lastly set SOP on the first descriptor */
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
	    nexttx);
#endif
	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
		sc->sc_txwin = 0;
		flags |= GEM_TD_INTERRUPT_ME;
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
		    GEM_TD_START_OF_PACKET);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);

	/* These regs are not cleared on reset */
	if ((sc->sc_flags & GEM_INITED) == 0) {
		/* Wooo.  Magic values. */
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);

		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));

		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);

		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_flags |= GEM_INITED;
	}

	/* Counters need to be zeroed */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);

	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * especially RX performance.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
	bus_write_4(sc->sc_res[0], GEM_CONFIG,
	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));

	/*
	 * Set the station address.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m;
	int ntx = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
		return;

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (gem_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: kicking tx %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		bus_write_4(sc->sc_res[0], GEM_TX_KICK,
		    sc->sc_txnext);

		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR1(KTR_GEM, "%s: releasing a desc", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    __func__,
	    bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
	    ((long long)bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_HI) << 32) |
	    bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO),
	    bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			gem_start_locked(ifp);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(arg)
	void *arg;
{
	struct gem_softc *sc = (struct gem_softc *)arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (; sc->sc_rxptr != rxcomp;) {
		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
		rxstat = GEM_DMA_READ(sc,
		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.
			 * Just exiting here might leave the packet sitting
			 * around until another one arrives to trigger a new
			 * interrupt, which is generally undesirable, so set
			 * up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			m = NULL;
			goto kickit;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
			goto kickit;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ",
			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[
			    sc->sc_rxptr].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[
			    sc->sc_rxptr].gd_addr));
		}
#endif

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
			m = NULL;
		}

 kickit:
		/*
		 * Update the RX kick register.  This register has to point
		 * to the descriptor after the last valid one (before the
		 * current batch) and must be incremented in multiples of
		 * 4 (because the DMA engine fetches/updates descriptors
		 * in batches of 4).
		 */
		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
		if ((sc->sc_rxptr % 4) == 0) {
			GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
			bus_write_4(sc->sc_res[0], GEM_RX_KICK,
			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
			    GEM_NRXDESC_MASK);
		}

		if (m == NULL) {
			if (rxstat & GEM_RD_OWN)
				break;
			continue;
		}

		ifp->if_ipackets++;
		m->m_data += 2; /* We're already off by two */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
	    sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
#endif
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (error != 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		m_freem(m);
		return (error);
	}
	rxs->rxs_mbuf = m;
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	sc->sc_ifp->if_ierrors++;
	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return;
	}

	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	uint32_t status, status2;

	GEM_LOCK(sc);
	status = bus_read_4(sc->sc_res[0], GEM_STATUS);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__, (status >> 19),
	    (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & GEM_INTR_PCS) != 0) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) |
		    bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS);
		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & GEM_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if ((status &
	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if (status & GEM_INTR_TX_MAC) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
		if (status2 & ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP))
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    status2);
		if (status2 & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
		 * silicon bug, so handle it silently.  Moreover, it's
		 * likely that the receiver has hung so we reset it.
		 */
		if (status2 & GEM_MAC_RX_OVERFLOW) {
			sc->sc_ifp->if_ierrors++;
			gem_reset_rxdma(sc);
		} else if (status2 & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    status2);
	}
	GEM_UNLOCK(sc);
}

static int
gem_watchdog(sc)
	struct gem_softc *sc;
{

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x", __func__,
	    bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
	CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x", __func__,
	    bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
	    bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
	    bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & GEM_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	++sc->sc_ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	return (EJUSTRETURN);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{

	/* Configure the MIF in frame mode */
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
	    GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = GEM_MII_ANAR;
			break;
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (bus_read_4(sc->sc_res[0], reg));
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_READ |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
		return (0);

	if ((sc->sc_flags & GEM_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = GEM_MII_CONTROL;
			break;
		case MII_BMSR:
			reg = GEM_MII_STATUS;
			break;
		case MII_ANAR:
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0);
			bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4,
			    BUS_SPACE_BARRIER_WRITE);
			bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val);
			bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
			    GEM_MII_CONFIG_ENABLE);
			return (0);
		case MII_ANLPAR:
			reg = GEM_MII_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		bus_write_4(sc->sc_res[0], reg, val);
		return (0);
	}

	/* Construct the frame command. */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}
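/*
 * Note (illustrative, refers to the bus front-ends rather than this file):
 * gem_mii_readreg(), gem_mii_writereg() and gem_mii_statchg() are typically
 * exported through the miibus interface in the front-ends' method tables,
 * e.g.
 *
 *	DEVMETHOD(miibus_readreg,	gem_mii_readreg),
 *	DEVMETHOD(miibus_writereg,	gem_mii_writereg),
 *	DEVMETHOD(miibus_statchg,	gem_mii_statchg),
 *
 * so that the generic MII layer (mii_mediachg(), mii_pollstat(), mii_tick())
 * calls back into them.
 */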
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
	int gigabit;
	uint32_t rxcfg, txcfg, v;

#ifdef GEM_DEBUG
	if ((sc->sc_ifflags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
		    __func__, sc->sc_phyad);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= GEM_LINK;
	else
		sc->sc_flags &= ~GEM_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling of the RX and TX MACs goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */

	rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
	else if (gigabit != 0) {
		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);

	v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
#ifdef notyet
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		v |= GEM_MAC_CC_RX_PAUSE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		v |= GEM_MAC_CC_TX_PAUSE;
#endif
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
	else
		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
		    GEM_MAC_SLOT_TIME_NORMAL);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	if ((sc->sc_flags & GEM_SERDES) == 0) {
		if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			/* External MII needs echo disable if half duplex. */
			v |= GEM_MAC_XIF_ECHO_DISABL;
		else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
			v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	if (gigabit != 0)
		v |= GEM_MAC_XIF_GMII_MODE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= GEM_MAC_XIF_FDPLX_LED;
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & GEM_LINK) != 0) {
		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
		    txcfg | GEM_MAC_TX_ENABLE);
		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
		    rxcfg | GEM_MAC_RX_ENABLE);
	}
}
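/*
 * Example (informational): media selections requested from userland, e.g.
 *
 *	ifconfig gem0 media 1000baseSX mediaopt full-duplex
 *
 * arrive via the SIOCSIFMEDIA case in gem_ioctl() and ifmedia_ioctl(),
 * which call back into gem_mediachange() below.
 */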
int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX Add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				gem_stop(ifp, 0);
		}
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		GEM_LOCK(sc);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		GEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
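/*
 * Example (informational): the SIOCSIFFLAGS handling above keys UDP TX
 * checksum offloading off the link0 interface flag, so from userland it
 * would typically be toggled with
 *
 *	ifconfig gem0 link0		# also offload UDP checksums
 *	ifconfig gem0 -link0		# TCP checksum offloading only
 *
 * where gem0 stands for whatever unit is attached.
 */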
/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get the current RX configuration. */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
	    GEM_MAC_RX_PROMISC_GRP);

	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
	    BUS_SPACE_BARRIER_WRITE);
	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 8 bits as an index into the 256-bit logical address filter.  The
	 * high-order 4 bits select the word, while the other 4 bits select
	 * the bit within the word (where bit 0 is the MSB).
	 */

	/* Clear the hash table. */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++) {
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}
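/*
 * Worked example for the hash computation in gem_setladrf() above
 * (hypothetical CRC value): if the top 8 bits of the CRC were 0x5a, then
 * crc >> 4 == 5 selects hash[5] and 15 - (crc & 15) == 5 selects bit 5,
 * i.e. hash[5] |= 1 << 5 (0x20), matching the word/bit layout described
 * in the comment above.
 */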