1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2001 Eduardo Horvath. 5 * Copyright (c) 2001-2003 Thomas Moestl 6 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 31 */ 32 33 #include <sys/cdefs.h> 34 /* 35 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 36 */ 37 38 #if 0 39 #define GEM_DEBUG 40 #endif 41 42 #if 0 /* XXX: In case of emergency, re-enable this. */ 43 #define GEM_RINT_TIMEOUT 44 #endif 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/bus.h> 49 #include <sys/callout.h> 50 #include <sys/endian.h> 51 #include <sys/mbuf.h> 52 #include <sys/malloc.h> 53 #include <sys/kernel.h> 54 #include <sys/lock.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/socket.h> 58 #include <sys/sockio.h> 59 #include <sys/rman.h> 60 61 #include <net/bpf.h> 62 #include <net/ethernet.h> 63 #include <net/if.h> 64 #include <net/if_var.h> 65 #include <net/if_arp.h> 66 #include <net/if_dl.h> 67 #include <net/if_media.h> 68 #include <net/if_types.h> 69 #include <net/if_vlan_var.h> 70 71 #include <netinet/in.h> 72 #include <netinet/in_systm.h> 73 #include <netinet/ip.h> 74 #include <netinet/tcp.h> 75 #include <netinet/udp.h> 76 77 #include <machine/bus.h> 78 79 #include <dev/mii/mii.h> 80 #include <dev/mii/miivar.h> 81 82 #include <dev/gem/if_gemreg.h> 83 #include <dev/gem/if_gemvar.h> 84 85 CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192); 86 CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192); 87 88 #define GEM_TRIES 10000 89 90 /* 91 * The hardware supports basic TCP/UDP checksum offloading. However, 92 * the hardware doesn't compensate the checksum for UDP datagram which 93 * can yield to 0x0. As a safe guard, UDP checksum offload is disabled 94 * by default. It can be reactivated by setting special link option 95 * link0 with ifconfig(8). 96 */ 97 #define GEM_CSUM_FEATURES (CSUM_TCP) 98 99 static int gem_add_rxbuf(struct gem_softc *sc, int idx); 100 static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, 101 uint32_t set); 102 static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, 103 int nsegs, int error); 104 static int gem_disable_rx(struct gem_softc *sc); 105 static int gem_disable_tx(struct gem_softc *sc); 106 static void gem_eint(struct gem_softc *sc, u_int status); 107 static void gem_init(void *xsc); 108 static void gem_init_locked(struct gem_softc *sc); 109 static void gem_init_regs(struct gem_softc *sc); 110 static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data); 111 static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); 112 static int gem_meminit(struct gem_softc *sc); 113 static void gem_mifinit(struct gem_softc *sc); 114 static void gem_reset(struct gem_softc *sc); 115 static int gem_reset_rx(struct gem_softc *sc); 116 static void gem_reset_rxdma(struct gem_softc *sc); 117 static int gem_reset_tx(struct gem_softc *sc); 118 static u_int gem_ringsize(u_int sz); 119 static void gem_rint(struct gem_softc *sc); 120 #ifdef GEM_RINT_TIMEOUT 121 static void gem_rint_timeout(void *arg); 122 #endif 123 static inline void gem_rxcksum(struct mbuf *m, uint64_t flags); 124 static void gem_rxdrain(struct gem_softc *sc); 125 static void gem_setladrf(struct gem_softc *sc); 126 static void gem_start(if_t ifp); 127 static void gem_start_locked(if_t ifp); 128 static void gem_stop(if_t ifp, int disable); 129 static void gem_tick(void *arg); 130 static void gem_tint(struct gem_softc *sc); 131 static inline void gem_txkick(struct gem_softc *sc); 132 static int gem_watchdog(struct gem_softc *sc); 133 134 DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0); 135 MODULE_DEPEND(gem, miibus, 1, 1, 1); 136 137 #ifdef GEM_DEBUG 138 #include <sys/ktr.h> 139 #define KTR_GEM KTR_SPARE2 140 #endif 141 142 int 143 gem_attach(struct gem_softc *sc) 144 { 145 struct gem_txsoft *txs; 146 if_t ifp; 147 int error, i, phy; 148 uint32_t v; 149 150 if (bootverbose) 151 device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags); 152 153 /* Set up ifnet structure. */ 154 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 155 sc->sc_csum_features = GEM_CSUM_FEATURES; 156 if_setsoftc(ifp, sc); 157 if_initname(ifp, device_get_name(sc->sc_dev), 158 device_get_unit(sc->sc_dev)); 159 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 160 if_setstartfn(ifp, gem_start); 161 if_setioctlfn(ifp, gem_ioctl); 162 if_setinitfn(ifp, gem_init); 163 if_setsendqlen(ifp, GEM_TXQUEUELEN); 164 if_setsendqready(ifp); 165 166 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 167 #ifdef GEM_RINT_TIMEOUT 168 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 169 #endif 170 171 /* Make sure the chip is stopped. */ 172 gem_reset(sc); 173 174 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 175 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 176 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 177 NULL, &sc->sc_pdmatag); 178 if (error != 0) 179 goto fail_ifnet; 180 181 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 182 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 183 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 184 if (error != 0) 185 goto fail_ptag; 186 187 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 188 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 189 MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 190 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 191 if (error != 0) 192 goto fail_rtag; 193 194 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 195 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 196 sizeof(struct gem_control_data), 1, 197 sizeof(struct gem_control_data), 0, 198 NULL, NULL, &sc->sc_cdmatag); 199 if (error != 0) 200 goto fail_ttag; 201 202 /* 203 * Allocate the control data structures, create and load the 204 * DMA map for it. 205 */ 206 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 207 (void **)&sc->sc_control_data, 208 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 209 &sc->sc_cddmamap)) != 0) { 210 device_printf(sc->sc_dev, 211 "unable to allocate control data, error = %d\n", error); 212 goto fail_ctag; 213 } 214 215 sc->sc_cddma = 0; 216 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 217 sc->sc_control_data, sizeof(struct gem_control_data), 218 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 219 device_printf(sc->sc_dev, 220 "unable to load control data DMA map, error = %d\n", 221 error); 222 goto fail_cmem; 223 } 224 225 /* 226 * Initialize the transmit job descriptors. 227 */ 228 STAILQ_INIT(&sc->sc_txfreeq); 229 STAILQ_INIT(&sc->sc_txdirtyq); 230 231 /* 232 * Create the transmit buffer DMA maps. 233 */ 234 error = ENOMEM; 235 for (i = 0; i < GEM_TXQUEUELEN; i++) { 236 txs = &sc->sc_txsoft[i]; 237 txs->txs_mbuf = NULL; 238 txs->txs_ndescs = 0; 239 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 240 &txs->txs_dmamap)) != 0) { 241 device_printf(sc->sc_dev, 242 "unable to create TX DMA map %d, error = %d\n", 243 i, error); 244 goto fail_txd; 245 } 246 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 247 } 248 249 /* 250 * Create the receive buffer DMA maps. 251 */ 252 for (i = 0; i < GEM_NRXDESC; i++) { 253 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 254 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 255 device_printf(sc->sc_dev, 256 "unable to create RX DMA map %d, error = %d\n", 257 i, error); 258 goto fail_rxd; 259 } 260 sc->sc_rxsoft[i].rxs_mbuf = NULL; 261 } 262 263 /* Bypass probing PHYs if we already know for sure to use a SERDES. */ 264 if ((sc->sc_flags & GEM_SERDES) != 0) 265 goto serdes; 266 267 GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII); 268 GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 269 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 270 271 gem_mifinit(sc); 272 273 /* 274 * Look for an external PHY. 275 */ 276 error = ENXIO; 277 v = GEM_READ_4(sc, GEM_MIF_CONFIG); 278 if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 279 v |= GEM_MIF_CONFIG_PHY_SEL; 280 GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); 281 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 282 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 283 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 284 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, 285 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); 286 } 287 288 /* 289 * Fall back on an internal PHY if no external PHY was found. 290 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be 291 * trusted when the firmware has powered down the chip. 292 */ 293 if (error != 0 && 294 ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) { 295 v &= ~GEM_MIF_CONFIG_PHY_SEL; 296 GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); 297 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 298 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 299 switch (sc->sc_variant) { 300 case GEM_APPLE_K2_GMAC: 301 phy = GEM_PHYAD_INTERNAL; 302 break; 303 case GEM_APPLE_GMAC: 304 phy = GEM_PHYAD_EXTERNAL; 305 break; 306 default: 307 phy = MII_PHY_ANY; 308 break; 309 } 310 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 311 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy, 312 MII_OFFSET_ANY, MIIF_DOPAUSE); 313 } 314 315 /* 316 * Try the external PCS SERDES if we didn't find any PHYs. 317 */ 318 if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 319 serdes: 320 GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 321 GEM_MII_DATAPATH_SERDES); 322 GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 323 BUS_SPACE_BARRIER_WRITE); 324 GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 325 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 326 GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, 327 BUS_SPACE_BARRIER_WRITE); 328 GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 329 GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 330 BUS_SPACE_BARRIER_WRITE); 331 sc->sc_flags |= GEM_SERDES; 332 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 333 gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, 334 GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); 335 } 336 if (error != 0) { 337 device_printf(sc->sc_dev, "attaching PHYs failed\n"); 338 goto fail_rxd; 339 } 340 sc->sc_mii = device_get_softc(sc->sc_miibus); 341 342 /* 343 * From this point forward, the attachment cannot fail. A failure 344 * before this point releases all resources that may have been 345 * allocated. 346 */ 347 348 /* Get RX FIFO size. */ 349 sc->sc_rxfifosize = 64 * 350 GEM_READ_4(sc, GEM_RX_FIFO_SIZE); 351 352 /* Get TX FIFO size. */ 353 v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE); 354 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 355 sc->sc_rxfifosize / 1024, v / 16); 356 357 /* Attach the interface. */ 358 ether_ifattach(ifp, sc->sc_enaddr); 359 360 /* 361 * Tell the upper layer(s) we support long frames/checksum offloads. 362 */ 363 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 364 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); 365 if_sethwassistbits(ifp, sc->sc_csum_features, 0); 366 if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); 367 368 return (0); 369 370 /* 371 * Free any resources we've allocated during the failed attach 372 * attempt. Do this in reverse order and fall through. 373 */ 374 fail_rxd: 375 for (i = 0; i < GEM_NRXDESC; i++) 376 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 377 bus_dmamap_destroy(sc->sc_rdmatag, 378 sc->sc_rxsoft[i].rxs_dmamap); 379 fail_txd: 380 for (i = 0; i < GEM_TXQUEUELEN; i++) 381 if (sc->sc_txsoft[i].txs_dmamap != NULL) 382 bus_dmamap_destroy(sc->sc_tdmatag, 383 sc->sc_txsoft[i].txs_dmamap); 384 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 385 fail_cmem: 386 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 387 sc->sc_cddmamap); 388 fail_ctag: 389 bus_dma_tag_destroy(sc->sc_cdmatag); 390 fail_ttag: 391 bus_dma_tag_destroy(sc->sc_tdmatag); 392 fail_rtag: 393 bus_dma_tag_destroy(sc->sc_rdmatag); 394 fail_ptag: 395 bus_dma_tag_destroy(sc->sc_pdmatag); 396 fail_ifnet: 397 if_free(ifp); 398 return (error); 399 } 400 401 void 402 gem_detach(struct gem_softc *sc) 403 { 404 if_t ifp = sc->sc_ifp; 405 int i; 406 407 ether_ifdetach(ifp); 408 GEM_LOCK(sc); 409 gem_stop(ifp, 1); 410 GEM_UNLOCK(sc); 411 callout_drain(&sc->sc_tick_ch); 412 #ifdef GEM_RINT_TIMEOUT 413 callout_drain(&sc->sc_rx_ch); 414 #endif 415 if_free(ifp); 416 device_delete_child(sc->sc_dev, sc->sc_miibus); 417 418 for (i = 0; i < GEM_NRXDESC; i++) 419 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 420 bus_dmamap_destroy(sc->sc_rdmatag, 421 sc->sc_rxsoft[i].rxs_dmamap); 422 for (i = 0; i < GEM_TXQUEUELEN; i++) 423 if (sc->sc_txsoft[i].txs_dmamap != NULL) 424 bus_dmamap_destroy(sc->sc_tdmatag, 425 sc->sc_txsoft[i].txs_dmamap); 426 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 427 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 428 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 429 sc->sc_cddmamap); 430 bus_dma_tag_destroy(sc->sc_cdmatag); 431 bus_dma_tag_destroy(sc->sc_tdmatag); 432 bus_dma_tag_destroy(sc->sc_rdmatag); 433 bus_dma_tag_destroy(sc->sc_pdmatag); 434 } 435 436 void 437 gem_suspend(struct gem_softc *sc) 438 { 439 if_t ifp = sc->sc_ifp; 440 441 GEM_LOCK(sc); 442 gem_stop(ifp, 0); 443 GEM_UNLOCK(sc); 444 } 445 446 void 447 gem_resume(struct gem_softc *sc) 448 { 449 if_t ifp = sc->sc_ifp; 450 451 GEM_LOCK(sc); 452 /* 453 * On resume all registers have to be initialized again like 454 * after power-on. 455 */ 456 sc->sc_flags &= ~GEM_INITED; 457 if (if_getflags(ifp) & IFF_UP) 458 gem_init_locked(sc); 459 GEM_UNLOCK(sc); 460 } 461 462 static inline void 463 gem_rxcksum(struct mbuf *m, uint64_t flags) 464 { 465 struct ether_header *eh; 466 struct ip *ip; 467 struct udphdr *uh; 468 uint16_t *opts; 469 int32_t hlen, len, pktlen; 470 uint32_t temp32; 471 uint16_t cksum; 472 473 pktlen = m->m_pkthdr.len; 474 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 475 return; 476 eh = mtod(m, struct ether_header *); 477 if (eh->ether_type != htons(ETHERTYPE_IP)) 478 return; 479 ip = (struct ip *)(eh + 1); 480 if (ip->ip_v != IPVERSION) 481 return; 482 483 hlen = ip->ip_hl << 2; 484 pktlen -= sizeof(struct ether_header); 485 if (hlen < sizeof(struct ip)) 486 return; 487 if (ntohs(ip->ip_len) < hlen) 488 return; 489 if (ntohs(ip->ip_len) != pktlen) 490 return; 491 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 492 return; /* Cannot handle fragmented packet. */ 493 494 switch (ip->ip_p) { 495 case IPPROTO_TCP: 496 if (pktlen < (hlen + sizeof(struct tcphdr))) 497 return; 498 break; 499 case IPPROTO_UDP: 500 if (pktlen < (hlen + sizeof(struct udphdr))) 501 return; 502 uh = (struct udphdr *)((uint8_t *)ip + hlen); 503 if (uh->uh_sum == 0) 504 return; /* no checksum */ 505 break; 506 default: 507 return; 508 } 509 510 cksum = ~(flags & GEM_RD_CHECKSUM); 511 /* checksum fixup for IP options */ 512 len = hlen - sizeof(struct ip); 513 if (len > 0) { 514 opts = (uint16_t *)(ip + 1); 515 for (; len > 0; len -= sizeof(uint16_t), opts++) { 516 temp32 = cksum - *opts; 517 temp32 = (temp32 >> 16) + (temp32 & 65535); 518 cksum = temp32 & 65535; 519 } 520 } 521 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 522 m->m_pkthdr.csum_data = cksum; 523 } 524 525 static void 526 gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 527 { 528 struct gem_softc *sc = xsc; 529 530 if (error != 0) 531 return; 532 if (nsegs != 1) 533 panic("%s: bad control buffer segment count", __func__); 534 sc->sc_cddma = segs[0].ds_addr; 535 } 536 537 static void 538 gem_tick(void *arg) 539 { 540 struct gem_softc *sc = arg; 541 if_t ifp = sc->sc_ifp; 542 uint32_t v; 543 544 GEM_LOCK_ASSERT(sc, MA_OWNED); 545 546 /* 547 * Unload collision and error counters. 548 */ 549 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 550 GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) + 551 GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT)); 552 v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) + 553 GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT); 554 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); 555 if_inc_counter(ifp, IFCOUNTER_OERRORS, v); 556 if_inc_counter(ifp, IFCOUNTER_IERRORS, 557 GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) + 558 GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) + 559 GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) + 560 GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL)); 561 562 /* 563 * Then clear the hardware counters. 564 */ 565 GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 566 GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 567 GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 568 GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 569 GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 570 GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 571 GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 572 GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 573 574 mii_tick(sc->sc_mii); 575 576 if (gem_watchdog(sc) == EJUSTRETURN) 577 return; 578 579 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 580 } 581 582 static int 583 gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 584 { 585 int i; 586 uint32_t reg; 587 588 for (i = GEM_TRIES; i--; DELAY(100)) { 589 reg = GEM_READ_4(sc, r); 590 if ((reg & clr) == 0 && (reg & set) == set) 591 return (1); 592 } 593 return (0); 594 } 595 596 static void 597 gem_reset(struct gem_softc *sc) 598 { 599 600 #ifdef GEM_DEBUG 601 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 602 #endif 603 gem_reset_rx(sc); 604 gem_reset_tx(sc); 605 606 /* Do a full reset. */ 607 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 608 GEM_BARRIER(sc, GEM_RESET, 4, 609 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 610 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 611 device_printf(sc->sc_dev, "cannot reset device\n"); 612 } 613 614 static void 615 gem_rxdrain(struct gem_softc *sc) 616 { 617 struct gem_rxsoft *rxs; 618 int i; 619 620 for (i = 0; i < GEM_NRXDESC; i++) { 621 rxs = &sc->sc_rxsoft[i]; 622 if (rxs->rxs_mbuf != NULL) { 623 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 624 BUS_DMASYNC_POSTREAD); 625 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 626 m_freem(rxs->rxs_mbuf); 627 rxs->rxs_mbuf = NULL; 628 } 629 } 630 } 631 632 static void 633 gem_stop(if_t ifp, int disable) 634 { 635 struct gem_softc *sc = if_getsoftc(ifp); 636 struct gem_txsoft *txs; 637 638 #ifdef GEM_DEBUG 639 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 640 #endif 641 642 callout_stop(&sc->sc_tick_ch); 643 #ifdef GEM_RINT_TIMEOUT 644 callout_stop(&sc->sc_rx_ch); 645 #endif 646 647 gem_reset_tx(sc); 648 gem_reset_rx(sc); 649 650 /* 651 * Release any queued transmit buffers. 652 */ 653 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 654 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 655 if (txs->txs_ndescs != 0) { 656 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 657 BUS_DMASYNC_POSTWRITE); 658 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 659 if (txs->txs_mbuf != NULL) { 660 m_freem(txs->txs_mbuf); 661 txs->txs_mbuf = NULL; 662 } 663 } 664 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 665 } 666 667 if (disable) 668 gem_rxdrain(sc); 669 670 /* 671 * Mark the interface down and cancel the watchdog timer. 672 */ 673 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 674 sc->sc_flags &= ~GEM_LINK; 675 sc->sc_wdog_timer = 0; 676 } 677 678 static int 679 gem_reset_rx(struct gem_softc *sc) 680 { 681 682 /* 683 * Resetting while DMA is in progress can cause a bus hang, so we 684 * disable DMA first. 685 */ 686 (void)gem_disable_rx(sc); 687 GEM_WRITE_4(sc, GEM_RX_CONFIG, 0); 688 GEM_BARRIER(sc, GEM_RX_CONFIG, 4, 689 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 690 if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 691 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 692 693 /* Wait 5ms extra. */ 694 DELAY(5000); 695 696 /* Reset the ERX. */ 697 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX); 698 GEM_BARRIER(sc, GEM_RESET, 4, 699 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 700 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) { 701 device_printf(sc->sc_dev, "cannot reset receiver\n"); 702 return (1); 703 } 704 705 /* Finally, reset RX MAC. */ 706 GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1); 707 GEM_BARRIER(sc, GEM_MAC_RXRESET, 4, 708 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 709 if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) { 710 device_printf(sc->sc_dev, "cannot reset RX MAC\n"); 711 return (1); 712 } 713 714 return (0); 715 } 716 717 /* 718 * Reset the receiver DMA engine. 719 * 720 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 721 * etc in order to reset the receiver DMA engine only and not do a full 722 * reset which amongst others also downs the link and clears the FIFOs. 723 */ 724 static void 725 gem_reset_rxdma(struct gem_softc *sc) 726 { 727 int i; 728 729 if (gem_reset_rx(sc) != 0) { 730 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); 731 return (gem_init_locked(sc)); 732 } 733 for (i = 0; i < GEM_NRXDESC; i++) 734 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 735 GEM_UPDATE_RXDESC(sc, i); 736 sc->sc_rxptr = 0; 737 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 738 739 /* NOTE: we use only 32-bit DMA addresses here. */ 740 GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 741 GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 742 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 743 GEM_WRITE_4(sc, GEM_RX_CONFIG, 744 gem_ringsize(GEM_NRXDESC /* XXX */) | 745 ((ETHER_HDR_LEN + sizeof(struct ip)) << 746 GEM_RX_CONFIG_CXM_START_SHFT) | 747 (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 748 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT)); 749 GEM_WRITE_4(sc, GEM_RX_BLANKING, 750 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 751 GEM_RX_BLANKING_TIME_SHIFT) | 6); 752 GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 753 (3 * sc->sc_rxfifosize / 256) | 754 ((sc->sc_rxfifosize / 256) << 12)); 755 GEM_WRITE_4(sc, GEM_RX_CONFIG, 756 GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); 757 GEM_WRITE_4(sc, GEM_MAC_RX_MASK, 758 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 759 /* 760 * Clear the RX filter and reprogram it. This will also set the 761 * current RX MAC configuration and enable it. 762 */ 763 gem_setladrf(sc); 764 } 765 766 static int 767 gem_reset_tx(struct gem_softc *sc) 768 { 769 770 /* 771 * Resetting while DMA is in progress can cause a bus hang, so we 772 * disable DMA first. 773 */ 774 (void)gem_disable_tx(sc); 775 GEM_WRITE_4(sc, GEM_TX_CONFIG, 0); 776 GEM_BARRIER(sc, GEM_TX_CONFIG, 4, 777 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 778 if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 779 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 780 781 /* Wait 5ms extra. */ 782 DELAY(5000); 783 784 /* Finally, reset the ETX. */ 785 GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX); 786 GEM_BARRIER(sc, GEM_RESET, 4, 787 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 788 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 789 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 790 return (1); 791 } 792 return (0); 793 } 794 795 static int 796 gem_disable_rx(struct gem_softc *sc) 797 { 798 799 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, 800 GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE); 801 GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 802 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 803 if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 804 return (1); 805 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 806 return (0); 807 } 808 809 static int 810 gem_disable_tx(struct gem_softc *sc) 811 { 812 813 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, 814 GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE); 815 GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 816 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 817 if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 818 return (1); 819 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 820 return (0); 821 } 822 823 static int 824 gem_meminit(struct gem_softc *sc) 825 { 826 struct gem_rxsoft *rxs; 827 int error, i; 828 829 GEM_LOCK_ASSERT(sc, MA_OWNED); 830 831 /* 832 * Initialize the transmit descriptor ring. 833 */ 834 for (i = 0; i < GEM_NTXDESC; i++) { 835 sc->sc_txdescs[i].gd_flags = 0; 836 sc->sc_txdescs[i].gd_addr = 0; 837 } 838 sc->sc_txfree = GEM_MAXTXFREE; 839 sc->sc_txnext = 0; 840 sc->sc_txwin = 0; 841 842 /* 843 * Initialize the receive descriptor and receive job 844 * descriptor rings. 845 */ 846 for (i = 0; i < GEM_NRXDESC; i++) { 847 rxs = &sc->sc_rxsoft[i]; 848 if (rxs->rxs_mbuf == NULL) { 849 if ((error = gem_add_rxbuf(sc, i)) != 0) { 850 device_printf(sc->sc_dev, 851 "unable to allocate or map RX buffer %d, " 852 "error = %d\n", i, error); 853 /* 854 * XXX we should attempt to run with fewer 855 * receive buffers instead of just failing. 856 */ 857 gem_rxdrain(sc); 858 return (1); 859 } 860 } else 861 GEM_INIT_RXDESC(sc, i); 862 } 863 sc->sc_rxptr = 0; 864 865 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 866 867 return (0); 868 } 869 870 static u_int 871 gem_ringsize(u_int sz) 872 { 873 874 switch (sz) { 875 case 32: 876 return (GEM_RING_SZ_32); 877 case 64: 878 return (GEM_RING_SZ_64); 879 case 128: 880 return (GEM_RING_SZ_128); 881 case 256: 882 return (GEM_RING_SZ_256); 883 case 512: 884 return (GEM_RING_SZ_512); 885 case 1024: 886 return (GEM_RING_SZ_1024); 887 case 2048: 888 return (GEM_RING_SZ_2048); 889 case 4096: 890 return (GEM_RING_SZ_4096); 891 case 8192: 892 return (GEM_RING_SZ_8192); 893 default: 894 printf("%s: invalid ring size %d\n", __func__, sz); 895 return (GEM_RING_SZ_32); 896 } 897 } 898 899 static void 900 gem_init(void *xsc) 901 { 902 struct gem_softc *sc = xsc; 903 904 GEM_LOCK(sc); 905 gem_init_locked(sc); 906 GEM_UNLOCK(sc); 907 } 908 909 /* 910 * Initialization of interface; set up initialization block 911 * and transmit/receive descriptor rings. 912 */ 913 static void 914 gem_init_locked(struct gem_softc *sc) 915 { 916 if_t ifp = sc->sc_ifp; 917 uint32_t v; 918 919 GEM_LOCK_ASSERT(sc, MA_OWNED); 920 921 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 922 return; 923 924 #ifdef GEM_DEBUG 925 CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 926 __func__); 927 #endif 928 /* 929 * Initialization sequence. The numbered steps below correspond 930 * to the sequence outlined in section 6.3.5.1 in the Ethernet 931 * Channel Engine manual (part of the PCIO manual). 932 * See also the STP2002-STQ document from Sun Microsystems. 933 */ 934 935 /* step 1 & 2. Reset the Ethernet Channel. */ 936 gem_stop(ifp, 0); 937 gem_reset(sc); 938 #ifdef GEM_DEBUG 939 CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 940 __func__); 941 #endif 942 943 if ((sc->sc_flags & GEM_SERDES) == 0) 944 /* Re-initialize the MIF. */ 945 gem_mifinit(sc); 946 947 /* step 3. Setup data structures in host memory. */ 948 if (gem_meminit(sc) != 0) 949 return; 950 951 /* step 4. TX MAC registers & counters */ 952 gem_init_regs(sc); 953 954 /* step 5. RX MAC registers & counters */ 955 956 /* step 6 & 7. Program Descriptor Ring Base Addresses. */ 957 /* NOTE: we use only 32-bit DMA addresses here. */ 958 GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0); 959 GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 960 961 GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 962 GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 963 #ifdef GEM_DEBUG 964 CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", 965 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 966 #endif 967 968 /* step 8. Global Configuration & Interrupt Mask */ 969 970 /* 971 * Set the internal arbitration to "infinite" bursts of the 972 * maximum length of 31 * 64 bytes so DMA transfers aren't 973 * split up in cache line size chunks. This greatly improves 974 * RX performance. 975 * Enable silicon bug workarounds for the Apple variants. 976 */ 977 GEM_WRITE_4(sc, GEM_CONFIG, 978 GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 979 GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? 980 GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 981 982 GEM_WRITE_4(sc, GEM_INTMASK, 983 ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 984 GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 985 GEM_INTR_BERR 986 #ifdef GEM_DEBUG 987 | GEM_INTR_PCS | GEM_INTR_MIF 988 #endif 989 )); 990 GEM_WRITE_4(sc, GEM_MAC_RX_MASK, 991 GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 992 GEM_WRITE_4(sc, GEM_MAC_TX_MASK, 993 GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 994 GEM_MAC_TX_PEAK_EXP); 995 #ifdef GEM_DEBUG 996 GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 997 ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 998 #else 999 GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 1000 GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 1001 #endif 1002 1003 /* step 9. ETX Configuration: use mostly default values. */ 1004 1005 /* Enable DMA. */ 1006 v = gem_ringsize(GEM_NTXDESC); 1007 /* Set TX FIFO threshold and enable DMA. */ 1008 v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH; 1009 GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); 1010 1011 /* step 10. ERX Configuration */ 1012 1013 /* Encode Receive Descriptor ring size. */ 1014 v = gem_ringsize(GEM_NRXDESC /* XXX */); 1015 /* RX TCP/UDP checksum offset */ 1016 v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 1017 GEM_RX_CONFIG_CXM_START_SHFT); 1018 /* Set RX FIFO threshold, set first byte offset and enable DMA. */ 1019 GEM_WRITE_4(sc, GEM_RX_CONFIG, 1020 v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 1021 (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) | 1022 GEM_RX_CONFIG_RXDMA_EN); 1023 1024 GEM_WRITE_4(sc, GEM_RX_BLANKING, 1025 ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 1026 GEM_RX_BLANKING_TIME_SHIFT) | 6); 1027 1028 /* 1029 * The following value is for an OFF Threshold of about 3/4 full 1030 * and an ON Threshold of 1/4 full. 1031 */ 1032 GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 1033 (3 * sc->sc_rxfifosize / 256) | 1034 ((sc->sc_rxfifosize / 256) << 12)); 1035 1036 /* step 11. Configure Media. */ 1037 1038 /* step 12. RX_MAC Configuration Register */ 1039 v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG); 1040 v &= ~GEM_MAC_RX_ENABLE; 1041 v |= GEM_MAC_RX_STRIP_CRC; 1042 sc->sc_mac_rxcfg = v; 1043 /* 1044 * Clear the RX filter and reprogram it. This will also set the 1045 * current RX MAC configuration and enable it. 1046 */ 1047 gem_setladrf(sc); 1048 1049 /* step 13. TX_MAC Configuration Register */ 1050 v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG); 1051 v |= GEM_MAC_TX_ENABLE; 1052 (void)gem_disable_tx(sc); 1053 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v); 1054 1055 /* step 14. Issue Transmit Pending command. */ 1056 1057 /* step 15. Give the receiver a swift kick. */ 1058 GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 1059 1060 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1061 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1062 1063 mii_mediachg(sc->sc_mii); 1064 1065 /* Start the one second timer. */ 1066 sc->sc_wdog_timer = 0; 1067 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 1068 } 1069 1070 static int 1071 gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 1072 { 1073 bus_dma_segment_t txsegs[GEM_NTXSEGS]; 1074 struct gem_txsoft *txs; 1075 struct ip *ip; 1076 struct mbuf *m; 1077 uint64_t cflags, flags; 1078 int error, nexttx, nsegs, offset, seg; 1079 1080 GEM_LOCK_ASSERT(sc, MA_OWNED); 1081 1082 /* Get a work queue entry. */ 1083 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1084 /* Ran out of descriptors. */ 1085 return (ENOBUFS); 1086 } 1087 1088 cflags = 0; 1089 if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { 1090 if (M_WRITABLE(*m_head) == 0) { 1091 m = m_dup(*m_head, M_NOWAIT); 1092 m_freem(*m_head); 1093 *m_head = m; 1094 if (m == NULL) 1095 return (ENOBUFS); 1096 } 1097 offset = sizeof(struct ether_header); 1098 m = m_pullup(*m_head, offset + sizeof(struct ip)); 1099 if (m == NULL) { 1100 *m_head = NULL; 1101 return (ENOBUFS); 1102 } 1103 ip = (struct ip *)(mtod(m, caddr_t) + offset); 1104 offset += (ip->ip_hl << 2); 1105 cflags = offset << GEM_TD_CXSUM_STARTSHFT | 1106 ((offset + m->m_pkthdr.csum_data) << 1107 GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE; 1108 *m_head = m; 1109 } 1110 1111 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1112 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1113 if (error == EFBIG) { 1114 m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS); 1115 if (m == NULL) { 1116 m_freem(*m_head); 1117 *m_head = NULL; 1118 return (ENOBUFS); 1119 } 1120 *m_head = m; 1121 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1122 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1123 BUS_DMA_NOWAIT); 1124 if (error != 0) { 1125 m_freem(*m_head); 1126 *m_head = NULL; 1127 return (error); 1128 } 1129 } else if (error != 0) 1130 return (error); 1131 /* If nsegs is wrong then the stack is corrupt. */ 1132 KASSERT(nsegs <= GEM_NTXSEGS, 1133 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1134 if (nsegs == 0) { 1135 m_freem(*m_head); 1136 *m_head = NULL; 1137 return (EIO); 1138 } 1139 1140 /* 1141 * Ensure we have enough descriptors free to describe 1142 * the packet. Note, we always reserve one descriptor 1143 * at the end of the ring as a termination point, in 1144 * order to prevent wrap-around. 1145 */ 1146 if (nsegs > sc->sc_txfree - 1) { 1147 txs->txs_ndescs = 0; 1148 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1149 return (ENOBUFS); 1150 } 1151 1152 txs->txs_ndescs = nsegs; 1153 txs->txs_firstdesc = sc->sc_txnext; 1154 nexttx = txs->txs_firstdesc; 1155 for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 1156 #ifdef GEM_DEBUG 1157 CTR6(KTR_GEM, 1158 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1159 __func__, seg, nexttx, txsegs[seg].ds_len, 1160 txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); 1161 #endif 1162 sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr); 1163 KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 1164 ("%s: segment size too large!", __func__)); 1165 flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1166 sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags); 1167 txs->txs_lastdesc = nexttx; 1168 } 1169 1170 /* Set EOP on the last descriptor. */ 1171 #ifdef GEM_DEBUG 1172 CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 1173 __func__, seg, nexttx); 1174 #endif 1175 sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1176 htole64(GEM_TD_END_OF_PACKET); 1177 1178 /* Lastly set SOP on the first descriptor. */ 1179 #ifdef GEM_DEBUG 1180 CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 1181 __func__, seg, nexttx); 1182 #endif 1183 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1184 sc->sc_txwin = 0; 1185 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1186 htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET); 1187 } else 1188 sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1189 htole64(GEM_TD_START_OF_PACKET); 1190 1191 /* Sync the DMA map. */ 1192 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1193 BUS_DMASYNC_PREWRITE); 1194 1195 #ifdef GEM_DEBUG 1196 CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1197 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1198 txs->txs_ndescs); 1199 #endif 1200 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1201 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1202 txs->txs_mbuf = *m_head; 1203 1204 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1205 sc->sc_txfree -= txs->txs_ndescs; 1206 1207 return (0); 1208 } 1209 1210 static void 1211 gem_init_regs(struct gem_softc *sc) 1212 { 1213 const u_char *laddr = if_getlladdr(sc->sc_ifp); 1214 1215 GEM_LOCK_ASSERT(sc, MA_OWNED); 1216 1217 /* These registers are not cleared on reset. */ 1218 if ((sc->sc_flags & GEM_INITED) == 0) { 1219 /* magic values */ 1220 GEM_WRITE_4(sc, GEM_MAC_IPG0, 0); 1221 GEM_WRITE_4(sc, GEM_MAC_IPG1, 8); 1222 GEM_WRITE_4(sc, GEM_MAC_IPG2, 4); 1223 1224 /* min frame length */ 1225 GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1226 /* max frame length and max burst size */ 1227 GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME, 1228 (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1229 1230 /* more magic values */ 1231 GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7); 1232 GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4); 1233 GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1234 GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808); 1235 1236 /* random number seed */ 1237 GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED, 1238 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1239 1240 /* secondary MAC address: 0:0:0:0:0:0 */ 1241 GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0); 1242 GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0); 1243 GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0); 1244 1245 /* MAC control address: 01:80:c2:00:00:01 */ 1246 GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001); 1247 GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200); 1248 GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180); 1249 1250 /* MAC filter address: 0:0:0:0:0:0 */ 1251 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0); 1252 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0); 1253 GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0); 1254 GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0); 1255 GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0); 1256 1257 sc->sc_flags |= GEM_INITED; 1258 } 1259 1260 /* Counters need to be zeroed. */ 1261 GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 1262 GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 1263 GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 1264 GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 1265 GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0); 1266 GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0); 1267 GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0); 1268 GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 1269 GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 1270 GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 1271 GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 1272 1273 /* Set XOFF PAUSE time. */ 1274 GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1275 1276 /* Set the station address. */ 1277 GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1278 GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1279 GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1280 1281 /* Enable MII outputs. */ 1282 GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); 1283 } 1284 1285 static void 1286 gem_start(if_t ifp) 1287 { 1288 struct gem_softc *sc = if_getsoftc(ifp); 1289 1290 GEM_LOCK(sc); 1291 gem_start_locked(ifp); 1292 GEM_UNLOCK(sc); 1293 } 1294 1295 static inline void 1296 gem_txkick(struct gem_softc *sc) 1297 { 1298 1299 /* 1300 * Update the TX kick register. This register has to point to the 1301 * descriptor after the last valid one and for optimum performance 1302 * should be incremented in multiples of 4 (the DMA engine fetches/ 1303 * updates descriptors in batches of 4). 1304 */ 1305 #ifdef GEM_DEBUG 1306 CTR3(KTR_GEM, "%s: %s: kicking TX %d", 1307 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1308 #endif 1309 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1310 GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext); 1311 } 1312 1313 static void 1314 gem_start_locked(if_t ifp) 1315 { 1316 struct gem_softc *sc = if_getsoftc(ifp); 1317 struct mbuf *m; 1318 int kicked, ntx; 1319 1320 GEM_LOCK_ASSERT(sc, MA_OWNED); 1321 1322 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1323 IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 1324 return; 1325 1326 #ifdef GEM_DEBUG 1327 CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 1328 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1329 sc->sc_txnext); 1330 #endif 1331 ntx = 0; 1332 kicked = 0; 1333 for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) { 1334 m = if_dequeue(ifp); 1335 if (m == NULL) 1336 break; 1337 if (gem_load_txmbuf(sc, &m) != 0) { 1338 if (m == NULL) 1339 break; 1340 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1341 if_sendq_prepend(ifp, m); 1342 break; 1343 } 1344 if ((sc->sc_txnext % 4) == 0) { 1345 gem_txkick(sc); 1346 kicked = 1; 1347 } else 1348 kicked = 0; 1349 ntx++; 1350 BPF_MTAP(ifp, m); 1351 } 1352 1353 if (ntx > 0) { 1354 if (kicked == 0) 1355 gem_txkick(sc); 1356 #ifdef GEM_DEBUG 1357 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1358 device_get_name(sc->sc_dev), sc->sc_txnext); 1359 #endif 1360 1361 /* Set a watchdog timer in case the chip flakes out. */ 1362 sc->sc_wdog_timer = 5; 1363 #ifdef GEM_DEBUG 1364 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1365 device_get_name(sc->sc_dev), __func__, 1366 sc->sc_wdog_timer); 1367 #endif 1368 } 1369 } 1370 1371 static void 1372 gem_tint(struct gem_softc *sc) 1373 { 1374 if_t ifp = sc->sc_ifp; 1375 struct gem_txsoft *txs; 1376 int progress; 1377 uint32_t txlast; 1378 #ifdef GEM_DEBUG 1379 int i; 1380 1381 GEM_LOCK_ASSERT(sc, MA_OWNED); 1382 1383 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1384 #endif 1385 1386 /* 1387 * Go through our TX list and free mbufs for those 1388 * frames that have been transmitted. 1389 */ 1390 progress = 0; 1391 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1392 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1393 #ifdef GEM_DEBUG 1394 if ((if_getflags(ifp) & IFF_DEBUG) != 0) { 1395 printf(" txsoft %p transmit chain:\n", txs); 1396 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1397 printf("descriptor %d: ", i); 1398 printf("gd_flags: 0x%016llx\t", 1399 (long long)le64toh( 1400 sc->sc_txdescs[i].gd_flags)); 1401 printf("gd_addr: 0x%016llx\n", 1402 (long long)le64toh( 1403 sc->sc_txdescs[i].gd_addr)); 1404 if (i == txs->txs_lastdesc) 1405 break; 1406 } 1407 } 1408 #endif 1409 1410 /* 1411 * In theory, we could harvest some descriptors before 1412 * the ring is empty, but that's a bit complicated. 1413 * 1414 * GEM_TX_COMPLETION points to the last descriptor 1415 * processed + 1. 1416 */ 1417 txlast = GEM_READ_4(sc, GEM_TX_COMPLETION); 1418 #ifdef GEM_DEBUG 1419 CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 1420 "txs->txs_lastdesc = %d, txlast = %d", 1421 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1422 #endif 1423 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1424 if ((txlast >= txs->txs_firstdesc) && 1425 (txlast <= txs->txs_lastdesc)) 1426 break; 1427 } else { 1428 /* Ick -- this command wraps. */ 1429 if ((txlast >= txs->txs_firstdesc) || 1430 (txlast <= txs->txs_lastdesc)) 1431 break; 1432 } 1433 1434 #ifdef GEM_DEBUG 1435 CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 1436 #endif 1437 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1438 1439 sc->sc_txfree += txs->txs_ndescs; 1440 1441 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1442 BUS_DMASYNC_POSTWRITE); 1443 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1444 if (txs->txs_mbuf != NULL) { 1445 m_freem(txs->txs_mbuf); 1446 txs->txs_mbuf = NULL; 1447 } 1448 1449 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1450 1451 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1452 progress = 1; 1453 } 1454 1455 #ifdef GEM_DEBUG 1456 CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 1457 "GEM_TX_COMPLETION %x", 1458 __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE), 1459 ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) | 1460 GEM_READ_4(sc, GEM_TX_DATA_PTR_LO), 1461 GEM_READ_4(sc, GEM_TX_COMPLETION)); 1462 #endif 1463 1464 if (progress) { 1465 if (sc->sc_txfree == GEM_NTXDESC - 1) 1466 sc->sc_txwin = 0; 1467 1468 /* 1469 * We freed some descriptors, so reset IFF_DRV_OACTIVE 1470 * and restart. 1471 */ 1472 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1473 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1474 sc->sc_wdog_timer = 0; 1475 gem_start_locked(ifp); 1476 } 1477 1478 #ifdef GEM_DEBUG 1479 CTR3(KTR_GEM, "%s: %s: watchdog %d", 1480 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1481 #endif 1482 } 1483 1484 #ifdef GEM_RINT_TIMEOUT 1485 static void 1486 gem_rint_timeout(void *arg) 1487 { 1488 struct gem_softc *sc = arg; 1489 1490 GEM_LOCK_ASSERT(sc, MA_OWNED); 1491 1492 gem_rint(sc); 1493 } 1494 #endif 1495 1496 static void 1497 gem_rint(struct gem_softc *sc) 1498 { 1499 if_t ifp = sc->sc_ifp; 1500 struct mbuf *m; 1501 uint64_t rxstat; 1502 uint32_t rxcomp; 1503 1504 GEM_LOCK_ASSERT(sc, MA_OWNED); 1505 1506 #ifdef GEM_RINT_TIMEOUT 1507 callout_stop(&sc->sc_rx_ch); 1508 #endif 1509 #ifdef GEM_DEBUG 1510 CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 1511 #endif 1512 1513 /* 1514 * Read the completion register once. This limits 1515 * how long the following loop can execute. 1516 */ 1517 rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION); 1518 #ifdef GEM_DEBUG 1519 CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d", 1520 __func__, sc->sc_rxptr, rxcomp); 1521 #endif 1522 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1523 for (; sc->sc_rxptr != rxcomp;) { 1524 m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1525 rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 1526 1527 if (rxstat & GEM_RD_OWN) { 1528 #ifdef GEM_RINT_TIMEOUT 1529 /* 1530 * The descriptor is still marked as owned, although 1531 * it is supposed to have completed. This has been 1532 * observed on some machines. Just exiting here 1533 * might leave the packet sitting around until another 1534 * one arrives to trigger a new interrupt, which is 1535 * generally undesirable, so set up a timeout. 1536 */ 1537 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1538 gem_rint_timeout, sc); 1539 #endif 1540 m = NULL; 1541 goto kickit; 1542 } 1543 1544 if (rxstat & GEM_RD_BAD_CRC) { 1545 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1546 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1547 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1548 m = NULL; 1549 goto kickit; 1550 } 1551 1552 #ifdef GEM_DEBUG 1553 if ((if_getflags(ifp) & IFF_DEBUG) != 0) { 1554 printf(" rxsoft %p descriptor %d: ", 1555 &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 1556 printf("gd_flags: 0x%016llx\t", 1557 (long long)le64toh( 1558 sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 1559 printf("gd_addr: 0x%016llx\n", 1560 (long long)le64toh( 1561 sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 1562 } 1563 #endif 1564 1565 /* 1566 * Allocate a new mbuf cluster. If that fails, we are 1567 * out of memory, and must drop the packet and recycle 1568 * the buffer that's already attached to this descriptor. 1569 */ 1570 if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 1571 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1572 GEM_INIT_RXDESC(sc, sc->sc_rxptr); 1573 m = NULL; 1574 } 1575 1576 kickit: 1577 /* 1578 * Update the RX kick register. This register has to point 1579 * to the descriptor after the last valid one (before the 1580 * current batch) and for optimum performance should be 1581 * incremented in multiples of 4 (the DMA engine fetches/ 1582 * updates descriptors in batches of 4). 1583 */ 1584 sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 1585 if ((sc->sc_rxptr % 4) == 0) { 1586 GEM_CDSYNC(sc, 1587 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1588 GEM_WRITE_4(sc, GEM_RX_KICK, 1589 (sc->sc_rxptr + GEM_NRXDESC - 4) & 1590 GEM_NRXDESC_MASK); 1591 } 1592 1593 if (m == NULL) { 1594 if (rxstat & GEM_RD_OWN) 1595 break; 1596 continue; 1597 } 1598 1599 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1600 m->m_data += ETHER_ALIGN; /* first byte offset */ 1601 m->m_pkthdr.rcvif = ifp; 1602 m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 1603 1604 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) 1605 gem_rxcksum(m, rxstat); 1606 1607 /* Pass it on. */ 1608 GEM_UNLOCK(sc); 1609 if_input(ifp, m); 1610 GEM_LOCK(sc); 1611 } 1612 1613 #ifdef GEM_DEBUG 1614 CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__, 1615 sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION)); 1616 #endif 1617 } 1618 1619 static int 1620 gem_add_rxbuf(struct gem_softc *sc, int idx) 1621 { 1622 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1623 struct mbuf *m; 1624 bus_dma_segment_t segs[1]; 1625 int error, nsegs; 1626 1627 GEM_LOCK_ASSERT(sc, MA_OWNED); 1628 1629 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1630 if (m == NULL) 1631 return (ENOBUFS); 1632 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1633 1634 #ifdef GEM_DEBUG 1635 /* Bzero the packet to check DMA. */ 1636 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1637 #endif 1638 1639 if (rxs->rxs_mbuf != NULL) { 1640 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1641 BUS_DMASYNC_POSTREAD); 1642 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1643 } 1644 1645 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1646 m, segs, &nsegs, BUS_DMA_NOWAIT); 1647 if (error != 0) { 1648 device_printf(sc->sc_dev, 1649 "cannot load RS DMA map %d, error = %d\n", idx, error); 1650 m_freem(m); 1651 return (error); 1652 } 1653 /* If nsegs is wrong then the stack is corrupt. */ 1654 KASSERT(nsegs == 1, 1655 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1656 rxs->rxs_mbuf = m; 1657 rxs->rxs_paddr = segs[0].ds_addr; 1658 1659 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1660 BUS_DMASYNC_PREREAD); 1661 1662 GEM_INIT_RXDESC(sc, idx); 1663 1664 return (0); 1665 } 1666 1667 static void 1668 gem_eint(struct gem_softc *sc, u_int status) 1669 { 1670 1671 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 1672 if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 1673 gem_reset_rxdma(sc); 1674 return; 1675 } 1676 1677 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 1678 if ((status & GEM_INTR_BERR) != 0) { 1679 printf(", PCI bus error 0x%x", 1680 GEM_READ_4(sc, GEM_PCI_ERROR_STATUS)); 1681 } 1682 printf("\n"); 1683 } 1684 1685 void 1686 gem_intr(void *v) 1687 { 1688 struct gem_softc *sc = v; 1689 uint32_t status, status2; 1690 1691 GEM_LOCK(sc); 1692 status = GEM_READ_4(sc, GEM_STATUS); 1693 1694 #ifdef GEM_DEBUG 1695 CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 1696 device_get_name(sc->sc_dev), __func__, 1697 (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status); 1698 1699 /* 1700 * PCS interrupts must be cleared, otherwise no traffic is passed! 1701 */ 1702 if ((status & GEM_INTR_PCS) != 0) { 1703 status2 = 1704 GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) | 1705 GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS); 1706 if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 1707 device_printf(sc->sc_dev, 1708 "%s: PCS link status changed\n", __func__); 1709 } 1710 if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1711 status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS); 1712 if ((status2 & GEM_MAC_PAUSED) != 0) 1713 device_printf(sc->sc_dev, 1714 "%s: PAUSE received (PAUSE time %d slots)\n", 1715 __func__, GEM_MAC_PAUSE_TIME(status2)); 1716 if ((status2 & GEM_MAC_PAUSE) != 0) 1717 device_printf(sc->sc_dev, 1718 "%s: transited to PAUSE state\n", __func__); 1719 if ((status2 & GEM_MAC_RESUME) != 0) 1720 device_printf(sc->sc_dev, 1721 "%s: transited to non-PAUSE state\n", __func__); 1722 } 1723 if ((status & GEM_INTR_MIF) != 0) 1724 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1725 #endif 1726 1727 if (__predict_false(status & 1728 (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 1729 gem_eint(sc, status); 1730 1731 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1732 gem_rint(sc); 1733 1734 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1735 gem_tint(sc); 1736 1737 if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) { 1738 status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS); 1739 if ((status2 & 1740 ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 1741 GEM_MAC_TX_PEAK_EXP)) != 0) 1742 device_printf(sc->sc_dev, 1743 "MAC TX fault, status %x\n", status2); 1744 if ((status2 & 1745 (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) { 1746 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 1747 if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); 1748 gem_init_locked(sc); 1749 } 1750 } 1751 if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) { 1752 status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS); 1753 /* 1754 * At least with GEM_SUN_GEM revisions GEM_MAC_RX_OVERFLOW 1755 * happen often due to a silicon bug so handle them silently. 1756 * Moreover, it's likely that the receiver has hung so we 1757 * reset it. 1758 */ 1759 if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 1760 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 1761 gem_reset_rxdma(sc); 1762 } else if ((status2 & 1763 ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 1764 device_printf(sc->sc_dev, 1765 "MAC RX fault, status %x\n", status2); 1766 } 1767 GEM_UNLOCK(sc); 1768 } 1769 1770 static int 1771 gem_watchdog(struct gem_softc *sc) 1772 { 1773 if_t ifp = sc->sc_ifp; 1774 1775 GEM_LOCK_ASSERT(sc, MA_OWNED); 1776 1777 #ifdef GEM_DEBUG 1778 CTR4(KTR_GEM, 1779 "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1780 __func__, GEM_READ_4(sc, GEM_RX_CONFIG), 1781 GEM_READ_4(sc, GEM_MAC_RX_STATUS), 1782 GEM_READ_4(sc, GEM_MAC_RX_CONFIG)); 1783 CTR4(KTR_GEM, 1784 "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1785 __func__, GEM_READ_4(sc, GEM_TX_CONFIG), 1786 GEM_READ_4(sc, GEM_MAC_TX_STATUS), 1787 GEM_READ_4(sc, GEM_MAC_TX_CONFIG)); 1788 #endif 1789 1790 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 1791 return (0); 1792 1793 if ((sc->sc_flags & GEM_LINK) != 0) 1794 device_printf(sc->sc_dev, "device timeout\n"); 1795 else if (bootverbose) 1796 device_printf(sc->sc_dev, "device timeout (no link)\n"); 1797 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1798 1799 /* Try to get more packets going. */ 1800 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1801 gem_init_locked(sc); 1802 gem_start_locked(ifp); 1803 return (EJUSTRETURN); 1804 } 1805 1806 static void 1807 gem_mifinit(struct gem_softc *sc) 1808 { 1809 1810 /* Configure the MIF in frame mode. */ 1811 GEM_WRITE_4(sc, GEM_MIF_CONFIG, 1812 GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1813 GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 1814 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1815 } 1816 1817 /* 1818 * MII interface 1819 * 1820 * The MII interface supports at least three different operating modes: 1821 * 1822 * Bitbang mode is implemented using data, clock and output enable registers. 1823 * 1824 * Frame mode is implemented by loading a complete frame into the frame 1825 * register and polling the valid bit for completion. 1826 * 1827 * Polling mode uses the frame register but completion is indicated by 1828 * an interrupt. 1829 * 1830 */ 1831 int 1832 gem_mii_readreg(device_t dev, int phy, int reg) 1833 { 1834 struct gem_softc *sc; 1835 int n; 1836 uint32_t v; 1837 1838 #ifdef GEM_DEBUG_PHY 1839 printf("%s: phy %d reg %d\n", __func__, phy, reg); 1840 #endif 1841 1842 sc = device_get_softc(dev); 1843 if ((sc->sc_flags & GEM_SERDES) != 0) { 1844 switch (reg) { 1845 case MII_BMCR: 1846 reg = GEM_MII_CONTROL; 1847 break; 1848 case MII_BMSR: 1849 reg = GEM_MII_STATUS; 1850 break; 1851 case MII_PHYIDR1: 1852 case MII_PHYIDR2: 1853 return (0); 1854 case MII_ANAR: 1855 reg = GEM_MII_ANAR; 1856 break; 1857 case MII_ANLPAR: 1858 reg = GEM_MII_ANLPAR; 1859 break; 1860 case MII_EXTSR: 1861 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 1862 default: 1863 device_printf(sc->sc_dev, 1864 "%s: unhandled register %d\n", __func__, reg); 1865 return (0); 1866 } 1867 return (GEM_READ_4(sc, reg)); 1868 } 1869 1870 /* Construct the frame command. */ 1871 v = GEM_MIF_FRAME_READ | 1872 (phy << GEM_MIF_PHY_SHIFT) | 1873 (reg << GEM_MIF_REG_SHIFT); 1874 1875 GEM_WRITE_4(sc, GEM_MIF_FRAME, v); 1876 GEM_BARRIER(sc, GEM_MIF_FRAME, 4, 1877 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1878 for (n = 0; n < 100; n++) { 1879 DELAY(1); 1880 v = GEM_READ_4(sc, GEM_MIF_FRAME); 1881 if (v & GEM_MIF_FRAME_TA0) 1882 return (v & GEM_MIF_FRAME_DATA); 1883 } 1884 1885 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1886 return (0); 1887 } 1888 1889 int 1890 gem_mii_writereg(device_t dev, int phy, int reg, int val) 1891 { 1892 struct gem_softc *sc; 1893 int n; 1894 uint32_t v; 1895 1896 #ifdef GEM_DEBUG_PHY 1897 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 1898 #endif 1899 1900 sc = device_get_softc(dev); 1901 if ((sc->sc_flags & GEM_SERDES) != 0) { 1902 switch (reg) { 1903 case MII_BMSR: 1904 reg = GEM_MII_STATUS; 1905 break; 1906 case MII_BMCR: 1907 reg = GEM_MII_CONTROL; 1908 if ((val & GEM_MII_CONTROL_RESET) == 0) 1909 break; 1910 GEM_WRITE_4(sc, GEM_MII_CONTROL, val); 1911 GEM_BARRIER(sc, GEM_MII_CONTROL, 4, 1912 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1913 if (!gem_bitwait(sc, GEM_MII_CONTROL, 1914 GEM_MII_CONTROL_RESET, 0)) 1915 device_printf(sc->sc_dev, 1916 "cannot reset PCS\n"); 1917 /* FALLTHROUGH */ 1918 case MII_ANAR: 1919 GEM_WRITE_4(sc, GEM_MII_CONFIG, 0); 1920 GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 1921 BUS_SPACE_BARRIER_WRITE); 1922 GEM_WRITE_4(sc, GEM_MII_ANAR, val); 1923 GEM_BARRIER(sc, GEM_MII_ANAR, 4, 1924 BUS_SPACE_BARRIER_WRITE); 1925 GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 1926 GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 1927 GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, 1928 BUS_SPACE_BARRIER_WRITE); 1929 GEM_WRITE_4(sc, GEM_MII_CONFIG, 1930 GEM_MII_CONFIG_ENABLE); 1931 GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 1932 BUS_SPACE_BARRIER_WRITE); 1933 return (0); 1934 case MII_ANLPAR: 1935 reg = GEM_MII_ANLPAR; 1936 break; 1937 default: 1938 device_printf(sc->sc_dev, 1939 "%s: unhandled register %d\n", __func__, reg); 1940 return (0); 1941 } 1942 GEM_WRITE_4(sc, reg, val); 1943 GEM_BARRIER(sc, reg, 4, 1944 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1945 return (0); 1946 } 1947 1948 /* Construct the frame command. */ 1949 v = GEM_MIF_FRAME_WRITE | 1950 (phy << GEM_MIF_PHY_SHIFT) | 1951 (reg << GEM_MIF_REG_SHIFT) | 1952 (val & GEM_MIF_FRAME_DATA); 1953 1954 GEM_WRITE_4(sc, GEM_MIF_FRAME, v); 1955 GEM_BARRIER(sc, GEM_MIF_FRAME, 4, 1956 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1957 for (n = 0; n < 100; n++) { 1958 DELAY(1); 1959 v = GEM_READ_4(sc, GEM_MIF_FRAME); 1960 if (v & GEM_MIF_FRAME_TA0) 1961 return (1); 1962 } 1963 1964 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 1965 return (0); 1966 } 1967 1968 void 1969 gem_mii_statchg(device_t dev) 1970 { 1971 struct gem_softc *sc; 1972 int gigabit; 1973 uint32_t rxcfg, txcfg, v; 1974 1975 sc = device_get_softc(dev); 1976 1977 GEM_LOCK_ASSERT(sc, MA_OWNED); 1978 1979 #ifdef GEM_DEBUG 1980 if ((sc->sc_if_getflags(ifp) & IFF_DEBUG) != 0) 1981 device_printf(sc->sc_dev, "%s: status change\n", __func__); 1982 #endif 1983 1984 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 1985 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 1986 sc->sc_flags |= GEM_LINK; 1987 else 1988 sc->sc_flags &= ~GEM_LINK; 1989 1990 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 1991 case IFM_1000_SX: 1992 case IFM_1000_LX: 1993 case IFM_1000_CX: 1994 case IFM_1000_T: 1995 gigabit = 1; 1996 break; 1997 default: 1998 gigabit = 0; 1999 } 2000 2001 /* 2002 * The configuration done here corresponds to the steps F) and 2003 * G) and as far as enabling of RX and TX MAC goes also step H) 2004 * of the initialization sequence outlined in section 3.2.1 of 2005 * the GEM Gigabit Ethernet ASIC Specification. 2006 */ 2007 2008 rxcfg = sc->sc_mac_rxcfg; 2009 rxcfg &= ~GEM_MAC_RX_CARR_EXTEND; 2010 txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 2011 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2012 txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 2013 else if (gigabit != 0) { 2014 rxcfg |= GEM_MAC_RX_CARR_EXTEND; 2015 txcfg |= GEM_MAC_TX_CARR_EXTEND; 2016 } 2017 (void)gem_disable_tx(sc); 2018 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg); 2019 (void)gem_disable_rx(sc); 2020 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg); 2021 2022 v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) & 2023 ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 2024 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2025 IFM_ETH_RXPAUSE) != 0) 2026 v |= GEM_MAC_CC_RX_PAUSE; 2027 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2028 IFM_ETH_TXPAUSE) != 0) 2029 v |= GEM_MAC_CC_TX_PAUSE; 2030 GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v); 2031 2032 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 2033 gigabit != 0) 2034 GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, 2035 GEM_MAC_SLOT_TIME_CARR_EXTEND); 2036 else 2037 GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, 2038 GEM_MAC_SLOT_TIME_NORMAL); 2039 2040 /* XIF Configuration */ 2041 v = GEM_MAC_XIF_LINK_LED; 2042 v |= GEM_MAC_XIF_TX_MII_ENA; 2043 if ((sc->sc_flags & GEM_SERDES) == 0) { 2044 if ((GEM_READ_4(sc, GEM_MIF_CONFIG) & 2045 GEM_MIF_CONFIG_PHY_SEL) != 0) { 2046 /* External MII needs echo disable if half duplex. */ 2047 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2048 IFM_FDX) == 0) 2049 v |= GEM_MAC_XIF_ECHO_DISABL; 2050 } else 2051 /* 2052 * Internal MII needs buffer enable. 2053 * XXX buffer enable makes only sense for an 2054 * external PHY. 2055 */ 2056 v |= GEM_MAC_XIF_MII_BUF_ENA; 2057 } 2058 if (gigabit != 0) 2059 v |= GEM_MAC_XIF_GMII_MODE; 2060 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2061 v |= GEM_MAC_XIF_FDPLX_LED; 2062 GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v); 2063 2064 sc->sc_mac_rxcfg = rxcfg; 2065 if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 && 2066 (sc->sc_flags & GEM_LINK) != 0) { 2067 GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, 2068 txcfg | GEM_MAC_TX_ENABLE); 2069 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, 2070 rxcfg | GEM_MAC_RX_ENABLE); 2071 } 2072 } 2073 2074 int 2075 gem_mediachange(if_t ifp) 2076 { 2077 struct gem_softc *sc = if_getsoftc(ifp); 2078 int error; 2079 2080 /* XXX add support for serial media. */ 2081 2082 GEM_LOCK(sc); 2083 error = mii_mediachg(sc->sc_mii); 2084 GEM_UNLOCK(sc); 2085 return (error); 2086 } 2087 2088 void 2089 gem_mediastatus(if_t ifp, struct ifmediareq *ifmr) 2090 { 2091 struct gem_softc *sc = if_getsoftc(ifp); 2092 2093 GEM_LOCK(sc); 2094 if ((if_getflags(ifp) & IFF_UP) == 0) { 2095 GEM_UNLOCK(sc); 2096 return; 2097 } 2098 2099 mii_pollstat(sc->sc_mii); 2100 ifmr->ifm_active = sc->sc_mii->mii_media_active; 2101 ifmr->ifm_status = sc->sc_mii->mii_media_status; 2102 GEM_UNLOCK(sc); 2103 } 2104 2105 static int 2106 gem_ioctl(if_t ifp, u_long cmd, caddr_t data) 2107 { 2108 struct gem_softc *sc = if_getsoftc(ifp); 2109 struct ifreq *ifr = (struct ifreq *)data; 2110 int error; 2111 2112 error = 0; 2113 switch (cmd) { 2114 case SIOCSIFFLAGS: 2115 GEM_LOCK(sc); 2116 if ((if_getflags(ifp) & IFF_UP) != 0) { 2117 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 2118 ((if_getflags(ifp) ^ sc->sc_ifflags) & 2119 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 2120 gem_setladrf(sc); 2121 else 2122 gem_init_locked(sc); 2123 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2124 gem_stop(ifp, 0); 2125 if ((if_getflags(ifp) & IFF_LINK0) != 0) 2126 sc->sc_csum_features |= CSUM_UDP; 2127 else 2128 sc->sc_csum_features &= ~CSUM_UDP; 2129 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 2130 if_sethwassist(ifp, sc->sc_csum_features); 2131 sc->sc_ifflags = if_getflags(ifp); 2132 GEM_UNLOCK(sc); 2133 break; 2134 case SIOCADDMULTI: 2135 case SIOCDELMULTI: 2136 GEM_LOCK(sc); 2137 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2138 gem_setladrf(sc); 2139 GEM_UNLOCK(sc); 2140 break; 2141 case SIOCGIFMEDIA: 2142 case SIOCSIFMEDIA: 2143 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 2144 break; 2145 case SIOCSIFCAP: 2146 GEM_LOCK(sc); 2147 if_setcapenable(ifp, ifr->ifr_reqcap); 2148 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 2149 if_sethwassist(ifp, sc->sc_csum_features); 2150 else 2151 if_sethwassist(ifp, 0); 2152 GEM_UNLOCK(sc); 2153 break; 2154 default: 2155 error = ether_ioctl(ifp, cmd, data); 2156 break; 2157 } 2158 2159 return (error); 2160 } 2161 2162 static u_int 2163 gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2164 { 2165 uint32_t crc, *hash = arg; 2166 2167 crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); 2168 /* We just want the 8 most significant bits. */ 2169 crc >>= 24; 2170 /* Set the corresponding bit in the filter. */ 2171 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2172 2173 return (1); 2174 } 2175 2176 static void 2177 gem_setladrf(struct gem_softc *sc) 2178 { 2179 if_t ifp = sc->sc_ifp; 2180 int i; 2181 uint32_t hash[16]; 2182 uint32_t v; 2183 2184 GEM_LOCK_ASSERT(sc, MA_OWNED); 2185 2186 /* 2187 * Turn off the RX MAC and the hash filter as required by the Sun GEM 2188 * programming restrictions. 2189 */ 2190 v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER; 2191 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 2192 GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 2193 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2194 if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER | 2195 GEM_MAC_RX_ENABLE, 0)) 2196 device_printf(sc->sc_dev, 2197 "cannot disable RX MAC or hash filter\n"); 2198 2199 v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP); 2200 if ((if_getflags(ifp) & IFF_PROMISC) != 0) { 2201 v |= GEM_MAC_RX_PROMISCUOUS; 2202 goto chipit; 2203 } 2204 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { 2205 v |= GEM_MAC_RX_PROMISC_GRP; 2206 goto chipit; 2207 } 2208 2209 /* 2210 * Set up multicast address filter by passing all multicast 2211 * addresses through a crc generator, and then using the high 2212 * order 8 bits as an index into the 256 bit logical address 2213 * filter. The high order 4 bits selects the word, while the 2214 * other 4 bits select the bit within the word (where bit 0 2215 * is the MSB). 2216 */ 2217 2218 memset(hash, 0, sizeof(hash)); 2219 if_foreach_llmaddr(ifp, gem_hash_maddr, hash); 2220 2221 v |= GEM_MAC_RX_HASH_FILTER; 2222 2223 /* Now load the hash table into the chip (if we are using it). */ 2224 for (i = 0; i < 16; i++) 2225 GEM_WRITE_4(sc, 2226 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0), 2227 hash[i]); 2228 2229 chipit: 2230 sc->sc_mac_rxcfg = v; 2231 GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE); 2232 } 2233