/*-
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for Cadence GEM Gigabit Ethernet
 * interfaces such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

struct cgem_softc {
	struct ifnet		*ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	int			ref_clk_num;
	u_char			eaddr[6];

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
		    eaddr[0], eaddr[1], eaddr[2],
		    eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}
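
/*
 * Worked example of the hash above (an illustration only, not code the
 * driver uses): each hash bit is the XOR of eight address bits, so for the
 * all-ones address ff:ff:ff:ff:ff:ff every column XORs eight 1s to 0 and
 * the address hashes to index 0, i.e. bit 0 of CGEM_HASH_BOT.
 */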
/*
 * After any change in rx flags or multi-cast addresses, set up hash
 * registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int index;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST |
	    CGEM_NET_CFG_COPY_ALL);

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((ifp->if_flags & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			if_maddr_rlock(ifp);
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				index = cgem_mac_hash(
				    LLADDR((struct sockaddr_dl *)
				    ifma->ifma_addr));
				if (index > 31)
					hash_hi |= (1 << (index - 32));
				else
					hash_lo |= (1 << index);
			}
			if_maddr_runlock(ifp);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
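
/*
 * Note on descriptor ownership (a summary of the conventions used by the
 * ring routines below): an rx descriptor is handed to the controller with
 * CGEM_RXDESC_OWN clear and the controller sets that bit once it has written
 * a received frame into the buffer; a tx descriptor is handed to the
 * controller with CGEM_TXDESC_USED clear and the controller sets that bit
 * when it is done transmitting from the buffer.
 */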
/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAX_DESC_RING_SIZE,
	    1,
	    MAX_DESC_RING_SIZE,
	    0,
	    busdma_lock_mutex, &sc->sc_mtx,
	    &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MCLBYTES,
	    TX_MAX_DMA_SEGS,
	    MCLBYTES,
	    0,
	    busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring,
	    CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc),
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->txring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
	    (void *)sc->txring,
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc),
	    cgem_getaddr, &sc->txring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in
		 * the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		(*ifp->if_input)(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
			    "AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
}
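
/*
 * Note on ring headroom: cgem_start_locked() below only queues another
 * packet while at least 2 * TX_MAX_DMA_SEGS descriptors remain free, so a
 * worst-case fragmented mbuf chain always fits; otherwise it sets
 * IFF_DRV_OACTIVE and waits for cgem_clean_tx() to retire descriptors.
 */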
/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr],
			    m, segs, &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit
		 * in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

static void
cgem_start(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev, "cgem_intr: hresp not okay! "
		    "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
		cgem_start_locked(sc->ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = IF_LLADDR(sc->ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64 |
	    CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX |
	    CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			/* Unload dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}
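
/*
 * Interface ioctl handler: interface up/down and flag changes, multicast
 * filter updates, media requests (passed through to mii), and checksum
 * offload capability changes.
 */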
static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
		else
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines. */

static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);

	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}
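
/*
 * PHY register access.  Each access below programs the PHY maintenance
 * register and then polls CGEM_NET_STAT for completion, giving up after
 * roughly a millisecond (200 polls of 5us each).
 */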
static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);

/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	uint32_t net_cfg;
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		net_cfg |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		net_cfg |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Set the reference clock if necessary. */
	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
		device_printf(sc->dev, "cgem_mediachange: "
		    "could not set ref clk%d to %d.\n",
		    sc->ref_clk_num, ref_clk_freq);

	sc->mii_media_active = mii->mii_media_active;
}
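
/*
 * Export the driver's event counters and the accumulated hardware statistics
 * under the device's sysctl tree (dev.cgem.<unit>).
 */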
static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0,
	    "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0,
	    "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0,
	    "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0,
	    "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0,
	    "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0,
	    "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0,
	    "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0,
	    "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0,
	    "Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to receive overrun");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}

static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	phandle_t node;
	pcell_t cell;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	/* Disable hardware checksumming by default. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM);
	ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
	sc->rxhangwar = 1;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts,
	    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}
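
/*
 * Detach the device: stop the interface, detach from the network stack and
 * the mii bus, and release interrupt, bus, and DMA resources.
 */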
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring != NULL) {
		if (sc->txring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->txring_dma_map);
			sc->txring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
		    sc->txring_dma_map);
		sc->txring = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);