/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for the Cadence GEM Gigabit Ethernet
 * interface, such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/extres/clk/clk.h>

#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME		"cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */
#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define HWTYPE_GENERIC_GEM	1
#define HWTYPE_ZYNQ		2
#define HWTYPE_ZYNQMP		3
#define HWTYPE_SIFIVE		4

static struct ofw_compat_data compat_data[] = {
	{ "cdns,zynq-gem",		HWTYPE_ZYNQ },
	{ "cdns,zynqmp-gem",		HWTYPE_ZYNQMP },
	{ "sifive,fu540-c000-gem",	HWTYPE_SIFIVE },
	{ "sifive,fu740-c000-gem",	HWTYPE_SIFIVE },
	{ "cdns,gem",			HWTYPE_GENERIC_GEM },
	{ "cadence,gem",		HWTYPE_GENERIC_GEM },
	{ NULL,				0 }
};

struct cgem_softc {
	if_t			ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	uint32_t		net_cfg_shadow;
	clk_t			ref_clk;
	int			neednullqs;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* null descriptor rings */
	void			*null_qs;
	bus_addr_t		null_qs_physaddr;

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	mtx_init(&(sc)->sc_mtx, \
	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
		    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
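/*
 * Worked example (illustrative, not taken from the TRM): for the all-ones
 * address ff:ff:ff:ff:ff:ff, each of the six hash bits is the exclusive-or
 * of eight 1 bits and is therefore 0, so that address hashes to index 0,
 * i.e. bit 0 of the value written to CGEM_HASH_BOT in cgem_rx_filter()
 * below.
 */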
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}

static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);

	return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for priority queues we actually can't disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues. */
	queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
	    CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
	if (queue_mask == 0)
		return;

	/* Create empty RX queue and empty TX buf queues. */
	memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
	    sizeof(struct cgem_tx_desc));
	rx_desc = sc->null_qs;
	rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
	tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
	tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

	/* Point all valid ring base pointers to the null queues. */
	for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
		WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
		WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
		    sizeof(struct cgem_rx_desc));
	}
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;
	int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

	if (sc->neednullqs)
		desc_rings_size += sizeof(struct cgem_rx_desc) +
		    sizeof(struct cgem_tx_desc);

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
	    1ULL << 32,	/* Do not cross a 4G boundary. */
#else
	    0,
#endif
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    desc_rings_size, 1, desc_rings_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/*
	 * Allocate DMA memory.  We allocate transmit, receive and null
	 * descriptor queues all at once because the hardware only provides
	 * one register for the upper 32 bits of rx and tx descriptor queues
	 * hardware addresses.
	 */
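	/*
	 * Layout of this single allocation, as set up below: CGEM_NUM_RX_DESCS
	 * rx descriptors, then CGEM_NUM_TX_DESCS tx descriptors, then, if
	 * sc->neednullqs is set, one null rx descriptor followed by one null
	 * tx descriptor.
	 */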
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring, desc_rings_size,
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
	sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
	    sizeof(struct cgem_rx_desc);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	if (sc->neednullqs) {
		sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
		sc->null_qs_physaddr = sc->txring_physaddr +
		    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

		cgem_null_qs(sc);
	}

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
		sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in the
		 * receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {
		/* Sync cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
#ifdef CGEM64
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
			    sc->txring[sc->txring_tl_ptr].addrhi,
			    sc->txring[sc->txring_tl_ptr].addr);
#else
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
#endif
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
			    &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}

static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  A 1 usec
		 * delay is necessary, especially when operating at 100 Mbps
		 * and 10 Mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register. */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (ENXIO);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/* MII bus support routines. */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
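/*
 * Illustrative sketch only (not part of this driver): platform code could
 * override the weak default along these lines.  The helper called below is
 * hypothetical; real platform code will differ.
 */
#if 0
int
cgem_set_ref_clk(int unit, int frequency)
{

	/* Hypothetical platform-specific clock hook. */
	return (example_platform_set_gem_refclk(unit, frequency));
}
#endif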
/* Call to set reference clock and network config bits according to media. */
static void
cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
{
	int ref_clk_freq;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect media. */
	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_FULL_DUPLEX);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
		    CGEM_NET_CFG_GIGE_EN);
		ref_clk_freq = 125000000;
		break;
	case IFM_100_TX:
		sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
		ref_clk_freq = 25000000;
		break;
	default:
		ref_clk_freq = 2500000;
	}

	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	if (sc->ref_clk != NULL) {
		CGEM_UNLOCK(sc);
		if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0))
			device_printf(sc->dev, "could not set ref clk to %d\n",
			    ref_clk_freq);
		CGEM_LOCK(sc);
	}

	sc->mii_media_active = mii->mii_media_active;
}
static void
cgem_add_sysctls(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
	    &sc->rxbufs, 0, "Number receive buffers to provide");

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
	    &sc->rxhangwar, 0, "Enable receive hang work-around");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
	    &sc->rxoverruns, 0, "Receive overrun events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
	    &sc->rxnobufs, 0, "Receive buf queue empty events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
	    &sc->rxdmamapfails, 0, "Receive DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
	    &sc->txfull, 0, "Transmit ring full events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
	    &sc->txdmamapfails, 0, "Transmit DMA map failures");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
	    &sc->txdefrags, 0, "Transmit m_defrag() calls");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
	    &sc->txdefragfails, 0, "Transmit m_defrag() failures");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics");
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &sc->stats.tx_bytes, "Total bytes transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
	    &sc->stats.tx_frames, 0, "Total frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
	    &sc->stats.tx_frames_bcast, 0,
	    "Number broadcast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
	    &sc->stats.tx_frames_multi, 0,
	    "Number multicast frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
	    CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
	    "Number pause frames transmitted");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
	    &sc->stats.tx_frames_64b, 0,
	    "Number frames transmitted of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
	    &sc->stats.tx_frames_65to127b, 0,
	    "Number frames transmitted of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
	    "Number frames transmitted of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
	    "Number frames transmitted of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
	    "Number frames transmitted of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
	    "Number frames transmitted of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
	    CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
	    "Number transmit under-run events");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
	    CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
	    "Number single-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
	    CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
	    "Number multi-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
	    CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
	    "Number excessive collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
	    CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
	    "Number late-collision transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
	    CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
	    "Number deferred transmit frames");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
	    CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
	    "Number carrier sense errors on transmit");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &sc->stats.rx_bytes, "Total bytes received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
	    &sc->stats.rx_frames, 0, "Total frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
	    CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
	    "Number broadcast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
	    CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
	    "Number multicast frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
	    CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
	    "Number pause frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
	    CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
	    "Number frames received of size 64 bytes or less");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
	    CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
	    "Number frames received of size 65-127 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
	    CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
	    "Number frames received of size 128-255 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
	    CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
	    "Number frames received of size 256-511 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
	    CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
	    "Number frames received of size 512-1023 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
	    CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
	    "Number frames received of size 1024-1536 bytes");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
	    CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
	    "Number undersize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
	    CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
	    "Number oversize frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
	    CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
	    "Number jabber frames received");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
	    "Number frames received with FCS errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
	    CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
	    "Number frames received with length errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
	    CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
	    "Number receive symbol errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
	    CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
	    "Number receive alignment errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
	    CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
	    "Number frames received when no rx buffer available");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
	    CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
	    "Number frames received but not copied due to receive overrun");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
	    "Number frames received with IP header checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
	    "Number frames received with TCP checksum errors");

	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
	    CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
	    "Number frames received with UDP checksum errors");
}
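/*
 * Note: since the driver is named "cgem", the nodes created above appear
 * under dev.cgem.<unit>; illustrative paths are dev.cgem.0.rxbufs and
 * dev.cgem.0.stats.rx_frames.
 */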
static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	if_t ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];
	int hwtype;

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Key off of compatible string and set hardware-specific options. */
	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_ZYNQMP)
		sc->neednullqs = 1;
	if (hwtype == HWTYPE_ZYNQ)
		sc->rxhangwar = 1;

	if (hwtype == HWTYPE_ZYNQ || hwtype == HWTYPE_ZYNQMP) {
		if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0)
			device_printf(dev,
			    "could not retrieve reference clock.\n");
		else if (clk_enable(sc->ref_clk) != 0)
			device_printf(dev, "could not enable clock.\n");
	} else if (hwtype == HWTYPE_SIFIVE) {
		if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0)
			device_printf(dev,
			    "could not retrieve reference clock.\n");
		else if (clk_enable(sc->ref_clk) != 0)
			device_printf(dev, "could not enable clock.\n");
	}

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setinitfn(ifp, cgem_init);
	if_setioctlfn(ifp, cgem_ioctl);
	if_setstartfn(ifp, cgem_start);
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0);
	if_setsendqlen(ifp, CGEM_NUM_TX_DESCS);
	if_setsendqready(ifp);

	/* Disable hardware checksumming by default. */
	if_sethwassist(ifp, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp) &
	    ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM));

	sc->if_old_flags = if_getflags(ifp);
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
	    cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err)
		device_printf(dev, "warning: attaching PHYs failed\n");

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}

static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag,
			    sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
			sc->txring_physaddr = 0;
			sc->null_qs_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
		    sc->rxring_dma_map);
		sc->rxring = NULL;
		sc->txring = NULL;
		sc->null_qs = NULL;

		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	if (sc->ref_clk != NULL) {
		clk_release(sc->ref_clk);
		sc->ref_clk = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);