/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A network interface driver for Cadence GEM Gigabit Ethernet
 * interface such as the one used in Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <dev/extres/clk/clk.h>

#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT
#define CGEM64
#endif

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME		"cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define HWQUIRK_NONE		0
#define HWQUIRK_NEEDNULLQS	1
#define HWQUIRK_RXHANGWAR	2

static struct ofw_compat_data compat_data[] = {
	{ "cdns,zynq-gem",		HWQUIRK_RXHANGWAR }, /* Deprecated */
	{ "cdns,zynqmp-gem",		HWQUIRK_NEEDNULLQS }, /* Deprecated */
	{ "xlnx,zynq-gem",		HWQUIRK_RXHANGWAR },
	{ "xlnx,zynqmp-gem",		HWQUIRK_NEEDNULLQS },
	{ "microchip,mpfs-mss-gem",	HWQUIRK_NEEDNULLQS },
	{ "sifive,fu540-c000-gem",	HWQUIRK_NONE },
	{ "sifive,fu740-c000-gem",	HWQUIRK_NONE },
	{ NULL,				0 }
};

struct cgem_softc {
	if_t			ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	uint32_t		net_cfg_shadow;
	clk_t			clk_pclk;
	clk_t			clk_hclk;
	clk_t			clk_txclk;
	clk_t			clk_rxclk;
	clk_t			clk_tsuclk;
	int			neednullqs;
	int			phy_contype;

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* null descriptor rings */
	void			*null_qs;
	bus_addr_t		null_qs_physaddr;

	/* hardware provided statistics */
	struct cgem_hw_stats {
		uint64_t	tx_bytes;
		uint32_t	tx_frames;
		uint32_t	tx_frames_bcast;
		uint32_t	tx_frames_multi;
		uint32_t	tx_frames_pause;
		uint32_t	tx_frames_64b;
		uint32_t	tx_frames_65to127b;
		uint32_t	tx_frames_128to255b;
		uint32_t	tx_frames_256to511b;
		uint32_t	tx_frames_512to1023b;
		uint32_t	tx_frames_1024to1536b;
		uint32_t	tx_under_runs;
		uint32_t	tx_single_collisn;
		uint32_t	tx_multi_collisn;
		uint32_t	tx_excsv_collisn;
		uint32_t	tx_late_collisn;
		uint32_t	tx_deferred_frames;
		uint32_t	tx_carrier_sense_errs;

		uint64_t	rx_bytes;
		uint32_t	rx_frames;
		uint32_t	rx_frames_bcast;
		uint32_t	rx_frames_multi;
		uint32_t	rx_frames_pause;
		uint32_t	rx_frames_64b;
		uint32_t	rx_frames_65to127b;
		uint32_t	rx_frames_128to255b;
		uint32_t	rx_frames_256to511b;
		uint32_t	rx_frames_512to1023b;
		uint32_t	rx_frames_1024to1536b;
		uint32_t	rx_frames_undersize;
		uint32_t	rx_frames_oversize;
		uint32_t	rx_frames_jabber;
		uint32_t	rx_frames_fcs_errs;
		uint32_t	rx_frames_length_errs;
		uint32_t	rx_symbol_errs;
		uint32_t	rx_align_errs;
		uint32_t	rx_resource_errs;
		uint32_t	rx_overrun_errs;
		uint32_t	rx_ip_hdr_csum_errs;
		uint32_t	rx_tcp_csum_errs;
		uint32_t	rx_udp_csum_errs;
	} stats;
};

#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	mtx_init(&(sc)->sc_mtx, \
	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* Allow platforms to optionally provide a way to set the reference clock. */
int cgem_set_ref_clk(int unit, int frequency);

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void cgem_mediachange(struct cgem_softc *, struct mii_data *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n", eaddr[0],
		    eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}

/*
 * cgem_mac_hash():  map 48-bit address to a 6-bit hash.  The 6-bit hash
 * corresponds to a bit in a 64-bit hash register.  Setting that bit in the
 * hash register enables reception of all frames with a destination address
 * that hashes to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}
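/*
 * Worked example of the hash above (illustrative only; broadcast frames are
 * accepted via the IFF_BROADCAST path, not the hash): ff:ff:ff:ff:ff:ff has
 * all 48 bits set, so each of the six hash bits is the exclusive-or of eight
 * ones and comes out 0, i.e. that address would select bit 0 of the 64-bit
 * hash.  The low 32 hash bits live in CGEM_HASH_BOT and the high 32 in
 * CGEM_HASH_TOP, as cgem_hash_maddr() and cgem_rx_filter() below show.
 */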
static u_int
cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int index;

	index = cgem_mac_hash(LLADDR(sdl));
	if (index > 31)
		hashes[0] |= (1U << (index - 32));
	else
		hashes[1] |= (1U << index);

	return (1);
}

/*
 * After any change in rx flags or multi-cast addresses, set up hash registers
 * and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t hashes[2] = { 0, 0 };

	sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
	    CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);

	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xffffffff;
			hashes[1] = 0xffffffff;
		} else
			if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);

		if (hashes[0] != 0 || hashes[1] != 0)
			sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hashes[0]);
	WR4(sc, CGEM_HASH_BOT, hashes[1]);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/* Set up null queues for priority queues we actually can't disable. */
static void
cgem_null_qs(struct cgem_softc *sc)
{
	struct cgem_rx_desc *rx_desc;
	struct cgem_tx_desc *tx_desc;
	uint32_t queue_mask;
	int n;

	/* Read design config register 6 to determine number of queues. */
	queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
	    CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
	if (queue_mask == 0)
		return;

	/* Create empty RX queue and empty TX buf queues. */
	memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
	    sizeof(struct cgem_tx_desc));
	rx_desc = sc->null_qs;
	rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
	tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
	tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;

	/* Point all valid ring base pointers to the null queues. */
	for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
		WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
		WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
		    sizeof(struct cgem_rx_desc));
	}
}

/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;
	int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
	    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

	if (sc->neednullqs)
		desc_rings_size += sizeof(struct cgem_rx_desc) +
		    sizeof(struct cgem_tx_desc);

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
#ifdef CGEM64
	    1ULL << 32,	/* Do not cross a 4G boundary. */
#else
	    0,
#endif
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    desc_rings_size, 1, desc_rings_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
	    &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/*
	 * Allocate DMA memory.  We allocate transmit, receive and null
	 * descriptor queues all at once because the hardware only provides
	 * one register for the upper 32 bits of rx and tx descriptor queues
	 * hardware addresses.
	 */
	err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
	    (void *)sc->rxring, desc_rings_size,
	    cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		sc->rxring_m_dmamap[i] = NULL;
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
	sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
	    sizeof(struct cgem_rx_desc);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		sc->txring_m_dmamap[i] = NULL;
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	if (sc->neednullqs) {
		sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
		sc->null_qs_physaddr = sc->txring_physaddr +
		    CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);

		cgem_null_qs(sc);
	}

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
		    segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[sc->rxring_hd_ptr]);
			sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL;
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_hd_ptr],
		    BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
#ifdef CGEM64
		sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
#endif
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
			    CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}

/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	    (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr],
		    BUS_DMASYNC_POSTREAD);

		/* Unload and destroy dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
		sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL;

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/*
		 * Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/*
		 * Are we using hardware checksumming?  Check the status in the
		 * receive descriptor.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if_input(ifp, m);
	}
	CGEM_LOCK(sc);
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	    ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
	    CGEM_TXDESC_USED) != 0) {
		/* Sync cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr],
		    BUS_DMASYNC_POSTWRITE);

		/* Unload and destroy DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		bus_dmamap_destroy(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_tl_ptr]);
		sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL;

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
#ifdef CGEM64
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
			    sc->txring[sc->txring_tl_ptr].addrhi,
			    sc->txring[sc->txring_tl_ptr].addr);
#else
			device_printf(sc->dev,
			    "cgem_clean_tx: AHB error, addr=0x%x\n",
			    sc->txring[sc->txring_tl_ptr].addr);
#endif
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
		    CGEM_TXDESC_LATE_COLL)) != 0) {
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		} else
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);

		/*
		 * If the packet spanned more than one tx descriptor, skip
		 * descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
			    ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
	}
}

/* Start transmits. */
static void
cgem_start_locked(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* Create and load DMA map. */
		if (bus_dmamap_create(sc->mbuf_dma_tag, 0,
		    &sc->txring_m_dmamap[sc->txring_hd_ptr])) {
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				bus_dmamap_destroy(sc->mbuf_dma_tag,
				    sc->txring_m_dmamap[sc->txring_hd_ptr]);
				sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs,
			    &nsegs, BUS_DMA_NOWAIT);
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[sc->txring_hd_ptr]);
			sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL;
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
		    sc->txring_m_dmamap[sc->txring_hd_ptr],
		    BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
		    CGEM_NUM_TX_DESCS;

		/*
		 * Fill in the TX descriptors back to front so that USED bit in
		 * first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
			    segs[i].ds_addr;
#ifdef CGEM64
			sc->txring[sc->txring_hd_ptr + i].addrhi =
			    segs[i].ds_addr >> 32;
#endif
			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}

static void
cgem_start(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/* Check for receiver hang. */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	if_t ifp = sc->ifp;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev,
		    "cgem_intr: hresp not okay! rx_status=0x%x\n",
		    RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!if_sendq_empty(ifp))
		cgem_start_locked(ifp);

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Determine data bus width from design configuration register. */
	switch (RD4(sc, CGEM_DESIGN_CFG1) &
	    CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
		break;
	case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
		break;
	default:
		sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
	}

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}

/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t dma_cfg;
	u_char *eaddr = if_getlladdr(ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
	    CGEM_NET_CFG_DBUS_WIDTH_MASK);
	sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);

	/* Check connection type, enable SGMII bits if necessary. */
	if (sc->phy_contype == MII_CONTYPE_SGMII) {
		sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN;
		sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL;
	}

	/* Enable receive checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
#ifdef CGEM64
	    CGEM_DMA_CFG_ADDR_BUS_64 |
#endif
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
#ifdef CGEM64
	WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
	WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
#endif

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}

/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_mediachg(mii);
	}

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		if (sc->txring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->txring_m_dmamap[i]);
			sc->txring_m_dmamap[i] = NULL;
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		if (sc->rxring_m[i]) {
			/* Unload and destroy dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			bus_dmamap_destroy(sc->mbuf_dma_tag,
			    sc->rxring_m_dmamap[i]);
			sc->rxring_m_dmamap[i] = NULL;

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Force next statchg or linkchg to program net config register. */
	sc->mii_media_active = 0;
}

static int
cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			cgem_stop(sc);
		}
		sc->if_old_flags = if_getflags(ifp);
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus == NULL)
			return (ENXIO);
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				if_setcapenablebit(ifp, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6, 0);
				if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST);

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				if_setcapenablebit(ifp, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6, 0);
				sc->net_cfg_shadow |=
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			} else {
				/* Turn off RX checksumming. */
				if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				sc->net_cfg_shadow &=
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
				WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
			}
		}
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0);
		else
			if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM);

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * MII bus support routines.
 */
static int
cgem_ifmedia_upd(if_t ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			PHY_RESET(miisc);
		error = mii_mediachg(mii);
	}
	CGEM_UNLOCK(sc);

	return (error);
}

static void
cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp);
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	if (reg == MII_EXTSR)
		/*
		 * MAC does not support half-duplex at gig speeds.
		 * Let mii(4) exclude the capability.
		 */
		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 |
	    CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}

static void
cgem_miibus_statchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

static void
cgem_miibus_linkchg(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);

	CGEM_ASSERT_LOCKED(sc);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID) &&
	    sc->mii_media_active != mii->mii_media_active)
		cgem_mediachange(sc, mii);
}

/*
 * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
 * provide a function to set the cgem's reference clock.
 */
static int __used
cgem_default_set_ref_clk(int unit, int frequency)
{

	return 0;
}
__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
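/*
 * A platform that lacks a clock driver can supply the reference-clock hook by
 * defining its own (strong) cgem_set_ref_clk() to replace the weak default
 * above.  A minimal sketch, assuming a hypothetical SoC helper
 * soc_gem_set_refclk() (not part of this driver):
 *
 *	int
 *	cgem_set_ref_clk(int unit, int frequency)
 *	{
 *
 *		return (soc_gem_set_refclk(unit, frequency));
 *	}
 */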
"Receive DMA map failures"); 1538 1539 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, 1540 &sc->txfull, 0, "Transmit ring full events"); 1541 1542 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, 1543 &sc->txdmamapfails, 0, "Transmit DMA map failures"); 1544 1545 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, 1546 &sc->txdefrags, 0, "Transmit m_defrag() calls"); 1547 1548 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD, 1549 &sc->txdefragfails, 0, "Transmit m_defrag() failures"); 1550 1551 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 1552 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics"); 1553 child = SYSCTL_CHILDREN(tree); 1554 1555 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, 1556 &sc->stats.tx_bytes, "Total bytes transmitted"); 1557 1558 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, 1559 &sc->stats.tx_frames, 0, "Total frames transmitted"); 1560 1561 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, 1562 &sc->stats.tx_frames_bcast, 0, 1563 "Number broadcast frames transmitted"); 1564 1565 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, 1566 &sc->stats.tx_frames_multi, 0, 1567 "Number multicast frames transmitted"); 1568 1569 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", 1570 CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, 1571 "Number pause frames transmitted"); 1572 1573 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, 1574 &sc->stats.tx_frames_64b, 0, 1575 "Number frames transmitted of size 64 bytes or less"); 1576 1577 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD, 1578 &sc->stats.tx_frames_65to127b, 0, 1579 "Number frames transmitted of size 65-127 bytes"); 1580 1581 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", 1582 CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, 1583 "Number frames transmitted of size 128-255 bytes"); 1584 1585 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", 1586 CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, 1587 "Number frames transmitted of size 256-511 bytes"); 1588 1589 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", 1590 CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, 1591 "Number frames transmitted of size 512-1023 bytes"); 1592 1593 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", 1594 CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, 1595 "Number frames transmitted of size 1024-1536 bytes"); 1596 1597 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", 1598 CTLFLAG_RD, &sc->stats.tx_under_runs, 0, 1599 "Number transmit under-run events"); 1600 1601 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn", 1602 CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, 1603 "Number single-collision transmit frames"); 1604 1605 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", 1606 CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, 1607 "Number multi-collision transmit frames"); 1608 1609 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", 1610 CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, 1611 "Number excessive collision transmit frames"); 1612 1613 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", 1614 CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, 1615 "Number late-collision transmit frames"); 1616 1617 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", 1618 CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, 1619 "Number deferred transmit frames"); 1620 1621 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"tx_carrier_sense_errs", 1622 CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, 1623 "Number carrier sense errors on transmit"); 1624 1625 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, 1626 &sc->stats.rx_bytes, "Total bytes received"); 1627 1628 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, 1629 &sc->stats.rx_frames, 0, "Total frames received"); 1630 1631 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", 1632 CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, 1633 "Number broadcast frames received"); 1634 1635 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", 1636 CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, 1637 "Number multicast frames received"); 1638 1639 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", 1640 CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, 1641 "Number pause frames received"); 1642 1643 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", 1644 CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, 1645 "Number frames received of size 64 bytes or less"); 1646 1647 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", 1648 CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, 1649 "Number frames received of size 65-127 bytes"); 1650 1651 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", 1652 CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, 1653 "Number frames received of size 128-255 bytes"); 1654 1655 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", 1656 CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, 1657 "Number frames received of size 256-511 bytes"); 1658 1659 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", 1660 CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, 1661 "Number frames received of size 512-1023 bytes"); 1662 1663 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", 1664 CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, 1665 "Number frames received of size 1024-1536 bytes"); 1666 1667 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize", 1668 CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, 1669 "Number undersize frames received"); 1670 1671 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", 1672 CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, 1673 "Number oversize frames received"); 1674 1675 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", 1676 CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, 1677 "Number jabber frames received"); 1678 1679 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", 1680 CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, 1681 "Number frames received with FCS errors"); 1682 1683 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", 1684 CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, 1685 "Number frames received with length errors"); 1686 1687 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", 1688 CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, 1689 "Number receive symbol errors"); 1690 1691 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", 1692 CTLFLAG_RD, &sc->stats.rx_align_errs, 0, 1693 "Number receive alignment errors"); 1694 1695 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", 1696 CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, 1697 "Number frames received when no rx buffer available"); 1698 1699 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", 1700 CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, 1701 "Number frames received but not copied due to receive overrun"); 1702 1703 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", 1704 CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, 1705 "Number frames 
received with IP header checksum errors"); 1706 1707 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", 1708 CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0, 1709 "Number frames received with TCP checksum errors"); 1710 1711 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", 1712 CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, 1713 "Number frames received with UDP checksum errors"); 1714 } 1715 1716 static int 1717 cgem_probe(device_t dev) 1718 { 1719 1720 if (!ofw_bus_status_okay(dev)) 1721 return (ENXIO); 1722 1723 if (ofw_bus_search_compatible(dev, compat_data)->ocd_str == NULL) 1724 return (ENXIO); 1725 1726 device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); 1727 return (0); 1728 } 1729 1730 static int 1731 cgem_attach(device_t dev) 1732 { 1733 struct cgem_softc *sc = device_get_softc(dev); 1734 if_t ifp = NULL; 1735 int rid, err; 1736 u_char eaddr[ETHER_ADDR_LEN]; 1737 int hwquirks; 1738 phandle_t node; 1739 1740 sc->dev = dev; 1741 CGEM_LOCK_INIT(sc); 1742 1743 /* Key off of compatible string and set hardware-specific options. */ 1744 hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data; 1745 if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0) 1746 sc->neednullqs = 1; 1747 if ((hwquirks & HWQUIRK_RXHANGWAR) != 0) 1748 sc->rxhangwar = 1; 1749 /* 1750 * Both pclk and hclk are mandatory but we don't have a proper 1751 * clock driver for Zynq so don't make it fatal if we can't 1752 * get them. 1753 */ 1754 if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->clk_pclk) != 0) 1755 device_printf(dev, 1756 "could not retrieve pclk.\n"); 1757 else { 1758 if (clk_enable(sc->clk_pclk) != 0) 1759 device_printf(dev, "could not enable pclk.\n"); 1760 } 1761 if (clk_get_by_ofw_name(dev, 0, "hclk", &sc->clk_hclk) != 0) 1762 device_printf(dev, 1763 "could not retrieve hclk.\n"); 1764 else { 1765 if (clk_enable(sc->clk_hclk) != 0) 1766 device_printf(dev, "could not enable hclk.\n"); 1767 } 1768 1769 /* Optional clocks */ 1770 if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->clk_txclk) == 0) { 1771 if (clk_enable(sc->clk_txclk) != 0) { 1772 device_printf(dev, "could not enable tx_clk.\n"); 1773 err = ENXIO; 1774 goto err_pclk; 1775 } 1776 } 1777 if (clk_get_by_ofw_name(dev, 0, "rx_clk", &sc->clk_rxclk) == 0) { 1778 if (clk_enable(sc->clk_rxclk) != 0) { 1779 device_printf(dev, "could not enable rx_clk.\n"); 1780 err = ENXIO; 1781 goto err_tx_clk; 1782 } 1783 } 1784 if (clk_get_by_ofw_name(dev, 0, "tsu_clk", &sc->clk_tsuclk) == 0) { 1785 if (clk_enable(sc->clk_tsuclk) != 0) { 1786 device_printf(dev, "could not enable tsu_clk.\n"); 1787 err = ENXIO; 1788 goto err_rx_clk; 1789 } 1790 } 1791 1792 node = ofw_bus_get_node(dev); 1793 sc->phy_contype = mii_fdt_get_contype(node); 1794 1795 /* Get memory resource. */ 1796 rid = 0; 1797 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1798 RF_ACTIVE); 1799 if (sc->mem_res == NULL) { 1800 device_printf(dev, "could not allocate memory resources.\n"); 1801 err = ENOMEM; 1802 goto err_tsu_clk; 1803 } 1804 1805 /* Get IRQ resource. */ 1806 rid = 0; 1807 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1808 RF_ACTIVE); 1809 if (sc->irq_res == NULL) { 1810 device_printf(dev, "could not allocate interrupt resource.\n"); 1811 cgem_detach(dev); 1812 return (ENOMEM); 1813 } 1814 1815 /* Set up ifnet structure. 
*/ 1816 ifp = sc->ifp = if_alloc(IFT_ETHER); 1817 if (ifp == NULL) { 1818 device_printf(dev, "could not allocate ifnet structure\n"); 1819 cgem_detach(dev); 1820 return (ENOMEM); 1821 } 1822 if_setsoftc(ifp, sc); 1823 if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev)); 1824 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1825 if_setinitfn(ifp, cgem_init); 1826 if_setioctlfn(ifp, cgem_ioctl); 1827 if_setstartfn(ifp, cgem_start); 1828 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | 1829 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0); 1830 if_setsendqlen(ifp, CGEM_NUM_TX_DESCS); 1831 if_setsendqready(ifp); 1832 1833 /* Disable hardware checksumming by default. */ 1834 if_sethwassist(ifp, 0); 1835 if_setcapenable(ifp, if_getcapabilities(ifp) & 1836 ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM)); 1837 1838 sc->if_old_flags = if_getflags(ifp); 1839 sc->rxbufs = DEFAULT_NUM_RX_BUFS; 1840 1841 /* Reset hardware. */ 1842 CGEM_LOCK(sc); 1843 cgem_reset(sc); 1844 CGEM_UNLOCK(sc); 1845 1846 /* Attach phy to mii bus. */ 1847 err = mii_attach(dev, &sc->miibus, ifp, 1848 cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK, 1849 MII_PHY_ANY, MII_OFFSET_ANY, 0); 1850 if (err) 1851 device_printf(dev, "warning: attaching PHYs failed\n"); 1852 1853 /* Set up TX and RX descriptor area. */ 1854 err = cgem_setup_descs(sc); 1855 if (err) { 1856 device_printf(dev, "could not set up dma mem for descs.\n"); 1857 cgem_detach(dev); 1858 goto err; 1859 } 1860 1861 /* Get a MAC address. */ 1862 cgem_get_mac(sc, eaddr); 1863 1864 /* Start ticks. */ 1865 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); 1866 1867 ether_ifattach(ifp, eaddr); 1868 1869 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE | 1870 INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand); 1871 if (err) { 1872 device_printf(dev, "could not set interrupt handler.\n"); 1873 ether_ifdetach(ifp); 1874 cgem_detach(dev); 1875 goto err; 1876 } 1877 1878 cgem_add_sysctls(dev); 1879 1880 return (0); 1881 1882 err_tsu_clk: 1883 if (sc->clk_tsuclk) 1884 clk_release(sc->clk_tsuclk); 1885 err_rx_clk: 1886 if (sc->clk_rxclk) 1887 clk_release(sc->clk_rxclk); 1888 err_tx_clk: 1889 if (sc->clk_txclk) 1890 clk_release(sc->clk_txclk); 1891 err_pclk: 1892 if (sc->clk_pclk) 1893 clk_release(sc->clk_pclk); 1894 if (sc->clk_hclk) 1895 clk_release(sc->clk_hclk); 1896 err: 1897 return (err); 1898 } 1899 1900 static int 1901 cgem_detach(device_t dev) 1902 { 1903 struct cgem_softc *sc = device_get_softc(dev); 1904 int i; 1905 1906 if (sc == NULL) 1907 return (ENODEV); 1908 1909 if (device_is_attached(dev)) { 1910 CGEM_LOCK(sc); 1911 cgem_stop(sc); 1912 CGEM_UNLOCK(sc); 1913 callout_drain(&sc->tick_ch); 1914 if_setflagbits(sc->ifp, 0, IFF_UP); 1915 ether_ifdetach(sc->ifp); 1916 } 1917 1918 if (sc->miibus != NULL) { 1919 device_delete_child(dev, sc->miibus); 1920 sc->miibus = NULL; 1921 } 1922 1923 /* Release resources. */ 1924 if (sc->mem_res != NULL) { 1925 bus_release_resource(dev, SYS_RES_MEMORY, 1926 rman_get_rid(sc->mem_res), sc->mem_res); 1927 sc->mem_res = NULL; 1928 } 1929 if (sc->irq_res != NULL) { 1930 if (sc->intrhand) 1931 bus_teardown_intr(dev, sc->irq_res, sc->intrhand); 1932 bus_release_resource(dev, SYS_RES_IRQ, 1933 rman_get_rid(sc->irq_res), sc->irq_res); 1934 sc->irq_res = NULL; 1935 } 1936 1937 /* Release DMA resources. 
*/ 1938 if (sc->rxring != NULL) { 1939 if (sc->rxring_physaddr != 0) { 1940 bus_dmamap_unload(sc->desc_dma_tag, 1941 sc->rxring_dma_map); 1942 sc->rxring_physaddr = 0; 1943 sc->txring_physaddr = 0; 1944 sc->null_qs_physaddr = 0; 1945 } 1946 bus_dmamem_free(sc->desc_dma_tag, sc->rxring, 1947 sc->rxring_dma_map); 1948 sc->rxring = NULL; 1949 sc->txring = NULL; 1950 sc->null_qs = NULL; 1951 1952 for (i = 0; i < CGEM_NUM_RX_DESCS; i++) 1953 if (sc->rxring_m_dmamap[i] != NULL) { 1954 bus_dmamap_destroy(sc->mbuf_dma_tag, 1955 sc->rxring_m_dmamap[i]); 1956 sc->rxring_m_dmamap[i] = NULL; 1957 } 1958 for (i = 0; i < CGEM_NUM_TX_DESCS; i++) 1959 if (sc->txring_m_dmamap[i] != NULL) { 1960 bus_dmamap_destroy(sc->mbuf_dma_tag, 1961 sc->txring_m_dmamap[i]); 1962 sc->txring_m_dmamap[i] = NULL; 1963 } 1964 } 1965 if (sc->desc_dma_tag != NULL) { 1966 bus_dma_tag_destroy(sc->desc_dma_tag); 1967 sc->desc_dma_tag = NULL; 1968 } 1969 if (sc->mbuf_dma_tag != NULL) { 1970 bus_dma_tag_destroy(sc->mbuf_dma_tag); 1971 sc->mbuf_dma_tag = NULL; 1972 } 1973 1974 bus_generic_detach(dev); 1975 1976 if (sc->clk_tsuclk) 1977 clk_release(sc->clk_tsuclk); 1978 if (sc->clk_rxclk) 1979 clk_release(sc->clk_rxclk); 1980 if (sc->clk_txclk) 1981 clk_release(sc->clk_txclk); 1982 if (sc->clk_pclk) 1983 clk_release(sc->clk_pclk); 1984 if (sc->clk_hclk) 1985 clk_release(sc->clk_hclk); 1986 1987 CGEM_LOCK_DESTROY(sc); 1988 1989 return (0); 1990 } 1991 1992 static device_method_t cgem_methods[] = { 1993 /* Device interface */ 1994 DEVMETHOD(device_probe, cgem_probe), 1995 DEVMETHOD(device_attach, cgem_attach), 1996 DEVMETHOD(device_detach, cgem_detach), 1997 1998 /* MII interface */ 1999 DEVMETHOD(miibus_readreg, cgem_miibus_readreg), 2000 DEVMETHOD(miibus_writereg, cgem_miibus_writereg), 2001 DEVMETHOD(miibus_statchg, cgem_miibus_statchg), 2002 DEVMETHOD(miibus_linkchg, cgem_miibus_linkchg), 2003 2004 DEVMETHOD_END 2005 }; 2006 2007 static driver_t cgem_driver = { 2008 "cgem", 2009 cgem_methods, 2010 sizeof(struct cgem_softc), 2011 }; 2012 2013 DRIVER_MODULE(cgem, simplebus, cgem_driver, NULL, NULL); 2014 DRIVER_MODULE(miibus, cgem, miibus_driver, NULL, NULL); 2015 MODULE_DEPEND(cgem, miibus, 1, 1, 1); 2016 MODULE_DEPEND(cgem, ether, 1, 1, 1); 2017 SIMPLEBUS_PNP_INFO(compat_data); 2018