1 /*- 2 * Copyright (c) 2020 Michael J Karels 3 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca> 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* 30 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller 31 * 32 * This driver is derived in large part from bcmgenet.c from NetBSD by 33 * Jared McNeill. Parts of the structure and other common code in 34 * this driver have been copied from if_awg.c for the Allwinner EMAC, 35 * also by Jared McNeill. 36 */ 37 38 #include "opt_device_polling.h" 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/bus.h> 46 #include <sys/rman.h> 47 #include <sys/kernel.h> 48 #include <sys/endian.h> 49 #include <sys/mbuf.h> 50 #include <sys/socket.h> 51 #include <sys/sockio.h> 52 #include <sys/module.h> 53 #include <sys/taskqueue.h> 54 #include <sys/gpio.h> 55 56 #include <net/bpf.h> 57 #include <net/if.h> 58 #include <net/ethernet.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 #include <net/if_var.h> 63 64 #include <machine/bus.h> 65 66 #include <dev/ofw/ofw_bus.h> 67 #include <dev/ofw/ofw_bus_subr.h> 68 69 #define __BIT(_x) (1 << (_x)) 70 #include "if_genetreg.h" 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/miivar.h> 74 #include <dev/mii/mii_fdt.h> 75 76 #include <netinet/in.h> 77 #include <netinet/ip.h> 78 #include <netinet/ip6.h> 79 #define ICMPV6_HACK /* workaround for chip issue */ 80 #ifdef ICMPV6_HACK 81 #include <netinet/icmp6.h> 82 #endif 83 84 #include "syscon_if.h" 85 #include "miibus_if.h" 86 #include "gpio_if.h" 87 88 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_MAC], (reg)) 89 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_MAC], (reg), (val)) 90 91 #define GEN_LOCK(sc) mtx_lock(&(sc)->mtx) 92 #define GEN_UNLOCK(sc) mtx_unlock(&(sc)->mtx) 93 #define GEN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 94 #define GEN_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 95 96 #define TX_DESC_COUNT GENET_DMA_DESC_COUNT 97 #define RX_DESC_COUNT GENET_DMA_DESC_COUNT 98 99 #define TX_NEXT(n, count) (((n) + 1) & ((count) - 1)) 100 #define RX_NEXT(n, count) (((n) + 1) & ((count) - 1)) 101 102 103 #define TX_MAX_SEGS 20 104 105 /* Maximum number of mbufs to send to if_input */ 106 static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */; 107 TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch); 108 109 static struct ofw_compat_data compat_data[] = { 110 { "brcm,genet-v1", 1 }, 111 { "brcm,genet-v2", 2 }, 112 { "brcm,genet-v3", 3 }, 113 { "brcm,genet-v4", 4 }, 114 { "brcm,genet-v5", 5 }, 115 { NULL, 0 } 116 }; 117 118 enum { 119 _RES_MAC, /* what to call this? */ 120 _RES_IRQ1, 121 _RES_IRQ2, 122 _RES_NITEMS 123 }; 124 125 static struct resource_spec gen_spec[] = { 126 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 127 { SYS_RES_IRQ, 0, RF_ACTIVE }, 128 { SYS_RES_IRQ, 1, RF_ACTIVE }, 129 { -1, 0 } 130 }; 131 132 /* structure per ring entry */ 133 struct gen_ring_ent { 134 bus_dmamap_t map; 135 struct mbuf *mbuf; 136 }; 137 138 struct tx_queue { 139 int hwindex; /* hardware index */ 140 int nentries; 141 u_int queued; /* or avail? */ 142 u_int cur; 143 u_int next; 144 u_int prod_idx; 145 u_int cons_idx; 146 struct gen_ring_ent *entries; 147 }; 148 149 struct rx_queue { 150 int hwindex; /* hardware index */ 151 int nentries; 152 u_int cur; 153 u_int prod_idx; 154 u_int cons_idx; 155 struct gen_ring_ent *entries; 156 }; 157 158 struct gen_softc { 159 struct resource *res[_RES_NITEMS]; 160 struct mtx mtx; 161 if_t ifp; 162 device_t dev; 163 device_t miibus; 164 mii_contype_t phy_mode; 165 166 struct callout stat_ch; 167 struct task link_task; 168 void *ih; 169 void *ih2; 170 int type; 171 int if_flags; 172 int link; 173 bus_dma_tag_t tx_buf_tag; 174 /* 175 * The genet chip has multiple queues for transmit and receive. 176 * This driver uses only one (queue 16, the default), but is cast 177 * with multiple rings. The additional rings are used for different 178 * priorities. 179 */ 180 #define DEF_TXQUEUE 0 181 #define NTXQUEUE 1 182 struct tx_queue tx_queue[NTXQUEUE]; 183 struct gen_ring_ent tx_ring_ent[TX_DESC_COUNT]; /* ring entries */ 184 185 bus_dma_tag_t rx_buf_tag; 186 #define DEF_RXQUEUE 0 187 #define NRXQUEUE 1 188 struct rx_queue rx_queue[NRXQUEUE]; 189 struct gen_ring_ent rx_ring_ent[RX_DESC_COUNT]; /* ring entries */ 190 }; 191 192 static void gen_init(void *softc); 193 static void gen_start(if_t ifp); 194 static void gen_destroy(struct gen_softc *sc); 195 static int gen_encap(struct gen_softc *sc, struct mbuf **mp); 196 static int gen_parse_tx(struct mbuf *m, int csum_flags); 197 static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data); 198 static int gen_get_phy_mode(device_t dev); 199 static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr); 200 static void gen_set_enaddr(struct gen_softc *sc); 201 static void gen_setup_rxfilter(struct gen_softc *sc); 202 static void gen_reset(struct gen_softc *sc); 203 static void gen_enable(struct gen_softc *sc); 204 static void gen_dma_disable(device_t dev); 205 static int gen_bus_dma_init(struct gen_softc *sc); 206 static void gen_bus_dma_teardown(struct gen_softc *sc); 207 static void gen_enable_intr(struct gen_softc *sc); 208 static void gen_init_txrings(struct gen_softc *sc); 209 static void gen_init_rxrings(struct gen_softc *sc); 210 static void gen_intr(void *softc); 211 static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q); 212 static void gen_txintr(struct gen_softc *sc, struct tx_queue *q); 213 static void gen_intr2(void *softc); 214 static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index); 215 static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index, 216 struct mbuf *m); 217 static void gen_link_task(void *arg, int pending); 218 static void gen_media_status(if_t ifp, struct ifmediareq *ifmr); 219 static int gen_media_change(if_t ifp); 220 static void gen_tick(void *softc); 221 222 static int 223 gen_probe(device_t dev) 224 { 225 if (!ofw_bus_status_okay(dev)) 226 return (ENXIO); 227 228 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) 229 return (ENXIO); 230 231 device_set_desc(dev, "RPi4 Gigabit Ethernet"); 232 return (BUS_PROBE_DEFAULT); 233 } 234 235 static int 236 gen_attach(device_t dev) 237 { 238 struct ether_addr eaddr; 239 struct gen_softc *sc; 240 int major, minor, error; 241 bool eaddr_found; 242 243 sc = device_get_softc(dev); 244 sc->dev = dev; 245 sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; 246 247 if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) { 248 device_printf(dev, "cannot allocate resources for device\n"); 249 error = ENXIO; 250 goto fail; 251 } 252 253 major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT; 254 if (major != REV_MAJOR_V5) { 255 device_printf(dev, "version %d is not supported\n", major); 256 error = ENXIO; 257 goto fail; 258 } 259 minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT; 260 device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor, 261 RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY); 262 263 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 264 callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); 265 TASK_INIT(&sc->link_task, 0, gen_link_task, sc); 266 267 error = gen_get_phy_mode(dev); 268 if (error != 0) 269 goto fail; 270 271 bzero(&eaddr, sizeof(eaddr)); 272 eaddr_found = gen_get_eaddr(dev, &eaddr); 273 274 /* reset core */ 275 gen_reset(sc); 276 277 gen_dma_disable(dev); 278 279 /* Setup DMA */ 280 error = gen_bus_dma_init(sc); 281 if (error != 0) { 282 device_printf(dev, "cannot setup bus dma\n"); 283 goto fail; 284 } 285 286 /* Install interrupt handlers */ 287 error = bus_setup_intr(dev, sc->res[_RES_IRQ1], 288 INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih); 289 if (error != 0) { 290 device_printf(dev, "cannot setup interrupt handler1\n"); 291 goto fail; 292 } 293 294 error = bus_setup_intr(dev, sc->res[_RES_IRQ2], 295 INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2); 296 if (error != 0) { 297 device_printf(dev, "cannot setup interrupt handler2\n"); 298 goto fail; 299 } 300 301 /* Setup ethernet interface */ 302 sc->ifp = if_alloc(IFT_ETHER); 303 if_setsoftc(sc->ifp, sc); 304 if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); 305 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 306 if_setstartfn(sc->ifp, gen_start); 307 if_setioctlfn(sc->ifp, gen_ioctl); 308 if_setinitfn(sc->ifp, gen_init); 309 if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); 310 if_setsendqready(sc->ifp); 311 #define GEN_CSUM_FEATURES (CSUM_UDP | CSUM_TCP) 312 if_sethwassist(sc->ifp, GEN_CSUM_FEATURES); 313 if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | 314 IFCAP_HWCSUM_IPV6); 315 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); 316 317 /* Attach MII driver */ 318 error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change, 319 gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 320 MIIF_DOPAUSE); 321 if (error != 0) { 322 device_printf(dev, "cannot attach PHY\n"); 323 goto fail; 324 } 325 326 /* If address was not found, create one based on the hostid and name. */ 327 if (eaddr_found == 0) 328 ether_gen_addr(sc->ifp, &eaddr); 329 /* Attach ethernet interface */ 330 ether_ifattach(sc->ifp, eaddr.octet); 331 332 fail: 333 if (error) 334 gen_destroy(sc); 335 return (error); 336 } 337 338 /* Free resources after failed attach. This is not a complete detach. */ 339 static void 340 gen_destroy(struct gen_softc *sc) 341 { 342 343 if (sc->miibus) { /* can't happen */ 344 device_delete_child(sc->dev, sc->miibus); 345 sc->miibus = NULL; 346 } 347 bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih); 348 bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2); 349 gen_bus_dma_teardown(sc); 350 callout_drain(&sc->stat_ch); 351 if (mtx_initialized(&sc->mtx)) 352 mtx_destroy(&sc->mtx); 353 bus_release_resources(sc->dev, gen_spec, sc->res); 354 if (sc->ifp != NULL) { 355 if_free(sc->ifp); 356 sc->ifp = NULL; 357 } 358 } 359 360 static int 361 gen_get_phy_mode(device_t dev) 362 { 363 struct gen_softc *sc; 364 phandle_t node; 365 mii_contype_t type; 366 int error = 0; 367 368 sc = device_get_softc(dev); 369 node = ofw_bus_get_node(dev); 370 type = mii_fdt_get_contype(node); 371 372 switch (type) { 373 case MII_CONTYPE_RGMII: 374 case MII_CONTYPE_RGMII_RXID: 375 case MII_CONTYPE_RGMII_TXID: 376 sc->phy_mode = type; 377 break; 378 default: 379 device_printf(dev, "unknown phy-mode '%s'\n", 380 mii_fdt_contype_to_name(type)); 381 error = ENXIO; 382 break; 383 } 384 385 return (error); 386 } 387 388 static bool 389 gen_get_eaddr(device_t dev, struct ether_addr *eaddr) 390 { 391 struct gen_softc *sc; 392 uint32_t maclo, machi, val; 393 phandle_t node; 394 395 sc = device_get_softc(dev); 396 397 node = ofw_bus_get_node(dev); 398 if (OF_getprop(node, "mac-address", eaddr->octet, 399 ETHER_ADDR_LEN) != -1 || 400 OF_getprop(node, "local-mac-address", eaddr->octet, 401 ETHER_ADDR_LEN) != -1 || 402 OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1) 403 return (true); 404 405 device_printf(dev, "No Ethernet address found in fdt!\n"); 406 maclo = machi = 0; 407 408 val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL); 409 if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) { 410 maclo = htobe32(RD4(sc, GENET_UMAC_MAC0)); 411 machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff); 412 } 413 414 if (maclo == 0 && machi == 0) { 415 if (bootverbose) 416 device_printf(dev, 417 "No Ethernet address found in controller\n"); 418 return (false); 419 } else { 420 eaddr->octet[0] = maclo & 0xff; 421 eaddr->octet[1] = (maclo >> 8) & 0xff; 422 eaddr->octet[2] = (maclo >> 16) & 0xff; 423 eaddr->octet[3] = (maclo >> 24) & 0xff; 424 eaddr->octet[4] = machi & 0xff; 425 eaddr->octet[5] = (machi >> 8) & 0xff; 426 return (true); 427 } 428 } 429 430 static void 431 gen_reset(struct gen_softc *sc) 432 { 433 uint32_t val; 434 435 val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL); 436 val |= GENET_SYS_RBUF_FLUSH_RESET; 437 WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val); 438 DELAY(10); 439 440 val &= ~GENET_SYS_RBUF_FLUSH_RESET; 441 WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val); 442 DELAY(10); 443 444 WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0); 445 DELAY(10); 446 447 WR4(sc, GENET_UMAC_CMD, 0); 448 WR4(sc, GENET_UMAC_CMD, 449 GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET); 450 DELAY(10); 451 WR4(sc, GENET_UMAC_CMD, 0); 452 453 WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT | 454 GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX); 455 WR4(sc, GENET_UMAC_MIB_CTRL, 0); 456 457 WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536); 458 459 val = RD4(sc, GENET_RBUF_CTRL); 460 val |= GENET_RBUF_ALIGN_2B; 461 WR4(sc, GENET_RBUF_CTRL, val); 462 463 WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1); 464 } 465 466 static void 467 gen_enable(struct gen_softc *sc) 468 { 469 u_int val; 470 471 /* Enable transmitter and receiver */ 472 val = RD4(sc, GENET_UMAC_CMD); 473 val |= GENET_UMAC_CMD_TXEN; 474 val |= GENET_UMAC_CMD_RXEN; 475 WR4(sc, GENET_UMAC_CMD, val); 476 477 /* Enable interrupts */ 478 gen_enable_intr(sc); 479 WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 480 GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE); 481 } 482 483 static void 484 gen_enable_offload(struct gen_softc *sc) 485 { 486 uint32_t check_ctrl, buf_ctrl; 487 488 check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL); 489 buf_ctrl = RD4(sc, GENET_RBUF_CTRL); 490 if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) { 491 check_ctrl |= GENET_RBUF_CHECK_CTRL_EN; 492 buf_ctrl |= GENET_RBUF_64B_EN; 493 } else { 494 check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN; 495 buf_ctrl &= ~GENET_RBUF_64B_EN; 496 } 497 WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl); 498 WR4(sc, GENET_RBUF_CTRL, buf_ctrl); 499 500 buf_ctrl = RD4(sc, GENET_TBUF_CTRL); 501 if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 502 0) 503 buf_ctrl |= GENET_RBUF_64B_EN; 504 else 505 buf_ctrl &= ~GENET_RBUF_64B_EN; 506 WR4(sc, GENET_TBUF_CTRL, buf_ctrl); 507 } 508 509 static void 510 gen_dma_disable(device_t dev) 511 { 512 struct gen_softc *sc = device_get_softc(dev); 513 int val; 514 515 val = RD4(sc, GENET_TX_DMA_CTRL); 516 val &= ~GENET_TX_DMA_CTRL_EN; 517 val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE); 518 WR4(sc, GENET_TX_DMA_CTRL, val); 519 520 val = RD4(sc, GENET_RX_DMA_CTRL); 521 val &= ~GENET_RX_DMA_CTRL_EN; 522 val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE); 523 WR4(sc, GENET_RX_DMA_CTRL, val); 524 } 525 526 static int 527 gen_bus_dma_init(struct gen_softc *sc) 528 { 529 struct device *dev = sc->dev; 530 int i, error; 531 532 error = bus_dma_tag_create( 533 bus_get_dma_tag(dev), /* Parent tag */ 534 4, 0, /* alignment, boundary */ 535 BUS_SPACE_MAXADDR_40BIT, /* lowaddr */ 536 BUS_SPACE_MAXADDR, /* highaddr */ 537 NULL, NULL, /* filter, filterarg */ 538 MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ 539 MCLBYTES, /* maxsegsize */ 540 0, /* flags */ 541 NULL, NULL, /* lockfunc, lockarg */ 542 &sc->tx_buf_tag); 543 if (error != 0) { 544 device_printf(dev, "cannot create TX buffer tag\n"); 545 return (error); 546 } 547 548 for (i = 0; i < TX_DESC_COUNT; i++) { 549 error = bus_dmamap_create(sc->tx_buf_tag, 0, 550 &sc->tx_ring_ent[i].map); 551 if (error != 0) { 552 device_printf(dev, "cannot create TX buffer map\n"); 553 return (error); 554 } 555 } 556 557 error = bus_dma_tag_create( 558 bus_get_dma_tag(dev), /* Parent tag */ 559 4, 0, /* alignment, boundary */ 560 BUS_SPACE_MAXADDR_40BIT, /* lowaddr */ 561 BUS_SPACE_MAXADDR, /* highaddr */ 562 NULL, NULL, /* filter, filterarg */ 563 MCLBYTES, 1, /* maxsize, nsegs */ 564 MCLBYTES, /* maxsegsize */ 565 0, /* flags */ 566 NULL, NULL, /* lockfunc, lockarg */ 567 &sc->rx_buf_tag); 568 if (error != 0) { 569 device_printf(dev, "cannot create RX buffer tag\n"); 570 return (error); 571 } 572 573 for (i = 0; i < RX_DESC_COUNT; i++) { 574 error = bus_dmamap_create(sc->rx_buf_tag, 0, 575 &sc->rx_ring_ent[i].map); 576 if (error != 0) { 577 device_printf(dev, "cannot create RX buffer map\n"); 578 return (error); 579 } 580 } 581 return (0); 582 } 583 584 static void 585 gen_bus_dma_teardown(struct gen_softc *sc) 586 { 587 int i, error; 588 589 if (sc->tx_buf_tag != NULL) { 590 for (i = 0; i < TX_DESC_COUNT; i++) { 591 error = bus_dmamap_destroy(sc->tx_buf_tag, 592 sc->tx_ring_ent[i].map); 593 sc->tx_ring_ent[i].map = NULL; 594 if (error) 595 device_printf(sc->dev, 596 "%s: bus_dmamap_destroy failed: %d\n", 597 __func__, error); 598 } 599 error = bus_dma_tag_destroy(sc->tx_buf_tag); 600 sc->tx_buf_tag = NULL; 601 if (error) 602 device_printf(sc->dev, 603 "%s: bus_dma_tag_destroy failed: %d\n", __func__, 604 error); 605 } 606 607 if (sc->tx_buf_tag != NULL) { 608 for (i = 0; i < RX_DESC_COUNT; i++) { 609 error = bus_dmamap_destroy(sc->rx_buf_tag, 610 sc->rx_ring_ent[i].map); 611 sc->rx_ring_ent[i].map = NULL; 612 if (error) 613 device_printf(sc->dev, 614 "%s: bus_dmamap_destroy failed: %d\n", 615 __func__, error); 616 } 617 error = bus_dma_tag_destroy(sc->rx_buf_tag); 618 sc->rx_buf_tag = NULL; 619 if (error) 620 device_printf(sc->dev, 621 "%s: bus_dma_tag_destroy failed: %d\n", __func__, 622 error); 623 } 624 } 625 626 static void 627 gen_enable_intr(struct gen_softc *sc) 628 { 629 630 WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 631 GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE); 632 } 633 634 /* 635 * "queue" is the software queue index (0-4); "qid" is the hardware index 636 * (0-16). "base" is the starting index in the ring array. 637 */ 638 static void 639 gen_init_txring(struct gen_softc *sc, int queue, int qid, int base, 640 int nentries) 641 { 642 struct tx_queue *q; 643 uint32_t val; 644 645 q = &sc->tx_queue[queue]; 646 q->entries = &sc->tx_ring_ent[base]; 647 q->hwindex = qid; 648 q->nentries = nentries; 649 650 /* TX ring */ 651 652 q->queued = 0; 653 q->cons_idx = q->prod_idx = 0; 654 655 WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08); 656 657 WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0); 658 WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0); 659 WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0); 660 WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0); 661 WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid), 662 (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) | 663 (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK)); 664 WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0); 665 WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0); 666 WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid), 667 TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1); 668 WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0); 669 WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1); 670 WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0); 671 WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0); 672 WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0); 673 674 WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid)); /* enable */ 675 676 /* Enable transmit DMA */ 677 val = RD4(sc, GENET_TX_DMA_CTRL); 678 val |= GENET_TX_DMA_CTRL_EN; 679 val |= GENET_TX_DMA_CTRL_RBUF_EN(qid); 680 WR4(sc, GENET_TX_DMA_CTRL, val); 681 } 682 683 /* 684 * "queue" is the software queue index (0-4); "qid" is the hardware index 685 * (0-16). "base" is the starting index in the ring array. 686 */ 687 static void 688 gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base, 689 int nentries) 690 { 691 struct rx_queue *q; 692 uint32_t val; 693 int i; 694 695 q = &sc->rx_queue[queue]; 696 q->entries = &sc->rx_ring_ent[base]; 697 q->hwindex = qid; 698 q->nentries = nentries; 699 q->cons_idx = q->prod_idx = 0; 700 701 WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08); 702 703 WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0); 704 WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0); 705 WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0); 706 WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0); 707 WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid), 708 (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) | 709 (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK)); 710 WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0); 711 WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0); 712 WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid), 713 RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1); 714 WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0); 715 WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid), 716 (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4)); 717 WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0); 718 WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0); 719 720 WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid)); /* enable */ 721 722 /* fill ring */ 723 for (i = 0; i < RX_DESC_COUNT; i++) 724 gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i); 725 726 /* Enable receive DMA */ 727 val = RD4(sc, GENET_RX_DMA_CTRL); 728 val |= GENET_RX_DMA_CTRL_EN; 729 val |= GENET_RX_DMA_CTRL_RBUF_EN(qid); 730 WR4(sc, GENET_RX_DMA_CTRL, val); 731 } 732 733 static void 734 gen_init_txrings(struct gen_softc *sc) 735 { 736 int base = 0; 737 #ifdef PRI_RINGS 738 int i; 739 740 /* init priority rings */ 741 for (i = 0; i < PRI_RINGS; i++) { 742 gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT); 743 sc->tx_queue[i].queue = i; 744 base += TX_DESC_PRICOUNT; 745 dma_ring_conf |= 1 << i; 746 dma_control |= DMA_RENABLE(i); 747 } 748 #endif 749 750 /* init GENET_DMA_DEFAULT_QUEUE (16) */ 751 gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base, 752 TX_DESC_COUNT); 753 sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE; 754 } 755 756 static void 757 gen_init_rxrings(struct gen_softc *sc) 758 { 759 int base = 0; 760 #ifdef PRI_RINGS 761 int i; 762 763 /* init priority rings */ 764 for (i = 0; i < PRI_RINGS; i++) { 765 gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT); 766 sc->rx_queue[i].queue = i; 767 base += TX_DESC_PRICOUNT; 768 dma_ring_conf |= 1 << i; 769 dma_control |= DMA_RENABLE(i); 770 } 771 #endif 772 773 /* init GENET_DMA_DEFAULT_QUEUE (16) */ 774 gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base, 775 RX_DESC_COUNT); 776 sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE; 777 778 } 779 780 static void 781 gen_init_locked(struct gen_softc *sc) 782 { 783 struct mii_data *mii; 784 if_t ifp; 785 786 mii = device_get_softc(sc->miibus); 787 ifp = sc->ifp; 788 789 GEN_ASSERT_LOCKED(sc); 790 791 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 792 return; 793 794 if (sc->phy_mode == MII_CONTYPE_RGMII || 795 sc->phy_mode == MII_CONTYPE_RGMII_RXID) 796 WR4(sc, GENET_SYS_PORT_CTRL, 797 GENET_SYS_PORT_MODE_EXT_GPHY); 798 799 gen_set_enaddr(sc); 800 801 /* Setup RX filter */ 802 gen_setup_rxfilter(sc); 803 804 gen_init_txrings(sc); 805 gen_init_rxrings(sc); 806 gen_enable(sc); 807 gen_enable_offload(sc); 808 809 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 810 811 mii_mediachg(mii); 812 callout_reset(&sc->stat_ch, hz, gen_tick, sc); 813 } 814 815 static void 816 gen_init(void *softc) 817 { 818 struct gen_softc *sc; 819 820 sc = softc; 821 GEN_LOCK(sc); 822 gen_init_locked(sc); 823 GEN_UNLOCK(sc); 824 } 825 826 static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 827 828 static void 829 gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea) 830 { 831 uint32_t addr0 = (ea[0] << 8) | ea[1]; 832 uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]; 833 834 WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0); 835 WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1); 836 } 837 838 static u_int 839 gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count) 840 { 841 struct gen_softc *sc = arg; 842 843 /* "count + 2" to account for unicast and broadcast */ 844 gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl)); 845 return (1); /* increment to count */ 846 } 847 848 static void 849 gen_setup_rxfilter(struct gen_softc *sc) 850 { 851 struct ifnet *ifp = sc->ifp; 852 uint32_t cmd, mdf_ctrl; 853 u_int n; 854 855 GEN_ASSERT_LOCKED(sc); 856 857 cmd = RD4(sc, GENET_UMAC_CMD); 858 859 /* 860 * Count the required number of hardware filters. We need one 861 * for each multicast address, plus one for our own address and 862 * the broadcast address. 863 */ 864 n = if_llmaddr_count(ifp) + 2; 865 866 if (n > GENET_MAX_MDF_FILTER) 867 ifp->if_flags |= IFF_ALLMULTI; 868 else 869 ifp->if_flags &= ~IFF_ALLMULTI; 870 871 if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) { 872 cmd |= GENET_UMAC_CMD_PROMISC; 873 mdf_ctrl = 0; 874 } else { 875 cmd &= ~GENET_UMAC_CMD_PROMISC; 876 gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr); 877 gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp)); 878 (void) if_foreach_llmaddr(ifp, gen_setup_multi, sc); 879 mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~ 880 (__BIT(GENET_MAX_MDF_FILTER - n) - 1); 881 } 882 883 WR4(sc, GENET_UMAC_CMD, cmd); 884 WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl); 885 } 886 887 static void 888 gen_set_enaddr(struct gen_softc *sc) 889 { 890 uint8_t *enaddr; 891 uint32_t val; 892 if_t ifp; 893 894 GEN_ASSERT_LOCKED(sc); 895 896 ifp = sc->ifp; 897 898 /* Write our unicast address */ 899 enaddr = IF_LLADDR(ifp); 900 /* Write hardware address */ 901 val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) | 902 (enaddr[0] << 24); 903 WR4(sc, GENET_UMAC_MAC0, val); 904 val = enaddr[5] | (enaddr[4] << 8); 905 WR4(sc, GENET_UMAC_MAC1, val); 906 } 907 908 static void 909 gen_start_locked(struct gen_softc *sc) 910 { 911 struct mbuf *m; 912 if_t ifp; 913 int cnt, err; 914 915 GEN_ASSERT_LOCKED(sc); 916 917 if (!sc->link) 918 return; 919 920 ifp = sc->ifp; 921 922 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 923 IFF_DRV_RUNNING) 924 return; 925 926 for (cnt = 0; ; cnt++) { 927 m = if_dequeue(ifp); 928 if (m == NULL) 929 break; 930 931 err = gen_encap(sc, &m); 932 if (err != 0) { 933 if (err == ENOBUFS) 934 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 935 if (m != NULL) 936 if_sendq_prepend(ifp, m); 937 break; 938 } 939 if_bpfmtap(ifp, m); 940 } 941 } 942 943 static void 944 gen_start(if_t ifp) 945 { 946 struct gen_softc *sc; 947 948 sc = if_getsoftc(ifp); 949 950 GEN_LOCK(sc); 951 gen_start_locked(sc); 952 GEN_UNLOCK(sc); 953 } 954 955 /* Test for any delayed checksum */ 956 #define CSUM_DELAY_ANY (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP) 957 958 static int 959 gen_encap(struct gen_softc *sc, struct mbuf **mp) 960 { 961 bus_dmamap_t map; 962 bus_dma_segment_t segs[TX_MAX_SEGS]; 963 int error, nsegs, cur, first, i, index, offset; 964 uint32_t csuminfo, length_status, csum_flags = 0, csumdata; 965 struct mbuf *m; 966 struct statusblock *sb = NULL; 967 struct tx_queue *q; 968 struct gen_ring_ent *ent; 969 970 GEN_ASSERT_LOCKED(sc); 971 972 q = &sc->tx_queue[DEF_TXQUEUE]; 973 974 m = *mp; 975 #ifdef ICMPV6_HACK 976 /* 977 * Reflected ICMPv6 packets, e.g. echo replies, tend to get laid 978 * out with only the Ethernet header in the first mbuf, and this 979 * doesn't seem to work. 980 */ 981 #define ICMP6_LEN (sizeof(struct ether_header) + sizeof(struct ip6_hdr) + \ 982 sizeof(struct icmp6_hdr)) 983 if (m->m_len == sizeof(struct ether_header)) { 984 int ether_type = mtod(m, struct ether_header *)->ether_type; 985 if (ntohs(ether_type) == ETHERTYPE_IPV6 && 986 m->m_next->m_len >= sizeof(struct ip6_hdr)) { 987 struct ip6_hdr *ip6; 988 989 ip6 = mtod(m->m_next, struct ip6_hdr *); 990 if (ip6->ip6_nxt == IPPROTO_ICMPV6) { 991 m = m_pullup(m, 992 MIN(m->m_pkthdr.len, ICMP6_LEN)); 993 if (m == NULL) { 994 if (sc->ifp->if_flags & IFF_DEBUG) 995 device_printf(sc->dev, 996 "ICMPV6 pullup fail\n"); 997 *mp = NULL; 998 return (ENOMEM); 999 } 1000 } 1001 } 1002 } 1003 #undef ICMP6_LEN 1004 #endif 1005 if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 1006 0) { 1007 csum_flags = m->m_pkthdr.csum_flags; 1008 csumdata = m->m_pkthdr.csum_data; 1009 M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT); 1010 if (m == NULL) { 1011 if (sc->ifp->if_flags & IFF_DEBUG) 1012 device_printf(sc->dev, "prepend fail\n"); 1013 *mp = NULL; 1014 return (ENOMEM); 1015 } 1016 offset = gen_parse_tx(m, csum_flags); 1017 sb = mtod(m, struct statusblock *); 1018 if ((csum_flags & CSUM_DELAY_ANY) != 0) { 1019 csuminfo = (offset << TXCSUM_OFF_SHIFT) | 1020 (offset + csumdata); 1021 csuminfo |= TXCSUM_LEN_VALID; 1022 if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP)) 1023 csuminfo |= TXCSUM_UDP; 1024 sb->txcsuminfo = csuminfo; 1025 } else 1026 sb->txcsuminfo = 0; 1027 } 1028 1029 *mp = m; 1030 1031 cur = first = q->cur; 1032 ent = &q->entries[cur]; 1033 map = ent->map; 1034 error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs, 1035 &nsegs, BUS_DMA_NOWAIT); 1036 if (error == EFBIG) { 1037 m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS); 1038 if (m == NULL) { 1039 device_printf(sc->dev, 1040 "gen_encap: m_collapse failed\n"); 1041 m_freem(*mp); 1042 *mp = NULL; 1043 return (ENOMEM); 1044 } 1045 *mp = m; 1046 error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, 1047 segs, &nsegs, BUS_DMA_NOWAIT); 1048 if (error != 0) { 1049 m_freem(*mp); 1050 *mp = NULL; 1051 } 1052 } 1053 if (error != 0) { 1054 device_printf(sc->dev, 1055 "gen_encap: bus_dmamap_load_mbuf_sg failed\n"); 1056 return (error); 1057 } 1058 if (nsegs == 0) { 1059 m_freem(*mp); 1060 *mp = NULL; 1061 return (EIO); 1062 } 1063 1064 /* Remove statusblock after mapping, before possible requeue or bpf. */ 1065 if (sb != NULL) { 1066 m->m_data += sizeof(struct statusblock); 1067 m->m_len -= sizeof(struct statusblock); 1068 m->m_pkthdr.len -= sizeof(struct statusblock); 1069 } 1070 if (q->queued + nsegs > q->nentries) { 1071 bus_dmamap_unload(sc->tx_buf_tag, map); 1072 return (ENOBUFS); 1073 } 1074 1075 bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE); 1076 1077 index = q->prod_idx & (q->nentries - 1); 1078 for (i = 0; i < nsegs; i++) { 1079 ent = &q->entries[cur]; 1080 length_status = GENET_TX_DESC_STATUS_QTAG_MASK; 1081 if (i == 0) { 1082 length_status |= GENET_TX_DESC_STATUS_SOP | 1083 GENET_TX_DESC_STATUS_CRC; 1084 if ((csum_flags & CSUM_DELAY_ANY) != 0) 1085 length_status |= GENET_TX_DESC_STATUS_CKSUM; 1086 } 1087 if (i == nsegs - 1) 1088 length_status |= GENET_TX_DESC_STATUS_EOP; 1089 1090 length_status |= segs[i].ds_len << 1091 GENET_TX_DESC_STATUS_BUFLEN_SHIFT; 1092 1093 WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), 1094 (uint32_t)segs[i].ds_addr); 1095 WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), 1096 (uint32_t)(segs[i].ds_addr >> 32)); 1097 WR4(sc, GENET_TX_DESC_STATUS(index), length_status); 1098 1099 ++q->queued; 1100 cur = TX_NEXT(cur, q->nentries); 1101 index = TX_NEXT(index, q->nentries); 1102 } 1103 1104 q->prod_idx += nsegs; 1105 q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK; 1106 /* We probably don't need to write the producer index on every iter */ 1107 if (nsegs != 0) 1108 WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx); 1109 q->cur = cur; 1110 1111 /* Store mbuf in the last segment */ 1112 q->entries[first].mbuf = m; 1113 1114 return (0); 1115 } 1116 1117 /* 1118 * Parse a packet to find the offset of the transport header for checksum 1119 * offload. Ensure that the link and network headers are contiguous with 1120 * the status block, or transmission fails. 1121 */ 1122 static int 1123 gen_parse_tx(struct mbuf *m, int csum_flags) 1124 { 1125 int offset, off_in_m; 1126 bool copy = false, shift = false; 1127 u_char *p, *copy_p = NULL; 1128 struct mbuf *m0 = m; 1129 uint16_t ether_type; 1130 1131 if (m->m_len == sizeof(struct statusblock)) { 1132 /* M_PREPEND placed statusblock at end; move to beginning */ 1133 m->m_data = m->m_pktdat; 1134 copy_p = mtodo(m, sizeof(struct statusblock)); 1135 m = m->m_next; 1136 off_in_m = 0; 1137 p = mtod(m, u_char *); 1138 copy = true; 1139 } else { 1140 /* 1141 * If statusblock is not at beginning of mbuf (likely), 1142 * then remember to move mbuf contents down before copying 1143 * after them. 1144 */ 1145 if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat) 1146 shift = true; 1147 p = mtodo(m, sizeof(struct statusblock)); 1148 off_in_m = sizeof(struct statusblock); 1149 } 1150 1151 /* 1152 * If headers need to be copied contiguous to statusblock, do so. 1153 * If copying to the internal mbuf data area, and the status block 1154 * is not at the beginning of that area, shift the status block (which 1155 * is empty) and following data. 1156 */ 1157 #define COPY(size) { \ 1158 int hsize = size; \ 1159 if (copy) { \ 1160 if (shift) { \ 1161 u_char *p0; \ 1162 shift = false; \ 1163 p0 = mtodo(m0, sizeof(struct statusblock)); \ 1164 m0->m_data = m0->m_pktdat; \ 1165 bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\ 1166 m0->m_len - sizeof(struct statusblock)); \ 1167 copy_p = mtodo(m0, sizeof(struct statusblock)); \ 1168 } \ 1169 bcopy(p, copy_p, hsize); \ 1170 m0->m_len += hsize; \ 1171 m0->m_pkthdr.len += hsize; /* unneeded */ \ 1172 m->m_len -= hsize; \ 1173 m->m_data += hsize; \ 1174 } \ 1175 copy_p += hsize; \ 1176 } 1177 1178 KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) + 1179 sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__)); 1180 1181 if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) { 1182 offset = sizeof(struct ether_vlan_header); 1183 ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto); 1184 COPY(sizeof(struct ether_vlan_header)); 1185 if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) { 1186 m = m->m_next; 1187 off_in_m = 0; 1188 p = mtod(m, u_char *); 1189 copy = true; 1190 } else { 1191 off_in_m += sizeof(struct ether_vlan_header); 1192 p += sizeof(struct ether_vlan_header); 1193 } 1194 } else { 1195 offset = sizeof(struct ether_header); 1196 ether_type = ntohs(((struct ether_header *)p)->ether_type); 1197 COPY(sizeof(struct ether_header)); 1198 if (m->m_len == off_in_m + sizeof(struct ether_header)) { 1199 m = m->m_next; 1200 off_in_m = 0; 1201 p = mtod(m, u_char *); 1202 copy = true; 1203 } else { 1204 off_in_m += sizeof(struct ether_header); 1205 p += sizeof(struct ether_header); 1206 } 1207 } 1208 if (ether_type == ETHERTYPE_IP) { 1209 COPY(((struct ip *)p)->ip_hl << 2); 1210 offset += ((struct ip *)p)->ip_hl << 2; 1211 } else if (ether_type == ETHERTYPE_IPV6) { 1212 COPY(sizeof(struct ip6_hdr)); 1213 offset += sizeof(struct ip6_hdr); 1214 } else { 1215 /* 1216 * Unknown whether other cases require moving a header; 1217 * ARP works without. 1218 */ 1219 } 1220 return (offset); 1221 #undef COPY 1222 } 1223 1224 static void 1225 gen_intr(void *arg) 1226 { 1227 struct gen_softc *sc = arg; 1228 uint32_t val; 1229 1230 GEN_LOCK(sc); 1231 1232 val = RD4(sc, GENET_INTRL2_CPU_STAT); 1233 val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK); 1234 WR4(sc, GENET_INTRL2_CPU_CLEAR, val); 1235 1236 if (val & GENET_IRQ_RXDMA_DONE) 1237 gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]); 1238 1239 1240 if (val & GENET_IRQ_TXDMA_DONE) { 1241 gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]); 1242 if (!if_sendq_empty(sc->ifp)) 1243 gen_start_locked(sc); 1244 } 1245 1246 GEN_UNLOCK(sc); 1247 } 1248 1249 static int 1250 gen_rxintr(struct gen_softc *sc, struct rx_queue *q) 1251 { 1252 if_t ifp; 1253 struct mbuf *m, *mh, *mt; 1254 struct statusblock *sb = NULL; 1255 int error, index, len, cnt, npkt, n; 1256 uint32_t status, prod_idx, total; 1257 1258 ifp = sc->ifp; 1259 mh = mt = NULL; 1260 cnt = 0; 1261 npkt = 0; 1262 1263 prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) & 1264 GENET_RX_DMA_PROD_CONS_MASK; 1265 total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK; 1266 1267 index = q->cons_idx & (RX_DESC_COUNT - 1); 1268 for (n = 0; n < total; n++) { 1269 bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map, 1270 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1271 bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map); 1272 1273 m = q->entries[index].mbuf; 1274 1275 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 1276 sb = mtod(m, struct statusblock *); 1277 status = sb->status_buflen; 1278 } else 1279 status = RD4(sc, GENET_RX_DESC_STATUS(index)); 1280 1281 len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >> 1282 GENET_RX_DESC_STATUS_BUFLEN_SHIFT; 1283 1284 /* check for errors */ 1285 if ((status & 1286 (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP | 1287 GENET_RX_DESC_STATUS_RX_ERROR)) != 1288 (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) { 1289 if (ifp->if_flags & IFF_DEBUG) 1290 device_printf(sc->dev, 1291 "error/frag %x csum %x\n", status, 1292 sb->rxcsum); 1293 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1294 continue; 1295 } 1296 1297 error = gen_newbuf_rx(sc, q, index); 1298 if (error != 0) { 1299 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1300 if (ifp->if_flags & IFF_DEBUG) 1301 device_printf(sc->dev, "gen_newbuf_rx %d\n", 1302 error); 1303 /* reuse previous mbuf */ 1304 (void) gen_mapbuf_rx(sc, q, index, m); 1305 continue; 1306 } 1307 1308 if (sb != NULL) { 1309 if (status & GENET_RX_DESC_STATUS_CKSUM_OK) { 1310 /* L4 checksum checked; not sure about L3. */ 1311 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | 1312 CSUM_PSEUDO_HDR; 1313 m->m_pkthdr.csum_data = 0xffff; 1314 } 1315 m->m_data += sizeof(struct statusblock); 1316 m->m_len -= sizeof(struct statusblock); 1317 len -= sizeof(struct statusblock); 1318 } 1319 if (len > ETHER_ALIGN) { 1320 m_adj(m, ETHER_ALIGN); 1321 len -= ETHER_ALIGN; 1322 } 1323 1324 m->m_pkthdr.rcvif = ifp; 1325 m->m_pkthdr.len = len; 1326 m->m_len = len; 1327 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1328 1329 m->m_nextpkt = NULL; 1330 if (mh == NULL) 1331 mh = m; 1332 else 1333 mt->m_nextpkt = m; 1334 mt = m; 1335 ++cnt; 1336 ++npkt; 1337 1338 index = RX_NEXT(index, q->nentries); 1339 1340 q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK; 1341 WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx); 1342 1343 if (cnt == gen_rx_batch) { 1344 GEN_UNLOCK(sc); 1345 if_input(ifp, mh); 1346 GEN_LOCK(sc); 1347 mh = mt = NULL; 1348 cnt = 0; 1349 } 1350 } 1351 1352 if (mh != NULL) { 1353 GEN_UNLOCK(sc); 1354 if_input(ifp, mh); 1355 GEN_LOCK(sc); 1356 } 1357 1358 return (npkt); 1359 } 1360 1361 static void 1362 gen_txintr(struct gen_softc *sc, struct tx_queue *q) 1363 { 1364 uint32_t cons_idx, total; 1365 struct gen_ring_ent *ent; 1366 if_t ifp; 1367 int i, prog; 1368 1369 GEN_ASSERT_LOCKED(sc); 1370 1371 ifp = sc->ifp; 1372 1373 cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) & 1374 GENET_TX_DMA_PROD_CONS_MASK; 1375 total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK; 1376 1377 prog = 0; 1378 for (i = q->next; q->queued > 0 && total > 0; 1379 i = TX_NEXT(i, q->nentries), total--) { 1380 /* XXX check for errors */ 1381 1382 ent = &q->entries[i]; 1383 if (ent->mbuf != NULL) { 1384 bus_dmamap_sync(sc->tx_buf_tag, ent->map, 1385 BUS_DMASYNC_POSTWRITE); 1386 bus_dmamap_unload(sc->tx_buf_tag, ent->map); 1387 m_freem(ent->mbuf); 1388 ent->mbuf = NULL; 1389 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1390 } 1391 1392 prog++; 1393 --q->queued; 1394 } 1395 1396 if (prog > 0) { 1397 q->next = i; 1398 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1399 } 1400 1401 q->cons_idx = cons_idx; 1402 } 1403 1404 static void 1405 gen_intr2(void *arg) 1406 { 1407 struct gen_softc *sc = arg; 1408 1409 device_printf(sc->dev, "gen_intr2\n"); 1410 } 1411 1412 static int 1413 gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index) 1414 { 1415 struct mbuf *m; 1416 1417 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1418 if (m == NULL) 1419 return (ENOBUFS); 1420 1421 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 1422 m_adj(m, ETHER_ALIGN); 1423 1424 return (gen_mapbuf_rx(sc, q, index, m)); 1425 } 1426 1427 static int 1428 gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index, 1429 struct mbuf *m) 1430 { 1431 bus_dma_segment_t seg; 1432 bus_dmamap_t map; 1433 int nsegs; 1434 1435 map = q->entries[index].map; 1436 if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs, 1437 BUS_DMA_NOWAIT) != 0) { 1438 m_freem(m); 1439 return (ENOBUFS); 1440 } 1441 1442 bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD); 1443 1444 q->entries[index].mbuf = m; 1445 WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr); 1446 WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32)); 1447 1448 return (0); 1449 } 1450 1451 static int 1452 gen_ioctl(if_t ifp, u_long cmd, caddr_t data) 1453 { 1454 struct gen_softc *sc; 1455 struct mii_data *mii; 1456 struct ifreq *ifr; 1457 int flags, enable, error; 1458 1459 sc = if_getsoftc(ifp); 1460 mii = device_get_softc(sc->miibus); 1461 ifr = (struct ifreq *)data; 1462 error = 0; 1463 1464 switch (cmd) { 1465 case SIOCSIFFLAGS: 1466 GEN_LOCK(sc); 1467 if (if_getflags(ifp) & IFF_UP) { 1468 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1469 flags = if_getflags(ifp) ^ sc->if_flags; 1470 if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) 1471 gen_setup_rxfilter(sc); 1472 } else 1473 gen_init_locked(sc); 1474 } else { 1475 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1476 gen_reset(sc); 1477 } 1478 sc->if_flags = if_getflags(ifp); 1479 GEN_UNLOCK(sc); 1480 break; 1481 1482 case SIOCADDMULTI: 1483 case SIOCDELMULTI: 1484 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1485 GEN_LOCK(sc); 1486 gen_setup_rxfilter(sc); 1487 GEN_UNLOCK(sc); 1488 } 1489 break; 1490 1491 case SIOCSIFMEDIA: 1492 case SIOCGIFMEDIA: 1493 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1494 break; 1495 1496 case SIOCSIFCAP: 1497 enable = if_getcapenable(ifp); 1498 flags = ifr->ifr_reqcap ^ enable; 1499 if (flags & IFCAP_RXCSUM) 1500 enable ^= IFCAP_RXCSUM; 1501 if (flags & IFCAP_RXCSUM_IPV6) 1502 enable ^= IFCAP_RXCSUM_IPV6; 1503 if (flags & IFCAP_TXCSUM) 1504 enable ^= IFCAP_TXCSUM; 1505 if (flags & IFCAP_TXCSUM_IPV6) 1506 enable ^= IFCAP_TXCSUM_IPV6; 1507 if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) 1508 if_sethwassist(ifp, GEN_CSUM_FEATURES); 1509 else 1510 if_sethwassist(ifp, 0); 1511 if_setcapenable(ifp, enable); 1512 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1513 gen_enable_offload(sc); 1514 break; 1515 1516 default: 1517 error = ether_ioctl(ifp, cmd, data); 1518 break; 1519 } 1520 return (error); 1521 } 1522 1523 static void 1524 gen_tick(void *softc) 1525 { 1526 struct gen_softc *sc; 1527 struct mii_data *mii; 1528 if_t ifp; 1529 int link; 1530 1531 sc = softc; 1532 ifp = sc->ifp; 1533 mii = device_get_softc(sc->miibus); 1534 1535 GEN_ASSERT_LOCKED(sc); 1536 1537 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1538 return; 1539 1540 link = sc->link; 1541 mii_tick(mii); 1542 if (sc->link && !link) 1543 gen_start_locked(sc); 1544 1545 callout_reset(&sc->stat_ch, hz, gen_tick, sc); 1546 } 1547 1548 #define MII_BUSY_RETRY 1000 1549 1550 static int 1551 gen_miibus_readreg(device_t dev, int phy, int reg) 1552 { 1553 struct gen_softc *sc; 1554 int retry, val; 1555 1556 sc = device_get_softc(dev); 1557 val = 0; 1558 1559 WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ | 1560 (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT)); 1561 val = RD4(sc, GENET_MDIO_CMD); 1562 WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY); 1563 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 1564 if (((val = RD4(sc, GENET_MDIO_CMD)) & 1565 GENET_MDIO_START_BUSY) == 0) { 1566 if (val & GENET_MDIO_READ_FAILED) 1567 return (0); /* -1? */ 1568 val &= GENET_MDIO_VAL_MASK; 1569 break; 1570 } 1571 DELAY(10); 1572 } 1573 1574 if (retry == 0) 1575 device_printf(dev, "phy read timeout, phy=%d reg=%d\n", 1576 phy, reg); 1577 1578 return (val); 1579 } 1580 1581 static int 1582 gen_miibus_writereg(device_t dev, int phy, int reg, int val) 1583 { 1584 struct gen_softc *sc; 1585 int retry; 1586 1587 sc = device_get_softc(dev); 1588 1589 WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE | 1590 (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) | 1591 (val & GENET_MDIO_VAL_MASK)); 1592 val = RD4(sc, GENET_MDIO_CMD); 1593 WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY); 1594 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 1595 val = RD4(sc, GENET_MDIO_CMD); 1596 if ((val & GENET_MDIO_START_BUSY) == 0) 1597 break; 1598 DELAY(10); 1599 } 1600 if (retry == 0) 1601 device_printf(dev, "phy write timeout, phy=%d reg=%d\n", 1602 phy, reg); 1603 1604 return (0); 1605 } 1606 1607 static void 1608 gen_update_link_locked(struct gen_softc *sc) 1609 { 1610 struct mii_data *mii; 1611 uint32_t val; 1612 u_int speed; 1613 1614 GEN_ASSERT_LOCKED(sc); 1615 1616 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) 1617 return; 1618 mii = device_get_softc(sc->miibus); 1619 1620 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 1621 (IFM_ACTIVE | IFM_AVALID)) { 1622 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1623 case IFM_1000_T: 1624 case IFM_1000_SX: 1625 speed = GENET_UMAC_CMD_SPEED_1000; 1626 sc->link = 1; 1627 break; 1628 case IFM_100_TX: 1629 speed = GENET_UMAC_CMD_SPEED_100; 1630 sc->link = 1; 1631 break; 1632 case IFM_10_T: 1633 speed = GENET_UMAC_CMD_SPEED_10; 1634 sc->link = 1; 1635 break; 1636 default: 1637 sc->link = 0; 1638 break; 1639 } 1640 } else 1641 sc->link = 0; 1642 1643 if (sc->link == 0) 1644 return; 1645 1646 val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL); 1647 val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE; 1648 val |= GENET_EXT_RGMII_OOB_RGMII_LINK; 1649 val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN; 1650 if (sc->phy_mode == MII_CONTYPE_RGMII) 1651 val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE; 1652 WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val); 1653 1654 val = RD4(sc, GENET_UMAC_CMD); 1655 val &= ~GENET_UMAC_CMD_SPEED; 1656 val |= speed; 1657 WR4(sc, GENET_UMAC_CMD, val); 1658 } 1659 1660 static void 1661 gen_link_task(void *arg, int pending) 1662 { 1663 struct gen_softc *sc; 1664 1665 sc = arg; 1666 1667 GEN_LOCK(sc); 1668 gen_update_link_locked(sc); 1669 GEN_UNLOCK(sc); 1670 } 1671 1672 static void 1673 gen_miibus_statchg(device_t dev) 1674 { 1675 struct gen_softc *sc; 1676 1677 sc = device_get_softc(dev); 1678 1679 taskqueue_enqueue(taskqueue_swi, &sc->link_task); 1680 } 1681 1682 static void 1683 gen_media_status(if_t ifp, struct ifmediareq *ifmr) 1684 { 1685 struct gen_softc *sc; 1686 struct mii_data *mii; 1687 1688 sc = if_getsoftc(ifp); 1689 mii = device_get_softc(sc->miibus); 1690 1691 GEN_LOCK(sc); 1692 mii_pollstat(mii); 1693 ifmr->ifm_active = mii->mii_media_active; 1694 ifmr->ifm_status = mii->mii_media_status; 1695 GEN_UNLOCK(sc); 1696 } 1697 1698 static int 1699 gen_media_change(if_t ifp) 1700 { 1701 struct gen_softc *sc; 1702 struct mii_data *mii; 1703 int error; 1704 1705 sc = if_getsoftc(ifp); 1706 mii = device_get_softc(sc->miibus); 1707 1708 GEN_LOCK(sc); 1709 error = mii_mediachg(mii); 1710 GEN_UNLOCK(sc); 1711 1712 return (error); 1713 } 1714 1715 static device_method_t gen_methods[] = { 1716 /* Device interface */ 1717 DEVMETHOD(device_probe, gen_probe), 1718 DEVMETHOD(device_attach, gen_attach), 1719 1720 /* MII interface */ 1721 DEVMETHOD(miibus_readreg, gen_miibus_readreg), 1722 DEVMETHOD(miibus_writereg, gen_miibus_writereg), 1723 DEVMETHOD(miibus_statchg, gen_miibus_statchg), 1724 1725 DEVMETHOD_END 1726 }; 1727 1728 static driver_t gen_driver = { 1729 "genet", 1730 gen_methods, 1731 sizeof(struct gen_softc), 1732 }; 1733 1734 static devclass_t gen_devclass; 1735 1736 DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0); 1737 DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0); 1738 MODULE_DEPEND(genet, ether, 1, 1, 1); 1739 MODULE_DEPEND(genet, miibus, 1, 1, 1); 1740