/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#define ICMPV6_HACK	/* workaround for chip issue */
#ifdef ICMPV6_HACK
#include <netinet/icmp6.h>
#endif

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))

#define	TX_MAX_SEGS		20
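/*
 * Note that TX_NEXT()/RX_NEXT() advance a ring index with a mask rather
 * than a modulo, which relies on GENET_DMA_DESC_COUNT being a power of two.
 */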
/* Maximum number of mbufs to send to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",	1 },
	{ "brcm,genet-v2",	2 },
	{ "brcm,genet-v3",	3 },
	{ "brcm,genet-v4",	4 },
	{ "brcm,genet-v5",	5 },
	{ NULL,			0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is cast
	 * with multiple rings.  The additional rings are used for different
	 * priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];	/* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];	/* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(device_t dev);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);
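/*
 * Device methods follow.  gen_probe() matches any of the brcm,genet
 * compatible strings above, but gen_attach() only accepts major
 * revision 5 controllers (the BCM2711 variant found on the RPi4).
 */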
static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(dev);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
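	/*
	 * Checksum offload in this chip works through a status block
	 * prepended to the frame data (see gen_encap() and gen_rxintr());
	 * gen_enable_offload() turns the 64-byte blocks on and off as the
	 * capabilities change.
	 */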
	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}
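/*
 * gen_reset() brings the core to a known state: flush the RX buffer
 * logic, soft-reset the UMAC with local loopback enabled, clear the MIB
 * counters, and restore the 2-byte RX alignment that gen_rxintr() expects.
 */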
static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(device_t dev)
{
	struct gen_softc *sc = device_get_softc(dev);
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	struct device *dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}
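/*
 * Both tags above restrict buffers to 40-bit bus addresses; the GENET
 * descriptor address registers are split into 32-bit LO and HI words,
 * so only the low 8 bits of the HI word are significant (see the
 * ADDRESS_LO/HI writes in gen_encap() and gen_mapbuf_rx()).
 */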
static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}
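/*
 * The START/END/READ/WRITE pointer registers appear to be expressed in
 * 32-bit words of descriptor space, hence the "* GENET_DMA_DESC_SIZE / 4"
 * when computing the end address above and below.
 */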
/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}
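/*
 * gen_init_locked() performs the per-"ifconfig up" half of initialization:
 * set the port mode, program the MAC address and RX filter, initialize
 * the rings, then kick the MII state machine and the stats callout.
 */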
static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = IF_LLADDR(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int cnt, err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)
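/*
 * gen_encap() prepends a status block when TX checksum offload is
 * enabled, DMA-loads the mbuf chain (collapsing it if it exceeds
 * TX_MAX_SEGS), and then writes one descriptor per segment before
 * advancing the producer index.
 */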
static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;
#ifdef ICMPV6_HACK
	/*
	 * Reflected ICMPv6 packets, e.g. echo replies, tend to get laid
	 * out with only the Ethernet header in the first mbuf, and this
	 * doesn't seem to work.
	 */
#define ICMP6_LEN (sizeof(struct ether_header) + sizeof(struct ip6_hdr) + \
	    sizeof(struct icmp6_hdr))
	if (m->m_len == sizeof(struct ether_header)) {
		int ether_type = mtod(m, struct ether_header *)->ether_type;
		if (ntohs(ether_type) == ETHERTYPE_IPV6 &&
		    m->m_next->m_len >= sizeof(struct ip6_hdr)) {
			struct ip6_hdr *ip6;

			ip6 = mtod(m->m_next, struct ip6_hdr *);
			if (ip6->ip6_nxt == IPPROTO_ICMPV6) {
				m = m_pullup(m,
				    MIN(m->m_pkthdr.len, ICMP6_LEN));
				if (m == NULL) {
					if (sc->ifp->if_flags & IFF_DEBUG)
						device_printf(sc->dev,
						    "ICMPV6 pullup fail\n");
					*mp = NULL;
					return (ENOMEM);
				}
			}
		}
	}
#undef ICMP6_LEN
#endif
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}
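	/*
	 * The mapping above still covers the status block, which the
	 * hardware presumably consumes as the leading bytes of the first
	 * segment; only the mbuf bookkeeping is adjusted below.
	 */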
	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf at the first entry used for this packet */
	q->entries[first].mbuf = m;

	return (0);
}
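/*
 * Example: for a TCP/IPv4 frame with no VLAN tag, gen_parse_tx() below
 * returns 14 + (ip_hl << 2), and gen_encap() programs the checksum to
 * start at that offset, with the result stored at that offset plus
 * csum_data (the offset of th_sum within the TCP header).
 */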
/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

	/*
	 * If headers need to be copied contiguous to statusblock, do so.
	 * If copying to the internal mbuf data area, and the status block
	 * is not at the beginning of that area, shift the status block (which
	 * is empty) and following data.
	 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, sizeof(struct statusblock));	\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m0->m_pkthdr.len += hsize;	/* unneeded */		\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether other cases require moving a header;
		 * ARP works without.
		 */
	}

	return (offset);
#undef COPY
}
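/*
 * gen_intr() acknowledges the unmasked INTRL2 bits by writing them back
 * to the CLEAR register, then services the default RX and TX queues.
 */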
static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb != NULL ? sb->rxcsum : 0);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}
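		/*
		 * With RX checksum offload enabled the chip prepends a
		 * status block to each buffer; strip it and translate the
		 * CKSUM_OK bit into mbuf checksum flags below.
		 */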
		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}
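/*
 * gen_ioctl() handles interface flag, multicast, media, and capability
 * changes; everything else is passed through to ether_ioctl().
 */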
static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_reset(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define MII_BUSY_RETRY		1000
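/*
 * MDIO accesses poll GENET_MDIO_CMD for completion; at 10us per poll,
 * MII_BUSY_RETRY bounds each access at roughly 10ms.
 */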
static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}
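/*
 * Newbus glue: the driver attaches under simplebus (FDT), and miibus
 * hangs off genet for PHY access via the MDIO methods above.
 */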
static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

static devclass_t gen_devclass;

DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);