/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define	__BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
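/*
 * Usage note: because rx_batch is CTLFLAG_RDTUN, it can also be set as a
 * boot-time tunable, e.g. in loader.conf:
 *
 *	hw.genet.rx_batch="32"
 *
 * Larger values amortize the unlock/if_input()/relock cycle in
 * gen_rxintr() over more packets, at a small cost in latency.
 */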
/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;	/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};
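/*
 * Note on index discipline: prod_idx and cons_idx shadow the hardware's
 * free-running producer/consumer counters and are compared modulo
 * GENET_TX/RX_DMA_PROD_CONS_MASK + 1, while cur (and next, on transmit)
 * index the software ring and wrap via TX_NEXT()/RX_NEXT(), which
 * assumes the ring sizes are powers of two.
 */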
struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is
	 * structured to allow multiple rings; the additional rings would
	 * be used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];	/* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];	/* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}
static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode) {
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (!eaddr_found)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}
/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}
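/*
 * Byte-order note: GENET_UMAC_MAC0 holds the first four octets of the
 * station address most-significant byte first, and GENET_UMAC_MAC1 the
 * remaining two; the htobe32()/htobe16() conversions above let the
 * octets be peeled off low byte first regardless of host endianness.
 */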
static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}
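/*
 * The tags created below limit buffer addresses to 40 bits
 * (BUS_SPACE_MAXADDR_40BIT); the GENET block on the BCM2711 masters
 * memory through a 40-bit interface, so all of the Pi 4's RAM is
 * directly addressable without bouncing.
 */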
static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}
/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}
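/*
 * Note: the PRI_RINGS blocks above are unfinished scaffolding.  They
 * reference a "queue" member and dma_ring_conf/dma_control variables
 * that do not exist in this driver, so compiling with PRI_RINGS defined
 * will fail; only the default queue 16 path is functional.
 */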
static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}
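/*
 * Worked example for the mdf_ctrl computation above, assuming
 * GENET_MAX_MDF_FILTER is 17: with n = 3 (broadcast, unicast, and one
 * multicast group), __BIT(17) - 1 is 0x1ffff and __BIT(17 - 3) - 1 is
 * 0x3fff, so mdf_ctrl is 0x1c000.  The enable bits count down from the
 * most-significant end, matching filter slots 0..n-1 as programmed by
 * gen_setup_rxfilter_mdf().
 */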
static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = if_getlladdr(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)
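/*
 * Overview of transmit checksum offload: when TX offload is enabled,
 * gen_enable_offload() sets GENET_RBUF_64B_EN so the hardware expects
 * each frame to begin with a status block.  gen_encap() prepends a
 * struct statusblock, gen_parse_tx() makes the link and network headers
 * contiguous with it, and the txcsuminfo word tells the hardware where
 * to start checksumming and where to store the result.
 */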
static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];
	if (q->queued == q->nentries) {
		/* tx_queue is full */
		return (ENOBUFS);
	}

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (if_getflags(sc->ifp) & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}
	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf with the first segment's ring entry */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

	/*
	 * If headers need to be copied contiguous to statusblock, do so.
	 * If copying to the internal mbuf data area, and the status block
	 * is not at the beginning of that area, shift the status block (which
	 * is empty) and following data.
	 */
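/*
 * COPY(size) below: when "copy" is set, it appends "size" bytes of
 * header from the donor mbuf (p/m) to the area following the status
 * block in m0 and shrinks the donor accordingly; the first time through
 * with "shift" set, it first slides m0's existing data down to the start
 * of the internal buffer so the headers become contiguous with the
 * (empty) status block.  copy_p always tracks the next copy destination.
 */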
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
1307 */ 1308 COPY(MIN(gen_tx_hdr_min, m->m_len)); 1309 offset += MIN(gen_tx_hdr_min, m->m_len); 1310 } 1311 return (offset); 1312 #undef COPY 1313 } 1314 1315 static void 1316 gen_intr(void *arg) 1317 { 1318 struct gen_softc *sc = arg; 1319 uint32_t val; 1320 1321 GEN_LOCK(sc); 1322 1323 val = RD4(sc, GENET_INTRL2_CPU_STAT); 1324 val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK); 1325 WR4(sc, GENET_INTRL2_CPU_CLEAR, val); 1326 1327 if (val & GENET_IRQ_RXDMA_DONE) 1328 gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]); 1329 1330 if (val & GENET_IRQ_TXDMA_DONE) { 1331 gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]); 1332 if (!if_sendq_empty(sc->ifp)) 1333 gen_start_locked(sc); 1334 } 1335 1336 GEN_UNLOCK(sc); 1337 } 1338 1339 static int 1340 gen_rxintr(struct gen_softc *sc, struct rx_queue *q) 1341 { 1342 if_t ifp; 1343 struct mbuf *m, *mh, *mt; 1344 struct statusblock *sb = NULL; 1345 int error, index, len, cnt, npkt, n; 1346 uint32_t status, prod_idx, total; 1347 1348 ifp = sc->ifp; 1349 mh = mt = NULL; 1350 cnt = 0; 1351 npkt = 0; 1352 1353 prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) & 1354 GENET_RX_DMA_PROD_CONS_MASK; 1355 total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK; 1356 1357 index = q->cons_idx & (RX_DESC_COUNT - 1); 1358 for (n = 0; n < total; n++) { 1359 bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map, 1360 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1361 bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map); 1362 1363 m = q->entries[index].mbuf; 1364 1365 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 1366 sb = mtod(m, struct statusblock *); 1367 status = sb->status_buflen; 1368 } else 1369 status = RD4(sc, GENET_RX_DESC_STATUS(index)); 1370 1371 len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >> 1372 GENET_RX_DESC_STATUS_BUFLEN_SHIFT; 1373 1374 /* check for errors */ 1375 if ((status & 1376 (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP | 1377 GENET_RX_DESC_STATUS_RX_ERROR)) != 1378 (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) { 1379 if (if_getflags(ifp) & IFF_DEBUG) 1380 device_printf(sc->dev, 1381 "error/frag %x csum %x\n", status, 1382 sb->rxcsum); 1383 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1384 continue; 1385 } 1386 1387 error = gen_newbuf_rx(sc, q, index); 1388 if (error != 0) { 1389 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1390 if (if_getflags(ifp) & IFF_DEBUG) 1391 device_printf(sc->dev, "gen_newbuf_rx %d\n", 1392 error); 1393 /* reuse previous mbuf */ 1394 (void) gen_mapbuf_rx(sc, q, index, m); 1395 continue; 1396 } 1397 1398 if (sb != NULL) { 1399 if (status & GENET_RX_DESC_STATUS_CKSUM_OK) { 1400 /* L4 checksum checked; not sure about L3. 
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}
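/*
 * gen_intr2() above services the second INTRL2 bank, which is believed
 * to report events for the priority rings that this driver does not
 * use, so the handler only logs that it fired.  (Assumption from GENET
 * documentation, not verified here.)
 */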
static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}
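/*
 * MDIO access pattern: both PHY accessors below write the command word,
 * set GENET_MDIO_START_BUSY, and then poll the busy bit up to
 * MII_BUSY_RETRY times at 10us intervals (roughly 10 ms) before
 * reporting a timeout.
 */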
#define MII_BUSY_RETRY	1000

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}
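/*
 * gen_miibus_statchg() can be entered from the mii_tick()/mii_mediachg()
 * paths with the driver mutex already held (see gen_tick() and
 * gen_init_locked()), so the speed/RGMII-OOB register update is deferred
 * to taskqueue_swi via link_task rather than locking here.
 */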
static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);