/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Alstom Group.
 * Copyright (c) 2021 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/iflib.h>

#include <dev/enetc/enetc_hw.h>
#include <dev/enetc/enetc.h>
#include <dev/enetc/enetc_mdio.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "ifdi_if.h"
#include "miibus_if.h"

static device_register_t	enetc_register;

static ifdi_attach_pre_t	enetc_attach_pre;
static ifdi_attach_post_t	enetc_attach_post;
static ifdi_detach_t		enetc_detach;

static ifdi_tx_queues_alloc_t	enetc_tx_queues_alloc;
static ifdi_rx_queues_alloc_t	enetc_rx_queues_alloc;
static ifdi_queues_free_t	enetc_queues_free;

static ifdi_init_t	enetc_init;
static ifdi_stop_t	enetc_stop;

static ifdi_msix_intr_assign_t		enetc_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t	enetc_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t	enetc_rx_queue_intr_enable;
static ifdi_intr_enable_t		enetc_intr_enable;
static ifdi_intr_disable_t		enetc_intr_disable;

static int	enetc_isc_txd_encap(void*, if_pkt_info_t);
static void	enetc_isc_txd_flush(void*, uint16_t, qidx_t);
static int	enetc_isc_txd_credits_update(void*, uint16_t, bool);
static int	enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
static int	enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
static void	enetc_isc_rxd_refill(void*, if_rxd_update_t);
static void	enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);

static void	enetc_vlan_register(if_ctx_t, uint16_t);
static void	enetc_vlan_unregister(if_ctx_t, uint16_t);

static uint64_t	enetc_get_counter(if_ctx_t, ift_counter);
static int	enetc_promisc_set(if_ctx_t, int);
static int	enetc_mtu_set(if_ctx_t, uint32_t);
static void	enetc_setup_multicast(if_ctx_t);
static void	enetc_timer(if_ctx_t, uint16_t);
static void	enetc_update_admin_status(if_ctx_t);

static miibus_readreg_t		enetc_miibus_readreg;
static miibus_writereg_t	enetc_miibus_writereg;
static miibus_linkchg_t		enetc_miibus_linkchg;
static miibus_statchg_t		enetc_miibus_statchg;

static int	enetc_media_change(if_t);
static void	enetc_media_status(if_t, struct ifmediareq*);

static int	enetc_fixed_media_change(if_t);
static void	enetc_fixed_media_status(if_t, struct ifmediareq*);

static void	enetc_max_nqueues(struct enetc_softc*, int*, int*);
static int	enetc_setup_phy(struct enetc_softc*);

static void	enetc_get_hwaddr(struct enetc_softc*);
static void	enetc_set_hwaddr(struct enetc_softc*);
static int	enetc_setup_rss(struct enetc_softc*);

static void	enetc_init_hw(struct enetc_softc*);
static void	enetc_init_ctrl(struct enetc_softc*);
static void	enetc_init_tx(struct enetc_softc*);
static void	enetc_init_rx(struct enetc_softc*);

static int	enetc_ctrl_send(struct enetc_softc*,
    uint16_t, uint16_t, iflib_dma_info_t);

static const char enetc_driver_version[] = "1.0.0";

static pci_vendor_info_t enetc_vendor_info_array[] = {
	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
	PVID_END
};

#define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)

static device_method_t enetc_methods[] = {
	DEVMETHOD(device_register,	enetc_register),
	DEVMETHOD(device_probe,		iflib_device_probe),
	DEVMETHOD(device_attach,	iflib_device_attach),
	DEVMETHOD(device_detach,	iflib_device_detach),
	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
	DEVMETHOD(device_suspend,	iflib_device_suspend),
	DEVMETHOD(device_resume,	iflib_device_resume),

	DEVMETHOD(miibus_readreg,	enetc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	enetc_miibus_writereg),
	DEVMETHOD(miibus_linkchg,	enetc_miibus_linkchg),
	DEVMETHOD(miibus_statchg,	enetc_miibus_statchg),

	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource,		bus_generic_adjust_resource),
	DEVMETHOD(bus_alloc_resource,		bus_generic_alloc_resource),

	DEVMETHOD_END
};

static driver_t enetc_driver = {
	"enetc", enetc_methods, sizeof(struct enetc_softc)
};

DRIVER_MODULE(miibus, enetc, miibus_fdt_driver, NULL, NULL);
/* Make sure miibus gets processed first. */
DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, NULL, NULL, SI_ORDER_ANY);
MODULE_VERSION(enetc, 1);

IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);

MODULE_DEPEND(enetc, ether, 1, 1, 1);
MODULE_DEPEND(enetc, iflib, 1, 1, 1);
MODULE_DEPEND(enetc, miibus, 1, 1, 1);

static device_method_t enetc_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre,	enetc_attach_pre),
	DEVMETHOD(ifdi_attach_post,	enetc_attach_post),
	DEVMETHOD(ifdi_detach,		enetc_detach),

	DEVMETHOD(ifdi_init,		enetc_init),
	DEVMETHOD(ifdi_stop,		enetc_stop),

	DEVMETHOD(ifdi_tx_queues_alloc,	enetc_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc,	enetc_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free,	enetc_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign,	enetc_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable,	enetc_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable,	enetc_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable,	enetc_intr_enable),
	DEVMETHOD(ifdi_intr_disable,	enetc_intr_disable),

	DEVMETHOD(ifdi_vlan_register,	enetc_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister,	enetc_vlan_unregister),

	DEVMETHOD(ifdi_get_counter,	enetc_get_counter),
	DEVMETHOD(ifdi_mtu_set,		enetc_mtu_set),
	DEVMETHOD(ifdi_multi_set,	enetc_setup_multicast),
	DEVMETHOD(ifdi_promisc_set,	enetc_promisc_set),
	DEVMETHOD(ifdi_timer,		enetc_timer),
	DEVMETHOD(ifdi_update_admin_status,	enetc_update_admin_status),

	DEVMETHOD_END
};

static driver_t enetc_iflib_driver = {
	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
};

static struct if_txrx enetc_txrx = {
	.ift_txd_encap = enetc_isc_txd_encap,
	.ift_txd_flush = enetc_isc_txd_flush,
	.ift_txd_credits_update = enetc_isc_txd_credits_update,
	.ift_rxd_available = enetc_isc_rxd_available,
	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
	.ift_rxd_refill = enetc_isc_rxd_refill,
	.ift_rxd_flush = enetc_isc_rxd_flush
};

static struct if_shared_ctx enetc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = ENETC_RING_ALIGN,

	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_nsegments = ENETC_MAX_SCATTER,

	.isc_admin_intrcnt = 0,

	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,

	.isc_vendor_info = enetc_vendor_info_array,
	.isc_driver_version = enetc_driver_version,
	.isc_driver = &enetc_iflib_driver,

	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
	.isc_ntxd_min = {ENETC_MIN_DESC},
	.isc_ntxd_max = {ENETC_MAX_DESC},
	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
	.isc_nrxd_min = {ENETC_MIN_DESC},
	.isc_nrxd_max = {ENETC_MAX_DESC},
	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
};

static void*
enetc_register(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (NULL);

	return (&enetc_sctx_init);
}

static void
enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
    int *max_rx_nqueues)
{
	uint32_t val;

	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
}
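
/*
 * Handle an FDT "fixed-link" subnode: instead of attaching a PHY driver,
 * translate the node's "speed" and "full-duplex" properties into a single
 * fixed ifmedia entry that is handed to iflib.
 */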
static int
enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
{
	ssize_t size;
	int speed;

	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Device has a fixed-link node without link speed specified\n");
		return (ENXIO);
	}
	switch (speed) {
	case 10:
		speed = IFM_10_T;
		break;
	case 100:
		speed = IFM_100_TX;
		break;
	case 1000:
		speed = IFM_1000_T;
		break;
	case 2500:
		speed = IFM_2500_T;
		break;
	default:
		device_printf(sc->dev, "Unsupported link speed value of %d\n",
		    speed);
		return (ENXIO);
	}
	speed |= IFM_ETHER;

	if (OF_hasprop(node, "full-duplex"))
		speed |= IFM_FDX;
	else
		speed |= IFM_HDX;

	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
	    enetc_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, speed);
	sc->shared->isc_media = &sc->fixed_ifmedia;

	return (0);
}

static int
enetc_setup_phy(struct enetc_softc *sc)
{
	phandle_t node, fixed_link, phy_handle;
	struct mii_data *miid;
	int phy_addr, error;
	ssize_t size;

	node = ofw_bus_get_node(sc->dev);
	fixed_link = ofw_bus_find_child(node, "fixed-link");
	if (fixed_link != 0)
		return (enetc_setup_fixed(sc, fixed_link));

	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Failed to acquire PHY handle from FDT.\n");
		return (ENXIO);
	}
	phy_handle = OF_node_from_xref(phy_handle);
	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
	if (size <= 0) {
		device_printf(sc->dev, "Failed to obtain PHY address\n");
		return (ENXIO);
	}
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
	    enetc_media_change, enetc_media_status,
	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "mii_attach failed\n");
		return (error);
	}
	miid = device_get_softc(sc->miibus);
	sc->shared->isc_media = &miid->mii_media;

	return (0);
}

static int
enetc_attach_pre(if_ctx_t ctx)
{
	if_softc_ctx_t scctx;
	struct enetc_softc *sc;
	int error, rid;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	sc->shared = scctx;

	mtx_init(&sc->mii_lock, "enetc_mdio", NULL, MTX_DEF);

	pci_save_state(sc->dev);
	pcie_flr(sc->dev, 1000, false);
	pci_restore_state(sc->dev);

	rid = PCIR_BAR(ENETC_BAR_REGS);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->regs == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
		return (ENXIO);
	}

	error = iflib_dma_alloc_align(ctx,
	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
	    ENETC_RING_ALIGN,
	    &sc->ctrl_queue.dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate control ring\n");
		goto fail;
	}
	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;

	scctx->isc_txrx = &enetc_txrx;
	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);
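
	/*
	 * Both ring sizes have to be a multiple of the descriptor alignment
	 * required by the hardware; reject unsuitable tunables early.
	 */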
	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of TX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of RX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;

	error = enetc_mtu_set(ctx, ETHERMTU);
	if (error != 0)
		goto fail;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);

	error = enetc_setup_phy(sc);
	if (error != 0)
		goto fail;

	enetc_get_hwaddr(sc);

	return (0);
fail:
	enetc_detach(ctx);
	return (error);
}

static int
enetc_attach_post(if_ctx_t ctx)
{

	enetc_init_hw(iflib_get_softc(ctx));
	return (0);
}

static int
enetc_detach(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int error = 0, i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		iflib_irq_free(ctx, &sc->rx_queues[i].irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);

	if (sc->ctrl_queue.dma.idi_size != 0)
		iflib_dma_free(&sc->ctrl_queue.dma);

	mtx_destroy(&sc->mii_lock);

	return (error);
}

static int
enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct enetc_softc *sc;
	struct enetc_tx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);

	MPASS(ntxqs == 1);

	sc->tx_queues = mallocarray(sc->tx_num_queues,
	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for TX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];
		queue->sc = sc;
		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
		queue->cidx = 0;
	}

	return (0);
}

static int
enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);
	MPASS(nrxqs == 1);

	sc->rx_queues = mallocarray(sc->rx_num_queues,
	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for RX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];
		queue->sc = sc;
		queue->qid = i;
		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
	}

	return (0);
}

static void
enetc_queues_free(if_ctx_t ctx)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);

	if (sc->tx_queues != NULL) {
		free(sc->tx_queues, M_DEVBUF);
		sc->tx_queues = NULL;
	}
	if (sc->rx_queues != NULL) {
		free(sc->rx_queues, M_DEVBUF);
		sc->rx_queues = NULL;
	}
}
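
/*
 * Read the station MAC address currently programmed into the port
 * registers (usually left there by firmware); fall back to a randomly
 * generated address if the register contents aren't a usable unicast
 * address.
 */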
static void
enetc_get_hwaddr(struct enetc_softc *sc)
{
	struct ether_addr hwaddr;
	uint16_t high;
	uint32_t low;

	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));

	memcpy(&hwaddr.octet[0], &low, 4);
	memcpy(&hwaddr.octet[4], &high, 2);

	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet)) {
		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
		device_printf(sc->dev,
		    "Failed to obtain MAC address, using a random one\n");
		memcpy(&low, &hwaddr.octet[0], 4);
		memcpy(&high, &hwaddr.octet[4], 2);
	}

	iflib_set_mac(sc->ctx, hwaddr.octet);
}

static void
enetc_set_hwaddr(struct enetc_softc *sc)
{
	if_t ifp;
	uint16_t high;
	uint32_t low;
	uint8_t *hwaddr;

	ifp = iflib_get_ifp(sc->ctx);
	hwaddr = (uint8_t*)if_getlladdr(ifp);
	low = *((uint32_t*)hwaddr);
	high = *((uint16_t*)(hwaddr+4));

	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
}

static int
enetc_setup_rss(struct enetc_softc *sc)
{
	struct iflib_dma_info dma;
	int error, i, buckets_num = 0;
	uint8_t *rss_table;
	uint32_t reg;

	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
	if (reg & ENETC_SIPCAPR0_RSS) {
		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
	}
	if (buckets_num == 0)
		return (ENOTSUP);

	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
	}

	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);

	error = iflib_dma_alloc_align(sc->ctx,
	    buckets_num * sizeof(*rss_table),
	    ENETC_RING_ALIGN,
	    &dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
		return (error);
	}
	rss_table = (uint8_t *)dma.idi_vaddr;

	for (i = 0; i < buckets_num; i++)
		rss_table[i] = i % sc->rx_num_queues;

	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
	    buckets_num * sizeof(*rss_table), &dma);
	if (error != 0)
		device_printf(sc->dev, "Failed to setup RSS table\n");

	iflib_dma_free(&dma);

	return (error);
}

static int
enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
    iflib_dma_info_t dma)
{
	struct enetc_ctrl_queue *queue;
	struct enetc_cbd *desc;
	int timeout = 1000;

	queue = &sc->ctrl_queue;
	desc = &queue->ring[queue->pidx];

	if (++queue->pidx == ENETC_MIN_DESC)
		queue->pidx = 0;

	desc->addr[0] = (uint32_t)dma->idi_paddr;
	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
	desc->index = 0;
	desc->length = (uint16_t)size;
	desc->cmd = (uint8_t)cmd;
	desc->cls = (uint8_t)(cmd >> 8);
	desc->status_flags = 0;

	/* Sync command packet, */
	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
	/* and the control ring. */
	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
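	/*
	 * Ring the doorbell by publishing the new producer index; the
	 * hardware signals completion by advancing its consumer index,
	 * which the loop below polls for.
	 */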
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);

	while (--timeout != 0) {
		DELAY(20);
		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
			break;
	}

	if (timeout == 0)
		return (ETIMEDOUT);

	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
	return (0);
}

static void
enetc_init_hw(struct enetc_softc *sc)
{
	uint32_t val;
	int error;

	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
	ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);

	ENETC_WR4(sc, ENETC_SICAR0,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
	ENETC_WR4(sc, ENETC_SICAR2,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_init_ctrl(sc);
	error = enetc_setup_rss(sc);
	if (error != 0)
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
	else
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
}
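
/*
 * Program the control BD ring base address and length into the station
 * interface registers and reset both ring indices before enabling it.
 */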
static void
enetc_init_ctrl(struct enetc_softc *sc)
{
	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;

	ENETC_WR4(sc, ENETC_SICBDRBAR0,
	    (uint32_t)queue->dma.idi_paddr);
	ENETC_WR4(sc, ENETC_SICBDRBAR1,
	    (uint32_t)(queue->dma.idi_paddr >> 32));
	ENETC_WR4(sc, ENETC_SICBDRLENR,
	    queue->dma.idi_size / sizeof(struct enetc_cbd));

	queue->pidx = 0;
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
}

static void
enetc_init_tx(struct enetc_softc *sc)
{
	struct enetc_tx_queue *queue;
	int i;

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];

		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);

		/*
		 * Even though it is undocumented, resetting the TX ring
		 * indices results in a TX hang.
		 * Do the same as Linux and simply keep them unchanged
		 * for the driver's lifetime.
		 */
#if 0
		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
#endif
		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
	}
}

static void
enetc_init_rx(struct enetc_softc *sc)
{
	struct enetc_rx_queue *queue;
	uint32_t rx_buf_size;
	int i;

	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];

		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
		queue->enabled = false;
	}
}

static u_int
enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint64_t *bitmap = arg;
	uint64_t address = 0;
	uint8_t hash = 0;
	bool bit;
	int i, j;

	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);

	/*
	 * The six-bit hash is calculated by XORing every
	 * 6th bit of the address.
	 * It is then used as an index into a bitmap that is
	 * written to the device.
	 */
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8; j++)
			bit ^= !!(address & BIT(i + j*6));

		hash |= bit << i;
	}

	*bitmap |= (1ULL << hash);
	return (1);
}

static void
enetc_setup_multicast(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	if_t ifp;
	uint64_t bitmap = 0;
	uint8_t revid;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);
	revid = pci_get_revid(sc->dev);

	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);

	/*
	 * In revision 1 of this chip the positions of the multicast and
	 * unicast hash filter registers are flipped.
	 */
	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
}

static uint8_t
enetc_hash_vid(uint16_t vid)
{
	uint8_t hash = 0;
	bool bit;
	int i;

	for (i = 0; i < 6; i++) {
		bit = vid & BIT(i);
		bit ^= !!(vid & BIT(i + 6));
		hash |= bit << i;
	}

	return (hash);
}
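
/*
 * VLAN hash filter maintenance: a per-hash reference count is kept in
 * sc->vlan_bitmap so that the hardware filter bit is only cleared once
 * the last VID mapping to a given hash is unregistered.
 */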
static void
enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	/* Check if hash is already present in the bitmap. */
	if (++sc->vlan_bitmap[hash] != 1)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap |= BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

static void
enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	MPASS(sc->vlan_bitmap[hash] > 0);
	if (--sc->vlan_bitmap[hash] != 0)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap &= ~BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

static void
enetc_init(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	if_t ifp;
	uint16_t max_frame_length;
	int baudrate;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	max_frame_length = sc->shared->isc_max_frame_size;
	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);

	/* Set max RX and TX frame lengths. */
	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);

	/* Set "VLAN promiscuous" mode if filtering is disabled. */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
	else
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1));

	sc->rbmr = ENETC_RBMR_EN;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
		sc->rbmr |= ENETC_RBMR_VTE;

	/* Write MAC address to hardware. */
	enetc_set_hwaddr(sc);

	enetc_init_tx(sc);
	enetc_init_rx(sc);

	if (sc->fixed_link) {
		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
	} else {
		/*
		 * Can't return an error from this function, there is not much
		 * we can do if this fails.
		 */
		miid = device_get_softc(sc->miibus);
		(void)mii_mediachg(miid);
	}

	enetc_promisc_set(ctx, if_getflags(ifp));
}
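
/*
 * Disable a TX ring: wait for the hardware consumer index to catch up
 * with the producer index (i.e. for the ring to drain) before clearing
 * the ring enable bit.
 */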
static void
enetc_disable_txq(struct enetc_softc *sc, int qid)
{
	qidx_t cidx, pidx;
	int timeout = 10000;	/* this * DELAY(100) = 1s */

	/* At this point iflib shouldn't be enqueuing any more frames. */
	pidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBPIR);
	cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);

	while (pidx != cidx && timeout--) {
		DELAY(100);
		cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR);
	}

	if (timeout == 0)
		device_printf(sc->dev,
		    "Timeout while waiting for txq%d to stop transmitting packets\n",
		    qid);

	ENETC_TXQ_WR4(sc, qid, ENETC_TBMR, 0);
}

static void
enetc_stop(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);

	for (i = 0; i < sc->tx_num_queues; i++)
		enetc_disable_txq(sc, i);
}

static int
enetc_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *rx_queue;
	struct enetc_tx_queue *tx_queue;
	int vector = 0, i, error;
	char irq_name[16];

	sc = iflib_get_softc(ctx);

	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
	MPASS(sc->rx_num_queues == sc->tx_num_queues);

	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
		rx_queue = &sc->rx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
		error = iflib_irq_alloc_generic(ctx,
		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
		    NULL, rx_queue, i, irq_name);
		if (error != 0)
			goto fail;

		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
	}
	vector = 0;
	for (i = 0; i < sc->tx_num_queues; i++, vector++) {
		tx_queue = &sc->tx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
		    IFLIB_INTR_TX, tx_queue, i, irq_name);

		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
	}

	return (0);
fail:
	for (i = 0; i < sc->rx_num_queues; i++) {
		rx_queue = &sc->rx_queues[i];
		iflib_irq_free(ctx, &rx_queue->irq);
	}
	return (error);
}

static int
enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
	return (0);
}

static int
enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
	return (0);
}

static void
enetc_intr_enable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
}

static void
enetc_intr_disable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
}
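
/*
 * iflib txd_encap callback: translate a prepared if_pkt_info_t into a
 * chain of TX buffer descriptors, starting at ipi_pidx and returning
 * the next free ring index through ipi_new_pidx.
 */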
static int
enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	union enetc_tx_bd *desc;
	bus_dma_segment_t *segs;
	qidx_t pidx, queue_len;
	qidx_t i = 0;

	queue = &sc->tx_queues[ipi->ipi_qsidx];
	segs = ipi->ipi_segs;
	pidx = ipi->ipi_pidx;
	queue_len = sc->tx_queue_size;

	/*
	 * First descriptor is special. We use it to set frame
	 * related information and offloads, e.g. VLAN tag.
	 */
	desc = &queue->ring[pidx];
	bzero(desc, sizeof(*desc));
	desc->frm_len = ipi->ipi_len;
	desc->addr = segs[i].ds_addr;
	desc->buf_len = segs[i].ds_len;
	if (ipi->ipi_flags & IPI_TX_INTR)
		desc->flags = ENETC_TXBD_FLAGS_FI;

	i++;
	if (++pidx == queue_len)
		pidx = 0;

	if (ipi->ipi_mflags & M_VLANTAG) {
		/* VLAN tag is inserted in a separate descriptor. */
		desc->flags |= ENETC_TXBD_FLAGS_EX;
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->ext.vid = ipi->ipi_vtag;
		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
		if (++pidx == queue_len)
			pidx = 0;
	}

	/* Now add remaining descriptors. */
	for (; i < ipi->ipi_nsegs; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->addr = segs[i].ds_addr;
		desc->buf_len = segs[i].ds_len;

		if (++pidx == queue_len)
			pidx = 0;
	}

	desc->flags |= ENETC_TXBD_FLAGS_F;
	ipi->ipi_new_pidx = pidx;

	return (0);
}

static void
enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
}
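
/*
 * iflib txd_credits_update callback: report (and, when "clear" is set,
 * consume) the number of descriptors the hardware has completed since
 * the last call.
 */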
static int
enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	int cidx, hw_cidx, count;

	queue = &sc->tx_queues[qid];
	hw_cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
	cidx = queue->cidx;

	/*
	 * RM states that the ring can hold at most ring_size - 1 descriptors.
	 * Thanks to that we can assume that the ring is empty if cidx == pidx.
	 * This requirement is guaranteed implicitly by iflib as it will only
	 * encap a new frame if we have at least nfrags + 2 descriptors available
	 * on the ring. This driver uses at most one additional descriptor for
	 * VLAN tag insertion.
	 * Also RM states that the TBCIR register is only updated once all
	 * descriptors in the chain have been processed.
	 */
	if (cidx == hw_cidx)
		return (0);

	if (!clear)
		return (1);

	count = hw_cidx - cidx;
	if (count < 0)
		count += sc->tx_queue_size;

	queue->cidx = hw_cidx;

	return (count);
}

static int
enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	qidx_t hw_pidx, queue_len;
	union enetc_rx_bd *desc;
	int count = 0;

	queue = &sc->rx_queues[qid];
	desc = &queue->ring[pidx];
	queue_len = sc->rx_queue_size;

	if (desc->r.lstatus == 0)
		return (0);

	if (budget == 1)
		return (1);

	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
	while (pidx != hw_pidx && count < budget) {
		desc = &queue->ring[pidx];
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			count++;

		if (++pidx == queue_len)
			pidx = 0;
	}

	return (count);
}

static int
enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	uint16_t buf_len, pkt_size = 0;
	qidx_t cidx, queue_len;
	uint32_t status;
	int i;

	cidx = ri->iri_cidx;
	queue = &sc->rx_queues[ri->iri_qsidx];
	desc = &queue->ring[cidx];
	status = desc->r.lstatus;
	queue_len = sc->rx_queue_size;

	/*
	 * Ready bit will be set only when all descriptors
	 * in the chain have been processed.
	 */
	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
		return (EAGAIN);

	/* Pass RSS hash. */
	if (status & ENETC_RXBD_FLAG_RSSV) {
		ri->iri_flowid = desc->r.rss_hash;
		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}

	/* Pass IP checksum status. */
	ri->iri_csum_flags = CSUM_IP_CHECKED;
	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
		ri->iri_csum_flags |= CSUM_IP_VALID;

	/* Pass extracted VLAN tag. */
	if (status & ENETC_RXBD_FLAG_VLAN) {
		ri->iri_vtag = desc->r.vlan_opt;
		ri->iri_flags = M_VLANTAG;
	}

	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
		buf_len = desc->r.buf_len;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = buf_len;
		pkt_size += buf_len;
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			break;

		if (++cidx == queue_len)
			cidx = 0;

		desc = &queue->ring[cidx];
	}
	ri->iri_nfrags = i + 1;
	ri->iri_len = pkt_size;

	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
		return (EBADMSG);

	return (0);
}
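
/*
 * iflib rxd_refill callback: publish fresh buffer physical addresses to
 * the RX ring.  The ring itself is only enabled once enough descriptors
 * are ready; see the comment near the end of the function.
 */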
static void
enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	qidx_t pidx, queue_len;
	uint64_t *paddrs;
	int i, count;

	queue = &sc->rx_queues[iru->iru_qsidx];
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;
	queue_len = sc->rx_queue_size;

	for (i = 0; i < count; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));

		desc->w.addr = paddrs[i];
		if (++pidx == queue_len)
			pidx = 0;
	}
	/*
	 * After enabling the queue the NIC will prefetch the first
	 * 8 descriptors. It probably assumes that the RX is fully
	 * refilled when cidx == pidx.
	 * Enable it only if we have enough descriptors ready on the ring.
	 */
	if (!queue->enabled && pidx >= 8) {
		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
		queue->enabled = true;
	}
}

static void
enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
}

static uint64_t
enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct enetc_softc *sc;
	if_t ifp;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
	case IFCOUNTER_OERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct enetc_softc *sc = iflib_get_softc(ctx);
	uint32_t max_frame_size;

	max_frame_size = mtu +
	    ETHER_HDR_LEN +
	    ETHER_CRC_LEN +
	    sizeof(struct ether_vlan_header);

	if (max_frame_size > ENETC_MAX_FRAME_LEN)
		return (EINVAL);

	sc->shared->isc_max_frame_size = max_frame_size;

	return (0);
}

static int
enetc_promisc_set(if_ctx_t ctx, int flags)
{
	struct enetc_softc *sc;
	uint32_t reg = 0;

	sc = iflib_get_softc(ctx);

	if (flags & IFF_PROMISC)
		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
	else if (flags & IFF_ALLMULTI)
		reg = ENETC_PSIPMR_SET_MP(0);

	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);

	return (0);
}
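
/* iflib timer callback, invoked periodically with each TX queue's index. */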
static void
enetc_timer(if_ctx_t ctx, uint16_t qid)
{
	/*
	 * Poll PHY status. Do this only for qid 0 to save
	 * some cycles.
	 */
	if (qid == 0)
		iflib_admin_intr_deferred(ctx);
}

static void
enetc_update_admin_status(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);

	if (!sc->fixed_link) {
		miid = device_get_softc(sc->miibus);
		mii_tick(miid);
	}
}

static int
enetc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enetc_softc *sc;
	int val;

	sc = iflib_get_softc(device_get_softc(dev));

	mtx_lock(&sc->mii_lock);
	val = enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg);
	mtx_unlock(&sc->mii_lock);

	return (val);
}

static int
enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct enetc_softc *sc;
	int ret;

	sc = iflib_get_softc(device_get_softc(dev));

	mtx_lock(&sc->mii_lock);
	ret = enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg, data);
	mtx_unlock(&sc->mii_lock);

	return (ret);
}

static void
enetc_miibus_linkchg(device_t dev)
{

	enetc_miibus_statchg(dev);
}

static void
enetc_miibus_statchg(device_t dev)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	int link_state, baudrate;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);

	baudrate = ifmedia_baudrate(miid->mii_media_active);
	if (miid->mii_media_status & IFM_AVALID) {
		if (miid->mii_media_status & IFM_ACTIVE)
			link_state = LINK_STATE_UP;
		else
			link_state = LINK_STATE_DOWN;
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	iflib_link_state_change(sc->ctx, link_state, baudrate);
}

static int
enetc_media_change(if_t ifp)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);

	mii_mediachg(miid);
	return (0);
}

static void
enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);

	mii_pollstat(miid);

	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}

static int
enetc_fixed_media_change(if_t ifp)
{

	if_printf(ifp, "Can't change media in fixed-link mode.\n");
	return (0);
}

static void
enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(if_getsoftc(ifp));

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
}