/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Alstom Group.
 * Copyright (c) 2021 Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/iflib.h>

#include <dev/enetc/enetc_hw.h>
#include <dev/enetc/enetc.h>
#include <dev/enetc/enetc_mdio.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "ifdi_if.h"
#include "miibus_if.h"

static device_register_t enetc_register;

static ifdi_attach_pre_t enetc_attach_pre;
static ifdi_attach_post_t enetc_attach_post;
static ifdi_detach_t enetc_detach;

static ifdi_tx_queues_alloc_t enetc_tx_queues_alloc;
static ifdi_rx_queues_alloc_t enetc_rx_queues_alloc;
static ifdi_queues_free_t enetc_queues_free;

static ifdi_init_t enetc_init;
static ifdi_stop_t enetc_stop;

static ifdi_msix_intr_assign_t enetc_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t enetc_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t enetc_rx_queue_intr_enable;
static ifdi_intr_enable_t enetc_intr_enable;
static ifdi_intr_disable_t enetc_intr_disable;

static int enetc_isc_txd_encap(void*, if_pkt_info_t);
static void enetc_isc_txd_flush(void*, uint16_t, qidx_t);
static int enetc_isc_txd_credits_update(void*, uint16_t, bool);
static int enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
static int enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
static void enetc_isc_rxd_refill(void*, if_rxd_update_t);
static void enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);

static void enetc_vlan_register(if_ctx_t, uint16_t);
static void enetc_vlan_unregister(if_ctx_t, uint16_t);

static uint64_t enetc_get_counter(if_ctx_t, ift_counter);
static int enetc_promisc_set(if_ctx_t, int);
static int enetc_mtu_set(if_ctx_t, uint32_t);
static void enetc_setup_multicast(if_ctx_t);
static void enetc_timer(if_ctx_t, uint16_t);
static void enetc_update_admin_status(if_ctx_t);

static miibus_readreg_t enetc_miibus_readreg;
static miibus_writereg_t enetc_miibus_writereg;
static miibus_linkchg_t enetc_miibus_linkchg;
static miibus_statchg_t enetc_miibus_statchg;

static int enetc_media_change(if_t);
static void enetc_media_status(if_t, struct ifmediareq*);

static int enetc_fixed_media_change(if_t);
static void enetc_fixed_media_status(if_t, struct ifmediareq*);

static void enetc_max_nqueues(struct enetc_softc*, int*, int*);
static int enetc_setup_phy(struct enetc_softc*);

static void enetc_get_hwaddr(struct enetc_softc*);
static void enetc_set_hwaddr(struct enetc_softc*);
static int enetc_setup_rss(struct enetc_softc*);

static void enetc_init_hw(struct enetc_softc*);
static void enetc_init_ctrl(struct enetc_softc*);
static void enetc_init_tx(struct enetc_softc*);
static void enetc_init_rx(struct enetc_softc*);

static int enetc_ctrl_send(struct enetc_softc*,
    uint16_t, uint16_t, iflib_dma_info_t);

static const char enetc_driver_version[] = "1.0.0";

static pci_vendor_info_t enetc_vendor_info_array[] = {
	PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
	    "Freescale ENETC PCIe Gigabit Ethernet Controller"),
	PVID_END
};

#define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)

static device_method_t enetc_methods[] = {
	DEVMETHOD(device_register, enetc_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),

	DEVMETHOD(miibus_readreg, enetc_miibus_readreg),
	DEVMETHOD(miibus_writereg, enetc_miibus_writereg),
	DEVMETHOD(miibus_linkchg, enetc_miibus_linkchg),
	DEVMETHOD(miibus_statchg, enetc_miibus_statchg),

	DEVMETHOD_END
};

static driver_t enetc_driver = {
	"enetc", enetc_methods, sizeof(struct enetc_softc)
};

static devclass_t enetc_devclass;
DRIVER_MODULE(miibus, enetc, miibus_driver, miibus_devclass, NULL, NULL);
DRIVER_MODULE(enetc, pci, enetc_driver, enetc_devclass, NULL, NULL);
MODULE_VERSION(enetc, 1);

IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);

MODULE_DEPEND(enetc, ether, 1, 1, 1);
MODULE_DEPEND(enetc, iflib, 1, 1, 1);
MODULE_DEPEND(enetc, miibus, 1, 1, 1);

static device_method_t enetc_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, enetc_attach_pre),
	DEVMETHOD(ifdi_attach_post, enetc_attach_post),
	DEVMETHOD(ifdi_detach, enetc_detach),

	DEVMETHOD(ifdi_init, enetc_init),
	DEVMETHOD(ifdi_stop, enetc_stop),

	DEVMETHOD(ifdi_tx_queues_alloc, enetc_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, enetc_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, enetc_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign, enetc_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable, enetc_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, enetc_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, enetc_intr_enable),
	DEVMETHOD(ifdi_intr_disable, enetc_intr_disable),

	DEVMETHOD(ifdi_vlan_register, enetc_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, enetc_vlan_unregister),

	DEVMETHOD(ifdi_get_counter, enetc_get_counter),
	DEVMETHOD(ifdi_mtu_set, enetc_mtu_set),
	DEVMETHOD(ifdi_multi_set, enetc_setup_multicast),
	DEVMETHOD(ifdi_promisc_set, enetc_promisc_set),
	DEVMETHOD(ifdi_timer, enetc_timer),
	DEVMETHOD(ifdi_update_admin_status, enetc_update_admin_status),

	DEVMETHOD_END
};

static driver_t enetc_iflib_driver = {
	"enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
};

static struct if_txrx enetc_txrx = {
	.ift_txd_encap = enetc_isc_txd_encap,
	.ift_txd_flush = enetc_isc_txd_flush,
	.ift_txd_credits_update = enetc_isc_txd_credits_update,
	.ift_rxd_available = enetc_isc_rxd_available,
	.ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
	.ift_rxd_refill = enetc_isc_rxd_refill,
	.ift_rxd_flush = enetc_isc_rxd_flush
};

static struct if_shared_ctx enetc_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = ENETC_RING_ALIGN,

	.isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
	.isc_rx_nsegments = ENETC_MAX_SCATTER,

	.isc_admin_intrcnt = 0,

	.isc_nfl = 1,
	.isc_nrxqs = 1,
	.isc_ntxqs = 1,

	.isc_vendor_info = enetc_vendor_info_array,
	.isc_driver_version = enetc_driver_version,
	.isc_driver = &enetc_iflib_driver,

	.isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
	.isc_ntxd_min = {ENETC_MIN_DESC},
	.isc_ntxd_max = {ENETC_MAX_DESC},
	.isc_ntxd_default = {ENETC_DEFAULT_DESC},
	.isc_nrxd_min = {ENETC_MIN_DESC},
	.isc_nrxd_max = {ENETC_MAX_DESC},
	.isc_nrxd_default = {ENETC_DEFAULT_DESC}
};

static void*
enetc_register(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (NULL);

	return (&enetc_sctx_init);
}

static void
enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
    int *max_rx_nqueues)
{
	uint32_t val;

	val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
	*max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
	*max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
}

static int
enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
{
	ssize_t size;
	int speed;

	size = OF_getencprop(node, "speed", &speed, sizeof(speed));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Device has fixed-link node without link speed specified\n");
		return (ENXIO);
	}
	switch (speed) {
	case 10:
		speed = IFM_10_T;
		break;
	case 100:
		speed = IFM_100_TX;
		break;
	case 1000:
		speed = IFM_1000_T;
		break;
	case 2500:
		speed = IFM_2500_T;
		break;
	default:
		device_printf(sc->dev, "Unsupported link speed value of %d\n",
		    speed);
		return (ENXIO);
	}
	speed |= IFM_ETHER;

	if (OF_hasprop(node, "full-duplex"))
		speed |= IFM_FDX;
	else
		speed |= IFM_HDX;

	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
	    enetc_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, speed);
	sc->shared->isc_media = &sc->fixed_ifmedia;

	return (0);
}

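/*
 * Configure link management. If the FDT node has a "fixed-link" child,
 * register a single fixed ifmedia entry; otherwise resolve the PHY through
 * the "phy-handle" property and attach miibus to it.
 */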
static int
enetc_setup_phy(struct enetc_softc *sc)
{
	phandle_t node, fixed_link, phy_handle;
	struct mii_data *miid;
	int phy_addr, error;
	ssize_t size;

	node = ofw_bus_get_node(sc->dev);
	fixed_link = ofw_bus_find_child(node, "fixed-link");
	if (fixed_link != 0)
		return (enetc_setup_fixed(sc, fixed_link));

	size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
	if (size <= 0) {
		device_printf(sc->dev,
		    "Failed to acquire PHY handle from FDT.\n");
		return (ENXIO);
	}
	phy_handle = OF_node_from_xref(phy_handle);
	size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
	if (size <= 0) {
		device_printf(sc->dev, "Failed to obtain PHY address\n");
		return (ENXIO);
	}
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
	    enetc_media_change, enetc_media_status,
	    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "mii_attach failed\n");
		return (error);
	}
	miid = device_get_softc(sc->miibus);
	sc->shared->isc_media = &miid->mii_media;

	return (0);
}

static int
enetc_attach_pre(if_ctx_t ctx)
{
	struct ifnet *ifp;
	if_softc_ctx_t scctx;
	struct enetc_softc *sc;
	int error, rid;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	sc->shared = scctx;
	ifp = iflib_get_ifp(ctx);

	pci_save_state(sc->dev);
	pcie_flr(sc->dev, 1000, false);
	pci_restore_state(sc->dev);

	rid = PCIR_BAR(ENETC_BAR_REGS);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->regs == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
		return (ENXIO);
	}

	error = iflib_dma_alloc_align(ctx,
	    ENETC_MIN_DESC * sizeof(struct enetc_cbd),
	    ENETC_RING_ALIGN,
	    &sc->ctrl_queue.dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate control ring\n");
		goto fail;
	}
	sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;

	scctx->isc_txrx = &enetc_txrx;
	scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
	enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);

	if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of TX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
		device_printf(sc->dev,
		    "The number of RX descriptors has to be a multiple of %d\n",
		    ENETC_DESC_ALIGN);
		error = EINVAL;
		goto fail;
	}
	scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
	scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
	scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
	scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;

	error = enetc_mtu_set(ctx, ETHERMTU);
	if (error != 0)
		goto fail;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);

	error = enetc_setup_phy(sc);
	if (error != 0)
		goto fail;

	enetc_get_hwaddr(sc);

	return (0);
fail:
	enetc_detach(ctx);
	return (error);
}

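/*
 * Second stage of attach: finish bring-up by programming the port and
 * station interface registers, now that the PCI resources, control ring
 * and PHY set up in enetc_attach_pre() are in place.
 */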
static int
enetc_attach_post(if_ctx_t ctx)
{

	enetc_init_hw(iflib_get_softc(ctx));
	return (0);
}

static int
enetc_detach(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int error = 0, i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		iflib_irq_free(ctx, &sc->rx_queues[i].irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);

	if (sc->ctrl_queue.dma.idi_size != 0)
		iflib_dma_free(&sc->ctrl_queue.dma);

	return (error);
}

static int
enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct enetc_softc *sc;
	struct enetc_tx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);

	MPASS(ntxqs == 1);

	sc->tx_queues = mallocarray(sc->tx_num_queues,
	    sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for TX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];
		queue->sc = sc;
		queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
		queue->next_to_clean = 0;
		queue->ring_full = false;
	}

	return (0);
}

static int
enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *queue;
	int i;

	sc = iflib_get_softc(ctx);
	MPASS(nrxqs == 1);

	sc->rx_queues = mallocarray(sc->rx_num_queues,
	    sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_queues == NULL) {
		device_printf(sc->dev,
		    "Failed to allocate memory for RX queues.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];
		queue->sc = sc;
		queue->qid = i;
		queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
		queue->ring_paddr = paddrs[i];
	}

	return (0);
}

static void
enetc_queues_free(if_ctx_t ctx)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);

	if (sc->tx_queues != NULL) {
		free(sc->tx_queues, M_DEVBUF);
		sc->tx_queues = NULL;
	}
	if (sc->rx_queues != NULL) {
		free(sc->rx_queues, M_DEVBUF);
		sc->rx_queues = NULL;
	}
}

static void
enetc_get_hwaddr(struct enetc_softc *sc)
{
	struct ether_addr hwaddr;
	uint16_t high;
	uint32_t low;

	low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
	high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));

	memcpy(&hwaddr.octet[0], &low, 4);
	memcpy(&hwaddr.octet[4], &high, 2);

	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet)) {
		ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
		device_printf(sc->dev,
		    "Failed to obtain MAC address, using a random one\n");
		memcpy(&low, &hwaddr.octet[0], 4);
		memcpy(&high, &hwaddr.octet[4], 2);
	}

	iflib_set_mac(sc->ctx, hwaddr.octet);
}

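/*
 * Write the interface lladdr back into the station interface primary MAC
 * address registers: low 32 bits into PSIPMAR0, high 16 bits into PSIPMAR1.
 */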
static void
enetc_set_hwaddr(struct enetc_softc *sc)
{
	struct ifnet *ifp;
	uint16_t high;
	uint32_t low;
	uint8_t *hwaddr;

	ifp = iflib_get_ifp(sc->ctx);
	hwaddr = (uint8_t*)if_getlladdr(ifp);
	low = *((uint32_t*)hwaddr);
	high = *((uint16_t*)(hwaddr+4));

	ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
	ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
}

static int
enetc_setup_rss(struct enetc_softc *sc)
{
	struct iflib_dma_info dma;
	int error, i, buckets_num = 0;
	uint8_t *rss_table;
	uint32_t reg;

	reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
	if (reg & ENETC_SIPCAPR0_RSS) {
		reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
		buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
	}
	if (buckets_num == 0)
		return (ENOTSUP);

	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
		arc4rand((uint8_t *)&reg, sizeof(reg), 0);
		ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
	}

	ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);

	error = iflib_dma_alloc_align(sc->ctx,
	    buckets_num * sizeof(*rss_table),
	    ENETC_RING_ALIGN,
	    &dma,
	    0);
	if (error != 0) {
		device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
		return (error);
	}
	rss_table = (uint8_t *)dma.idi_vaddr;

	for (i = 0; i < buckets_num; i++)
		rss_table[i] = i % sc->rx_num_queues;

	error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
	    buckets_num * sizeof(*rss_table), &dma);
	if (error != 0)
		device_printf(sc->dev, "Failed to setup RSS table\n");

	iflib_dma_free(&dma);

	return (error);
}

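/*
 * Post a single command descriptor on the control BD ring and busy-wait
 * (up to roughly 20ms) for the hardware to advance its consumer index,
 * which signals completion.
 */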
static int
enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
    iflib_dma_info_t dma)
{
	struct enetc_ctrl_queue *queue;
	struct enetc_cbd *desc;
	int timeout = 1000;

	queue = &sc->ctrl_queue;
	desc = &queue->ring[queue->pidx];

	if (++queue->pidx == ENETC_MIN_DESC)
		queue->pidx = 0;

	desc->addr[0] = (uint32_t)dma->idi_paddr;
	desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
	desc->index = 0;
	desc->length = (uint16_t)size;
	desc->cmd = (uint8_t)cmd;
	desc->cls = (uint8_t)(cmd >> 8);
	desc->status_flags = 0;

	/* Sync command packet, */
	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
	/* and the control ring. */
	bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);

	while (--timeout != 0) {
		DELAY(20);
		if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
			break;
	}

	if (timeout == 0)
		return (ETIMEDOUT);

	bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
	return (0);
}

static void
enetc_init_hw(struct enetc_softc *sc)
{
	uint32_t val;
	int error;

	ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
	    ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
	    ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
	ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
	val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
	val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
	ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1));
	ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
	ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
	ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
	ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);

	ENETC_WR4(sc, ENETC_SICAR0,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
	ENETC_WR4(sc, ENETC_SICAR2,
	    ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_init_ctrl(sc);
	error = enetc_setup_rss(sc);
	if (error != 0)
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
	else
		ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);

}

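/*
 * Point the hardware at the control BD ring allocated in enetc_attach_pre(),
 * reset its producer and consumer indices and enable the ring.
 */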
static void
enetc_init_ctrl(struct enetc_softc *sc)
{
	struct enetc_ctrl_queue *queue = &sc->ctrl_queue;

	ENETC_WR4(sc, ENETC_SICBDRBAR0,
	    (uint32_t)queue->dma.idi_paddr);
	ENETC_WR4(sc, ENETC_SICBDRBAR1,
	    (uint32_t)(queue->dma.idi_paddr >> 32));
	ENETC_WR4(sc, ENETC_SICBDRLENR,
	    queue->dma.idi_size / sizeof(struct enetc_cbd));

	queue->pidx = 0;
	ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
	ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
}

static void
enetc_init_tx(struct enetc_softc *sc)
{
	struct enetc_tx_queue *queue;
	int i;

	for (i = 0; i < sc->tx_num_queues; i++) {
		queue = &sc->tx_queues[i];

		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);

		/*
		 * Even though it is undocumented, resetting the TX ring
		 * indices results in a TX hang.
		 * Do the same as Linux and simply keep those unchanged
		 * for the driver's lifetime.
		 */
#if 0
		ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
		ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
#endif
		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
	}

}

static void
enetc_init_rx(struct enetc_softc *sc)
{
	struct enetc_rx_queue *queue;
	uint32_t rx_buf_size;
	int i;

	rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);

	for (i = 0; i < sc->rx_num_queues; i++) {
		queue = &sc->rx_queues[i];

		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
		    (uint32_t)queue->ring_paddr);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
		    (uint32_t)(queue->ring_paddr >> 32));
		ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
		ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
		ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
		queue->enabled = false;
	}
}

static u_int
enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint64_t *bitmap = arg;
	uint64_t address = 0;
	uint8_t hash = 0;
	bool bit;
	int i, j;

	bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);

	/*
	 * The six-bit hash is calculated by XORing every
	 * 6th bit of the address.
	 * It is then used as an index in a bitmap that is
	 * written to the device.
	 */
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8; j++)
			bit ^= (address & BIT(i + j*6)) != 0;

		hash |= bit << i;
	}

	*bitmap |= BIT(hash);
	return (1);
}

static void
enetc_setup_multicast(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct ifnet *ifp;
	uint64_t bitmap = 0;
	uint8_t revid;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);
	revid = pci_get_revid(sc->dev);

	if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);

	/*
	 * In revision 1 of this chip the positions of the multicast and
	 * unicast hash filter registers are swapped.
	 */
	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);

}

static uint8_t
enetc_hash_vid(uint16_t vid)
{
	uint8_t hash = 0;
	bool bit;
	int i;

	for (i = 0; i < 6; i++) {
		bit = (vid & BIT(i)) != 0;
		bit ^= (vid & BIT(i + 6)) != 0;
		hash |= bit << i;
	}

	return (hash);
}

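/*
 * VLAN hash filter handling. Multiple VIDs can map to the same 6-bit hash,
 * so a per-hash reference count is kept and the matching bit in the 64-bit
 * PSIVHFR filter is only set for the first user and cleared with the last.
 */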
static void
enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	/* Check if the hash is already present in the bitmap. */
	if (++sc->vlan_bitmap[hash] != 1)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap |= BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

static void
enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
{
	struct enetc_softc *sc;
	uint8_t hash;
	uint64_t bitmap;

	sc = iflib_get_softc(ctx);
	hash = enetc_hash_vid(vid);

	MPASS(sc->vlan_bitmap[hash] > 0);
	if (--sc->vlan_bitmap[hash] != 0)
		return;

	bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
	bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
	bitmap &= ~BIT(hash);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
	ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
}

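/*
 * ifdi_init: bring the interface up. Program the maximum frame lengths,
 * VLAN filtering mode and MAC address, set up the TX/RX BD rings and either
 * report the fixed link state or program the PHY via mii_mediachg().
 */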
static void
enetc_init(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	struct ifnet *ifp;
	uint16_t max_frame_length;
	int baudrate;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	max_frame_length = sc->shared->isc_max_frame_size;
	MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);

	/* Set max RX and TX frame lengths. */
	ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
	ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);

	/* Set "VLAN promiscuous" mode if filtering is disabled. */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
	else
		ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
		    ENETC_PSIPVMR_SET_VUTA(1));

	sc->rbmr = ENETC_RBMR_EN | ENETC_RBMR_AL;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
		sc->rbmr |= ENETC_RBMR_VTE;

	/* Write MAC address to hardware. */
	enetc_set_hwaddr(sc);

	enetc_init_tx(sc);
	enetc_init_rx(sc);

	if (sc->fixed_link) {
		baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
		iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
	} else {
		/*
		 * Can't return an error from this function; there is not much
		 * we can do if this fails.
		 */
		miid = device_get_softc(sc->miibus);
		(void)mii_mediachg(miid);
	}

	enetc_promisc_set(ctx, if_getflags(ifp));
}

static void
enetc_stop(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBMR, 0);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
}

static int
enetc_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct enetc_softc *sc;
	struct enetc_rx_queue *rx_queue;
	struct enetc_tx_queue *tx_queue;
	int vector = 0, i, error;
	char irq_name[16];

	sc = iflib_get_softc(ctx);

	MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
	MPASS(sc->rx_num_queues == sc->tx_num_queues);

	for (i = 0; i < sc->rx_num_queues; i++, vector++) {
		rx_queue = &sc->rx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
		error = iflib_irq_alloc_generic(ctx,
		    &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
		    NULL, rx_queue, i, irq_name);
		if (error != 0)
			goto fail;

		ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
		ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
		    ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
	}
	vector = 0;
	for (i = 0; i < sc->tx_num_queues; i++, vector++) {
		tx_queue = &sc->tx_queues[i];
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
		    IFLIB_INTR_TX, tx_queue, i, irq_name);

		ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
	}

	return (0);
fail:
	for (i = 0; i < sc->rx_num_queues; i++) {
		rx_queue = &sc->rx_queues[i];
		iflib_irq_free(ctx, &rx_queue->irq);
	}
	return (error);
}

static int
enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
	return (0);
}

static int
enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ctx);
	ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
	return (0);
}

static void
enetc_intr_enable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
}

static void
enetc_intr_disable(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	int i;

	sc = iflib_get_softc(ctx);

	for (i = 0; i < sc->rx_num_queues; i++)
		ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);

	for (i = 0; i < sc->tx_num_queues; i++)
		ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
}

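/*
 * iflib TX fast path: encapsulate one packet on the TX BD ring. The first
 * BD carries the frame length and flags, an optional extension BD carries
 * the VLAN tag to insert, the remaining segments follow, and the last BD
 * is marked with the F (final) flag.
 */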
static int
enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	union enetc_tx_bd *desc;
	bus_dma_segment_t *segs;
	qidx_t pidx, queue_len;
	qidx_t i = 0;

	queue = &sc->tx_queues[ipi->ipi_qsidx];
	segs = ipi->ipi_segs;
	pidx = ipi->ipi_pidx;
	queue_len = sc->tx_queue_size;

	/*
	 * First descriptor is special. We use it to set frame
	 * related information and offloads, e.g. VLAN tag.
	 */
	desc = &queue->ring[pidx];
	bzero(desc, sizeof(*desc));
	desc->frm_len = ipi->ipi_len;
	desc->addr = segs[i].ds_addr;
	desc->buf_len = segs[i].ds_len;
	if (ipi->ipi_flags & IPI_TX_INTR)
		desc->flags = ENETC_TXBD_FLAGS_FI;

	i++;
	if (++pidx == queue_len)
		pidx = 0;

	if (ipi->ipi_mflags & M_VLANTAG) {
		/* VLAN tag is inserted in a separate descriptor. */
		desc->flags |= ENETC_TXBD_FLAGS_EX;
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->ext.vid = ipi->ipi_vtag;
		desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
		if (++pidx == queue_len)
			pidx = 0;
	}

	/* Now add remaining descriptors. */
	for (; i < ipi->ipi_nsegs; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));
		desc->addr = segs[i].ds_addr;
		desc->buf_len = segs[i].ds_len;

		if (++pidx == queue_len)
			pidx = 0;
	}

	desc->flags |= ENETC_TXBD_FLAGS_F;
	ipi->ipi_new_pidx = pidx;
	if (pidx == queue->next_to_clean)
		queue->ring_full = true;

	return (0);
}

static void
enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
}

static int
enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
{
	struct enetc_softc *sc = data;
	struct enetc_tx_queue *queue;
	qidx_t next_to_clean, next_to_process;
	int clean_count;

	queue = &sc->tx_queues[qid];
	next_to_process =
	    ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
	next_to_clean = queue->next_to_clean;

	if (next_to_clean == next_to_process && !queue->ring_full)
		return (0);

	if (!clear)
		return (1);

	clean_count = next_to_process - next_to_clean;
	if (clean_count <= 0)
		clean_count += sc->tx_queue_size;

	queue->next_to_clean = next_to_process;
	queue->ring_full = false;

	return (clean_count);
}

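/*
 * iflib RX fast path: report how many completed frames are waiting on the
 * ring between the given index and the hardware producer index, without
 * exceeding the budget.
 */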
static int
enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	qidx_t hw_pidx, queue_len;
	union enetc_rx_bd *desc;
	int count = 0;

	queue = &sc->rx_queues[qid];
	desc = &queue->ring[pidx];
	queue_len = sc->rx_queue_size;

	if (desc->r.lstatus == 0)
		return (0);

	if (budget == 1)
		return (1);

	hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
	while (pidx != hw_pidx && count < budget) {
		desc = &queue->ring[pidx];
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			count++;

		if (++pidx == queue_len)
			pidx = 0;
	}

	return (count);
}

static int
enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	uint16_t buf_len, pkt_size = 0;
	qidx_t cidx, queue_len;
	uint32_t status;
	int i;

	cidx = ri->iri_cidx;
	queue = &sc->rx_queues[ri->iri_qsidx];
	desc = &queue->ring[cidx];
	status = desc->r.lstatus;
	queue_len = sc->rx_queue_size;

	/*
	 * Ready bit will be set only when all descriptors
	 * in the chain have been processed.
	 */
	if ((status & ENETC_RXBD_LSTATUS_R) == 0)
		return (EAGAIN);

	/* Pass RSS hash. */
	if (status & ENETC_RXBD_FLAG_RSSV) {
		ri->iri_flowid = desc->r.rss_hash;
		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}

	/* Pass IP checksum status. */
	ri->iri_csum_flags = CSUM_IP_CHECKED;
	if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
		ri->iri_csum_flags |= CSUM_IP_VALID;

	/* Pass extracted VLAN tag. */
	if (status & ENETC_RXBD_FLAG_VLAN) {
		ri->iri_vtag = desc->r.vlan_opt;
		ri->iri_flags = M_VLANTAG;
	}

	for (i = 0; i < ENETC_MAX_SCATTER; i++) {
		buf_len = desc->r.buf_len;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = buf_len;
		pkt_size += buf_len;
		if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
			break;

		if (++cidx == queue_len)
			cidx = 0;

		desc = &queue->ring[cidx];
	}
	ri->iri_nfrags = i + 1;
	ri->iri_len = pkt_size + ENETC_RX_IP_ALIGN;
	ri->iri_pad = ENETC_RX_IP_ALIGN;

	MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
	if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
		return (EBADMSG);

	return (0);
}

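/*
 * iflib RX fast path: hand fresh receive buffers back to the hardware by
 * writing their physical addresses into the descriptors starting at the
 * producer index.
 */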
static void
enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
{
	struct enetc_softc *sc = data;
	struct enetc_rx_queue *queue;
	union enetc_rx_bd *desc;
	qidx_t pidx, queue_len;
	uint64_t *paddrs;
	int i, count;

	queue = &sc->rx_queues[iru->iru_qsidx];
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;
	queue_len = sc->rx_queue_size;

	for (i = 0; i < count; i++) {
		desc = &queue->ring[pidx];
		bzero(desc, sizeof(*desc));

		desc->w.addr = paddrs[i];
		if (++pidx == queue_len)
			pidx = 0;
	}
	/*
	 * After enabling the queue the NIC will prefetch the first
	 * 8 descriptors. It probably assumes that the RX is fully
	 * refilled when cidx == pidx.
	 * Enable it only if we have enough descriptors ready on the ring.
	 */
	if (!queue->enabled && pidx >= 8) {
		ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
		queue->enabled = true;
	}
}

static void
enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
{
	struct enetc_softc *sc = data;

	ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
}

static uint64_t
enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct enetc_softc *sc;
	struct ifnet *ifp;

	sc = iflib_get_softc(ctx);
	ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
	case IFCOUNTER_OERRORS:
		return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct enetc_softc *sc = iflib_get_softc(ctx);
	uint32_t max_frame_size;

	max_frame_size = mtu +
	    ETHER_HDR_LEN +
	    ETHER_CRC_LEN +
	    sizeof(struct ether_vlan_header);

	if (max_frame_size > ENETC_MAX_FRAME_LEN)
		return (EINVAL);

	sc->shared->isc_max_frame_size = max_frame_size;

	return (0);
}

static int
enetc_promisc_set(if_ctx_t ctx, int flags)
{
	struct enetc_softc *sc;
	uint32_t reg = 0;

	sc = iflib_get_softc(ctx);

	if (flags & IFF_PROMISC)
		reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
	else if (flags & IFF_ALLMULTI)
		reg = ENETC_PSIPMR_SET_MP(0);

	ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);

	return (0);
}

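/*
 * ifdi_timer is called for every TX queue; use it to schedule the admin
 * task, which polls the PHY state through mii_tick().
 */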
static void
enetc_timer(if_ctx_t ctx, uint16_t qid)
{
	/*
	 * Poll PHY status. Do this only for qid 0 to save
	 * some cycles.
	 */
	if (qid == 0)
		iflib_admin_intr_deferred(ctx);
}

static void
enetc_update_admin_status(if_ctx_t ctx)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);

	if (!sc->fixed_link) {
		miid = device_get_softc(sc->miibus);
		mii_tick(miid);
	}
}

static int
enetc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(device_get_softc(dev));
	return (enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg));
}

static int
enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(device_get_softc(dev));
	return (enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
	    phy, reg, data));
}

static void
enetc_miibus_linkchg(device_t dev)
{

	enetc_miibus_statchg(dev);
}

static void
enetc_miibus_statchg(device_t dev)
{
	struct enetc_softc *sc;
	struct mii_data *miid;
	int link_state, baudrate;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);

	baudrate = ifmedia_baudrate(miid->mii_media_active);
	if (miid->mii_media_status & IFM_AVALID) {
		if (miid->mii_media_status & IFM_ACTIVE)
			link_state = LINK_STATE_UP;
		else
			link_state = LINK_STATE_DOWN;
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	iflib_link_state_change(sc->ctx, link_state, baudrate);

}

static int
enetc_media_change(if_t ifp)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(ifp->if_softc);
	miid = device_get_softc(sc->miibus);

	mii_mediachg(miid);
	return (0);
}

static void
enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(ifp->if_softc);
	miid = device_get_softc(sc->miibus);

	mii_pollstat(miid);

	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}

static int
enetc_fixed_media_change(if_t ifp)
{

	if_printf(ifp, "Can't change media in fixed-link mode.\n");
	return (0);
}

static void
enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct enetc_softc *sc;

	sc = iflib_get_softc(ifp->if_softc);

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
	return;
}