/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation, Inc.
 *
 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
 *
 * Product information:
 * LAN7430 https://www.microchip.com/en-us/product/LAN7430
 *   - Integrated IEEE 802.3 compliant PHY
 * LAN7431 https://www.microchip.com/en-us/product/LAN7431
 *   - RGMII Interface
 *
 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
 *
 * UNIMPLEMENTED FEATURES
 * ----------------------
 * A number of features supported by the LAN743X devices are not yet
 * implemented in this driver:
 *
 * - Multiple (up to 4) RX queues support
 *   - Just needs to remove asserts and malloc multiple `rx_ring_data`
 *     structs based on ncpus.
 * - RX/TX Checksum Offloading support
 * - VLAN support
 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
 * - Wake on LAN (WoL) support
 * - TX LSO support
 * - Receive Side Scaling (RSS) support
 * - Debugging Capabilities:
 *   - Could include MAC statistics and
 *     error status registers in sysctl.
 */
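
/*
 * Layout of this file: the ifdi_* (iflib device interface) and if_txrx
 * methods declared below implement attach/detach, queue setup and the
 * TX/RX fast paths; the remaining functions are register-level helpers
 * for the MAC, DMA controller (DMAC), FIFO controller (FCT) and MII bus.
 */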

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/iflib.h>

#include <dev/mgb/if_mgb.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ifdi_if.h"
#include "miibus_if.h"

static pci_vendor_info_t mgb_vendor_info_array[] = {
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
	PVID_END
};

/* Device methods */
static device_register_t		mgb_register;

/* IFLIB methods */
static ifdi_attach_pre_t		mgb_attach_pre;
static ifdi_attach_post_t		mgb_attach_post;
static ifdi_detach_t			mgb_detach;

static ifdi_tx_queues_alloc_t		mgb_tx_queues_alloc;
static ifdi_rx_queues_alloc_t		mgb_rx_queues_alloc;
static ifdi_queues_free_t		mgb_queues_free;

static ifdi_init_t			mgb_init;
static ifdi_stop_t			mgb_stop;

static ifdi_msix_intr_assign_t		mgb_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t	mgb_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t	mgb_rx_queue_intr_enable;
static ifdi_intr_enable_t		mgb_intr_enable_all;
static ifdi_intr_disable_t		mgb_intr_disable_all;

/* IFLIB_TXRX methods */
static int				mgb_isc_txd_encap(void *,
					    if_pkt_info_t);
static void				mgb_isc_txd_flush(void *,
					    uint16_t, qidx_t);
static int				mgb_isc_txd_credits_update(void *,
					    uint16_t, bool);
static int				mgb_isc_rxd_available(void *,
					    uint16_t, qidx_t, qidx_t);
static int				mgb_isc_rxd_pkt_get(void *,
					    if_rxd_info_t);
static void				mgb_isc_rxd_refill(void *,
					    if_rxd_update_t);
static void				mgb_isc_rxd_flush(void *,
					    uint16_t, uint8_t, qidx_t);

/* Interrupts */
static driver_filter_t			mgb_legacy_intr;
static driver_filter_t			mgb_admin_intr;
static driver_filter_t			mgb_rxq_intr;
static bool				mgb_intr_test(struct mgb_softc *);

/* MII methods */
static miibus_readreg_t			mgb_miibus_readreg;
static miibus_writereg_t		mgb_miibus_writereg;
static miibus_linkchg_t			mgb_miibus_linkchg;
static miibus_statchg_t			mgb_miibus_statchg;

static int				mgb_media_change(if_t);
static void				mgb_media_status(if_t,
					    struct ifmediareq *);

/* Helper/Test functions */
static int				mgb_test_bar(struct mgb_softc *);
static int				mgb_alloc_regs(struct mgb_softc *);
static int				mgb_release_regs(struct mgb_softc *);

static void				mgb_get_ethaddr(struct mgb_softc *,
					    struct ether_addr *);

static int				mgb_wait_for_bits(struct mgb_softc *,
					    int, int, int);

/* H/W init, reset and teardown helpers */
static int				mgb_hw_init(struct mgb_softc *);
static int				mgb_hw_teardown(struct mgb_softc *);
static int				mgb_hw_reset(struct mgb_softc *);
static int				mgb_mac_init(struct mgb_softc *);
static int				mgb_dmac_reset(struct mgb_softc *);
static int				mgb_phy_reset(struct mgb_softc *);

static int				mgb_dma_init(struct mgb_softc *);
static int				mgb_dma_tx_ring_init(struct mgb_softc *,
					    int);
static int				mgb_dma_rx_ring_init(struct mgb_softc *,
					    int);

static int				mgb_dmac_control(struct mgb_softc *,
					    int, int, enum mgb_dmac_cmd);
static int				mgb_fct_control(struct mgb_softc *,
					    int, int, enum mgb_fct_cmd);
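
/*
 * The driver registers with newbus twice: the device_* methods below are
 * thin wrappers around the generic iflib PCI entry points, while the
 * DRIVER_MODULE(miibus, mgb, ...) declaration attaches miibus as a child
 * of each mgb instance so that the miibus_* methods here provide PHY
 * register access and link change notifications.
 */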

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t mgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register,	mgb_register),
	DEVMETHOD(device_probe,		iflib_device_probe),
	DEVMETHOD(device_attach,	iflib_device_attach),
	DEVMETHOD(device_detach,	iflib_device_detach),
	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
	DEVMETHOD(device_suspend,	iflib_device_suspend),
	DEVMETHOD(device_resume,	iflib_device_resume),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	mgb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mgb_miibus_writereg),
	DEVMETHOD(miibus_linkchg,	mgb_miibus_linkchg),
	DEVMETHOD(miibus_statchg,	mgb_miibus_statchg),

	DEVMETHOD_END
};

static driver_t mgb_driver = {
	"mgb", mgb_methods, sizeof(struct mgb_softc)
};

DRIVER_MODULE(mgb, pci, mgb_driver, NULL, NULL);
IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
MODULE_VERSION(mgb, 1);

#if 0 /* MIIBUS_DEBUG */
/* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, NULL, NULL,
    SI_ORDER_ANY);
#endif /* MIIBUS_DEBUG */
DRIVER_MODULE(miibus, mgb, miibus_driver, NULL, NULL);

MODULE_DEPEND(mgb, pci, 1, 1, 1);
MODULE_DEPEND(mgb, ether, 1, 1, 1);
MODULE_DEPEND(mgb, miibus, 1, 1, 1);
MODULE_DEPEND(mgb, iflib, 1, 1, 1);

static device_method_t mgb_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
	DEVMETHOD(ifdi_detach, mgb_detach),

	DEVMETHOD(ifdi_init, mgb_init),
	DEVMETHOD(ifdi_stop, mgb_stop),

	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, mgb_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),

#if 0 /* Not yet implemented IFLIB methods */
	/*
	 * Set multicast addresses, mtu and promiscuous mode
	 */
	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),

	/*
	 * Needed for VLAN support
	 */
	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),

	/*
	 * Needed for WOL support
	 * at the very least.
	 */
	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
	DEVMETHOD(ifdi_suspend, mgb_suspend),
	DEVMETHOD(ifdi_resume, mgb_resume),
#endif /* UNUSED_IFLIB_METHODS */
	DEVMETHOD_END
};

static driver_t mgb_iflib_driver = {
	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
};
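
/*
 * These if_txrx routines make up the iflib TX/RX fast path: they translate
 * between iflib's software descriptors and the hardware ring descriptors
 * laid out in if_mgb.h, and are installed via scctx->isc_txrx in
 * mgb_attach_pre().
 */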

static struct if_txrx mgb_txrx = {
	.ift_txd_encap = mgb_isc_txd_encap,
	.ift_txd_flush = mgb_isc_txd_flush,
	.ift_txd_credits_update = mgb_isc_txd_credits_update,
	.ift_rxd_available = mgb_isc_rxd_available,
	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
	.ift_rxd_refill = mgb_isc_rxd_refill,
	.ift_rxd_flush = mgb_isc_rxd_flush,

	.ift_legacy_intr = mgb_legacy_intr
};

static struct if_shared_ctx mgb_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = PAGE_SIZE,
	.isc_admin_intrcnt = 1,
	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,

	.isc_vendor_info = mgb_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &mgb_iflib_driver,
	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
	.isc_ntxqs = 2,

	.isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
	.isc_tx_maxsegsize = MCLBYTES,

	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nrxqs = 2,

	.isc_rx_maxsize = MCLBYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MCLBYTES,

	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nfl = 1, /* one free list since there is only one queue */
#if 0 /* UNUSED_CTX */

	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
#endif /* UNUSED_CTX */
};

/*********************************************************************/

static void *
mgb_register(device_t dev)
{

	return (&mgb_sctx_init);
}
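
/*
 * mgb_attach_pre() runs before iflib allocates queues: it fills in the
 * software context (descriptor ring and head-writeback sizes, single
 * TX/RX queue set), maps and sanity-checks the register BAR, resets and
 * initializes the hardware, attaches the PHY through miibus, reads (or
 * generates) the station address, and points every interrupt vector at
 * vector 0 until MSI-X vectors are assigned.
 */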

static int
mgb_attach_pre(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, phyaddr, rid;
	struct ether_addr hwaddr;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* IFLIB required setup */
	scctx->isc_txrx = &mgb_txrx;
	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
	/* Ring desc queues */
	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_ntxd[0];
	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_nrxd[0];

	/* Head WB queues */
	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];

	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;

	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = 0;
#if 0
	/*
	 * CSUM, TSO and VLAN support are TBD
	 */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
	    IFCAP_JUMBO_MTU;
	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
#endif

	/* get the BAR */
	error = mgb_alloc_regs(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "Unable to allocate bus resource: registers.\n");
		goto fail;
	}

	error = mgb_test_bar(sc);
	if (error != 0)
		goto fail;

	error = mgb_hw_init(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "MGB device init failed. (err: %d)\n", error);
		goto fail;
	}

	switch (pci_get_device(sc->dev)) {
	case MGB_LAN7430_DEVICE_ID:
		phyaddr = 1;
		break;
	case MGB_LAN7431_DEVICE_ID:
	default:
		phyaddr = MII_PHY_ANY;
		break;
	}

	/* XXX: Would be nice(r) if locked methods were here */
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
	    mgb_media_change, mgb_media_status,
	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "Failed to attach MII interface\n");
		goto fail;
	}

	miid = device_get_softc(sc->miibus);
	scctx->isc_media = &miid->mii_media;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
	/** Setup PBA BAR **/
	rid = pci_msix_pba_bar(sc->dev);
	if (rid != scctx->isc_msix_bar) {
		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->pba == NULL) {
			error = ENXIO;
			device_printf(sc->dev, "Failed to setup PBA BAR\n");
			goto fail;
		}
	}

	mgb_get_ethaddr(sc, &hwaddr);
	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet))
		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);

	/*
	 * XXX: if the MAC address was generated the linux driver
	 * writes it back to the device.
	 */
	iflib_set_mac(ctx, hwaddr.octet);

	/* Map all vectors to vector 0 (admin interrupts) by default. */
	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);

	return (0);

fail:
	mgb_detach(ctx);
	return (error);
}

static int
mgb_attach_post(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	device_printf(sc->dev, "Interrupt test: %s\n",
	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));

	return (0);
}
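
/*
 * mgb_detach() is also used as the error unwind path for mgb_attach_pre(),
 * so every release below must tolerate a partially initialized softc
 * (NULL miibus child, missing PBA resource, and so on).
 */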

static int
mgb_detach(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	int error;

	sc = iflib_get_softc(ctx);

	/* XXX: Should report errors but still detach everything. */
	error = mgb_hw_teardown(sc);

	/* Release IRQs */
	iflib_irq_free(ctx, &sc->rx_irq);
	iflib_irq_free(ctx, &sc->admin_irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->pba != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pba), sc->pba);
	sc->pba = NULL;

	error = mgb_release_regs(sc);

	return (error);
}

static int
mgb_media_change(if_t ifp)
{
	struct mii_data *miid;
	struct mii_softc *miisc;
	struct mgb_softc *sc;
	if_ctx_t ctx;
	int needs_reset;

	ctx = if_getsoftc(ifp);
	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
		PHY_RESET(miisc);

	needs_reset = mii_mediachg(miid);
	if (needs_reset != 0)
		if_init(ifp, ctx);
	return (needs_reset);
}

static void
mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct mgb_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);
	if ((if_getflags(ifp) & IFF_UP) == 0)
		return;

	mii_pollstat(miid);
	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}

static int
mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
    int ntxqsets)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	int q;

	sc = iflib_get_softc(ctx);
	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
	rdata = &sc->tx_ring_data;
	for (q = 0; q < ntxqsets; q++) {
		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];

		/* Head WB */
		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
	}
	return (0);
}

static int
mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
    int nrxqsets)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	int q;

	sc = iflib_get_softc(ctx);
	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
	rdata = &sc->rx_ring_data;
	for (q = 0; q < nrxqsets; q++) {
		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];

		/* Head WB */
		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
	}
	return (0);
}

static void
mgb_queues_free(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
}
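
/*
 * mgb_init() brings the interface up: it (re)programs the DMA rings,
 * configures the receive filter to accept unicast, multicast and broadcast
 * frames without perfect filtering, and kicks off autonegotiation through
 * the attached PHY.
 */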

static void
mgb_init(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	struct mii_data *miid;
	int error;

	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	device_printf(sc->dev, "running init ...\n");

	mgb_dma_init(sc);

	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
	    MGB_RFE_ALLOW_BROADCAST |
	    MGB_RFE_ALLOW_MULTICAST |
	    MGB_RFE_ALLOW_UNICAST);

	error = mii_mediachg(miid);
	/* Not much we can do if this fails. */
	if (error)
		device_printf(sc->dev, "%s: mii_mediachg returned %d\n",
		    __func__, error);
}

#if 0
static void
mgb_dump_some_stats(struct mgb_softc *sc)
{
	int i;
	int first_stat = 0x1200;
	int last_stat = 0x12FC;

	for (i = first_stat; i <= last_stat; i += 4)
		if (CSR_READ_REG(sc, i) != 0)
			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
			    CSR_READ_REG(sc, i));
	char *stat_names[] = {
		"MAC_ERR_STS ",
		"FCT_INT_STS ",
		"DMAC_CFG ",
		"DMAC_CMD ",
		"DMAC_INT_STS ",
		"DMAC_INT_EN ",
		"DMAC_RX_ERR_STS0 ",
		"DMAC_RX_ERR_STS1 ",
		"DMAC_RX_ERR_STS2 ",
		"DMAC_RX_ERR_STS3 ",
		"INT_STS ",
		"INT_EN ",
		"INT_VEC_EN ",
		"INT_VEC_MAP0 ",
		"INT_VEC_MAP1 ",
		"INT_VEC_MAP2 ",
		"TX_HEAD0",
		"TX_TAIL0",
		"DMAC_TX_ERR_STS0 ",
		NULL
	};
	int stats[] = {
		0x114,
		0xA0,
		0xC00,
		0xC0C,
		0xC10,
		0xC14,
		0xC60,
		0xCA0,
		0xCE0,
		0xD20,
		0x780,
		0x788,
		0x794,
		0x7A0,
		0x7A4,
		0x780,
		0xD58,
		0xD5C,
		0xD60,
		0x0
	};
	i = 0;
	printf("==============================\n");
	while (stats[i++])
		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
		    stat_names[i - 1], stats[i - 1],
		    CSR_READ_REG(sc, stats[i - 1]));
	printf("==== TX RING DESCS ====\n");
	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
		    "ring[%d].data1=0x%08x\n"
		    "ring[%d].data2=0x%08x\n"
		    "ring[%d].data3=0x%08x\n",
		    i, sc->tx_ring_data.ring[i].ctl,
		    i, sc->tx_ring_data.ring[i].addr.low,
		    i, sc->tx_ring_data.ring[i].addr.high,
		    i, sc->tx_ring_data.ring[i].sts);
	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
	CSR_WRITE_REG(sc, 0x24, 0xF); /* DP_SEL & TX_RAM_0 */
	for (i = 0; i < 128; i++) {
		CSR_WRITE_REG(sc, 0x2C, i); /* DP_ADDR */

		CSR_WRITE_REG(sc, 0x28, 0); /* DP_CMD */

		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) /* DP_SEL & READY */
			DELAY(1000);

		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
		    CSR_READ_REG(sc, 0x30)); /* DP_DATA */
	}
}
#endif

static void
mgb_stop(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int i;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* XXX: Could potentially timeout */
	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
	}
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
	}
}
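
/*
 * Interrupt handling: a single admin vector takes the test interrupt and
 * any events that have not been steered to a dedicated vector, while each
 * RX queue gets its own MSI-X vector (see mgb_msix_intr_assign()).  TX
 * completion is detected by polling the head writeback area from
 * mgb_isc_txd_credits_update(), so no TX interrupt vector is mapped.
 */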

static int
mgb_legacy_intr(void *xsc)
{
	struct mgb_softc *sc;

	sc = xsc;
	iflib_admin_intr_deferred(sc->ctx);
	return (FILTER_HANDLED);
}

static int
mgb_rxq_intr(void *xsc)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	uint32_t intr_sts, intr_en;
	int qidx;

	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);

	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
	intr_sts &= intr_en;

	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
		if ((intr_sts & MGB_INTR_STS_RX(qidx))) {
			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
			    MGB_INTR_STS_RX(qidx));
			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
		}
	}
	return (FILTER_SCHEDULE_THREAD);
}

static int
mgb_admin_intr(void *xsc)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	uint32_t intr_sts, intr_en;
	int qidx;

	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);

	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
	intr_sts &= intr_en;

	/* TODO: shouldn't continue if suspended */
	if ((intr_sts & MGB_INTR_STS_ANY) == 0)
		return (FILTER_STRAY);
	if ((intr_sts & MGB_INTR_STS_TEST) != 0) {
		sc->isr_test_flag = true;
		CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
		return (FILTER_HANDLED);
	}
	if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
		for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
			if ((intr_sts & MGB_INTR_STS_RX(qidx))) {
				iflib_rx_intr_deferred(sc->ctx, qidx);
			}
		}
		return (FILTER_HANDLED);
	}
	/* XXX: TX interrupts should not occur */
	if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
		for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
			if ((intr_sts & MGB_INTR_STS_TX(qidx))) {
				/* clear the interrupt sts and run handler */
				CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
				    MGB_INTR_STS_TX(qidx));
				CSR_WRITE_REG(sc, MGB_INTR_STS,
				    MGB_INTR_STS_TX(qidx));
				iflib_tx_intr_deferred(sc->ctx, qidx);
			}
		}
		return (FILTER_HANDLED);
	}

	return (FILTER_SCHEDULE_THREAD);
}

static int
mgb_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, i, vectorid;
	char irq_name[16];

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
	    ("num rxqsets/txqsets != 1 "));

	/*
	 * First vector should be admin interrupts, other vectors are TX/RX.
	 *
	 * RIDs start at 1, and vector ids start at 0.
	 */
	vectorid = 0;
	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
	if (error) {
		device_printf(sc->dev,
		    "Failed to register admin interrupt handler\n");
		return (error);
	}

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		vectorid++;
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
		    IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
		if (error) {
			device_printf(sc->dev,
			    "Failed to register rxq %d interrupt handler\n", i);
			return (error);
		}
		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
		    MGB_INTR_VEC_MAP(vectorid, i));
	}

	/* Not actually mapping hw TX interrupts ... */
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
		    irq_name);
	}

	return (0);
}
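
/*
 * Interrupts have to be enabled at two levels: the top-level INT_EN /
 * vector-enable registers and the DMA controller's own interrupt enable
 * register.  The routines below set or clear both levels together; the
 * per-queue *_intr_enable hooks re-arm a single RX (or TX) source after
 * iflib has serviced it.
 */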

static void
mgb_intr_enable_all(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	intr_sts |= MGB_INTR_STS_ANY;
	vec_en |= MGB_INTR_STS_ANY;

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		intr_sts |= MGB_INTR_STS_RX(i);
		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
		vec_en |= MGB_INTR_RX_VEC_STS(i);
	}

	/* TX interrupts aren't needed ... */

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
}

static void
mgb_intr_disable_all(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
}

static int
mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* called after successful rx isr */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
	return (0);
}

static int
mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* XXX: not called (since tx interrupts not used) */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
	return (0);
}

static bool
mgb_intr_test(struct mgb_softc *sc)
{
	int i;

	sc->isr_test_flag = false;
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
	if (sc->isr_test_flag)
		return (true);
	for (i = 0; i < MGB_TIMEOUT; i++) {
		DELAY(10);
		if (sc->isr_test_flag)
			break;
	}
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	return (sc->isr_test_flag);
}
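
/*
 * TX fast path.  mgb_isc_txd_encap() turns each bus_dma segment of a packet
 * into one ring descriptor (currently only single-segment packets are
 * supported), requesting FCS generation and marking the frame start/end.
 * The hardware is only told about new descriptors when mgb_isc_txd_flush()
 * advances the ring's tail pointer register.
 */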

static int
mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *txd;
	bus_dma_segment_t *segs;
	qidx_t pidx, nsegs;
	int i;

	KASSERT(ipi->ipi_qsidx == 0,
	    ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	pidx = ipi->ipi_pidx;
	segs = ipi->ipi_segs;
	nsegs = ipi->ipi_nsegs;

	/* For each seg, create a descriptor */
	for (i = 0; i < nsegs; ++i) {
		KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
		txd = &rdata->ring[pidx];
		txd->ctl = htole32(
		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
		    /*
		     * XXX: This will be wrong in the multipacket case
		     * I suspect FS should be for the first packet and
		     * LS should be for the last packet
		     */
		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
		    MGB_DESC_CTL_FCS);
		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
		    segs[i].ds_addr));
		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
		    segs[i].ds_addr));
		txd->sts = htole32(
		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
		pidx = MGB_NEXT_RING_IDX(pidx);
	}
	ipi->ipi_new_pidx = pidx;
	return (0);
}

static void
mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;

	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	if (rdata->last_tail != pidx) {
		rdata->last_tail = pidx;
		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
	}
}

static int
mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
{
	struct mgb_softc *sc;
	struct mgb_ring_desc *txd;
	struct mgb_ring_data *rdata;
	int processed = 0;

	/*
	 * > If clear is true, we need to report the number of TX command ring
	 * > descriptors that have been processed by the device.  If clear is
	 * > false, we just need to report whether or not at least one TX
	 * > command ring descriptor has been processed by the device.
	 * - vmx driver
	 */
	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
	    txqid));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		if (!clear)
			return (1);

		txd = &rdata->ring[rdata->last_head];
		memset(txd, 0, sizeof(struct mgb_ring_desc));
		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
		processed++;
	}

	return (processed);
}
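
/*
 * RX fast path.  The device advances a head pointer in the head writeback
 * area as it completes descriptors; mgb_isc_rxd_available() and
 * mgb_isc_rxd_pkt_get() compare it against the driver's last_head to find
 * completed descriptors.  Refilled descriptors are handed back to the
 * hardware with the OWN bit set, and mgb_isc_rxd_flush() publishes them by
 * updating the tail register.
 */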

static int
mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	int avail = 0;

	sc = xsc;
	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
	    rxqid));

	rdata = &sc->rx_ring_data;
	for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
		avail++;
		/* XXX: Could verify desc is device owned here */
		if (avail == budget)
			break;
	}
	return (avail);
}

static int
mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc rxd;
	int total_len;

	KASSERT(ri->iri_qsidx == 0,
	    ("tried to get packet in RX Channel %d.\n", ri->iri_qsidx));
	sc = xsc;
	total_len = 0;
	rdata = &sc->rx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		/* copy ring desc and do swapping */
		rxd = rdata->ring[rdata->last_head];
		rxd.ctl = le32toh(rxd.ctl);
		rxd.addr.low = le32toh(rxd.addr.low);
		rxd.addr.high = le32toh(rxd.addr.high);
		rxd.sts = le32toh(rxd.sts);

		if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that it's still owned by the device.\n");
			return (EINVAL);
		}
		if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that FS is not set.\n");
			return (EINVAL);
		}
		/* XXX: Multi-packet support */
		if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that LS is not set. (Multi-buffer packets "
			    "not yet supported)\n");
			return (EINVAL);
		}
		ri->iri_frags[0].irf_flid = 0;
		ri->iri_frags[0].irf_idx = rdata->last_head;
		ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
		total_len += ri->iri_frags[0].irf_len;

		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
		break;
	}
	ri->iri_nfrags = 1;
	ri->iri_len = total_len;

	return (0);
}

static void
mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *rxd;
	uint64_t *paddrs;
	qidx_t *idxs;
	qidx_t idx;
	int count, len;

	count = iru->iru_count;
	len = iru->iru_buf_size;
	idxs = iru->iru_idxs;
	paddrs = iru->iru_paddrs;
	KASSERT(iru->iru_qsidx == 0,
	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));

	sc = xsc;
	rdata = &sc->rx_ring_data;

	while (count > 0) {
		idx = idxs[--count];
		rxd = &rdata->ring[idx];

		rxd->sts = 0;
		rxd->addr.low =
		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
		rxd->addr.high =
		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
		    (len & MGB_DESC_CTL_BUFLEN_MASK));
	}
	return;
}

static void
mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
{
	struct mgb_softc *sc;

	sc = xsc;

	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
	/*
	 * According to the programming guide, last_tail must be set to
	 * the last valid RX descriptor, rather than to the one past that.
	 * Note that this is not true for the TX ring!
	 */
	sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
	return;
}

static int
mgb_test_bar(struct mgb_softc *sc)
{
	uint32_t id_rev, dev_id;

	id_rev = CSR_READ_REG(sc, 0);
	dev_id = id_rev >> 16;
	if (dev_id == MGB_LAN7430_DEVICE_ID ||
	    dev_id == MGB_LAN7431_DEVICE_ID) {
		return (0);
	} else {
		device_printf(sc->dev, "ID check failed.\n");
		return (ENXIO);
	}
}

static int
mgb_alloc_regs(struct mgb_softc *sc)
{
	int rid;

	rid = PCIR_BAR(MGB_BAR);
	pci_enable_busmaster(sc->dev);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->regs == NULL)
		return (ENXIO);

	return (0);
}

static int
mgb_release_regs(struct mgb_softc *sc)
{
	int error = 0;

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);
	sc->regs = NULL;
	pci_disable_busmaster(sc->dev);
	return (error);
}

static int
mgb_dma_init(struct mgb_softc *sc)
{
	if_softc_ctx_t scctx;
	int ch, error = 0;

	scctx = iflib_get_softc_ctx(sc->ctx);

	for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
		if ((error = mgb_dma_rx_ring_init(sc, ch)))
			goto fail;

	for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
		if ((error = mgb_dma_tx_ring_init(sc, ch)))
			goto fail;

fail:
	return (error);
}
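
/*
 * Per-channel DMA ring bring-up.  Each ring is programmed with the bus
 * addresses of its descriptor array and head-writeback word, its length
 * and pad settings, and is then started by enabling the corresponding
 * FIFO controller (FCT) channel and DMA controller (DMAC) channel.
 */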

static int
mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
{
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->rx_ring_data;
	mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
	    ("Trying to init channels when not in init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");
		error = ENXIO;
		goto fail;
	}

	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write head pointer writeback address */
	if (rdata->head_wb_bus_addr == 0) {
		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		error = ENXIO;
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));

	/* Enable head pointer writeback */
	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);

	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
	/* ring size */
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	/* packet padding (PAD_2 is better for IP header alignment ...) */
	ring_config &= ~MGB_DMA_RING_PAD_MASK;
	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);

	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);

	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));

	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
	if (error != 0) {
		device_printf(sc->dev, "Failed to reset RX FCT.\n");
		goto fail;
	}
	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
	if (error != 0) {
		device_printf(sc->dev, "Failed to enable RX FCT.\n");
		goto fail;
	}
	error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
	if (error != 0)
		device_printf(sc->dev, "Failed to start RX DMAC.\n");
fail:
	return (error);
}
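
/*
 * The TX ring is brought up the same way, but with head-writeback and
 * interrupt-on-completion enabled in CONFIG0 and with the tail pointer
 * explicitly zeroed so that the ring starts out empty.
 */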

static int
mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
{
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->tx_ring_data;
	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
	    FCT_RESET))) {
		device_printf(sc->dev, "Failed to reset TX FCT.\n");
		goto fail;
	}
	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
	    FCT_ENABLE))) {
		device_printf(sc->dev, "Failed to enable TX FCT.\n");
		goto fail;
	}
	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
	    DMAC_RESET))) {
		device_printf(sc->dev, "Failed to reset TX DMAC.\n");
		goto fail;
	}
	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
	    ("Trying to init channels when not in init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");
		error = ENXIO;
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write ring size */
	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);

	/* Enable interrupt on completion and head pointer writeback */
	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);

	/* write head pointer writeback address */
	if (rdata->head_wb_bus_addr == 0) {
		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		error = ENXIO;
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));

	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
	KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
	rdata->last_tail = 0;
	CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);

	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
	    DMAC_START)))
		device_printf(sc->dev, "Failed to start TX DMAC.\n");
fail:
	return (error);
}

static int
mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
    enum mgb_dmac_cmd cmd)
{
	int error = 0;

	switch (cmd) {
	case DMAC_RESET:
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_RESET(start, channel));
		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
		    MGB_DMAC_CMD_RESET(start, channel));
		break;

	case DMAC_START:
		/*
		 * NOTE: this simplifies the logic, since it will never
		 * try to start in STOP_PENDING, but it also increases work.
		 */
		error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
		if (error != 0)
			return (error);
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_START(start, channel));
		break;

	case DMAC_STOP:
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_STOP(start, channel));
		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_STOP(start, channel),
		    MGB_DMAC_CMD_START(start, channel));
		break;
	}
	return (error);
}

static int
mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
    enum mgb_fct_cmd cmd)
{

	switch (cmd) {
	case FCT_RESET:
		CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
	case FCT_ENABLE:
		CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
		return (0);
	case FCT_DISABLE:
		CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
	}
}
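
/*
 * Global hardware init and teardown.  mgb_hw_init() performs a "lite"
 * reset of the controller, enables the MAC with automatic speed and duplex
 * detection, then resets the PHY and the DMA controller; the reset steps
 * poll their completion bits through mgb_wait_for_bits().
 */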

static int
mgb_hw_teardown(struct mgb_softc *sc)
{
	int err = 0;

	/* Stop MAC */
	CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
	CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
	if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
		return (err);
	if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
		return (err);
	return (err);
}

static int
mgb_hw_init(struct mgb_softc *sc)
{
	int error = 0;

	error = mgb_hw_reset(sc);
	if (error != 0)
		goto fail;

	mgb_mac_init(sc);

	error = mgb_phy_reset(sc);
	if (error != 0)
		goto fail;

	error = mgb_dmac_reset(sc);
	if (error != 0)
		goto fail;

fail:
	return (error);
}

static int
mgb_hw_reset(struct mgb_softc *sc)
{

	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
}

static int
mgb_mac_init(struct mgb_softc *sc)
{

	/*
	 * enable automatic duplex detection and
	 * automatic speed detection
	 */
	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);

	return (MGB_STS_OK);
}

static int
mgb_phy_reset(struct mgb_softc *sc)
{

	CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
	if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
	    MGB_STS_TIMEOUT)
		return (MGB_STS_TIMEOUT);
	return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
}

static int
mgb_dmac_reset(struct mgb_softc *sc)
{

	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
}

static int
mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
{
	int i, val;

	i = 0;
	do {
		/*
		 * XXX: Datasheet states the delay should be > 5 microseconds
		 * for device reset.
		 */
		DELAY(100);
		val = CSR_READ_REG(sc, reg);
		if ((val & set_bits) == set_bits && (val & clear_bits) == 0)
			return (MGB_STS_OK);
	} while (i++ < MGB_TIMEOUT);

	return (MGB_STS_TIMEOUT);
}

static void
mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
{

	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
}
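
/*
 * MII access: PHY register reads and writes go through the MII_ACCESS /
 * MII_DATA register pair.  Each operation waits for the busy bit to clear,
 * programs the PHY and register addresses along with the read/write
 * opcode, then polls the busy bit again before the result may be used.
 */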

static int
mgb_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mgb_softc *sc;
	int mii_access;

	sc = iflib_get_softc(device_get_softc(dev));

	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
	mii_access |= MGB_MII_BUSY | MGB_MII_READ;
	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
}

static int
mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct mgb_softc *sc;
	int mii_access;

	sc = iflib_get_softc(device_get_softc(dev));

	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
	mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
	CSR_WRITE_REG(sc, MGB_MII_DATA, data);
	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	return (0);
}

/* XXX: May need to lock these up */
static void
mgb_miibus_statchg(device_t dev)
{
	struct mgb_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);
	/* Update baudrate in iflib */
	sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
}

static void
mgb_miibus_linkchg(device_t dev)
{
	struct mgb_softc *sc;
	struct mii_data *miid;
	int link_state;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);
	/* XXX: copied from miibus_linkchg */
	if (miid->mii_media_status & IFM_AVALID) {
		if (miid->mii_media_status & IFM_ACTIVE)
			link_state = LINK_STATE_UP;
		else
			link_state = LINK_STATE_DOWN;
	} else
		link_state = LINK_STATE_UNKNOWN;
	sc->link_state = link_state;
	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
}