/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation, Inc.
 *
 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
 *
 * Product information:
 * LAN7430 https://www.microchip.com/en-us/product/LAN7430
 *   - Integrated IEEE 802.3 compliant PHY
 * LAN7431 https://www.microchip.com/en-us/product/LAN7431
 *   - RGMII Interface
 *
 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
 *
 * UNIMPLEMENTED FEATURES
 * ----------------------
 * A number of features supported by the LAN743X device are not yet
 * implemented in this driver:
 *
 * - Multiple (up to 4) RX queues support
 *   - Just needs to remove asserts and malloc multiple `rx_ring_data`
 *     structs based on ncpus.
 * - RX/TX Checksum Offloading support
 * - VLAN support
 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
 * - Wake on LAN (WoL) support
 * - TX LSO support
 * - Receive Side Scaling (RSS) support
 * - Debugging Capabilities:
 *   - Could include MAC statistics and
 *     error status registers in sysctl.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/iflib.h>

#include <dev/mgb/if_mgb.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ifdi_if.h"
#include "miibus_if.h"

static pci_vendor_info_t mgb_vendor_info_array[] = {
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
	PVID_END
};

/* Device methods */
static device_register_t	mgb_register;

/* IFLIB methods */
static ifdi_attach_pre_t	mgb_attach_pre;
static ifdi_attach_post_t	mgb_attach_post;
static ifdi_detach_t		mgb_detach;

static ifdi_tx_queues_alloc_t	mgb_tx_queues_alloc;
static ifdi_rx_queues_alloc_t	mgb_rx_queues_alloc;
static ifdi_queues_free_t	mgb_queues_free;

static ifdi_init_t		mgb_init;
static ifdi_stop_t		mgb_stop;

static ifdi_msix_intr_assign_t	mgb_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t	mgb_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t	mgb_rx_queue_intr_enable;
static ifdi_intr_enable_t	mgb_intr_enable_all;
static ifdi_intr_disable_t	mgb_intr_disable_all;

/* IFLIB_TXRX methods */
static int	mgb_isc_txd_encap(void *, if_pkt_info_t);
static void	mgb_isc_txd_flush(void *, uint16_t, qidx_t);
static int	mgb_isc_txd_credits_update(void *, uint16_t, bool);
static int	mgb_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int	mgb_isc_rxd_pkt_get(void *, if_rxd_info_t);
static void	mgb_isc_rxd_refill(void *, if_rxd_update_t);
static void	mgb_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);

/* Interrupts */
static driver_filter_t	mgb_legacy_intr;
static driver_filter_t	mgb_admin_intr;
static driver_filter_t	mgb_rxq_intr;
static bool		mgb_intr_test(struct mgb_softc *);

/* MII methods */
static miibus_readreg_t		mgb_miibus_readreg;
static miibus_writereg_t	mgb_miibus_writereg;
static miibus_linkchg_t		mgb_miibus_linkchg;
static miibus_statchg_t		mgb_miibus_statchg;

static int	mgb_media_change(if_t);
static void	mgb_media_status(if_t, struct ifmediareq *);

/* Helper/Test functions */
static int	mgb_test_bar(struct mgb_softc *);
static int	mgb_alloc_regs(struct mgb_softc *);
static int	mgb_release_regs(struct mgb_softc *);

static void	mgb_get_ethaddr(struct mgb_softc *, struct ether_addr *);

static int	mgb_wait_for_bits(struct mgb_softc *, int, int, int);

/* H/W init, reset and teardown helpers */
static int	mgb_hw_init(struct mgb_softc *);
static int	mgb_hw_teardown(struct mgb_softc *);
static int	mgb_hw_reset(struct mgb_softc *);
static int	mgb_mac_init(struct mgb_softc *);
static int	mgb_dmac_reset(struct mgb_softc *);
static int	mgb_phy_reset(struct mgb_softc *);

static int	mgb_dma_init(struct mgb_softc *);
static int	mgb_dma_tx_ring_init(struct mgb_softc *, int);
static int	mgb_dma_rx_ring_init(struct mgb_softc *, int);

static int	mgb_dmac_control(struct mgb_softc *, int, int,
		    enum mgb_dmac_cmd);
static int	mgb_fct_control(struct mgb_softc *, int, int,
		    enum mgb_fct_cmd);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t mgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register,	mgb_register),
	DEVMETHOD(device_probe,		iflib_device_probe),
	DEVMETHOD(device_attach,	iflib_device_attach),
	DEVMETHOD(device_detach,	iflib_device_detach),
	DEVMETHOD(device_shutdown,	iflib_device_shutdown),
	DEVMETHOD(device_suspend,	iflib_device_suspend),
	DEVMETHOD(device_resume,	iflib_device_resume),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	mgb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mgb_miibus_writereg),
	DEVMETHOD(miibus_linkchg,	mgb_miibus_linkchg),
	DEVMETHOD(miibus_statchg,	mgb_miibus_statchg),

	DEVMETHOD_END
};

static driver_t mgb_driver = {
	"mgb", mgb_methods, sizeof(struct mgb_softc)
};

devclass_t mgb_devclass;
DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
MODULE_VERSION(mgb, 1);

#if 0 /* MIIBUS_DEBUG */
/* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
    SI_ORDER_ANY);
#endif /* MIIBUS_DEBUG */
DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);

MODULE_DEPEND(mgb, pci, 1, 1, 1);
MODULE_DEPEND(mgb, ether, 1, 1, 1);
MODULE_DEPEND(mgb, miibus, 1, 1, 1);
MODULE_DEPEND(mgb, iflib, 1, 1, 1);

static device_method_t mgb_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
	DEVMETHOD(ifdi_detach, mgb_detach),

	DEVMETHOD(ifdi_init, mgb_init),
	DEVMETHOD(ifdi_stop, mgb_stop),

	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, mgb_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),

#if 0 /* Not yet implemented IFLIB methods */
	/*
	 * Set multicast addresses, mtu and promiscuous mode
	 */
	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),

	/*
	 * Needed for VLAN support
	 */
	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),

	/*
	 * Needed for WOL support
	 * at the very least.
	 */
	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
	DEVMETHOD(ifdi_suspend, mgb_suspend),
	DEVMETHOD(ifdi_resume, mgb_resume),
#endif /* UNUSED_IFLIB_METHODS */
	DEVMETHOD_END
};

static driver_t mgb_iflib_driver = {
	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
};

struct if_txrx mgb_txrx = {
	.ift_txd_encap = mgb_isc_txd_encap,
	.ift_txd_flush = mgb_isc_txd_flush,
	.ift_txd_credits_update = mgb_isc_txd_credits_update,
	.ift_rxd_available = mgb_isc_rxd_available,
	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
	.ift_rxd_refill = mgb_isc_rxd_refill,
	.ift_rxd_flush = mgb_isc_rxd_flush,

	.ift_legacy_intr = mgb_legacy_intr
};

struct if_shared_ctx mgb_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = PAGE_SIZE,
	.isc_admin_intrcnt = 1,
	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,

	.isc_vendor_info = mgb_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &mgb_iflib_driver,
	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
	.isc_ntxqs = 2,

	.isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
	.isc_tx_maxsegsize = MCLBYTES,

	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nrxqs = 2,

	.isc_rx_maxsize = MCLBYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MCLBYTES,

	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nfl = 1, /* one free list since there is only one queue */
#if 0 /* UNUSED_CTX */

	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
#endif /* UNUSED_CTX */
};

/*********************************************************************/

static void *
mgb_register(device_t dev)
{

	return (&mgb_sctx_init);
}

static int
mgb_attach_pre(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, phyaddr, rid;
	struct ether_addr hwaddr;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* IFLIB required setup */
	scctx->isc_txrx = &mgb_txrx;
	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
	/* Ring desc queues */
	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_ntxd[0];
	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_nrxd[0];

	/* Head WB queues */
	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];

	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;

	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = 0;
#if 0
	/*
	 * CSUM, TSO and VLAN support are TBD
	 */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
	    IFCAP_JUMBO_MTU;
	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
#endif

	/* get the BAR */
	error = mgb_alloc_regs(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "Unable to allocate bus resource: registers.\n");
		goto fail;
	}

	error = mgb_test_bar(sc);
	if (error != 0)
		goto fail;

	error = mgb_hw_init(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "MGB device init failed. (err: %d)\n", error);
		goto fail;
	}

	switch (pci_get_device(sc->dev)) {
	case MGB_LAN7430_DEVICE_ID:
		phyaddr = 1;
		break;
	case MGB_LAN7431_DEVICE_ID:
	default:
		phyaddr = MII_PHY_ANY;
		break;
	}

	/* XXX: Would be nice(r) if locked methods were here */
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
	    mgb_media_change, mgb_media_status,
	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "Failed to attach MII interface\n");
		goto fail;
	}

	miid = device_get_softc(sc->miibus);
	scctx->isc_media = &miid->mii_media;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
	/* Setup PBA BAR */
	rid = pci_msix_pba_bar(sc->dev);
	if (rid != scctx->isc_msix_bar) {
		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->pba == NULL) {
			error = ENXIO;
			device_printf(sc->dev, "Failed to setup PBA BAR\n");
			goto fail;
		}
	}

	mgb_get_ethaddr(sc, &hwaddr);
	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet))
		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);

	/*
	 * XXX: if the MAC address was generated the linux driver
	 * writes it back to the device.
	 */
	iflib_set_mac(ctx, hwaddr.octet);

	/* Map all vectors to vector 0 (admin interrupts) by default. */
	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);

	return (0);

fail:
	mgb_detach(ctx);
	return (error);
}

static int
mgb_attach_post(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	device_printf(sc->dev, "Interrupt test: %s\n",
	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));

	return (0);
}

static int
mgb_detach(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	int error;

	sc = iflib_get_softc(ctx);

	/* XXX: Should report errors but still detach everything. */
	error = mgb_hw_teardown(sc);

	/* Release IRQs */
	iflib_irq_free(ctx, &sc->rx_irq);
	iflib_irq_free(ctx, &sc->admin_irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->pba != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pba), sc->pba);
	sc->pba = NULL;

	error = mgb_release_regs(sc);

	return (error);
}

static int
mgb_media_change(if_t ifp)
{
	struct mii_data *miid;
	struct mii_softc *miisc;
	struct mgb_softc *sc;
	if_ctx_t ctx;
	int needs_reset;

	ctx = if_getsoftc(ifp);
	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
		PHY_RESET(miisc);

	needs_reset = mii_mediachg(miid);
	if (needs_reset != 0)
		ifp->if_init(ctx);
	return (needs_reset);
}

static void
mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct mgb_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);
	if ((if_getflags(ifp) & IFF_UP) == 0)
		return;

	mii_pollstat(miid);
	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
}

static int
mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
    int ntxqsets)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	int q;

	sc = iflib_get_softc(ctx);
	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
	rdata = &sc->tx_ring_data;
	for (q = 0; q < ntxqsets; q++) {
		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];

		/* Head WB */
		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
	}
	return (0);
}

static int
mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
    int nrxqsets)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	int q;

	sc = iflib_get_softc(ctx);
	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
	rdata = &sc->rx_ring_data;
	for (q = 0; q < nrxqsets; q++) {
		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];

		/* Head WB */
		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
	}
	return (0);
}

static void
mgb_queues_free(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
}

static void
mgb_init(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	struct mii_data *miid;
	int error;

	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	device_printf(sc->dev, "running init ...\n");

	mgb_dma_init(sc);

	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
	    MGB_RFE_ALLOW_BROADCAST |
	    MGB_RFE_ALLOW_MULTICAST |
	    MGB_RFE_ALLOW_UNICAST);

	error = mii_mediachg(miid);
	/* Not much we can do if this fails. */
	if (error)
		device_printf(sc->dev, "%s: mii_mediachg returned %d", __func__,
		    error);
}
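
/*
 * XXX: Rough, untested sketch of an ifdi_promisc_set implementation, kept
 * under #if 0 like the other unimplemented iflib methods registered above.
 * It only illustrates how the MGB_RFE_CTL bits used in mgb_init() could be
 * toggled; MGB_RFE_ALLOW_PROMISC is a hypothetical bit name that would need
 * to be confirmed against the LAN743x datasheet and added to if_mgb.h.
 */
#if 0
static int
mgb_promisc_set(if_ctx_t ctx, int flags)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	if (flags & IFF_PROMISC)
		CSR_UPDATE_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PROMISC);
	else
		CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PROMISC);
	if (flags & IFF_ALLMULTI)
		CSR_UPDATE_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_MULTICAST);
	return (0);
}
#endif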

#ifdef DEBUG
static void
mgb_dump_some_stats(struct mgb_softc *sc)
{
	int i;
	int first_stat = 0x1200;
	int last_stat = 0x12FC;

	for (i = first_stat; i <= last_stat; i += 4)
		if (CSR_READ_REG(sc, i) != 0)
			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
			    CSR_READ_REG(sc, i));
	char *stat_names[] = {
		"MAC_ERR_STS ",
		"FCT_INT_STS ",
		"DMAC_CFG ",
		"DMAC_CMD ",
		"DMAC_INT_STS ",
		"DMAC_INT_EN ",
		"DMAC_RX_ERR_STS0 ",
		"DMAC_RX_ERR_STS1 ",
		"DMAC_RX_ERR_STS2 ",
		"DMAC_RX_ERR_STS3 ",
		"INT_STS ",
		"INT_EN ",
		"INT_VEC_EN ",
		"INT_VEC_MAP0 ",
		"INT_VEC_MAP1 ",
		"INT_VEC_MAP2 ",
		"TX_HEAD0",
		"TX_TAIL0",
		"DMAC_TX_ERR_STS0 ",
		NULL
	};
	int stats[] = {
		0x114,
		0xA0,
		0xC00,
		0xC0C,
		0xC10,
		0xC14,
		0xC60,
		0xCA0,
		0xCE0,
		0xD20,
		0x780,
		0x788,
		0x794,
		0x7A0,
		0x7A4,
		0x780,
		0xD58,
		0xD5C,
		0xD60,
		0x0
	};
	i = 0;
	printf("==============================\n");
	while (stats[i++])
		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
		    stat_names[i - 1], stats[i - 1],
		    CSR_READ_REG(sc, stats[i - 1]));
	printf("==== TX RING DESCS ====\n");
	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
		    "ring[%d].data1=0x%08x\n"
		    "ring[%d].data2=0x%08x\n"
		    "ring[%d].data3=0x%08x\n",
		    i, sc->tx_ring_data.ring[i].ctl,
		    i, sc->tx_ring_data.ring[i].addr.low,
		    i, sc->tx_ring_data.ring[i].addr.high,
		    i, sc->tx_ring_data.ring[i].sts);
	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
	CSR_WRITE_REG(sc, 0x24, 0xF); /* DP_SEL & TX_RAM_0 */
	for (i = 0; i < 128; i++) {
		CSR_WRITE_REG(sc, 0x2C, i); /* DP_ADDR */

		CSR_WRITE_REG(sc, 0x28, 0); /* DP_CMD */

		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) /* DP_SEL & READY */
			DELAY(1000);

		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
		    CSR_READ_REG(sc, 0x30)); /* DP_DATA */
	}
}
#endif

static void
mgb_stop(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int i;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* XXX: Could potentially timeout */
	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
	}
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
	}
}

static int
mgb_legacy_intr(void *xsc)
{
	struct mgb_softc *sc;

	sc = xsc;
	iflib_admin_intr_deferred(sc->ctx);
	return (FILTER_HANDLED);
}

static int
mgb_rxq_intr(void *xsc)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	uint32_t intr_sts, intr_en;
	int qidx;

	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);

	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
	intr_sts &= intr_en;

	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
		if ((intr_sts & MGB_INTR_STS_RX(qidx))) {
			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
			    MGB_INTR_STS_RX(qidx));
			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
		}
	}
	return (FILTER_SCHEDULE_THREAD);
}
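
/*
 * Interrupt routing overview (as set up in this file): attach_pre maps all
 * RX/TX/other events to MSI-X vector 0, which is serviced by mgb_admin_intr()
 * below.  When vectors are assigned in mgb_msix_intr_assign(), each RX queue
 * is remapped to its own vector and handled by mgb_rxq_intr() above, so the
 * admin filter is then left with the interrupt-test bit and any events that
 * still land on vector 0.
 */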

static int
mgb_admin_intr(void *xsc)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	uint32_t intr_sts, intr_en;
	int qidx;

	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);

	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
	intr_sts &= intr_en;

	/*
	 * NOTE: Debugging printfs here
	 * will likely cause interrupt test failure.
	 */

	/* TODO: shouldn't continue if suspended */
	if ((intr_sts & MGB_INTR_STS_ANY) == 0) {
		device_printf(sc->dev, "non-mgb interrupt triggered.\n");
		return (FILTER_SCHEDULE_THREAD);
	}
	if ((intr_sts & MGB_INTR_STS_TEST) != 0) {
		sc->isr_test_flag = true;
		CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
		return (FILTER_HANDLED);
	}
	if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
		for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
			if ((intr_sts & MGB_INTR_STS_RX(qidx)))
				iflib_rx_intr_deferred(sc->ctx, qidx);
		}
		return (FILTER_HANDLED);
	}
	/* XXX: TX interrupts should not occur */
	if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
		for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
			if ((intr_sts & MGB_INTR_STS_TX(qidx))) {
				/* clear the interrupt sts and run handler */
				CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
				    MGB_INTR_STS_TX(qidx));
				CSR_WRITE_REG(sc, MGB_INTR_STS,
				    MGB_INTR_STS_TX(qidx));
				iflib_tx_intr_deferred(sc->ctx, qidx);
			}
		}
		return (FILTER_HANDLED);
	}

	return (FILTER_SCHEDULE_THREAD);
}

static int
mgb_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, i, vectorid;
	char irq_name[16];

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
	    ("num rxqsets/txqsets != 1 "));

	/*
	 * First vector should be admin interrupts, other vectors are TX/RX.
	 *
	 * RIDs start at 1, and vector ids start at 0.
	 */
	vectorid = 0;
	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
	if (error) {
		device_printf(sc->dev,
		    "Failed to register admin interrupt handler\n");
		return (error);
	}

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		vectorid++;
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
		    IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
		if (error) {
			device_printf(sc->dev,
			    "Failed to register rxq %d interrupt handler\n", i);
			return (error);
		}
		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
		    MGB_INTR_VEC_MAP(vectorid, i));
	}

	/* Not actually mapping hw TX interrupts ... */
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
		    irq_name);
	}

	return (0);
}

static void
mgb_intr_enable_all(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	intr_sts |= MGB_INTR_STS_ANY;
	vec_en |= MGB_INTR_STS_ANY;

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		intr_sts |= MGB_INTR_STS_RX(i);
		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
		vec_en |= MGB_INTR_RX_VEC_STS(i);
	}

	/* TX interrupts aren't needed ... */

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
}

static void
mgb_intr_disable_all(if_ctx_t ctx)
{
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
}

static int
mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* called after successful rx isr */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
	return (0);
}

static int
mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* XXX: not called (since tx interrupts not used) */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
	return (0);
}

static bool
mgb_intr_test(struct mgb_softc *sc)
{
	int i;

	sc->isr_test_flag = false;
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
	if (sc->isr_test_flag)
		return (true);
	for (i = 0; i < MGB_TIMEOUT; i++) {
		DELAY(10);
		if (sc->isr_test_flag)
			break;
	}
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	return (sc->isr_test_flag);
}
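
/*
 * The if_txrx callbacks below operate on a single TX and a single RX
 * descriptor ring (see mgb_ring_data).  For each ring the hardware writes
 * the index of the last processed descriptor to a separate "head writeback"
 * word (head_wb); the driver advances its own last_head until it matches
 * that value and kicks new work by writing the ring's tail register.  RX
 * descriptors are handed to the hardware with MGB_DESC_CTL_OWN set and the
 * buffer length encoded in the ctl word.
 */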

static int
mgb_isc_txd_encap(void *xsc, if_pkt_info_t ipi)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *txd;
	bus_dma_segment_t *segs;
	qidx_t pidx, nsegs;
	int i;

	KASSERT(ipi->ipi_qsidx == 0,
	    ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);
	rdata = &sc->tx_ring_data;

	pidx = ipi->ipi_pidx;
	segs = ipi->ipi_segs;
	nsegs = ipi->ipi_nsegs;

	/* For each seg, create a descriptor */
	for (i = 0; i < nsegs; ++i) {
		KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
		txd = &rdata->ring[pidx];
		txd->ctl = htole32(
		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK) |
		    /*
		     * XXX: This will be wrong in the multipacket case
		     * I suspect FS should be for the first packet and
		     * LS should be for the last packet
		     */
		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
		    MGB_DESC_CTL_FCS);
		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
		    segs[i].ds_addr));
		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
		    segs[i].ds_addr));
		txd->sts = htole32(
		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
		pidx = MGB_NEXT_RING_IDX(pidx);
	}
	ipi->ipi_new_pidx = pidx;
	return (0);
}

static void
mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;

	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	if (rdata->last_tail != pidx) {
		rdata->last_tail = pidx;
		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
	}
}

static int
mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
{
	struct mgb_softc *sc;
	struct mgb_ring_desc *txd;
	struct mgb_ring_data *rdata;
	int processed = 0;

	/*
	 * > If clear is true, we need to report the number of TX command ring
	 * > descriptors that have been processed by the device.  If clear is
	 * > false, we just need to report whether or not at least one TX
	 * > command ring descriptor has been processed by the device.
	 * - vmx driver
	 */
	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
	    txqid));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		if (!clear)
			return (1);

		txd = &rdata->ring[rdata->last_head];
		memset(txd, 0, sizeof(struct mgb_ring_desc));
		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
		processed++;
	}

	return (processed);
}

static int
mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	struct mgb_ring_data *rdata;
	int avail = 0;

	sc = xsc;
	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
	    rxqid));

	rdata = &sc->rx_ring_data;
	scctx = iflib_get_softc_ctx(sc->ctx);
	for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
		avail++;
		/* XXX: Could verify desc is device owned here */
		if (avail == budget)
			break;
	}
	return (avail);
}

static int
mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc rxd;
	int total_len;

	KASSERT(ri->iri_qsidx == 0,
	    ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
	sc = xsc;
	total_len = 0;
	rdata = &sc->rx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		/* copy ring desc and do swapping */
		rxd = rdata->ring[rdata->last_head];
		rxd.ctl = le32toh(rxd.ctl);
		rxd.addr.low = le32toh(rxd.addr.low);
		rxd.addr.high = le32toh(rxd.addr.high);
		rxd.sts = le32toh(rxd.sts);

		if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that it's owned by the driver\n");
			return (EINVAL);
		}
		if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that FS is not set.\n");
			return (EINVAL);
		}
		/* XXX: Multi-packet support */
		if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
			device_printf(sc->dev,
			    "Tried to read descriptor ... "
			    "found that LS is not set. (Multi-buffer packets "
			    "not yet supported)\n");
			return (EINVAL);
		}
		ri->iri_frags[0].irf_flid = 0;
		ri->iri_frags[0].irf_idx = rdata->last_head;
		ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
		total_len += ri->iri_frags[0].irf_len;

		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
		break;
	}
	ri->iri_nfrags = 1;
	ri->iri_len = total_len;

	return (0);
}

static void
mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
{
	if_softc_ctx_t scctx;
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *rxd;
	uint64_t *paddrs;
	qidx_t *idxs;
	qidx_t idx;
	int count, len;

	count = iru->iru_count;
	len = iru->iru_buf_size;
	idxs = iru->iru_idxs;
	paddrs = iru->iru_paddrs;
	KASSERT(iru->iru_qsidx == 0,
	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));

	sc = xsc;
	scctx = iflib_get_softc_ctx(sc->ctx);
	rdata = &sc->rx_ring_data;

	while (count > 0) {
		idx = idxs[--count];
		rxd = &rdata->ring[idx];

		rxd->sts = 0;
		rxd->addr.low =
		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
		rxd->addr.high =
		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
		    (len & MGB_DESC_CTL_BUFLEN_MASK));
	}
	return;
}

static void
mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
{
	struct mgb_softc *sc;

	sc = xsc;

	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
	/*
	 * According to the programming guide, last_tail must be set to
	 * the last valid RX descriptor, rather than to the one past that.
	 * Note that this is not true for the TX ring!
	 */
	sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
	return;
}

static int
mgb_test_bar(struct mgb_softc *sc)
{
	uint32_t id_rev, dev_id, rev;

	id_rev = CSR_READ_REG(sc, 0);
	dev_id = id_rev >> 16;
	rev = id_rev & 0xFFFF;
	if (dev_id == MGB_LAN7430_DEVICE_ID ||
	    dev_id == MGB_LAN7431_DEVICE_ID) {
		return (0);
	} else {
		device_printf(sc->dev, "ID check failed.\n");
		return (ENXIO);
	}
}

static int
mgb_alloc_regs(struct mgb_softc *sc)
{
	int rid;

	rid = PCIR_BAR(MGB_BAR);
	pci_enable_busmaster(sc->dev);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->regs == NULL)
		return (ENXIO);

	return (0);
}

static int
mgb_release_regs(struct mgb_softc *sc)
{
	int error = 0;

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);
	sc->regs = NULL;
	pci_disable_busmaster(sc->dev);
	return (error);
}

static int
mgb_dma_init(struct mgb_softc *sc)
{
	if_softc_ctx_t scctx;
	int ch, error = 0;

	scctx = iflib_get_softc_ctx(sc->ctx);

	for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
		if ((error = mgb_dma_rx_ring_init(sc, ch)))
			goto fail;

	for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
		if ((error = mgb_dma_tx_ring_init(sc, ch)))
			goto fail;

fail:
	return (error);
}

static int
mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
{
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->rx_ring_data;
	mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
	    ("Trying to init channels when not in init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");
		goto fail;
	}

	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write head pointer writeback address */
	if (rdata->head_wb_bus_addr == 0) {
		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));

	/* Enable head pointer writeback */
	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);

	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
	/* ring size */
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	/* packet padding (PAD_2 is better for IP header alignment ...) */
	ring_config &= ~MGB_DMA_RING_PAD_MASK;
	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);

	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);

	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));

	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
	if (error != 0) {
		device_printf(sc->dev, "Failed to reset RX FCT.\n");
		goto fail;
	}
	error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
	if (error != 0) {
		device_printf(sc->dev, "Failed to enable RX FCT.\n");
		goto fail;
	}
	error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
	if (error != 0)
		device_printf(sc->dev, "Failed to start RX DMAC.\n");
fail:
	return (error);
}

static int
mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
{
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->tx_ring_data;
	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
		device_printf(sc->dev, "Failed to reset TX FCT.\n");
		goto fail;
	}
	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
	    FCT_ENABLE))) {
		device_printf(sc->dev, "Failed to enable TX FCT.\n");
		goto fail;
	}
	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
	    DMAC_RESET))) {
		device_printf(sc->dev, "Failed to reset TX DMAC.\n");
		goto fail;
	}
	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
	    ("Trying to init channels in not init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write ring size */
	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);

	/* Enable interrupt on completion and head pointer writeback */
	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);

	/* write head pointer writeback address */
	if (rdata->head_wb_bus_addr == 0) {
		device_printf(sc->dev, "Invalid head wb bus addr.\n");
		goto fail;
	}
	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));

	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
	KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
	rdata->last_tail = 0;
	CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);

	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
	    DMAC_START)))
		device_printf(sc->dev, "Failed to start TX DMAC.\n");
fail:
	return (error);
}

static int
mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
    enum mgb_dmac_cmd cmd)
{
	int error = 0;

	switch (cmd) {
	case DMAC_RESET:
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_RESET(start, channel));
		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
		    MGB_DMAC_CMD_RESET(start, channel));
		break;

	case DMAC_START:
		/*
		 * NOTE: this simplifies the logic, since it will never
		 * try to start in STOP_PENDING, but it also increases work.
		 */
		error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
		if (error != 0)
			return (error);
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_START(start, channel));
		break;

	case DMAC_STOP:
		CSR_WRITE_REG(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_STOP(start, channel));
		error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
		    MGB_DMAC_CMD_STOP(start, channel),
		    MGB_DMAC_CMD_START(start, channel));
		break;
	}
	return (error);
}

static int
mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
    enum mgb_fct_cmd cmd)
{

	switch (cmd) {
	case FCT_RESET:
		CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
	case FCT_ENABLE:
		CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
		return (0);
	case FCT_DISABLE:
		CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
		return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
	}
}

static int
mgb_hw_teardown(struct mgb_softc *sc)
{
	int err = 0;

	/* Stop MAC */
	CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
	CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
	if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
		return (err);
	if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
		return (err);
	return (err);
}

static int
mgb_hw_init(struct mgb_softc *sc)
{
	int error = 0;

	error = mgb_hw_reset(sc);
	if (error != 0)
		goto fail;

	mgb_mac_init(sc);

	error = mgb_phy_reset(sc);
	if (error != 0)
		goto fail;

	error = mgb_dmac_reset(sc);
	if (error != 0)
		goto fail;

fail:
	return (error);
}

static int
mgb_hw_reset(struct mgb_softc *sc)
{

	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
}

static int
mgb_mac_init(struct mgb_softc *sc)
{

	/*
	 * enable automatic duplex detection and
	 * automatic speed detection
	 */
	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);

	return (MGB_STS_OK);
}

static int
mgb_phy_reset(struct mgb_softc *sc)
{

	CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
	if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
	    MGB_STS_TIMEOUT)
		return (MGB_STS_TIMEOUT);
	return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
}

static int
mgb_dmac_reset(struct mgb_softc *sc)
{

	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
}
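
/*
 * Poll 'reg' until all bits in 'set_bits' read as set and all bits in
 * 'clear_bits' read as clear, checking roughly every 100 microseconds for up
 * to MGB_TIMEOUT iterations.  Returns MGB_STS_OK on success and
 * MGB_STS_TIMEOUT otherwise.
 */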
static int
mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
{
	int i, val;

	i = 0;
	do {
		/*
		 * XXX: The datasheet states that the delay should be
		 * > 5 microseconds for a device reset.
		 */
		DELAY(100);
		val = CSR_READ_REG(sc, reg);
		if ((val & set_bits) == set_bits &&
		    (val & clear_bits) == 0)
			return (MGB_STS_OK);
	} while (i++ < MGB_TIMEOUT);

	return (MGB_STS_TIMEOUT);
}

static void
mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
{

	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
	CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
}

static int
mgb_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mgb_softc *sc;
	int mii_access;

	sc = iflib_get_softc(device_get_softc(dev));

	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
	mii_access |= MGB_MII_BUSY | MGB_MII_READ;
	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
}

static int
mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct mgb_softc *sc;
	int mii_access;

	sc = iflib_get_softc(device_get_softc(dev));

	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
	mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
	mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
	CSR_WRITE_REG(sc, MGB_MII_DATA, data);
	CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
	if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
	    MGB_STS_TIMEOUT)
		return (EIO);
	return (0);
}

/* XXX: May need to lock these up */
static void
mgb_miibus_statchg(device_t dev)
{
	struct mgb_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);
	/* Update baudrate in iflib */
	sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
}

static void
mgb_miibus_linkchg(device_t dev)
{
	struct mgb_softc *sc;
	struct mii_data *miid;
	int link_state;

	sc = iflib_get_softc(device_get_softc(dev));
	miid = device_get_softc(sc->miibus);
	/* XXX: copied from miibus_linkchg */
	if (miid->mii_media_status & IFM_AVALID) {
		if (miid->mii_media_status & IFM_ACTIVE)
			link_state = LINK_STATE_UP;
		else
			link_state = LINK_STATE_DOWN;
	} else
		link_state = LINK_STATE_UNKNOWN;
	sc->link_state = link_state;
	iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
}
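
/*
 * XXX: Untested sketch of the sysctl-based debugging capability mentioned in
 * the header comment ("MAC statistics and error status registers in sysctl").
 * Kept under #if 0: the 0x114 offset is taken from the MAC_ERR_STS entry in
 * mgb_dump_some_stats() above, <sys/sysctl.h> would need to be included, and
 * the handler name and the set of exported registers are illustrative only.
 */
#if 0
static int
mgb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
	struct mgb_softc *sc = arg1;
	uint32_t val;

	val = CSR_READ_REG(sc, arg2);
	return (sysctl_handle_32(oidp, &val, 0, req));
}

static void
mgb_add_sysctls(struct mgb_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *child =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "mac_err_sts",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0x114,
	    mgb_sysctl_reg_handler, "IU", "MAC error status register");
}
#endif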