/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "xgbe.h"
#include "xgbe-common.h"

#include "miibus_if.h"
#include "ifdi_if.h"
#include "opt_inet.h"
#include "opt_inet6.h"

MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");

extern struct if_txrx axgbe_txrx;
static int axgbe_sph_enable;

/* Function prototypes */
static void *axgbe_register(device_t);
static int axgbe_if_attach_pre(if_ctx_t);
static int axgbe_if_attach_post(if_ctx_t);
static int axgbe_if_detach(if_ctx_t);
static void axgbe_if_stop(if_ctx_t);
static void axgbe_if_init(if_ctx_t);

/* Queue related routines */
static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int axgbe_alloc_channels(if_ctx_t);
static void axgbe_free_channels(struct axgbe_if_softc *);
static void axgbe_if_queues_free(if_ctx_t);
static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);

/* Interrupt related routines */
static void axgbe_if_disable_intr(if_ctx_t);
static void axgbe_if_enable_intr(if_ctx_t);
static int axgbe_if_msix_intr_assign(if_ctx_t, int);
static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int);

/* Init and Iflib routines */
static void axgbe_pci_init(struct xgbe_prv_data *);
static void axgbe_pci_stop(if_ctx_t);
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *);
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *);
static int axgbe_if_mtu_set(if_ctx_t, uint32_t);
static void axgbe_if_update_admin_status(if_ctx_t);
static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int axgbe_if_media_change(if_ctx_t);
static int axgbe_if_promisc_set(if_ctx_t, int);
static uint64_t axgbe_if_get_counter(if_ctx_t, ift_counter);
static void axgbe_if_vlan_register(if_ctx_t, uint16_t);
static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t);
#if __FreeBSD_version >= 1300000
static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
#endif
static void axgbe_set_counts(if_ctx_t);
static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *);

/* MII interface registered functions */
static int axgbe_miibus_readreg(device_t, int, int);
static int axgbe_miibus_writereg(device_t, int, int, int);
static void axgbe_miibus_statchg(device_t);

/* ISR routines */
static int axgbe_dev_isr(void *);
static void axgbe_ecc_isr(void *);
static void axgbe_i2c_isr(void *);
static void axgbe_an_isr(void *);
static int axgbe_msix_que(void *);

/* Timer routines */
static void xgbe_service(void *, int);
static void xgbe_service_timer(void *);
static void xgbe_init_timers(struct xgbe_prv_data *);
static void xgbe_stop_timers(struct xgbe_prv_data *);

/* Dump routines */
static void xgbe_dump_prop_registers(struct xgbe_prv_data *);

/*
 * Allocate only for MAC (BAR0) and PCS (BAR1) registers, and just point the
 * MSI-X table bar (BAR5) to iflib. iflib will do the allocation for MSI-X
 * table.
 */
static struct resource_spec axgbe_pci_mac_spec[] = {
    { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* MAC regs */
    { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, /* PCS regs */
    { -1, 0 }
};

static const pci_vendor_info_t axgbe_vendor_info_array[] =
{
    PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"),
    PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"),
    PVID_END
};

static struct xgbe_version_data xgbe_v2a = {
    .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
    .xpcs_access = XGBE_XPCS_ACCESS_V2,
    .mmc_64bit = 1,
    .tx_max_fifo_size = 229376,
    .rx_max_fifo_size = 229376,
    .tx_tstamp_workaround = 1,
    .ecc_support = 1,
    .i2c_support = 1,
    .irq_reissue_support = 1,
    .tx_desc_prefetch = 5,
    .rx_desc_prefetch = 5,
    .an_cdr_workaround = 1,
};

static struct xgbe_version_data xgbe_v2b = {
    .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
    .xpcs_access = XGBE_XPCS_ACCESS_V2,
    .mmc_64bit = 1,
    .tx_max_fifo_size = 65536,
    .rx_max_fifo_size = 65536,
    .tx_tstamp_workaround = 1,
    .ecc_support = 1,
    .i2c_support = 1,
    .irq_reissue_support = 1,
    .tx_desc_prefetch = 5,
    .rx_desc_prefetch = 5,
    .an_cdr_workaround = 1,
};

/* Device Interface */
static device_method_t ax_methods[] = {
    DEVMETHOD(device_register, axgbe_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),

    /* MII interface */
    DEVMETHOD(miibus_readreg, axgbe_miibus_readreg),
    DEVMETHOD(miibus_writereg, axgbe_miibus_writereg),
    DEVMETHOD(miibus_statchg, axgbe_miibus_statchg),

    DEVMETHOD_END
};
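/*
 * Note on the method table above: probe/attach/detach are forwarded to
 * iflib; only device_register (so per-boot tunables can be read before
 * attach) and the MII accessors are implemented locally.  miibus attaches
 * as a child of the "ax" driver via the DRIVER_MODULE() declarations below.
 */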
static driver_t ax_driver = {
    "ax", ax_methods, sizeof(struct axgbe_if_softc),
};

DRIVER_MODULE(axp, pci, ax_driver, 0, 0);
DRIVER_MODULE(miibus, ax, miibus_driver, 0, 0);
IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array);

MODULE_DEPEND(ax, pci, 1, 1, 1);
MODULE_DEPEND(ax, ether, 1, 1, 1);
MODULE_DEPEND(ax, iflib, 1, 1, 1);
MODULE_DEPEND(ax, miibus, 1, 1, 1);

/* Iflib Interface */
static device_method_t axgbe_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post),
    DEVMETHOD(ifdi_detach, axgbe_if_detach),
    DEVMETHOD(ifdi_init, axgbe_if_init),
    DEVMETHOD(ifdi_stop, axgbe_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, axgbe_if_disable_intr),
    DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status),
    DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set),
    DEVMETHOD(ifdi_media_status, axgbe_if_media_status),
    DEVMETHOD(ifdi_media_change, axgbe_if_media_change),
    DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set),
    DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter),
    DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister),
#if __FreeBSD_version >= 1300000
    DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart),
#endif
    DEVMETHOD_END
};

static driver_t axgbe_if_driver = {
    "axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc)
};

/* Iflib Shared Context */
static struct if_shared_ctx axgbe_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_driver = &axgbe_if_driver,
    .isc_q_align = PAGE_SIZE,
    .isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = MJUM9BYTES,
    .isc_rx_maxsegsize = MJUM9BYTES,
    .isc_rx_nsegments = 1,
    .isc_admin_intrcnt = 4,

    .isc_vendor_info = axgbe_vendor_info_array,
    .isc_driver_version = XGBE_DRV_VERSION,

    .isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN},
    .isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT},
    .isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX},

    .isc_ntxqs = 1,
    .isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH |
        IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD,
};

static void *
axgbe_register(device_t dev)
{
    int axgbe_nfl;
    int axgbe_nrxqs;
    int error, i;
    char *value = NULL;

    value = kern_getenv("dev.ax.sph_enable");
    if (value) {
        axgbe_sph_enable = strtol(value, NULL, 10);
        freeenv(value);
    } else {
        /*
         * No tunable found, generate one with default values
         * Note: only a reboot will reveal the new kenv
         */
        error = kern_setenv("dev.ax.sph_enable", "1");
        if (error) {
            printf("Error setting tunable, using "
                "default driver values\n");
        }
        axgbe_sph_enable = 1;
    }

    if (!axgbe_sph_enable) {
        axgbe_nfl = 1;
        axgbe_nrxqs = 1;
    } else {
        axgbe_nfl = 2;
        axgbe_nrxqs = 2;
    }
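    /*
     * Split header (SPH) mode posts packet headers and payloads into
     * separate buffers, so each RX queue set needs two free lists and two
     * rings; without it, one of each suffices.  To disable SPH, set the
     * tunable before boot, e.g. in loader.conf:
     *
     *	dev.ax.sph_enable="0"
     */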
    axgbe_sctx_init.isc_nfl = axgbe_nfl;
    axgbe_sctx_init.isc_nrxqs = axgbe_nrxqs;

    for (i = 0 ; i < axgbe_nrxqs ; i++) {
        axgbe_sctx_init.isc_nrxd_min[i] = XGBE_RX_DESC_CNT_MIN;
        axgbe_sctx_init.isc_nrxd_default[i] = XGBE_RX_DESC_CNT_DEFAULT;
        axgbe_sctx_init.isc_nrxd_max[i] = XGBE_RX_DESC_CNT_MAX;
    }

    return (&axgbe_sctx_init);
}

/* MII Interface Functions */
static int
axgbe_miibus_readreg(device_t dev, int phy, int reg)
{
    struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
    struct xgbe_prv_data *pdata = &sc->pdata;
    int val;

    axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg);

    val = xgbe_phy_mii_read(pdata, phy, reg);

    axgbe_printf(2, "%s: val 0x%x\n", __func__, val);
    return (val & 0xFFFF);
}

static int
axgbe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
    struct xgbe_prv_data *pdata = &sc->pdata;

    axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg,
        val);

    xgbe_phy_mii_write(pdata, phy, reg, val);

    return (0);
}

static void
axgbe_miibus_statchg(device_t dev)
{
    struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
    struct xgbe_prv_data *pdata = &sc->pdata;
    struct mii_data *mii = device_get_softc(pdata->axgbe_miibus);
    if_t ifp = pdata->netdev;
    int bmsr;

    axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link,
        pdata->phy_link);

    if (mii == NULL || ifp == NULL ||
        (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
        return;

    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
            pdata->phy.link = 1;
            break;
        case IFM_1000_T:
        case IFM_1000_SX:
        case IFM_2500_SX:
            pdata->phy.link = 1;
            break;
        default:
            pdata->phy.link = 0;
            break;
        }
    } else
        pdata->phy_link = 0;

    bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR);
    if (bmsr & BMSR_ANEG) {
        axgbe_printf(2, "%s: Autoneg Done\n", __func__);

        /* Raise AN Interrupt */
        XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
            XGBE_AN_CL73_INT_MASK);
    }
}

static int
axgbe_if_attach_pre(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc;
    struct xgbe_prv_data *pdata;
    struct resource *mac_res[2];
    if_softc_ctx_t scctx;
    if_shared_ctx_t sctx;
    device_t dev;
    unsigned int ma_lo, ma_hi;
    unsigned int reg;
    int ret;

    sc = iflib_get_softc(ctx);
    sc->pdata.dev = dev = iflib_get_dev(ctx);
    sc->sctx = sctx = iflib_get_sctx(ctx);
    sc->scctx = scctx = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    sc->ctx = ctx;
    sc->link_status = LINK_STATE_DOWN;
    pdata = &sc->pdata;
    pdata->netdev = iflib_get_ifp(ctx);

    spin_lock_init(&pdata->xpcs_lock);

    /* Initialize locks */
    mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF);
    mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN);

    /* Allocate VLAN bitmap */
    pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK|M_ZERO);
    pdata->num_active_vlans = 0;

    /* Get the version data */
    DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev));
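    /*
     * Both supported devices share vendor ID 0x1022 (AMD); device ID
     * 0x1458 selects the v2a version data (224 KB max FIFOs) and 0x1459
     * selects v2b (64 KB max FIFOs).  The version data also picks the v2
     * PHY implementation and enables the ECC/I2C/AN-CDR quirks above.
     */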
    if (pci_get_device(dev) == 0x1458)
        sc->pdata.vdata = &xgbe_v2a;
    else if (pci_get_device(dev) == 0x1459)
        sc->pdata.vdata = &xgbe_v2b;

    /* PCI setup */
    if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res)) {
        axgbe_error("Unable to allocate bus resources\n");
        ret = ENXIO;
        goto free_vlans;
    }

    sc->pdata.xgmac_res = mac_res[0];
    sc->pdata.xpcs_res = mac_res[1];

    /* Set the PCS indirect addressing definition registers */
    pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
    pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;

    /* Configure the PCS indirect addressing support */
    reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
    pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
    pdata->xpcs_window <<= 6;
    pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
    pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
    pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
    DBGPR("xpcs window def  : %#010x\n", pdata->xpcs_window_def_reg);
    DBGPR("xpcs window sel  : %#010x\n", pdata->xpcs_window_sel_reg);
    DBGPR("xpcs window      : %#010x\n", pdata->xpcs_window);
    DBGPR("xpcs window size : %#010x\n", pdata->xpcs_window_size);
    DBGPR("xpcs window mask : %#010x\n", pdata->xpcs_window_mask);

    /* Enable all interrupts in the hardware */
    XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

    /* Retrieve the MAC address */
    ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
    ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
    pdata->mac_addr[0] = ma_lo & 0xff;
    pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
    pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
    pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
    pdata->mac_addr[4] = ma_hi & 0xff;
    pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
    if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) {
        axgbe_error("Invalid mac address\n");
        ret = EINVAL;
        goto release_bus_resource;
    }
    iflib_set_mac(ctx, pdata->mac_addr);

    /* Clock settings */
    pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
    pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

    /* Set the DMA coherency values */
    pdata->coherent = 1;
    pdata->arcr = XGBE_DMA_PCI_ARCR;
    pdata->awcr = XGBE_DMA_PCI_AWCR;
    pdata->awarcr = XGBE_DMA_PCI_AWARCR;

    /* Read the port property registers */
    pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
    pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
    pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
    pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
    pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
    DBGPR("port property 0 = %#010x\n", pdata->pp0);
    DBGPR("port property 1 = %#010x\n", pdata->pp1);
    DBGPR("port property 2 = %#010x\n", pdata->pp2);
    DBGPR("port property 3 = %#010x\n", pdata->pp3);
    DBGPR("port property 4 = %#010x\n", pdata->pp4);

    /* Set the maximum channels and queues */
    pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
        MAX_TX_DMA);
    pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
        MAX_RX_DMA);
    pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
        MAX_TX_QUEUES);
    pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
        MAX_RX_QUEUES);
    DBGPR("max tx/rx channel count = %u/%u\n",
        pdata->tx_max_channel_count, pdata->rx_max_channel_count);
    DBGPR("max tx/rx hw queue count = %u/%u\n",
        pdata->tx_max_q_count, pdata->rx_max_q_count);

    axgbe_set_counts(ctx);
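    /*
     * The FIFO size fields of XP_PROP_2 are encoded in units of 16 KB
     * (hence the multiply by 16384 below); e.g. a field value of 14
     * yields 14 * 16384 = 229376 bytes, the v2a cap.  The result is then
     * clamped to the per-version maximum selected above.
     */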
    /* Set the maximum fifo amounts */
    pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
        TX_FIFO_SIZE);
    pdata->tx_max_fifo_size *= 16384;
    pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
        pdata->vdata->tx_max_fifo_size);
    pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
        RX_FIFO_SIZE);
    pdata->rx_max_fifo_size *= 16384;
    pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
        pdata->vdata->rx_max_fifo_size);
    DBGPR("max tx/rx max fifo size = %u/%u\n",
        pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

    /* Initialize IFLIB if_softc_ctx_t */
    axgbe_init_iflib_softc_ctx(sc);

    /* Alloc channels */
    if (axgbe_alloc_channels(ctx)) {
        axgbe_error("Unable to allocate channel memory\n");
        ret = ENOMEM;
        goto release_bus_resource;
    }

    TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);

    /* create the workqueue */
    pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
        taskqueue_thread_enqueue, &pdata->dev_workqueue);
    if (pdata->dev_workqueue == NULL) {
        axgbe_error("Unable to allocate workqueue\n");
        ret = ENOMEM;
        goto free_channels;
    }
    ret = taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET,
        "axgbe dev taskq");
    if (ret) {
        axgbe_error("Unable to start taskqueue\n");
        ret = ENOMEM;
        goto free_task_queue;
    }

    /* Init timers */
    xgbe_init_timers(pdata);

    return (0);

free_task_queue:
    taskqueue_free(pdata->dev_workqueue);

free_channels:
    axgbe_free_channels(sc);

release_bus_resource:
    bus_release_resources(dev, axgbe_pci_mac_spec, mac_res);

free_vlans:
    free(pdata->active_vlans, M_AXGBE);

    return (ret);
} /* axgbe_if_attach_pre */

static void
xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
    xgbe_init_function_ptrs_dev(&pdata->hw_if);
    xgbe_init_function_ptrs_phy(&pdata->phy_if);
    xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
    xgbe_init_function_ptrs_desc(&pdata->desc_if);

    pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void
axgbe_set_counts(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc = iflib_get_softc(ctx);
    struct xgbe_prv_data *pdata = &sc->pdata;
    cpuset_t lcpus;
    int cpu_count, err;
    size_t len;

    /* Set all function pointers */
    xgbe_init_all_fptrs(pdata);

    /* Populate the hardware features */
    xgbe_get_all_hw_features(pdata);

    if (!pdata->tx_max_channel_count)
        pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
    if (!pdata->rx_max_channel_count)
        pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

    if (!pdata->tx_max_q_count)
        pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
    if (!pdata->rx_max_q_count)
        pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

    /*
     * Calculate the number of Tx and Rx rings to be created
     *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
     *   the number of Tx queues to the number of Tx channels
     *   enabled
     *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
     *   number of Rx queues or maximum allowed
     */

    /* Get cpu count from sysctl */
    len = sizeof(cpu_count);
    err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL,
        0, NULL, 0);
    if (err) {
        axgbe_error("Unable to fetch number of cpus\n");
        cpu_count = 1;
    }

    if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) {
        axgbe_error("Unable to fetch CPU list\n");
        /* TODO - handle CPU_COPY(&all_cpus, &lcpus); */
    }
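    /*
     * Each ring count below is the minimum of the interrupt-capable CPU
     * count, the channel count the hardware reports, and the per-port
     * clamps read from XP_PROP_1; e.g. on an 8-CPU system whose port
     * exposes 4 TX DMA channels, tx_ring_count ends up as 4.
     */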
    DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus));

    pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt);
    pdata->tx_ring_count = min(pdata->tx_ring_count,
        pdata->tx_max_channel_count);
    pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count);

    pdata->tx_q_count = pdata->tx_ring_count;

    pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt);
    pdata->rx_ring_count = min(pdata->rx_ring_count,
        pdata->rx_max_channel_count);

    pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count);

    DBGPR("TX/RX max channel count = %u/%u\n",
        pdata->tx_max_channel_count, pdata->rx_max_channel_count);
    DBGPR("TX/RX max queue count = %u/%u\n",
        pdata->tx_max_q_count, pdata->rx_max_q_count);
    DBGPR("TX/RX DMA ring count = %u/%u\n",
        pdata->tx_ring_count, pdata->rx_ring_count);
    DBGPR("TX/RX hardware queue count = %u/%u\n",
        pdata->tx_q_count, pdata->rx_q_count);
} /* axgbe_set_counts */

static void
axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc)
{
    struct xgbe_prv_data *pdata = &sc->pdata;
    if_softc_ctx_t scctx = sc->scctx;
    if_shared_ctx_t sctx = sc->sctx;
    int i;

    scctx->isc_nrxqsets = pdata->rx_q_count;
    scctx->isc_ntxqsets = pdata->tx_q_count;
    scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev);
    scctx->isc_tx_nsegments = 32;

    for (i = 0; i < sctx->isc_ntxqs; i++) {
        scctx->isc_txqsizes[i] =
            roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc),
            128);
        scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc);
    }

    for (i = 0; i < sctx->isc_nrxqs; i++) {
        scctx->isc_rxqsizes[i] =
            roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc),
            128);
        scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc);
    }

    scctx->isc_tx_tso_segments_max = 32;
    scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    /*
     * Set capabilities
     * 1) IFLIB automatically adds IFCAP_HWSTATS, so no need to set it
     *    explicitly
     * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in
     *    IFCAP_HWCSUM) is set
     */
    scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP |
        CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | CSUM_TSO);
    scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
        IFCAP_JUMBO_MTU |
        IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER |
        IFCAP_VLAN_HWCSUM |
        IFCAP_TSO | IFCAP_VLAN_HWTSO);
    scctx->isc_capabilities = scctx->isc_capenable;
    /*
     * Set rss_table_size alone when adding RSS support. rss_table_mask
     * will be set by IFLIB based on rss_table_size
     */
    scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE;

    scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES;
    scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES;

    scctx->isc_txrx = &axgbe_txrx;
}

static int
axgbe_alloc_channels(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc = iflib_get_softc(ctx);
    struct xgbe_prv_data *pdata = &sc->pdata;
    struct xgbe_channel *channel;
    int i, j, count;

    DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count,
        pdata->rx_ring_count);

    /* iflib sets the counts based on isc_ntxqsets/isc_nrxqsets */
    count = max_t(unsigned int, pdata->tx_ring_count,
        pdata->rx_ring_count);

    /* Allocate channel memory */
    for (i = 0; i < count ; i++) {
        channel = (struct xgbe_channel *)malloc(
            sizeof(struct xgbe_channel), M_AXGBE, M_NOWAIT | M_ZERO);

        if (channel == NULL) {
            for (j = 0; j < i; j++) {
                free(pdata->channel[j], M_AXGBE);
                pdata->channel[j] = NULL;
            }
            return (ENOMEM);
        }

        pdata->channel[i] = channel;
    }

    pdata->total_channel_count = count;
    DBGPR("Channel count set to: %u\n", pdata->total_channel_count);

    for (i = 0; i < count; i++) {
        channel = pdata->channel[i];
        snprintf(channel->name, sizeof(channel->name), "channel-%d", i);

        channel->pdata = pdata;
        channel->queue_index = i;
        channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
        bus_space_subregion(channel->dma_tag,
            rman_get_bushandle(pdata->xgmac_res),
            DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
            &channel->dma_handle);
        channel->tx_ring = NULL;
        channel->rx_ring = NULL;
    }

    return (0);
} /* axgbe_alloc_channels */

static void
axgbe_free_channels(struct axgbe_if_softc *sc)
{
    struct xgbe_prv_data *pdata = &sc->pdata;
    int i;

    for (i = 0; i < pdata->total_channel_count ; i++) {
        free(pdata->channel[i], M_AXGBE);
        pdata->channel[i] = NULL;
    }

    pdata->total_channel_count = 0;
    pdata->channel_count = 0;
}

static void
xgbe_service(void *ctx, int pending)
{
    struct xgbe_prv_data *pdata = ctx;
    /* pdata is the first member of the softc, so this cast is safe */
    struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata;
    bool prev_state = false;

    /* Get previous link status */
    prev_state = pdata->phy.link;

    pdata->phy_if.phy_status(pdata);

    if (prev_state != pdata->phy.link) {
        pdata->phy_link = pdata->phy.link;
        axgbe_if_update_admin_status(sc->ctx);
    }

    callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
}

static void
xgbe_service_timer(void *data)
{
    struct xgbe_prv_data *pdata = data;

    taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
}

static void
xgbe_init_timers(struct xgbe_prv_data *pdata)
{
    callout_init(&pdata->service_timer, 1);
}

static void
xgbe_start_timers(struct xgbe_prv_data *pdata)
{
    callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
}

static void
xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
    callout_drain(&pdata->service_timer);
    callout_stop(&pdata->service_timer);
}
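/*
 * Polling scheme for the service machinery above: the callout fires once a
 * second in interrupt context and only enqueues service_work; xgbe_service()
 * then runs from the taskqueue thread, checks the PHY for link changes, and
 * re-arms the callout itself, so a slow PHY poll can never pile up callout
 * invocations.
 */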
static void
xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
    axgbe_printf(1, "\n************* PHY Reg dump *********************\n");

    axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
    axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
    axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x) = %#06x\n", MDIO_DEVID1,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
    axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x) = %#06x\n", MDIO_DEVID2,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
    axgbe_printf(1, "Devices in Package (%#06x) = %#06x\n", MDIO_DEVS1,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
    axgbe_printf(1, "Devices in Package (%#06x) = %#06x\n", MDIO_DEVS2,
        XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
    axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
    axgbe_printf(1, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
    axgbe_printf(1, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
        MDIO_AN_ADVERTISE,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
    axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
        MDIO_AN_ADVERTISE + 1,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
    axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
        MDIO_AN_ADVERTISE + 2,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
    axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
        MDIO_AN_COMP_STAT,
        XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));

    axgbe_printf(1, "\n************************************************\n");
}

static void
xgbe_dump_prop_registers(struct xgbe_prv_data *pdata)
{
    int i;

    axgbe_printf(1, "\n************* PROP Reg dump ********************\n");

    for (i = 0 ; i < 38 ; i++) {
        axgbe_printf(1, "PROP Offset 0x%08x = %08x\n",
            (XP_PROP_0 + (i * 4)), XP_IOREAD(pdata,
            (XP_PROP_0 + (i * 4))));
    }
}

static void
xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch)
{
    struct xgbe_channel *channel;
    int i;

    axgbe_printf(1, "\n************* DMA Reg dump *********************\n");

    axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR,
        XGMAC_IOREAD(pdata, DMA_MR));
    axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR,
        XGMAC_IOREAD(pdata, DMA_SBMR));
    axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR,
        XGMAC_IOREAD(pdata, DMA_ISR));
    axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR,
        XGMAC_IOREAD(pdata, DMA_AXIARCR));
    axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR,
        XGMAC_IOREAD(pdata, DMA_AXIAWCR));
    axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR,
        XGMAC_IOREAD(pdata, DMA_AXIAWARCR));
    axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0,
        XGMAC_IOREAD(pdata, DMA_DSR0));
    axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1,
        XGMAC_IOREAD(pdata, DMA_DSR1));
    axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2,
        XGMAC_IOREAD(pdata, DMA_DSR2));
    axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3,
        XGMAC_IOREAD(pdata, DMA_DSR3));
    axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4,
        XGMAC_IOREAD(pdata, DMA_DSR4));
    axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR,
        XGMAC_IOREAD(pdata, DMA_TXEDMACR));
    axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR,
        XGMAC_IOREAD(pdata, DMA_RXEDMACR));

    for (i = 0 ; i < 8 ; i++ ) {
        /*
         * A ch argument of -1 (as passed from axgbe_pci_init()) skips
         * this filter and dumps all eight channels.
         */
        if (ch >= 0) {
            if (i != ch)
                continue;
        }

        channel = pdata->channel[i];

        axgbe_printf(1,
            "\n************* DMA CH %d dump ****************\n", i);

        axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n",
            DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR));
        axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n",
            DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR));
        axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n",
            DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR));
        axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n",
            DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI));
        axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n",
            DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO));
        axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n",
            DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI));
        axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n",
            DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO));
        axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n",
            DMA_CH_TDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO));
        axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n",
            DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO));
        axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n",
            DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR));
        axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n",
            DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR));
        axgbe_printf(1, "DMA_CH_IER Reg (%08x) = %08x\n",
            DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER));
        axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n",
            DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT));
        axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n",
            DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO));
        axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n",
            DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO));
        axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n",
            DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI));
        axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n",
            DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO));
        axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n",
            DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI));
        axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n",
            DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO));
        axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n",
            DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
        axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n",
            DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
        axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n",
            DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL));
        axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n",
            DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC));
        axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n",
            DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO));
        axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n",
            DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO));
        axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n",
            DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO));
        axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n",
            DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO));
    }
}

static void
xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata)
{
    int i;

    axgbe_printf(1, "\n************* MTL Reg dump *********************\n");

    axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", MTL_OMR,
        XGMAC_IOREAD(pdata, MTL_OMR));
    axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR,
        XGMAC_IOREAD(pdata, MTL_FDCR));
    axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR,
        XGMAC_IOREAD(pdata, MTL_FDSR));
    axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR,
        XGMAC_IOREAD(pdata, MTL_FDDR));
    axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR,
        XGMAC_IOREAD(pdata, MTL_ISR));
    axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R,
        XGMAC_IOREAD(pdata, MTL_RQDCM0R));
    axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R,
        XGMAC_IOREAD(pdata, MTL_RQDCM1R));
    axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R,
        XGMAC_IOREAD(pdata, MTL_RQDCM2R));
    axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R,
        XGMAC_IOREAD(pdata, MTL_TCPM0R));
    axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R,
        XGMAC_IOREAD(pdata, MTL_TCPM1R));

    for (i = 0 ; i < 8 ; i++ ) {
        axgbe_printf(1,
            "\n************* MTL CH %d dump ****************\n", i);

        axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n",
            MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
        axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n",
            MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR));
        axgbe_printf(1, "MTL_Q_TQDR Reg (%08x) = %08x\n",
            MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR));
        axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n",
            MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR));
        axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n",
            MTL_Q_TC0ETSSR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR));
        axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n",
            MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR));

        axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n",
            MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
        axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n",
            MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR));
        axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n",
            MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR));
        axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n",
            MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR));
        axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n",
            MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
        axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n",
            MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER));
        axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n",
            MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR));
    }
}

static void
xgbe_dump_mac_registers(struct xgbe_prv_data *pdata)
{
    axgbe_printf(1, "\n************* MAC Reg dump **********************\n");

    axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR,
        XGMAC_IOREAD(pdata, MAC_TCR));
    axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR,
        XGMAC_IOREAD(pdata, MAC_RCR));
    axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR,
        XGMAC_IOREAD(pdata, MAC_PFR));
    axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR,
        XGMAC_IOREAD(pdata, MAC_WTR));
    axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0,
        XGMAC_IOREAD(pdata, MAC_HTR0));
    axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1,
        XGMAC_IOREAD(pdata, MAC_HTR1));
    axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2,
        XGMAC_IOREAD(pdata, MAC_HTR2));
    axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3,
        XGMAC_IOREAD(pdata, MAC_HTR3));
    axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4,
        XGMAC_IOREAD(pdata, MAC_HTR4));
    axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5,
        XGMAC_IOREAD(pdata, MAC_HTR5));
    axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6,
        XGMAC_IOREAD(pdata, MAC_HTR6));
    axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7,
        XGMAC_IOREAD(pdata, MAC_HTR7));
    axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR,
        XGMAC_IOREAD(pdata, MAC_VLANTR));
    axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR,
        XGMAC_IOREAD(pdata, MAC_VLANHTR));
    axgbe_printf(1, "MAC VLANIR Reg (%08x) = %08x\n", MAC_VLANIR,
        XGMAC_IOREAD(pdata, MAC_VLANIR));
    axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR,
        XGMAC_IOREAD(pdata, MAC_IVLANIR));
    axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR,
        XGMAC_IOREAD(pdata, MAC_RETMR));
    axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR,
        XGMAC_IOREAD(pdata, MAC_Q0TFCR));
    axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR,
        XGMAC_IOREAD(pdata, MAC_Q1TFCR));
    axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR,
        XGMAC_IOREAD(pdata, MAC_Q2TFCR));
    axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR,
        XGMAC_IOREAD(pdata, MAC_Q3TFCR));
    axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR,
        XGMAC_IOREAD(pdata, MAC_Q4TFCR));
    axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR,
        XGMAC_IOREAD(pdata, MAC_Q5TFCR));
    axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR,
        XGMAC_IOREAD(pdata, MAC_Q6TFCR));
    axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = %08x\n", MAC_Q7TFCR,
        XGMAC_IOREAD(pdata, MAC_Q7TFCR));
    axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR,
        XGMAC_IOREAD(pdata, MAC_RFCR));
    axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R,
        XGMAC_IOREAD(pdata, MAC_RQC0R));
    axgbe_printf(1, "MAC RQC1R Reg (%08x) = %08x\n", MAC_RQC1R,
        XGMAC_IOREAD(pdata, MAC_RQC1R));
    axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R,
        XGMAC_IOREAD(pdata, MAC_RQC2R));
    axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R,
        XGMAC_IOREAD(pdata, MAC_RQC3R));
    axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR,
        XGMAC_IOREAD(pdata, MAC_ISR));
    axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER,
        XGMAC_IOREAD(pdata, MAC_IER));
    axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR,
        XGMAC_IOREAD(pdata, MAC_RTSR));
    axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR,
        XGMAC_IOREAD(pdata, MAC_PMTCSR));
    axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR,
        XGMAC_IOREAD(pdata, MAC_RWKPFR));
    axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR,
        XGMAC_IOREAD(pdata, MAC_LPICSR));
    axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR,
        XGMAC_IOREAD(pdata, MAC_LPITCR));
    axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR,
        XGMAC_IOREAD(pdata, MAC_TIR));
    axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR,
        XGMAC_IOREAD(pdata, MAC_VR));
    axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR,
        XGMAC_IOREAD(pdata, MAC_DR));
    axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R,
        XGMAC_IOREAD(pdata, MAC_HWF0R));
    axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R,
        XGMAC_IOREAD(pdata, MAC_HWF1R));
    axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R,
        XGMAC_IOREAD(pdata, MAC_HWF2R));
    axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR,
        XGMAC_IOREAD(pdata, MAC_MDIOSCAR));
    axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR,
        XGMAC_IOREAD(pdata, MAC_MDIOSCCDR));
    axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR,
        XGMAC_IOREAD(pdata, MAC_MDIOISR));
    axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER,
        XGMAC_IOREAD(pdata, MAC_MDIOIER));
    axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R,
        XGMAC_IOREAD(pdata, MAC_MDIOCL22R));
    axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR,
        XGMAC_IOREAD(pdata, MAC_GPIOCR));
    axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR,
        XGMAC_IOREAD(pdata, MAC_GPIOSR));
    axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR,
        XGMAC_IOREAD(pdata, MAC_MACA0HR));
    axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_MACA0LR,
        XGMAC_IOREAD(pdata, MAC_MACA0LR));
    axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR,
        XGMAC_IOREAD(pdata, MAC_MACA1HR));
    axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR,
        XGMAC_IOREAD(pdata, MAC_MACA1LR));
    axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR,
        XGMAC_IOREAD(pdata, MAC_RSSCR));
    axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR,
        XGMAC_IOREAD(pdata, MAC_RSSDR));
    axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR,
        XGMAC_IOREAD(pdata, MAC_RSSAR));
    axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR,
        XGMAC_IOREAD(pdata, MAC_TSCR));
    axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR,
        XGMAC_IOREAD(pdata, MAC_SSIR));
    axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR,
        XGMAC_IOREAD(pdata, MAC_STSR));
    axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR,
        XGMAC_IOREAD(pdata, MAC_STNR));
    axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR,
        XGMAC_IOREAD(pdata, MAC_STSUR));
    axgbe_printf(1, "MAC STNUR Reg (%08x) = %08x\n", MAC_STNUR,
        XGMAC_IOREAD(pdata, MAC_STNUR));
    axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR,
        XGMAC_IOREAD(pdata, MAC_TSAR));
    axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR,
        XGMAC_IOREAD(pdata, MAC_TSSR));
    axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR,
        XGMAC_IOREAD(pdata, MAC_TXSNR));
    axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR,
        XGMAC_IOREAD(pdata, MAC_TXSSR));
}

static void
xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata)
{
    struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

    axgbe_printf(1, "\n************* RMON counters dump ***************\n");

    pdata->hw_if.read_mmc_stats(pdata);

    axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n",
        MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb);
    axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n",
        MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb);
    axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n",
        MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g);
    axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n",
        MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g);
    axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n",
        MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb);
    axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n",
        MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb);
    axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n",
        MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb);
    axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n",
        MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb);
    axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n",
        MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb);
    axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n",
        MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb);
    axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n",
        MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb);
    axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n",
        MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb);
    axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n",
        MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb);
    axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n",
        MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror);
    axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n",
        MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g);
    axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n",
        MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g);
    axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n",
        MMC_TXPAUSEFRAMES_LO, stats->txpauseframes);
    axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n",
        MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g);
    axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n",
        MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb);
    axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n",
        MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb);
    axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n",
        MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g);
    axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n",
        MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g);
    axgbe_printf(1, "rmon rxmulticastframes_g (%08x) = %08lx\n",
        MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g);
    axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n",
        MMC_RXCRCERROR_LO, stats->rxcrcerror);
    axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n",
        MMC_RXRUNTERROR, stats->rxrunterror);
    axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n",
        MMC_RXJABBERERROR, stats->rxjabbererror);
    axgbe_printf(1, "rmon rxundersize_g (%08x) = %08lx\n",
        MMC_RXUNDERSIZE_G, stats->rxundersize_g);
    axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n",
        MMC_RXOVERSIZE_G, stats->rxoversize_g);
    axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n",
        MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb);
    axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n",
        MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb);
    axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n",
        MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb);
    axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n",
        MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb);
    axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n",
        MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb);
    axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n",
        MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb);
    axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n",
        MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g);
    axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n",
        MMC_RXLENGTHERROR_LO, stats->rxlengtherror);
    axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n",
        MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype);
    axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n",
        MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes);
    axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n",
        MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow);
    axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n",
        MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb);
    axgbe_printf(1, "rmon rxwatchdogerror (%08x) = %08lx\n",
        MMC_RXWATCHDOGERROR, stats->rxwatchdogerror);
}

void
xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata)
{
    axgbe_printf(1, "*************** I2C Registers **************\n");
    axgbe_printf(1, " IC_CON             : %010x\n",
        XI2C_IOREAD(pdata, 0x00));
    axgbe_printf(1, " IC_TAR             : %010x\n",
        XI2C_IOREAD(pdata, 0x04));
    axgbe_printf(1, " IC_HS_MADDR        : %010x\n",
        XI2C_IOREAD(pdata, 0x0c));
    axgbe_printf(1, " IC_INTR_STAT       : %010x\n",
        XI2C_IOREAD(pdata, 0x2c));
    axgbe_printf(1, " IC_INTR_MASK       : %010x\n",
        XI2C_IOREAD(pdata, 0x30));
    axgbe_printf(1, " IC_RAW_INTR_STAT   : %010x\n",
        XI2C_IOREAD(pdata, 0x34));
    axgbe_printf(1, " IC_RX_TL           : %010x\n",
        XI2C_IOREAD(pdata, 0x38));
    axgbe_printf(1, " IC_TX_TL           : %010x\n",
        XI2C_IOREAD(pdata, 0x3c));
    axgbe_printf(1, " IC_ENABLE          : %010x\n",
        XI2C_IOREAD(pdata, 0x6c));
    axgbe_printf(1, " IC_STATUS          : %010x\n",
        XI2C_IOREAD(pdata, 0x70));
    axgbe_printf(1, " IC_TXFLR           : %010x\n",
        XI2C_IOREAD(pdata, 0x74));
    axgbe_printf(1, " IC_RXFLR           : %010x\n",
        XI2C_IOREAD(pdata, 0x78));
    axgbe_printf(1, " IC_ENABLE_STATUS   : %010x\n",
        XI2C_IOREAD(pdata, 0x9c));
    axgbe_printf(1, " IC_COMP_PARAM1     : %010x\n",
        XI2C_IOREAD(pdata, 0xf4));
}

static void
xgbe_dump_active_vlans(struct xgbe_prv_data *pdata)
{
    int i;

    for (i = 0; i < BITS_TO_LONGS(VLAN_NVID); i++) {
        if (i && (i % 8 == 0))
            axgbe_printf(1, "\n");
        axgbe_printf(1, "vlans[%d]: 0x%08lx ", i,
            pdata->active_vlans[i]);
    }
    axgbe_printf(1, "\n");
}

static void
xgbe_default_config(struct xgbe_prv_data *pdata)
{
    pdata->blen = DMA_SBMR_BLEN_64;
    pdata->pbl = DMA_PBL_128;
    pdata->aal = 1;
    pdata->rd_osr_limit = 8;
    pdata->wr_osr_limit = 8;
    pdata->tx_sf_mode = MTL_TSF_ENABLE;
    pdata->tx_threshold = MTL_TX_THRESHOLD_64;
    pdata->tx_osp_mode = DMA_OSP_ENABLE;
    pdata->rx_sf_mode = MTL_RSF_DISABLE;
    pdata->rx_threshold = MTL_RX_THRESHOLD_64;
    pdata->pause_autoneg = 1;
    pdata->tx_pause = 1;
    pdata->rx_pause = 1;
    pdata->phy_speed = SPEED_UNKNOWN;
    pdata->power_down = 0;
    pdata->enable_rss = 1;
}

static int
axgbe_if_attach_post(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc = iflib_get_softc(ctx);
    struct xgbe_prv_data *pdata = &sc->pdata;
    if_t ifp = pdata->netdev;
    struct xgbe_phy_if *phy_if = &pdata->phy_if;
    struct xgbe_hw_if *hw_if = &pdata->hw_if;
    if_softc_ctx_t scctx = sc->scctx;
    int i, ret;

    /* set split header support based on tunable */
    pdata->sph_enable = axgbe_sph_enable;

    /* Initialize ECC timestamps */
    pdata->tx_sec_period = ticks;
    pdata->tx_ded_period = ticks;
    pdata->rx_sec_period = ticks;
    pdata->rx_ded_period = ticks;
    pdata->desc_sec_period = ticks;
    pdata->desc_ded_period = ticks;
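    /*
     * The six stamps above record "now" (in ticks) as the start of the
     * reporting window for each ECC error class (TX/RX/descriptor memory,
     * single- and double-bit), so the ECC handling can rate-limit its
     * reaction to a burst of errors.
     */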
    /* Reset the hardware */
    ret = hw_if->exit(&sc->pdata);
    if (ret)
        axgbe_error("%s: exit error %d\n", __func__, ret);

    /* Configure the defaults */
    xgbe_default_config(pdata);

    /* Set default max values if not provided */
    if (!pdata->tx_max_fifo_size)
        pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
    if (!pdata->rx_max_fifo_size)
        pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

    DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__,
        pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

    /* Set and validate the number of descriptors for a ring */
    MPASS(powerof2(XGBE_TX_DESC_CNT));
    pdata->tx_desc_count = XGBE_TX_DESC_CNT;
    MPASS(powerof2(XGBE_RX_DESC_CNT));
    pdata->rx_desc_count = XGBE_RX_DESC_CNT;

    /* Adjust the number of queues based on interrupts assigned */
    if (pdata->channel_irq_count) {
        pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
            pdata->channel_irq_count);
        pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
            pdata->channel_irq_count);

        DBGPR("adjusted TX %u/%u RX %u/%u\n",
            pdata->tx_ring_count, pdata->tx_q_count,
            pdata->rx_ring_count, pdata->rx_q_count);
    }

    /* Set channel count based on interrupts assigned */
    pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets,
        scctx->isc_nrxqsets);
    DBGPR("Channel count set to: %u\n", pdata->channel_count);

    /* Get RSS key */
#ifdef RSS
    rss_getkey((uint8_t *)pdata->rss_key);
#else
    arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0);
#endif
    XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
    XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
    XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

    /* Initialize the PHY device */
    pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
    phy_if->phy_init(pdata);

    /* Set the coalescing */
    xgbe_init_rx_coalesce(&sc->pdata);
    xgbe_init_tx_coalesce(&sc->pdata);

    ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL);
    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    /* Initialize the phy */
    pdata->phy_link = -1;
    pdata->phy_speed = SPEED_UNKNOWN;
    ret = phy_if->phy_reset(pdata);
    if (ret)
        return (ret);

    /* Calculate the Rx buffer size before allocating rings */
    ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev));
    pdata->rx_buf_size = ret;
    DBGPR("%s: rx_buf_size %d\n", __func__, ret);

    /* Setup RSS lookup table */
    for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
        XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
            i % pdata->rx_ring_count);
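    /*
     * The indirection table above spreads flows round-robin over the
     * active RX rings: with 4 rings, entries 0,4,8,... steer to DMA
     * channel 0, entries 1,5,9,... to channel 1, and so on.
     */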
    /*
     * Mark the device down until it is initialized, which happens
     * when the device is accessed first (for configuring the iface,
     * eg: setting IP)
     */
    set_bit(XGBE_DOWN, &pdata->dev_state);

    DBGPR("mtu %d\n", if_getmtu(ifp));
    scctx->isc_max_frame_size = if_getmtu(ifp) + 18;
    scctx->isc_min_frame_size = XGMAC_MIN_PACKET;

    axgbe_sysctl_init(pdata);

    axgbe_pci_init(pdata);

    return (0);
} /* axgbe_if_attach_post */

static void
xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag,
    int rid)
{
    if (tag)
        bus_teardown_intr(pdata->dev, res, tag);

    if (res)
        bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res);
}

static void
axgbe_interrupts_free(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc = iflib_get_softc(ctx);
    struct xgbe_prv_data *pdata = &sc->pdata;
    if_softc_ctx_t scctx = sc->scctx;
    struct xgbe_channel *channel;
    struct if_irq irq;
    int i;

    axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr);

    /* Free dev_irq */
    iflib_irq_free(ctx, &pdata->dev_irq);

    /* Free ecc_irq */
    xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag,
        pdata->ecc_rid);

    /* Free i2c_irq */
    xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag,
        pdata->i2c_rid);

    /* Free an_irq */
    xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag,
        pdata->an_rid);

    for (i = 0; i < scctx->isc_nrxqsets; i++) {
        channel = pdata->channel[i];
        axgbe_printf(2, "%s: rid %d\n", __func__,
            channel->dma_irq_rid);
        irq.ii_res = channel->dma_irq_res;
        irq.ii_tag = channel->dma_irq_tag;
        iflib_irq_free(ctx, &irq);
    }
}

static int
axgbe_if_detach(if_ctx_t ctx)
{
    struct axgbe_if_softc *sc = iflib_get_softc(ctx);
    struct xgbe_prv_data *pdata = &sc->pdata;
    struct xgbe_phy_if *phy_if = &pdata->phy_if;
    struct resource *mac_res[2];

    mac_res[0] = pdata->xgmac_res;
    mac_res[1] = pdata->xpcs_res;

    phy_if->phy_exit(pdata);

    /* Free Interrupts */
    axgbe_interrupts_free(ctx);

    /* Free workqueues */
    taskqueue_free(pdata->dev_workqueue);

    /* Release bus resources */
    bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res);

    /* Free VLAN bitmap */
    free(pdata->active_vlans, M_AXGBE);

    axgbe_sysctl_exit(pdata);

    return (0);
} /* axgbe_if_detach */

static void
axgbe_pci_init(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_if *phy_if = &pdata->phy_if;
    struct xgbe_hw_if *hw_if = &pdata->hw_if;
    int ret = 0;

    if (!__predict_false((test_bit(XGBE_DOWN, &pdata->dev_state)))) {
        axgbe_printf(1, "%s: Starting when XGBE_UP\n", __func__);
        return;
    }

    hw_if->init(pdata);

    ret = phy_if->phy_start(pdata);
    if (ret) {
        axgbe_error("%s: phy start %d\n", __func__, ret);
        ret = hw_if->exit(pdata);
        if (ret)
            axgbe_error("%s: exit error %d\n", __func__, ret);
        return;
    }

    hw_if->enable_tx(pdata);
    hw_if->enable_rx(pdata);

    xgbe_start_timers(pdata);

    clear_bit(XGBE_DOWN, &pdata->dev_state);

    xgbe_dump_phy_registers(pdata);
    xgbe_dump_prop_registers(pdata);
    xgbe_dump_dma_registers(pdata, -1);
    xgbe_dump_mtl_registers(pdata);
    xgbe_dump_mac_registers(pdata);
    xgbe_dump_rmon_counters(pdata);
}
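/*
 * Bring-up/teardown is serialized by the XGBE_DOWN bit: axgbe_pci_init()
 * only runs on a device marked down (and clears the bit once TX/RX and the
 * service timer are running), while axgbe_pci_stop() below is a no-op on a
 * device already down.  The register dumps at the end of init are gated by
 * the axgbe_printf() verbosity level, so they are silent by default.
 */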
static void
axgbe_if_init(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	axgbe_pci_init(pdata);
}

static void
axgbe_pci_stop(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__);
		return;
	}

	xgbe_stop_timers(pdata);
	taskqueue_drain_all(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	ret = hw_if->exit(pdata);
	if (ret)
		axgbe_error("%s: exit error %d\n", __func__, ret);

	set_bit(XGBE_DOWN, &pdata->dev_state);
}

static void
axgbe_if_stop(if_ctx_t ctx)
{
	axgbe_pci_stop(ctx);
}

static void
axgbe_if_disable_intr(if_ctx_t ctx)
{
	/* TODO - implement */
}

static void
axgbe_if_enable_intr(if_ctx_t ctx)
{
	/* TODO - implement */
}

static int
axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs,
    int ntxqsets)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *tx_ring;
	int i, j, k;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(scctx->isc_ntxqsets == ntxqsets);
	MPASS(ntxqs == 1);

	axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__,
	    scctx->isc_ntxqsets, ntxqsets, ntxqs);

	for (i = 0; i < ntxqsets; i++) {

		channel = pdata->channel[i];

		tx_ring = (struct xgbe_ring *)malloc(ntxqs *
		    sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
		if (tx_ring == NULL) {
			axgbe_error("Unable to allocate TX ring memory\n");
			goto tx_ring_fail;
		}

		channel->tx_ring = tx_ring;

		for (j = 0; j < ntxqs; j++, tx_ring++) {
			tx_ring->rdata =
			    (struct xgbe_ring_data *)malloc(scctx->isc_ntxd[j] *
			    sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);
			if (tx_ring->rdata == NULL) {
				axgbe_error(
				    "Unable to allocate TX ring data memory\n");
				/*
				 * Include this partially set up channel in
				 * the cleanup below; the ring array was
				 * zeroed, so unallocated rdata slots are NULL.
				 */
				i++;
				goto tx_ring_fail;
			}

			/* Get the virtual & physical address of hw queues */
			tx_ring->rdesc = (struct xgbe_ring_desc *)va[i * ntxqs + j];
			tx_ring->rdesc_paddr = pa[i * ntxqs + j];
			tx_ring->rdesc_count = scctx->isc_ntxd[j];
			spin_lock_init(&tx_ring->lock);
		}
	}

	axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets);

	return (0);

tx_ring_fail:

	for (j = 0; j < i; j++) {

		channel = pdata->channel[j];

		tx_ring = channel->tx_ring;
		for (k = 0; k < ntxqs; k++, tx_ring++) {
			if (tx_ring && tx_ring->rdata)
				free(tx_ring->rdata, M_AXGBE);
		}
		free(channel->tx_ring, M_AXGBE);

		channel->tx_ring = NULL;
	}

	return (ENOMEM);

} /* axgbe_if_tx_queues_alloc */
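/*
 * iflib contract note: axgbe_if_tx_queues_alloc() is handed descriptor
 * areas that iflib has already DMA-allocated.  va[]/pa[] hold one
 * virtual/physical address pair per hardware queue, laid out
 * queue-set-major, so queue j of queue set i lives at index
 * i * ntxqs + j (with ntxqs == 1 this degenerates to just i).  The
 * driver only wraps those areas in its own struct xgbe_ring
 * bookkeeping; it never allocates descriptor memory itself.
 */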
static int
axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs,
    int nrxqsets)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *rx_ring;
	int i, j, k;

	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
	if (!pdata->sph_enable) {
		MPASS(nrxqs == 1);
	} else {
		MPASS(nrxqs == 2);
	}

	axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__,
	    scctx->isc_nrxqsets, nrxqsets, nrxqs);

	for (i = 0; i < nrxqsets; i++) {

		channel = pdata->channel[i];

		rx_ring = (struct xgbe_ring *)malloc(nrxqs *
		    sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
		if (rx_ring == NULL) {
			axgbe_error("Unable to allocate RX ring memory\n");
			goto rx_ring_fail;
		}

		channel->rx_ring = rx_ring;

		for (j = 0; j < nrxqs; j++, rx_ring++) {
			rx_ring->rdata =
			    (struct xgbe_ring_data *)malloc(scctx->isc_nrxd[j] *
			    sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);
			if (rx_ring->rdata == NULL) {
				axgbe_error(
				    "Unable to allocate RX ring data memory\n");
				/* Clean this channel up too (see TX above) */
				i++;
				goto rx_ring_fail;
			}

			/* Get the virtual and physical address of the hw queues */
			rx_ring->rdesc = (struct xgbe_ring_desc *)va[i * nrxqs + j];
			rx_ring->rdesc_paddr = pa[i * nrxqs + j];
			rx_ring->rdesc_count = scctx->isc_nrxd[j];
			spin_lock_init(&rx_ring->lock);
		}
	}

	axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets);

	return (0);

rx_ring_fail:

	for (j = 0; j < i; j++) {

		channel = pdata->channel[j];

		rx_ring = channel->rx_ring;
		for (k = 0; k < nrxqs; k++, rx_ring++) {
			if (rx_ring && rx_ring->rdata)
				free(rx_ring->rdata, M_AXGBE);
		}
		free(channel->rx_ring, M_AXGBE);

		channel->rx_ring = NULL;
	}

	return (ENOMEM);

} /* axgbe_if_rx_queues_alloc */

static void
axgbe_if_queues_free(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	if_shared_ctx_t sctx = sc->sctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *tx_ring;
	struct xgbe_ring *rx_ring;
	int i, j;

	for (i = 0; i < scctx->isc_ntxqsets; i++) {

		channel = pdata->channel[i];

		tx_ring = channel->tx_ring;
		for (j = 0; j < sctx->isc_ntxqs; j++, tx_ring++) {
			if (tx_ring && tx_ring->rdata)
				free(tx_ring->rdata, M_AXGBE);
		}
		free(channel->tx_ring, M_AXGBE);
		channel->tx_ring = NULL;
	}

	for (i = 0; i < scctx->isc_nrxqsets; i++) {

		channel = pdata->channel[i];

		rx_ring = channel->rx_ring;
		for (j = 0; j < sctx->isc_nrxqs; j++, rx_ring++) {
			if (rx_ring && rx_ring->rdata)
				free(rx_ring->rdata, M_AXGBE);
		}
		free(channel->rx_ring, M_AXGBE);
		channel->rx_ring = NULL;
	}

	axgbe_free_channels(sc);
} /* axgbe_if_queues_free */

static void
axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	if (!bit_test(pdata->active_vlans, vtag)) {
		axgbe_printf(0, "Registering VLAN %d\n", vtag);

		bit_set(pdata->active_vlans, vtag);
		hw_if->update_vlan_hash_table(pdata);
		pdata->num_active_vlans++;

		axgbe_printf(1, "Total active vlans: %d\n",
		    pdata->num_active_vlans);
	} else
		axgbe_printf(0, "VLAN %d already registered\n", vtag);

	xgbe_dump_active_vlans(pdata);
}
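/*
 * Rx queue-count note: with split-header support enabled
 * (pdata->sph_enable, see the axgbe_sph_enable tunable) each Rx queue
 * set carries two hardware queues (nrxqs == 2), presumably one for
 * packet headers and one for payload; without it a queue set is a
 * single ring.  The MPASSes at the top of axgbe_if_rx_queues_alloc()
 * encode exactly that contract.
 */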
static void
axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	if (pdata->num_active_vlans == 0) {
		axgbe_printf(1, "No active VLANs to unregister\n");
		return;
	}

	if (bit_test(pdata->active_vlans, vtag)) {
		axgbe_printf(0, "Unregistering VLAN %d\n", vtag);

		bit_clear(pdata->active_vlans, vtag);
		hw_if->update_vlan_hash_table(pdata);
		pdata->num_active_vlans--;

		axgbe_printf(1, "Total active vlans: %d\n",
		    pdata->num_active_vlans);
	} else
		axgbe_printf(0, "VLAN %d already unregistered\n", vtag);

	xgbe_dump_active_vlans(pdata);
}

#if __FreeBSD_version >= 1300000
static bool
axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (true);
	}
}
#endif
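/*
 * MSI-X vector layout established below (rids are 1-based):
 *   rid 1             device/admin interrupt (axgbe_dev_isr)
 *   rid 2             ECC interrupt          (axgbe_ecc_isr)
 *   rid 3             I2C interrupt          (axgbe_i2c_isr)
 *   rid 4             auto-negotiation       (axgbe_an_isr)
 *   rid 5..5+nrxq-1   one RX/TX DMA channel interrupt per Rx queue set
 * Tx queue sets do not get vectors of their own; their softirqs reuse
 * the corresponding channel's Rx vector.
 */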
static int
axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct if_irq irq;
	int i, error, rid = 0, flags;
	char buf[16];

	MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY);

	pdata->isr_as_tasklet = 1;

	if (scctx->isc_intr == IFLIB_INTR_MSI) {
		pdata->irq_count = 1;
		pdata->channel_irq_count = 1;
		return (0);
	}

	axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix,
	    scctx->isc_ntxqsets, scctx->isc_nrxqsets);

	flags = RF_ACTIVE;

	/* DEV INTR SETUP */
	rid++;
	error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid,
	    IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq");
	if (error) {
		axgbe_error("Failed to register device interrupt rid %d name %s\n",
		    rid, "dev_irq");
		return (error);
	}

	/* ECC INTR SETUP */
	rid++;
	pdata->ecc_rid = rid;
	pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->ecc_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "ecc_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, &pdata->ecc_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "ecc_irq", error);
		return (error);
	}

	/* I2C INTR SETUP */
	rid++;
	pdata->i2c_rid = rid;
	pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->i2c_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "i2c_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "i2c_irq", error);
		return (error);
	}

	/* AN INTR SETUP */
	rid++;
	pdata->an_rid = rid;
	pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->an_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "an_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "an_irq", error);
		return (error);
	}

	pdata->per_channel_irq = 1;
	pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
	rid++;
	for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) {

		channel = pdata->channel[i];

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RXTX,
		    axgbe_msix_que, channel, channel->queue_index, buf);

		if (error) {
			axgbe_error("Failed to allocate queue interrupt %d, err: %d\n",
			    i, error);
			return (error);
		}

		channel->dma_irq_rid = rid;
		channel->dma_irq_res = irq.ii_res;
		channel->dma_irq_tag = irq.ii_tag;
		axgbe_printf(1, "%s: channel count %d idx %d irq %d\n",
		    __func__, scctx->isc_nrxqsets, i, rid);
	}
	pdata->irq_count = msix;
	pdata->channel_irq_count = scctx->isc_nrxqsets;

	for (i = 0; i < scctx->isc_ntxqsets; i++) {

		channel = pdata->channel[i];

		snprintf(buf, sizeof(buf), "txq%d", i);
		irq.ii_res = channel->dma_irq_res;
		iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel,
		    channel->queue_index, buf);
	}

	return (0);
} /* axgbe_if_msix_intr_assign */

static int
xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return (-1);

	axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n",
	    __func__, channel->queue_index, int_id);
	return (hw_if->enable_int(channel, int_id));
}

static void
xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n",
	    __func__, channel->queue_index, int_id);
	hw_if->disable_int(channel, int_id);
}

static void
xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}
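/*
 * Interrupt flow for the per-channel vectors (level mode): the filter
 * routine axgbe_msix_que() below masks the channel's TI/RI sources and
 * returns FILTER_SCHEDULE_THREAD so iflib runs the queue's task; once
 * the task has drained the rings, iflib calls
 * axgbe_if_rx_queue_intr_enable()/axgbe_if_tx_queue_intr_enable(),
 * which re-arm the sources via xgbe_enable_rx_tx_int().
 */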
static int
axgbe_msix_que(void *arg)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)arg;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	axgbe_printf(1, "%s: Channel: %d SR 0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n",
	    __func__, channel->queue_index,
	    XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
	    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR),
	    XGMAC_DMA_IOREAD(channel, DMA_CH_IER),
	    XGMAC_IOREAD(pdata, DMA_ISR),
	    XGMAC_IOREAD(pdata, MAC_ISR));

	(void)XGMAC_DMA_IOREAD(channel, DMA_CH_SR);

	/* Disable Tx and Rx channel interrupts */
	xgbe_disable_rx_tx_int(pdata, channel);

	/* Clear the interrupts */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return (FILTER_SCHEDULE_THREAD);
}

static int
axgbe_dev_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int i, dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_mdioisr;
	int ret = FILTER_HANDLED;

	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr);

	if (!dma_isr)
		return (FILTER_HANDLED);

	for (i = 0; i < pdata->channel_count; i++) {

		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__,
		    channel->queue_index, dma_ch_isr,
		    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));

		/*
		 * The TI or RI interrupt bits may still be set even when
		 * per channel DMA interrupts are in use.  In that mode the
		 * per channel ISR owns those sources, so make sure they are
		 * not acted on (or their status cleared) from this shared
		 * handler.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {

			/* Disable Tx and Rx interrupts */
			xgbe_disable_rx_tx_ints(pdata);
		} else {

			/*
			 * Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts; these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Report a Fatal Bus Error (the device is not restarted here) */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			axgbe_error("%s: Fatal bus error reported 0x%x\n",
			    __func__, dma_ch_isr);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		ret = FILTER_SCHEDULE_THREAD;
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {

		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
		axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
			    SNGLCOMPINT))
				wakeup_one(pdata);
		}
	}

	return (ret);
} /* axgbe_dev_isr */

static void
axgbe_i2c_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;

	sc->pdata.i2c_if.i2c_isr(&sc->pdata);
}

static void
axgbe_ecc_isr(void *arg)
{
	/* TODO - implement */
}

static void
axgbe_an_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;

	sc->pdata.phy_if.an_isr(&sc->pdata);
}
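/*
 * Filter-routine convention used above: axgbe_dev_isr() and
 * axgbe_msix_que() run in interrupt filter context, so they only ack
 * and mask hardware sources.  FILTER_HANDLED tells the interrupt
 * framework nothing more is needed; FILTER_SCHEDULE_THREAD defers the
 * real Tx/Rx work to the queue's taskqueue thread.
 */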
static int
axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (qid < pdata->tx_q_count) {
		ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
		if (ret) {
			axgbe_error("Enable TX INT failed\n");
			return (ret);
		}
	} else
		axgbe_error("Queue ID exceeds channel count\n");

	return (0);
}

static int
axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (qid < pdata->rx_q_count) {
		ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
		if (ret) {
			axgbe_error("Enable RX INT failed\n");
			return (ret);
		}
	} else
		axgbe_error("Queue ID exceeds channel count\n");

	return (0);
}

static void
axgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__,
	    pdata->phy_link, sc->link_status, pdata->phy.speed);

	if (pdata->phy_link < 0)
		return;

	if (pdata->phy_link) {
		if (sc->link_status == LINK_STATE_DOWN) {
			sc->link_status = LINK_STATE_UP;
			if (pdata->phy.speed & SPEED_10000)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(10));
			else if (pdata->phy.speed & SPEED_2500)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(2.5));
			else if (pdata->phy.speed & SPEED_1000)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(1));
			else if (pdata->phy.speed & SPEED_100)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Mbps(100));
			else if (pdata->phy.speed & SPEED_10)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Mbps(10));
		}
	} else {
		if (sc->link_status == LINK_STATE_UP) {
			sc->link_status = LINK_STATE_DOWN;
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
		}
	}
}

static int
axgbe_if_media_change(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	sx_xlock(&sc->pdata.an_mutex);
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		/* Don't leak the AN lock on the error return */
		sx_xunlock(&sc->pdata.an_mutex);
		return (EINVAL);
	}

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_10G_KR:
		sc->pdata.phy.speed = SPEED_10000;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_2500_KX:
		sc->pdata.phy.speed = SPEED_2500;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_1000_KX:
		sc->pdata.phy.speed = SPEED_1000;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_100_TX:
		sc->pdata.phy.speed = SPEED_100;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_AUTO:
		sc->pdata.phy.autoneg = AUTONEG_ENABLE;
		break;
	}
	sx_xunlock(&sc->pdata.an_mutex);

	return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata));
}
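/*
 * Sign note: phy_config_aneg() follows the Linux-derived convention of
 * returning zero or a negative errno, so axgbe_if_media_change() above
 * negates its result to hand iflib the positive errno value FreeBSD
 * expects.
 */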
static int
axgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_t ifp = pdata->netdev;

	axgbe_printf(1, "%s: MAC_PFR 0x%x drv_flags 0x%x if_flags 0x%x\n",
	    __func__, XGMAC_IOREAD(pdata, MAC_PFR), if_getdrvflags(ifp),
	    if_getflags(ifp));

	if (if_getflags(ifp) & IFF_PPROMISC) {

		axgbe_printf(1, "User requested to enter promisc mode\n");

		if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 1) {
			axgbe_printf(1, "Already in promisc mode\n");
			return (0);
		}

		axgbe_printf(1, "Entering promisc mode\n");
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
	} else {

		axgbe_printf(1, "User requested to leave promisc mode\n");

		if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 0) {
			axgbe_printf(1, "Already not in promisc mode\n");
			return (0);
		}

		axgbe_printf(1, "Leaving promisc mode\n");
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
	}

	return (0);
}

static uint64_t
axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	pdata->hw_if.read_mmc_stats(pdata);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (pstats->rxframecount_gb);
	case IFCOUNTER_IERRORS:
		return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g -
		    pstats->rxmulticastframes_g - pstats->rxunicastframes_g);
	case IFCOUNTER_OPACKETS:
		return (pstats->txframecount_gb);
	case IFCOUNTER_OERRORS:
		return (pstats->txframecount_gb - pstats->txframecount_g);
	case IFCOUNTER_IBYTES:
		return (pstats->rxoctetcount_gb);
	case IFCOUNTER_OBYTES:
		return (pstats->txoctetcount_gb);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (mtu > XGMAC_JUMBO_PACKET_MTU)
		return (EINVAL);

	ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu);
	pdata->rx_buf_size = ret;
	axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret);

	sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	return (0);
}

static void
axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	ifmr->ifm_status = IFM_AVALID;
	if (!sc->pdata.phy.link)
		return;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status |= IFM_ACTIVE;

	axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed,
	    pdata->phy_if.phy_impl.cur_mode(pdata));
	pdata->phy_if.phy_impl.get_type(pdata, ifmr);

	ifmr->ifm_active |= IFM_FDX;
	ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
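/*
 * Counter note: the MMC block exposes no direct "input errors"
 * register, so IFCOUNTER_IERRORS above is derived arithmetically:
 * frames received good-plus-bad (rxframecount_gb) minus the good
 * unicast, multicast and broadcast frames leaves only the bad ones.
 * IFCOUNTER_OERRORS is computed the same way on the Tx side from
 * txframecount_gb and txframecount_g.
 */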