// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"

#define INTEL_MGBE_ADHOC_ADDR	0x15
#define INTEL_MGBE_XPCS_ADDR	0x16

/* Selection of the PTP clock frequency for PSE and PCH GbE */
#define PSE_PTP_CLK_FREQ_MASK		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PSE_PTP_CLK_FREQ_200MHZ		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_256MHZ		(0)
#define PCH_PTP_CLK_FREQ_MASK		(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ		(0)

struct intel_priv_data {
	int mdio_adhoc_addr;	/* MDIO address for SerDes etc. */
	bool is_pse;
};

/* This struct is used to associate the PCI function of the MAC controller
 * on a board, discovered via DMI, with the address of the PHY connected to
 * that MAC. A negative address means the MAC controller is not connected
 * to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

/* Poll an ad-hoc SerDes register until the masked bits match the expected
 * value; returns 0 on match or -ETIMEDOUT after 10 attempts.
 */
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}
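
/* Descriptive note (added): bring the SerDes lane up through the ad-hoc MDIO
 * device by requesting the PLL clock and waiting for clk_ack, asserting the
 * lane reset and waiting for it to be reflected, then moving the power state
 * to P0. intel_serdes_powerdown() below performs the reverse sequence
 * (power state P3, clk_req de-assert, lane reset de-assert).
 */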
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	return 0;
}

static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

/* Program the PTP clock frequency for the different Intel mGBE variants,
 * which have slightly different GPO mappings.
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}
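
/* Descriptive note (added): baseline defaults shared by the non-mGBE (Quark)
 * setup below; the mGBE parts take their defaults from
 * intel_mgbe_common_data() instead.
 */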
static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* Intel mGBE SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		plat->mdio_bus_data->has_xpcs = true;
		plat->mdio_bus_data->xpcs_an_inband = true;
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};
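
/* Descriptive note (added): the two EHL PSE MACs sit behind the Intel(R)
 * Programmable Services Engine; they use the PSE GPO mapping for the PTP
 * clock selection (is_pse) and appear to restrict DMA addressing to 32 bits
 * (plat->addr64 = 32).
 */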
static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->addr64 = 32;

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->addr64 = 32;

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
	 * has only one PCI network device, while other asset tags are for
	 * the IOT2040, which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register the net device if the MAC
	 * controller is not connected to any PHY.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We
		 * always use 1 here as the PHY address, so at least the
		 * first MAC controller found gets probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to the table of device IDs
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device, or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];
	res.wol_irq = pci_irq_vector(pdev, 0);
	res.irq = pci_irq_vector(pdev, 0);

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		pci_free_irq_vectors(pdev);
		clk_disable_unprepare(plat->stmmac_clk);
		clk_unregister_fixed_rate(plat->stmmac_clk);
	}

	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 *
 * Description: this function calls the main driver's remove routine to free
 * the net resources and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	pci_free_irq_vectors(pdev);

	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));

	pci_disable_device(pdev);
}
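
/* Descriptive note (added): suspend lets the stmmac core quiesce the MAC,
 * then saves PCI state, disables the device and arms wake from D3; resume
 * undoes this in reverse order before handing control back to
 * stmmac_resume().
 */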
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK_ID			0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID		0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
 * named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID		0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID		0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID		0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID		0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID		0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID		0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID		0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID		0x7aad

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");