// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct pmc_serdes_regs {
	u8 index;
	u32 val;
};

struct pmc_serdes_reg_info {
	const struct pmc_serdes_regs *regs;
	u8 num_regs;
};

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;
	bool is_pse;
	const int *tsn_lane_regs;
	int max_tsn_lane_regs;
	struct pmc_serdes_reg_info pid_1g;
	struct pmc_serdes_reg_info pid_2p5g;
};

/* This struct is used to associate the PCI function of a MAC controller on a
 * board, discovered via DMI, with the address of the PHY connected to that
 * MAC. A negative address means the MAC controller is not connected to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
static const int adln_tsn_lane_regs[] = {6};
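/* Look up the PHY address for this PCI function in the board's DMI match
 * table below; -ENODEV means either an unknown board or a function with no
 * PHY attached.
 */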
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}
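/* Reverse of intel_serdes_powerup(): gate the PSE SGMII Rx clock, drop the
 * lane to power state P3, then de-assert the PLL clock request and the lane
 * reset, polling GSR0 for each acknowledgement.
 */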
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR);

	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
	    SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->max_speed = 2500;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
		priv->plat->mdio_bus_data->default_an_inband = false;
	} else {
		priv->plat->max_speed = 1000;
	}
}

/* Program the PTP clock frequency for the different Intel mGbE variants,
 * which have slightly different GPO mappings.
 */
static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
{
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}
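/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE3..0 reads
 * over the ad-hoc SerDes MDIO address, most significant part first.
 */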
static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
{
	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
}
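/* Cross-timestamp callback: arm the selected auxiliary snapshot, toggle GPO1
 * so the hardware latches the PTP time and the ART counter together, then
 * drain the snapshot FIFO and report the last PTP/ART pair (ART scaled by
 * crossts_adj) against CSID_X86_ART.
 */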
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Both internal crosstimestamping and external triggered event
	 * timestamping cannot be run concurrently.
	 */
	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
		return -EBUSY;

	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by toggling GPO1 low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Time sync done Indication - Interrupt method */
	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
					      stmmac_cross_ts_isr(priv),
					      HZ / 100)) {
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -ETIMEDOUT;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		system->cycles = art_time;
	}

	system->cycles *= intel_priv->crossts_adj;
	system->cs_id = CSID_X86_ART;
	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;

	return 0;
}

static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}

static int intel_tsn_lane_is_available(struct net_device *ndev,
				       struct intel_priv_data *intel_priv)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct pmc_ipc_cmd tmp = {};
	struct pmc_ipc_rbuf rbuf = {};
	int ret = 0, i, j;
	const int max_fia_regs = 5;

	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;

	for (i = 0; i < max_fia_regs; i++) {
		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;

		ret = intel_pmc_ipc(&tmp, &rbuf);
		if (ret < 0) {
			netdev_info(priv->dev, "Failed to read from PMC.\n");
			return ret;
		}

		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
			if ((rbuf.buf[0] >>
			     (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
			     B_PCH_FIA_PCR_L0O) == 0xB)
				return 0;
	}

	return -EINVAL;
}

static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
{
	int ret = 0, i;

	for (i = 0; i < max_regs; i++) {
		struct pmc_ipc_cmd tmp = {};
		struct pmc_ipc_rbuf rbuf = {};

		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
		tmp.wbuf[0] = (u32)regs[i].index;
		tmp.wbuf[1] = regs[i].val;

		ret = intel_pmc_ipc(&tmp, &rbuf);
		if (ret < 0)
			return ret;
	}

	return ret;
}
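/* mac_finish callback: once a TSN lane is reported available, write the 1G
 * or 2.5G ModPHY PLL register set through the PMC and power-cycle the SerDes
 * so the new line rate takes effect.
 */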
static int intel_mac_finish(struct net_device *ndev,
			    void *intel_data,
			    unsigned int mode,
			    phy_interface_t interface)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	const struct pmc_serdes_regs *regs;
	int max_regs = 0;
	int ret = 0;

	ret = intel_tsn_lane_is_available(ndev, intel_priv);
	if (ret < 0) {
		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
		return ret;
	}

	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
		regs = intel_priv->pid_2p5g.regs;
		max_regs = intel_priv->pid_2p5g.num_regs;
	} else {
		regs = intel_priv->pid_1g.regs;
		max_regs = intel_priv->pid_1g.num_regs;
	}

	ret = intel_set_reg_access(regs, max_regs);
	if (ret < 0)
		return ret;

	priv->plat->phy_interface = interface;

	intel_serdes_powerdown(ndev, intel_priv);
	intel_serdes_powerup(ndev, intel_priv);

	return ret;
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
						 phy_interface_t interface)
{
	/* plat->mdio_bus_data->has_xpcs has been set true, so there
	 * should always be an XPCS. The original code would always
	 * return this if present.
	 */
	return xpcs_to_phylink_pcs(priv->hw->xpcs);
}
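/* Platform data shared by all Intel mGbE instances: queue and FIFO layout,
 * AXI defaults, the fixed-rate "stmmac-clk", optional phy-mode override from
 * firmware, XPCS wiring and the MSI vector map.
 */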
static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	struct fwnode_handle *fwnode;
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);

	/* Multiplying factor to the clk_eee_i clock time
	 * period to make it closer to 100 ns. This value
	 * should be programmed such that the clk_eee_time_period *
	 * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns
	 * clk_eee frequency is 19.2Mhz
	 * clk_eee_time_period is 52ns
	 * 52ns * (1 + 1) = 104ns
	 * MULT_FACT_100NS = 1
	 */
	plat->mult_fact_100ns = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
		/* Default TX Q0 to use TSO and rest TXQ for TBS */
		if (i > 0)
			plat->tx_queues_cfg[i].tbs_en = 1;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;
	plat->dma_cfg->dche = true;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* For fixed-link setup, we allow phy-mode setting */
	fwnode = dev_fwnode(&pdev->dev);
	if (fwnode) {
		int phy_mode;

		/* "phy-mode" setting is optional. If it is set,
		 * we allow either sgmii or 1000base-x for now.
		 */
		phy_mode = fwnode_get_phy_mode(fwnode);
		if (phy_mode >= 0) {
			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
				plat->phy_interface = phy_mode;
			else
				dev_warn(&pdev->dev, "Invalid phy-mode\n");
		}
	}

	/* Intel mgbe SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
		plat->mdio_bus_data->default_an_inband = true;
		plat->select_pcs = intel_mgbe_select_pcs;
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	plat->int_snapshot_num = AUX_SNAPSHOT1;

	plat->crosststamp = intel_crosststamp;
	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;
	plat->clk_ptp_rate = 204800000;

	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	plat->clk_ptp_rate = 204800000;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};
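/* PSE-attached MACs: 32-bit host DMA, 200 MHz PTP clock, and a cross-timestamp
 * ratio derived from what is presumably the PSE ART base frequency
 * (EHL_PSE_ART_MHZ).
 */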
static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};
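/* TGL and ADL-S reuse this setup: 6 Rx / 4 Tx queues, 204.8 MHz PTP clock,
 * and 2.5G support selected at run time via intel_speed_mode_2500().
 */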
static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 204800000;
	plat->speed_mode_2500 = intel_speed_mode_2500;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

static int adln_common_data(struct pci_dev *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 204800000;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);

	return intel_mgbe_common_data(pdev, plat);
}

static int adln_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return adln_common_data(pdev, plat);
}

static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
	.setup = adln_sgmii_phy0_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};
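/* IRQ setup helpers: probe first tries the per-queue multi-vector MSI layout
 * described by plat->msi_*_vec and falls back to a single shared vector when
 * that allocation fails.
 */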
static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);
	res->wol_irq = res->irq;
	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}

static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device, or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
					     sizeof(*plat->safety_feat_cfg),
					     GFP_KERNEL);
	if (!plat->safety_feat_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid so that they can be set
	 * according to the platform data settings below.
	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_alloc_irq;

	return 0;

err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver remove routine to free
 * the net resources and then releases the clock and PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_disable_unprepare(priv->plat->stmmac_clk);
	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_wake_from_d3(pdev, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
 * named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");