// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct pmc_serdes_regs {
	u8 index;
	u32 val;
};

struct pmc_serdes_reg_info {
	const struct pmc_serdes_regs *regs;
	u8 num_regs;
};

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;
	bool is_pse;
	const int *tsn_lane_regs;
	int max_tsn_lane_regs;
	struct pmc_serdes_reg_info pid_1g;
	struct pmc_serdes_reg_info pid_2p5g;
};

/* This struct is used to associate the PCI Function of the MAC controller
 * on a board, discovered via DMI, with the address of the PHY connected to
 * the MAC. A negative address means that the MAC controller is not connected
 * to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
static const int adln_tsn_lane_regs[] = {6};

static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}

static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR);

	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
	    SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->max_speed = 2500;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
		priv->plat->mdio_bus_data->default_an_inband = false;
	} else {
		priv->plat->max_speed = 1000;
	}
}

/* Program the PTP clock frequency for the different variants of
 * Intel mGbE, which have slightly different GPO mappings.
 */
static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
{
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}

static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
{
	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
}

static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Internal cross-timestamping and externally triggered event
	 * timestamping cannot run concurrently.
	 */
	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
		return -EBUSY;

	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by toggling GPO1 low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Time sync done Indication - Interrupt method */
	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
					      stmmac_cross_ts_isr(priv),
					      HZ / 100)) {
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -ETIMEDOUT;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		system->cycles = art_time;
	}

	system->cycles *= intel_priv->crossts_adj;
	system->cs_id = CSID_X86_ART;
	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;

	return 0;
}

static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}

static int intel_tsn_lane_is_available(struct net_device *ndev,
				       struct intel_priv_data *intel_priv)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct pmc_ipc_cmd tmp = {};
	struct pmc_ipc_rbuf rbuf = {};
	int ret = 0, i, j;
	const int max_fia_regs = 5;

	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;

	for (i = 0; i < max_fia_regs; i++) {
		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;

		ret = intel_pmc_ipc(&tmp, &rbuf);
		if (ret < 0) {
			netdev_info(priv->dev, "Failed to read from PMC.\n");
			return ret;
		}

		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
			if ((rbuf.buf[0] >>
			     (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
			     B_PCH_FIA_PCR_L0O) == 0xB)
				return 0;
	}

	return -EINVAL;
}

static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
{
	int ret = 0, i;

	for (i = 0; i < max_regs; i++) {
		struct pmc_ipc_cmd tmp = {};
		struct pmc_ipc_rbuf rbuf = {};

		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
		tmp.wbuf[0] = (u32)regs[i].index;
		tmp.wbuf[1] = regs[i].val;

		ret = intel_pmc_ipc(&tmp, &rbuf);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static int intel_mac_finish(struct net_device *ndev,
			    void *intel_data,
			    unsigned int mode,
			    phy_interface_t interface)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	const struct pmc_serdes_regs *regs;
	int max_regs = 0;
	int ret = 0;

	ret = intel_tsn_lane_is_available(ndev, intel_priv);
	if (ret < 0) {
		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
		return ret;
	}

	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
		regs = intel_priv->pid_2p5g.regs;
		max_regs = intel_priv->pid_2p5g.num_regs;
	} else {
		regs = intel_priv->pid_1g.regs;
		max_regs = intel_priv->pid_1g.num_regs;
	}

	ret = intel_set_reg_access(regs, max_regs);
	if (ret < 0)
		return ret;

	priv->plat->phy_interface = interface;

	intel_serdes_powerdown(ndev, intel_priv);
	intel_serdes_powerup(ndev, intel_priv);

	return ret;
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
						 phy_interface_t interface)
{
	/* plat->mdio_bus_data->has_xpcs has been set true, so there
	 * should always be an XPCS. The original code would always
	 * return this if present.
	 */
	return xpcs_to_phylink_pcs(priv->hw->xpcs);
}

static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	struct fwnode_handle *fwnode;
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);

	/* Multiplying factor to the clk_eee_i clock time
	 * period to make it closer to 100 ns. This value
	 * should be programmed such that clk_eee_time_period *
	 * (MULT_FACT_100NS + 1) is within 80 ns to 120 ns.
	 * clk_eee frequency is 19.2MHz
	 * clk_eee_time_period is 52ns
	 * 52ns * (1 + 1) = 104ns
	 * MULT_FACT_100NS = 1
	 */
	plat->mult_fact_100ns = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
		/* Default TX Q0 to use TSO and rest TXQ for TBS */
		if (i > 0)
			plat->tx_queues_cfg[i].tbs_en = 1;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;
	plat->dma_cfg->dche = true;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* For fixed-link setup, we allow phy-mode setting */
	fwnode = dev_fwnode(&pdev->dev);
	if (fwnode) {
		int phy_mode;

		/* "phy-mode" setting is optional. If it is set,
		 * we allow either sgmii or 1000base-x for now.
		 */
		phy_mode = fwnode_get_phy_mode(fwnode);
		if (phy_mode >= 0) {
			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
				plat->phy_interface = phy_mode;
			else
				dev_warn(&pdev->dev, "Invalid phy-mode\n");
		}
	}

	/* Intel mgbe SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
		plat->mdio_bus_data->default_an_inband = true;
		plat->select_pcs = intel_mgbe_select_pcs;
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	plat->int_snapshot_num = AUX_SNAPSHOT1;

	plat->crosststamp = intel_crosststamp;
	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;
	plat->clk_ptp_rate = 204800000;

	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	plat->clk_ptp_rate = 204800000;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->host_dma_width = 32;

	plat->clk_ptp_rate = 200000000;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 204800000;
	plat->speed_mode_2500 = intel_speed_mode_2500;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

static int adln_common_data(struct pci_dev *pdev,
			    struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 204800000;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);

	return intel_mgbe_common_data(pdev, plat);
}

static int adln_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	plat->mac_finish = intel_mac_finish;

	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);

	return adln_common_data(pdev, plat);
}

static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
	.setup = adln_sgmii_phy0_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);
	res->wol_irq = res->irq;
	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}

static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device, or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
					     sizeof(*plat->safety_feat_cfg),
					     GFP_KERNEL);
	if (!plat->safety_feat_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid so that they can be set
	 * according to the platform data settings below.
	 * Note: MSI vector takes value from 0 up to 31 (STMMAC_MSI_VEC_MAX)
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_alloc_irq;

	return 0;

err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver remove routine to free
 * the net resources and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_disable_unprepare(priv->plat->stmmac_clk);
	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_wake_from_d3(pdev, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");