xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision fe259a1bb26ec78842c975d992331705b0c2c2e8)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include <linux/platform_data/x86/intel_pmc_ipc.h>
9 #include "dwmac-intel.h"
10 #include "dwmac4.h"
11 #include "stmmac.h"
12 #include "stmmac_ptp.h"
13 
14 struct pmc_serdes_regs {
15 	u8 index;
16 	u32 val;
17 };
18 
19 struct pmc_serdes_reg_info {
20 	const struct pmc_serdes_regs *regs;
21 	u8 num_regs;
22 };
23 
24 struct intel_priv_data {
25 	int mdio_adhoc_addr;	/* MDIO ad-hoc address for SerDes etc. */
26 	unsigned long crossts_adj;
27 	bool is_pse;
28 	const int *tsn_lane_regs;
29 	int max_tsn_lane_regs;
30 	struct pmc_serdes_reg_info pid_1g;
31 	struct pmc_serdes_reg_info pid_2p5g;
32 };
33 
34 /* This struct is used to associate the PCI function of a MAC controller on
35  * a board, discovered via DMI, with the address of the PHY connected to that
36  * MAC. A negative address means the MAC controller is not connected to a
37  * PHY.
38  */
39 struct stmmac_pci_func_data {
40 	unsigned int func;
41 	int phy_addr;
42 };
43 
44 struct stmmac_pci_dmi_data {
45 	const struct stmmac_pci_func_data *func;
46 	size_t nfuncs;
47 };
48 
49 struct stmmac_pci_info {
50 	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
51 };
52 
53 static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
54 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
55 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
56 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
57 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
58 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
59 	{}
60 };
61 
62 static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
63 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
64 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
65 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
66 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
67 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
68 	{}
69 };
70 
71 static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
72 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
73 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
74 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
75 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
76 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
77 	{}
78 };
79 
80 static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
81 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
82 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
83 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
84 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
85 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
86 	{}
87 };
88 
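/* FIA lane indices checked for TSN capability; assumed to be the PCH FIA
 * lanes wired to the mGbE TSN controllers on EHL and ADL-N respectively.
 */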
89 static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
90 static const int adln_tsn_lane_regs[] = {6};
91 
92 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
93 				    const struct dmi_system_id *dmi_list)
94 {
95 	const struct stmmac_pci_func_data *func_data;
96 	const struct stmmac_pci_dmi_data *dmi_data;
97 	const struct dmi_system_id *dmi_id;
98 	int func = PCI_FUNC(pdev->devfn);
99 	size_t n;
100 
101 	dmi_id = dmi_first_match(dmi_list);
102 	if (!dmi_id)
103 		return -ENODEV;
104 
105 	dmi_data = dmi_id->driver_data;
106 	func_data = dmi_data->func;
107 
108 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
109 		if (func_data->func == func)
110 			return func_data->phy_addr;
111 
112 	return -ENODEV;
113 }
114 
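/* Poll an ad-hoc SerDes register over MDIO until (reg & mask) == (val & mask),
 * retrying up to 10 times with POLL_DELAY_US between reads. Returns 0 on a
 * match, -ETIMEDOUT otherwise.
 */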
115 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
116 			      int phyreg, u32 mask, u32 val)
117 {
118 	unsigned int retries = 10;
119 	int val_rd;
120 
121 	do {
122 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
123 		if ((val_rd & mask) == (val & mask))
124 			return 0;
125 		udelay(POLL_DELAY_US);
126 	} while (--retries);
127 
128 	return -ETIMEDOUT;
129 }
130 
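/* Bring the SerDes lane up through the ad-hoc MDIO address: program the rate
 * and PCLK, request the PLL clock, assert the lane reset and move the lane to
 * power state P0, polling GSR0 for each acknowledgement.
 */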
131 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
132 {
133 	struct intel_priv_data *intel_priv = priv_data;
134 	struct stmmac_priv *priv = netdev_priv(ndev);
135 	int serdes_phy_addr = 0;
136 	u32 data = 0;
137 
138 	if (!intel_priv->mdio_adhoc_addr)
139 		return 0;
140 
141 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
142 
143 	/* Set the serdes rate and the PCLK rate */
144 	data = mdiobus_read(priv->mii, serdes_phy_addr,
145 			    SERDES_GCR0);
146 
147 	data &= ~SERDES_RATE_MASK;
148 	data &= ~SERDES_PCLK_MASK;
149 
150 	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
151 		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
152 			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
153 	else
154 		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
155 			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
156 
157 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
158 
159 	/* assert clk_req */
160 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
161 	data |= SERDES_PLL_CLK;
162 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
163 
164 	/* check for clk_ack assertion */
165 	data = serdes_status_poll(priv, serdes_phy_addr,
166 				  SERDES_GSR0,
167 				  SERDES_PLL_CLK,
168 				  SERDES_PLL_CLK);
169 
170 	if (data) {
171 		dev_err(priv->device, "Serdes PLL clk request timeout\n");
172 		return data;
173 	}
174 
175 	/* assert lane reset */
176 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
177 	data |= SERDES_RST;
178 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
179 
180 	/* check for assert lane reset reflection */
181 	data = serdes_status_poll(priv, serdes_phy_addr,
182 				  SERDES_GSR0,
183 				  SERDES_RST,
184 				  SERDES_RST);
185 
186 	if (data) {
187 		dev_err(priv->device, "Serdes assert lane reset timeout\n");
188 		return data;
189 	}
190 
191 	/* Move power state to P0 */
192 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
193 
194 	data &= ~SERDES_PWR_ST_MASK;
195 	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
196 
197 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
198 
199 	/* Check for P0 state */
200 	data = serdes_status_poll(priv, serdes_phy_addr,
201 				  SERDES_GSR0,
202 				  SERDES_PWR_ST_MASK,
203 				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
204 
205 	if (data) {
206 		dev_err(priv->device, "Serdes power state P0 timeout.\n");
207 		return data;
208 	}
209 
210 	/* PSE only - ungate SGMII PHY Rx Clock */
211 	if (intel_priv->is_pse)
212 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
213 			       0, SERDES_PHY_RX_CLK);
214 
215 	return 0;
216 }
217 
218 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
219 {
220 	struct intel_priv_data *intel_priv = intel_data;
221 	struct stmmac_priv *priv = netdev_priv(ndev);
222 	int serdes_phy_addr = 0;
223 	u32 data = 0;
224 
225 	if (!intel_priv->mdio_adhoc_addr)
226 		return;
227 
228 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
229 
230 	/* PSE only - gate SGMII PHY Rx Clock */
231 	if (intel_priv->is_pse)
232 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
233 			       SERDES_PHY_RX_CLK, 0);
234 
235 	/* Move power state to P3 */
236 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
237 
238 	data &= ~SERDES_PWR_ST_MASK;
239 	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
240 
241 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
242 
243 	/* Check for P3 state */
244 	data = serdes_status_poll(priv, serdes_phy_addr,
245 				  SERDES_GSR0,
246 				  SERDES_PWR_ST_MASK,
247 				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
248 
249 	if (data) {
250 		dev_err(priv->device, "Serdes power state P3 timeout\n");
251 		return;
252 	}
253 
254 	/* de-assert clk_req */
255 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
256 	data &= ~SERDES_PLL_CLK;
257 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
258 
259 	/* check for clk_ack de-assert */
260 	data = serdes_status_poll(priv, serdes_phy_addr,
261 				  SERDES_GSR0,
262 				  SERDES_PLL_CLK,
263 				  (u32)~SERDES_PLL_CLK);
264 
265 	if (data) {
266 		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
267 		return;
268 	}
269 
270 	/* de-assert lane reset */
271 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
272 	data &= ~SERDES_RST;
273 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
274 
275 	/* check for de-assert lane reset reflection */
276 	data = serdes_status_poll(priv, serdes_phy_addr,
277 				  SERDES_GSR0,
278 				  SERDES_RST,
279 				  (u32)~SERDES_RST);
280 
281 	if (data) {
282 		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
283 		return;
284 	}
285 }
286 
287 static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
288 {
289 	struct intel_priv_data *intel_priv = intel_data;
290 	struct stmmac_priv *priv = netdev_priv(ndev);
291 	int serdes_phy_addr = 0;
292 	u32 data = 0;
293 
294 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
295 
296 	/* Determine the link speed mode: 2.5Gbps/1Gbps */
297 	data = mdiobus_read(priv->mii, serdes_phy_addr,
298 			    SERDES_GCR);
299 
300 	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
301 	    SERDES_LINK_MODE_2G5) {
302 		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
303 		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
304 		priv->plat->mdio_bus_data->default_an_inband = false;
305 	}
306 }
307 
308 /* Program the PTP clock frequency for the different variants of
309  * Intel mGBE, which have slightly different GPO mappings.
310  */
311 static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
312 {
313 	struct intel_priv_data *intel_priv;
314 	u32 gpio_value;
315 
316 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
317 
318 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
319 
320 	if (intel_priv->is_pse) {
321 		/* For PSE GbE, use 200MHz */
322 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
323 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
324 	} else {
325 		/* For PCH GbE, use 200MHz */
326 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
327 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
328 	}
329 
330 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
331 }
332 
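/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE registers
 * (VALUE3 holds the most significant bits), shifting by GMAC4_ART_TIME_SHIFT
 * between the 16-bit MDIO reads.
 */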
333 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
334 			u64 *art_time)
335 {
336 	u64 ns;
337 
338 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
339 	ns <<= GMAC4_ART_TIME_SHIFT;
340 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
341 	ns <<= GMAC4_ART_TIME_SHIFT;
342 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
343 	ns <<= GMAC4_ART_TIME_SHIFT;
344 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
345 
346 	*art_time = ns;
347 }
348 
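/* Nonzero when the GMAC timestamp interrupt (TSIE) is pending */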
349 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
350 {
351 	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
352 }
353 
354 static int intel_crosststamp(ktime_t *device,
355 			     struct system_counterval_t *system,
356 			     void *ctx)
357 {
358 	struct intel_priv_data *intel_priv;
359 
360 	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
361 	void __iomem *ptpaddr = priv->ptpaddr;
362 	void __iomem *ioaddr = priv->hw->pcsr;
363 	unsigned long flags;
364 	u64 art_time = 0;
365 	u64 ptp_time = 0;
366 	u32 num_snapshot;
367 	u32 gpio_value;
368 	u32 acr_value;
369 	int i;
370 
371 	if (!boot_cpu_has(X86_FEATURE_ART))
372 		return -EOPNOTSUPP;
373 
374 	intel_priv = priv->plat->bsp_priv;
375 
376 	/* Internal cross-timestamping and externally triggered event
377 	 * timestamping cannot run concurrently.
378 	 */
379 	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
380 		return -EBUSY;
381 
382 	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
383 
384 	mutex_lock(&priv->aux_ts_lock);
385 	/* Enable Internal snapshot trigger */
386 	acr_value = readl(ptpaddr + PTP_ACR);
387 	acr_value &= ~PTP_ACR_MASK;
388 	switch (priv->plat->int_snapshot_num) {
389 	case AUX_SNAPSHOT0:
390 		acr_value |= PTP_ACR_ATSEN0;
391 		break;
392 	case AUX_SNAPSHOT1:
393 		acr_value |= PTP_ACR_ATSEN1;
394 		break;
395 	case AUX_SNAPSHOT2:
396 		acr_value |= PTP_ACR_ATSEN2;
397 		break;
398 	case AUX_SNAPSHOT3:
399 		acr_value |= PTP_ACR_ATSEN3;
400 		break;
401 	default:
402 		mutex_unlock(&priv->aux_ts_lock);
403 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
404 		return -EINVAL;
405 	}
406 	writel(acr_value, ptpaddr + PTP_ACR);
407 
408 	/* Clear FIFO */
409 	acr_value = readl(ptpaddr + PTP_ACR);
410 	acr_value |= PTP_ACR_ATSFC;
411 	writel(acr_value, ptpaddr + PTP_ACR);
412 	/* Release the mutex */
413 	mutex_unlock(&priv->aux_ts_lock);
414 
415 	/* Trigger the internal snapshot signal.
416 	 * Create a rising edge by toggling GPO1 low and then
417 	 * back to high.
418 	 */
419 	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
420 	gpio_value &= ~GMAC_GPO1;
421 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
422 	gpio_value |= GMAC_GPO1;
423 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
424 
425 	/* Time sync done Indication - Interrupt method */
426 	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
427 					      stmmac_cross_ts_isr(priv),
428 					      HZ / 100)) {
429 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
430 		return -ETIMEDOUT;
431 	}
432 
433 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
434 			GMAC_TIMESTAMP_ATSNS_MASK) >>
435 			GMAC_TIMESTAMP_ATSNS_SHIFT;
436 
437 	/* Read out every queued snapshot; the values kept come from the last FIFO entry */
438 	for (i = 0; i < num_snapshot; i++) {
439 		read_lock_irqsave(&priv->ptp_lock, flags);
440 		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
441 		*device = ns_to_ktime(ptp_time);
442 		read_unlock_irqrestore(&priv->ptp_lock, flags);
443 		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
444 		system->cycles = art_time;
445 	}
446 
447 	system->cycles *= intel_priv->crossts_adj;
448 	system->cs_id = CSID_X86_ART;
449 	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
450 
451 	return 0;
452 }
453 
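/* For the PSE instances, the cross-timestamp ratio is the ART frequency
 * (from CPUID) divided by the PSE timer base; intel_crosststamp() multiplies
 * the captured ART cycle count by this crossts_adj value.
 */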
454 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
455 				       int base)
456 {
457 	if (boot_cpu_has(X86_FEATURE_ART)) {
458 		unsigned int art_freq;
459 
460 		/* On systems that support ART, the ART frequency can be read
461 		 * from the ECX register of CPUID leaf 0x15.
462 		 */
463 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
464 		do_div(art_freq, base);
465 		intel_priv->crossts_adj = art_freq;
466 	}
467 }
468 
469 static int intel_tsn_lane_is_available(struct net_device *ndev,
470 				       struct intel_priv_data *intel_priv)
471 {
472 	struct stmmac_priv *priv = netdev_priv(ndev);
473 	struct pmc_ipc_cmd tmp = {};
474 	struct pmc_ipc_rbuf rbuf = {};
475 	int ret = 0, i, j;
476 	const int max_fia_regs = 5;
477 
478 	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
479 	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
480 
481 	for (i = 0; i < max_fia_regs; i++) {
482 		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
483 
484 		ret = intel_pmc_ipc(&tmp, &rbuf);
485 		if (ret < 0) {
486 			netdev_info(priv->dev, "Failed to read from PMC.\n");
487 			return ret;
488 		}
489 
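		/* Each FIA status register packs 4-bit per-lane ownership
		 * fields; a field value of 0xB is taken to mean the lane is
		 * owned by (and hence available to) the TSN controller.
		 */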
490 		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
491 			if ((rbuf.buf[0] >>
492 				(4 * (intel_priv->tsn_lane_regs[j] % 8)) &
493 					B_PCH_FIA_PCR_L0O) == 0xB)
494 				return 0;
495 	}
496 
497 	return -EINVAL;
498 }
499 
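/* Write a table of {index, value} pairs to PMC registers through the PMC IPC
 * mailbox; used to reprogram the ModPHY PLL settings for 1G vs 2.5G operation.
 */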
500 static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
501 {
502 	int ret = 0, i;
503 
504 	for (i = 0; i < max_regs; i++) {
505 		struct pmc_ipc_cmd tmp = {};
506 		struct pmc_ipc_rbuf rbuf = {};
507 
508 		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
509 		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
510 		tmp.wbuf[0] = (u32)regs[i].index;
511 		tmp.wbuf[1] = regs[i].val;
512 
513 		ret = intel_pmc_ipc(&tmp, &rbuf);
514 		if (ret < 0)
515 			return ret;
516 	}
517 
518 	return ret;
519 }
520 
521 static int intel_mac_finish(struct net_device *ndev,
522 			    void *intel_data,
523 			    unsigned int mode,
524 			    phy_interface_t interface)
525 {
526 	struct intel_priv_data *intel_priv = intel_data;
527 	struct stmmac_priv *priv = netdev_priv(ndev);
528 	const struct pmc_serdes_regs *regs;
529 	int max_regs = 0;
530 	int ret = 0;
531 
532 	ret = intel_tsn_lane_is_available(ndev, intel_priv);
533 	if (ret < 0) {
534 		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
535 		return ret;
536 	}
537 
538 	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
539 		regs = intel_priv->pid_2p5g.regs;
540 		max_regs = intel_priv->pid_2p5g.num_regs;
541 	} else {
542 		regs = intel_priv->pid_1g.regs;
543 		max_regs = intel_priv->pid_1g.num_regs;
544 	}
545 
546 	ret = intel_set_reg_access(regs, max_regs);
547 	if (ret < 0)
548 		return ret;
549 
550 	priv->plat->phy_interface = interface;
551 
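	/* Cycle the SerDes so the newly programmed ModPHY settings and the
	 * updated interface mode take effect.
	 */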
552 	intel_serdes_powerdown(ndev, intel_priv);
553 	intel_serdes_powerup(ndev, intel_priv);
554 
555 	return ret;
556 }
557 
558 static void common_default_data(struct plat_stmmacenet_data *plat)
559 {
560 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
561 	plat->has_gmac = 1;
562 	plat->force_sf_dma_mode = 1;
563 
564 	plat->mdio_bus_data->needs_reset = true;
565 
566 	/* Set default value for multicast hash bins */
567 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
568 
569 	/* Set default value for unicast filter entries */
570 	plat->unicast_filter_entries = 1;
571 
572 	/* Set the maxmtu to a default of JUMBO_LEN */
573 	plat->maxmtu = JUMBO_LEN;
574 
575 	/* Set default number of RX and TX queues to use */
576 	plat->tx_queues_to_use = 1;
577 	plat->rx_queues_to_use = 1;
578 
579 	/* Disable Priority config by default */
580 	plat->tx_queues_cfg[0].use_prio = false;
581 	plat->rx_queues_cfg[0].use_prio = false;
582 
583 	/* Disable RX queues routing by default */
584 	plat->rx_queues_cfg[0].pkt_route = 0x0;
585 }
586 
587 static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
588 						 phy_interface_t interface)
589 {
590 	/* plat->mdio_bus_data->pcs_mask includes the XPCS address, so there
591 	 * should always be an XPCS. The original code would always
592 	 * return it if present.
593 	 */
594 	return xpcs_to_phylink_pcs(priv->hw->xpcs);
595 }
596 
597 static int intel_mgbe_common_data(struct pci_dev *pdev,
598 				  struct plat_stmmacenet_data *plat)
599 {
600 	struct fwnode_handle *fwnode;
601 	char clk_name[20];
602 	int ret;
603 	int i;
604 
605 	plat->pdev = pdev;
606 	plat->phy_addr = -1;
607 	plat->clk_csr = 5;
608 	plat->has_gmac = 0;
609 	plat->has_gmac4 = 1;
610 	plat->force_sf_dma_mode = 0;
611 	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
612 
613 	/* Multiplying factor applied to the clk_eee_i clock period to bring
614 	 * it closer to 100 ns. Program this value so that
615 	 * clk_eee_time_period * (MULT_FACT_100NS + 1) falls within
616 	 * 80 ns to 120 ns:
617 	 * clk_eee frequency is 19.2 MHz
618 	 * clk_eee_time_period is 52 ns
619 	 * 52 ns * (1 + 1) = 104 ns
620 	 * MULT_FACT_100NS = 1
621 	 */
622 	plat->mult_fact_100ns = 1;
623 
624 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
625 
626 	for (i = 0; i < plat->rx_queues_to_use; i++) {
627 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
628 		plat->rx_queues_cfg[i].chan = i;
629 
630 		/* Disable Priority config by default */
631 		plat->rx_queues_cfg[i].use_prio = false;
632 
633 		/* Disable RX queues routing by default */
634 		plat->rx_queues_cfg[i].pkt_route = 0x0;
635 	}
636 
637 	for (i = 0; i < plat->tx_queues_to_use; i++) {
638 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
639 
640 		/* Disable Priority config by default */
641 		plat->tx_queues_cfg[i].use_prio = false;
642 		/* Default TX queue 0 to TSO; enable TBS on the remaining TX queues */
643 		if (i > 0)
644 			plat->tx_queues_cfg[i].tbs_en = 1;
645 	}
646 
647 	/* FIFO size is 4096 bytes for 1 tx/rx queue */
648 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
649 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
650 
651 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
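	/* Weighted Round Robin weights for TX queues 0-7 */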
652 	plat->tx_queues_cfg[0].weight = 0x09;
653 	plat->tx_queues_cfg[1].weight = 0x0A;
654 	plat->tx_queues_cfg[2].weight = 0x0B;
655 	plat->tx_queues_cfg[3].weight = 0x0C;
656 	plat->tx_queues_cfg[4].weight = 0x0D;
657 	plat->tx_queues_cfg[5].weight = 0x0E;
658 	plat->tx_queues_cfg[6].weight = 0x0F;
659 	plat->tx_queues_cfg[7].weight = 0x10;
660 
661 	plat->dma_cfg->pbl = 32;
662 	plat->dma_cfg->pblx8 = true;
663 	plat->dma_cfg->fixed_burst = 0;
664 	plat->dma_cfg->mixed_burst = 0;
665 	plat->dma_cfg->aal = 0;
666 	plat->dma_cfg->dche = true;
667 
668 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
669 				 GFP_KERNEL);
670 	if (!plat->axi)
671 		return -ENOMEM;
672 
673 	plat->axi->axi_lpi_en = 0;
674 	plat->axi->axi_xit_frm = 0;
675 	plat->axi->axi_wr_osr_lmt = 1;
676 	plat->axi->axi_rd_osr_lmt = 1;
677 	plat->axi->axi_blen[0] = 4;
678 	plat->axi->axi_blen[1] = 8;
679 	plat->axi->axi_blen[2] = 16;
680 
681 	plat->ptp_max_adj = plat->clk_ptp_rate;
682 
683 	/* Set system clock */
684 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
685 
686 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
687 						   clk_name, NULL, 0,
688 						   plat->clk_ptp_rate);
689 
690 	if (IS_ERR(plat->stmmac_clk)) {
691 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
692 		plat->stmmac_clk = NULL;
693 	}
694 
695 	ret = clk_prepare_enable(plat->stmmac_clk);
696 	if (ret) {
697 		clk_unregister_fixed_rate(plat->stmmac_clk);
698 		return ret;
699 	}
700 
701 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
702 
703 	/* Set default value for multicast hash bins */
704 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
705 
706 	/* Set default value for unicast filter entries */
707 	plat->unicast_filter_entries = 1;
708 
709 	/* Set the maxmtu to a default of JUMBO_LEN */
710 	plat->maxmtu = JUMBO_LEN;
711 
712 	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
713 
714 	/* Use the last Rx queue */
715 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
716 
717 	/* For fixed-link setup, we allow phy-mode setting */
718 	fwnode = dev_fwnode(&pdev->dev);
719 	if (fwnode) {
720 		int phy_mode;
721 
722 		/* "phy-mode" setting is optional. If it is set,
723 		 *  we allow either sgmii or 1000base-x for now.
724 		 */
725 		phy_mode = fwnode_get_phy_mode(fwnode);
726 		if (phy_mode >= 0) {
727 			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
728 			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
729 				plat->phy_interface = phy_mode;
730 			else
731 				dev_warn(&pdev->dev, "Invalid phy-mode\n");
732 		}
733 	}
734 
735 	/* Intel mGbE SGMII interface uses pcs-xpcs */
736 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
737 	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
738 		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
739 		plat->mdio_bus_data->default_an_inband = true;
740 		plat->select_pcs = intel_mgbe_select_pcs;
741 	}
742 
743 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
744 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
745 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
746 
747 	plat->int_snapshot_num = AUX_SNAPSHOT1;
748 
749 	plat->crosststamp = intel_crosststamp;
750 	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
751 
752 	/* Setup MSI vector offset specific to Intel mGbE controller */
753 	plat->msi_mac_vec = 29;
754 	plat->msi_lpi_vec = 28;
755 	plat->msi_sfty_ce_vec = 27;
756 	plat->msi_sfty_ue_vec = 26;
757 	plat->msi_rx_base_vec = 0;
758 	plat->msi_tx_base_vec = 1;
759 
760 	return 0;
761 }
762 
763 static int ehl_common_data(struct pci_dev *pdev,
764 			   struct plat_stmmacenet_data *plat)
765 {
766 	struct intel_priv_data *intel_priv = plat->bsp_priv;
767 
768 	plat->rx_queues_to_use = 8;
769 	plat->tx_queues_to_use = 8;
770 	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
771 	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
772 
773 	plat->safety_feat_cfg->tsoee = 1;
774 	plat->safety_feat_cfg->mrxpee = 1;
775 	plat->safety_feat_cfg->mestee = 1;
776 	plat->safety_feat_cfg->mrxee = 1;
777 	plat->safety_feat_cfg->mtxee = 1;
778 	plat->safety_feat_cfg->epsi = 0;
779 	plat->safety_feat_cfg->edpp = 0;
780 	plat->safety_feat_cfg->prtyen = 0;
781 	plat->safety_feat_cfg->tmouten = 0;
782 
783 	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
784 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
785 
786 	return intel_mgbe_common_data(pdev, plat);
787 }
788 
789 static int ehl_sgmii_data(struct pci_dev *pdev,
790 			  struct plat_stmmacenet_data *plat)
791 {
792 	struct intel_priv_data *intel_priv = plat->bsp_priv;
793 
794 	plat->bus_id = 1;
795 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
796 	plat->serdes_powerup = intel_serdes_powerup;
797 	plat->serdes_powerdown = intel_serdes_powerdown;
798 	plat->mac_finish = intel_mac_finish;
799 	plat->clk_ptp_rate = 204800000;
800 
801 	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
802 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
803 	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
804 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
805 
806 	return ehl_common_data(pdev, plat);
807 }
808 
809 static struct stmmac_pci_info ehl_sgmii1g_info = {
810 	.setup = ehl_sgmii_data,
811 };
812 
813 static int ehl_rgmii_data(struct pci_dev *pdev,
814 			  struct plat_stmmacenet_data *plat)
815 {
816 	plat->bus_id = 1;
817 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
818 
819 	plat->clk_ptp_rate = 204800000;
820 
821 	return ehl_common_data(pdev, plat);
822 }
823 
824 static struct stmmac_pci_info ehl_rgmii1g_info = {
825 	.setup = ehl_rgmii_data,
826 };
827 
828 static int ehl_pse0_common_data(struct pci_dev *pdev,
829 				struct plat_stmmacenet_data *plat)
830 {
831 	struct intel_priv_data *intel_priv = plat->bsp_priv;
832 
833 	intel_priv->is_pse = true;
834 	plat->bus_id = 2;
835 	plat->host_dma_width = 32;
836 
837 	plat->clk_ptp_rate = 200000000;
838 
839 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
840 
841 	return ehl_common_data(pdev, plat);
842 }
843 
844 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
845 				 struct plat_stmmacenet_data *plat)
846 {
847 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
848 	return ehl_pse0_common_data(pdev, plat);
849 }
850 
851 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
852 	.setup = ehl_pse0_rgmii1g_data,
853 };
854 
855 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
856 				 struct plat_stmmacenet_data *plat)
857 {
858 	struct intel_priv_data *intel_priv = plat->bsp_priv;
859 
860 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
861 	plat->serdes_powerup = intel_serdes_powerup;
862 	plat->serdes_powerdown = intel_serdes_powerdown;
863 	plat->mac_finish = intel_mac_finish;
864 
865 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
866 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
867 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
868 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
869 
870 	return ehl_pse0_common_data(pdev, plat);
871 }
872 
873 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
874 	.setup = ehl_pse0_sgmii1g_data,
875 };
876 
877 static int ehl_pse1_common_data(struct pci_dev *pdev,
878 				struct plat_stmmacenet_data *plat)
879 {
880 	struct intel_priv_data *intel_priv = plat->bsp_priv;
881 
882 	intel_priv->is_pse = true;
883 	plat->bus_id = 3;
884 	plat->host_dma_width = 32;
885 
886 	plat->clk_ptp_rate = 200000000;
887 
888 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
889 
890 	return ehl_common_data(pdev, plat);
891 }
892 
893 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
894 				 struct plat_stmmacenet_data *plat)
895 {
896 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
897 	return ehl_pse1_common_data(pdev, plat);
898 }
899 
900 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
901 	.setup = ehl_pse1_rgmii1g_data,
902 };
903 
904 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
905 				 struct plat_stmmacenet_data *plat)
906 {
907 	struct intel_priv_data *intel_priv = plat->bsp_priv;
908 
909 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
910 	plat->serdes_powerup = intel_serdes_powerup;
911 	plat->serdes_powerdown = intel_serdes_powerdown;
912 	plat->mac_finish = intel_mac_finish;
913 
914 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
915 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
916 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
917 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
918 
919 	return ehl_pse1_common_data(pdev, plat);
920 }
921 
922 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
923 	.setup = ehl_pse1_sgmii1g_data,
924 };
925 
926 static int tgl_common_data(struct pci_dev *pdev,
927 			   struct plat_stmmacenet_data *plat)
928 {
929 	plat->rx_queues_to_use = 6;
930 	plat->tx_queues_to_use = 4;
931 	plat->clk_ptp_rate = 204800000;
932 	plat->speed_mode_2500 = intel_speed_mode_2500;
933 
934 	plat->safety_feat_cfg->tsoee = 1;
935 	plat->safety_feat_cfg->mrxpee = 0;
936 	plat->safety_feat_cfg->mestee = 1;
937 	plat->safety_feat_cfg->mrxee = 1;
938 	plat->safety_feat_cfg->mtxee = 1;
939 	plat->safety_feat_cfg->epsi = 0;
940 	plat->safety_feat_cfg->edpp = 0;
941 	plat->safety_feat_cfg->prtyen = 0;
942 	plat->safety_feat_cfg->tmouten = 0;
943 
944 	return intel_mgbe_common_data(pdev, plat);
945 }
946 
947 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
948 			       struct plat_stmmacenet_data *plat)
949 {
950 	plat->bus_id = 1;
951 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
952 	plat->serdes_powerup = intel_serdes_powerup;
953 	plat->serdes_powerdown = intel_serdes_powerdown;
954 	return tgl_common_data(pdev, plat);
955 }
956 
957 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
958 	.setup = tgl_sgmii_phy0_data,
959 };
960 
961 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
962 			       struct plat_stmmacenet_data *plat)
963 {
964 	plat->bus_id = 2;
965 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
966 	plat->serdes_powerup = intel_serdes_powerup;
967 	plat->serdes_powerdown = intel_serdes_powerdown;
968 	return tgl_common_data(pdev, plat);
969 }
970 
971 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
972 	.setup = tgl_sgmii_phy1_data,
973 };
974 
975 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
976 				struct plat_stmmacenet_data *plat)
977 {
978 	plat->bus_id = 1;
979 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
980 
981 	/* SerDes power up and power down are done in BIOS for ADL */
982 
983 	return tgl_common_data(pdev, plat);
984 }
985 
986 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
987 	.setup = adls_sgmii_phy0_data,
988 };
989 
990 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
991 				struct plat_stmmacenet_data *plat)
992 {
993 	plat->bus_id = 2;
994 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
995 
996 	/* SerDes power up and power down are done in BIOS for ADL */
997 
998 	return tgl_common_data(pdev, plat);
999 }
1000 
1001 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
1002 	.setup = adls_sgmii_phy1_data,
1003 };
1004 
1005 static int adln_common_data(struct pci_dev *pdev,
1006 			    struct plat_stmmacenet_data *plat)
1007 {
1008 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1009 
1010 	plat->rx_queues_to_use = 6;
1011 	plat->tx_queues_to_use = 4;
1012 	plat->clk_ptp_rate = 204800000;
1013 
1014 	plat->safety_feat_cfg->tsoee = 1;
1015 	plat->safety_feat_cfg->mrxpee = 0;
1016 	plat->safety_feat_cfg->mestee = 1;
1017 	plat->safety_feat_cfg->mrxee = 1;
1018 	plat->safety_feat_cfg->mtxee = 1;
1019 	plat->safety_feat_cfg->epsi = 0;
1020 	plat->safety_feat_cfg->edpp = 0;
1021 	plat->safety_feat_cfg->prtyen = 0;
1022 	plat->safety_feat_cfg->tmouten = 0;
1023 
1024 	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
1025 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
1026 
1027 	return intel_mgbe_common_data(pdev, plat);
1028 }
1029 
1030 static int adln_sgmii_phy0_data(struct pci_dev *pdev,
1031 				struct plat_stmmacenet_data *plat)
1032 {
1033 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1034 
1035 	plat->bus_id = 1;
1036 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
1037 	plat->serdes_powerup = intel_serdes_powerup;
1038 	plat->serdes_powerdown = intel_serdes_powerdown;
1039 	plat->mac_finish = intel_mac_finish;
1040 
1041 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
1042 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
1043 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
1044 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
1045 
1046 	return adln_common_data(pdev, plat);
1047 }
1048 
1049 static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
1050 	.setup = adln_sgmii_phy0_data,
1051 };
1052 
1053 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
1054 	{
1055 		.func = 6,
1056 		.phy_addr = 1,
1057 	},
1058 };
1059 
1060 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
1061 	.func = galileo_stmmac_func_data,
1062 	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
1063 };
1064 
1065 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
1066 	{
1067 		.func = 6,
1068 		.phy_addr = 1,
1069 	},
1070 	{
1071 		.func = 7,
1072 		.phy_addr = 1,
1073 	},
1074 };
1075 
1076 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
1077 	.func = iot2040_stmmac_func_data,
1078 	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
1079 };
1080 
1081 static const struct dmi_system_id quark_pci_dmi[] = {
1082 	{
1083 		.matches = {
1084 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
1085 		},
1086 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1087 	},
1088 	{
1089 		.matches = {
1090 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
1091 		},
1092 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1093 	},
1094 	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
1095 	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
1096 	 * has only one PCI network device, while the other asset tags are
1097 	 * for the IOT2040, which has two.
1098 	 */
1099 	{
1100 		.matches = {
1101 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1102 			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
1103 					"6ES7647-0AA00-0YA2"),
1104 		},
1105 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1106 	},
1107 	{
1108 		.matches = {
1109 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1110 		},
1111 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
1112 	},
1113 	{}
1114 };
1115 
1116 static int quark_default_data(struct pci_dev *pdev,
1117 			      struct plat_stmmacenet_data *plat)
1118 {
1119 	int ret;
1120 
1121 	/* Set common default data first */
1122 	common_default_data(plat);
1123 
1124 	/* Refuse to load the driver and register the net device if the MAC
1125 	 * controller is not connected to any PHY interface.
1126 	 */
1127 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
1128 	if (ret < 0) {
1129 		/* Return error to the caller on DMI enabled boards. */
1130 		if (dmi_get_system_info(DMI_BOARD_NAME))
1131 			return ret;
1132 
1133 		/* Galileo boards with old firmware don't support DMI. We always
1134 		 * use 1 here as the PHY address, so at least the first MAC
1135 		 * controller found gets probed.
1136 		 */
1137 		ret = 1;
1138 	}
1139 
1140 	plat->bus_id = pci_dev_id(pdev);
1141 	plat->phy_addr = ret;
1142 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
1143 
1144 	plat->dma_cfg->pbl = 16;
1145 	plat->dma_cfg->pblx8 = true;
1146 	plat->dma_cfg->fixed_burst = 1;
1147 	/* AXI (TODO) */
1148 
1149 	return 0;
1150 }
1151 
1152 static const struct stmmac_pci_info quark_info = {
1153 	.setup = quark_default_data,
1154 };
1155 
1156 static int stmmac_config_single_msi(struct pci_dev *pdev,
1157 				    struct plat_stmmacenet_data *plat,
1158 				    struct stmmac_resources *res)
1159 {
1160 	int ret;
1161 
1162 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1163 	if (ret < 0) {
1164 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
1165 			 __func__);
1166 		return ret;
1167 	}
1168 
1169 	res->irq = pci_irq_vector(pdev, 0);
1170 	res->wol_irq = res->irq;
1171 	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
1172 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
1173 		 __func__);
1174 
1175 	return 0;
1176 }
1177 
1178 static int stmmac_config_multi_msi(struct pci_dev *pdev,
1179 				   struct plat_stmmacenet_data *plat,
1180 				   struct stmmac_resources *res)
1181 {
1182 	int ret;
1183 	int i;
1184 
1185 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
1186 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
1187 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
1188 			 __func__);
1189 		return -1;
1190 	}
1191 
1192 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
1193 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
1194 	if (ret < 0) {
1195 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
1196 			 __func__);
1197 		return ret;
1198 	}
1199 
1200 	/* RX queue i uses MSI vector msi_rx_base_vec + 2 * i */
1201 	for (i = 0; i < plat->rx_queues_to_use; i++) {
1202 		res->rx_irq[i] = pci_irq_vector(pdev,
1203 						plat->msi_rx_base_vec + i * 2);
1204 	}
1205 
1206 	/* TX queue i uses MSI vector msi_tx_base_vec + 2 * i */
1207 	for (i = 0; i < plat->tx_queues_to_use; i++) {
1208 		res->tx_irq[i] = pci_irq_vector(pdev,
1209 						plat->msi_tx_base_vec + i * 2);
1210 	}
1211 
1212 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
1213 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
1214 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
1215 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
1216 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
1217 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
1218 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
1219 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
1220 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
1221 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
1222 
1223 	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
1224 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
1225 
1226 	return 0;
1227 }
1228 
1229 /**
1230  * intel_eth_pci_probe
1231  *
1232  * @pdev: pci device pointer
1233  * @id: pointer to the table of device IDs
1234  *
1235  * Description: This probing function gets called for all PCI devices which
1236  * match the ID table and are not "owned" by another driver yet. It is passed
1237  * a "struct pci_dev *" for each device whose entry in the ID table matches
1238  * the device. The probe function returns zero when the driver chooses to
1239  * take "ownership" of the device, or a negative error code otherwise.
1240  */
1241 static int intel_eth_pci_probe(struct pci_dev *pdev,
1242 			       const struct pci_device_id *id)
1243 {
1244 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1245 	struct intel_priv_data *intel_priv;
1246 	struct plat_stmmacenet_data *plat;
1247 	struct stmmac_resources res;
1248 	int ret;
1249 
1250 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1251 	if (!intel_priv)
1252 		return -ENOMEM;
1253 
1254 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1255 	if (!plat)
1256 		return -ENOMEM;
1257 
1258 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1259 					   sizeof(*plat->mdio_bus_data),
1260 					   GFP_KERNEL);
1261 	if (!plat->mdio_bus_data)
1262 		return -ENOMEM;
1263 
1264 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1265 				     GFP_KERNEL);
1266 	if (!plat->dma_cfg)
1267 		return -ENOMEM;
1268 
1269 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1270 					     sizeof(*plat->safety_feat_cfg),
1271 					     GFP_KERNEL);
1272 	if (!plat->safety_feat_cfg)
1273 		return -ENOMEM;
1274 
1275 	/* Enable pci device */
1276 	ret = pcim_enable_device(pdev);
1277 	if (ret) {
1278 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1279 			__func__);
1280 		return ret;
1281 	}
1282 
1283 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1284 	if (ret)
1285 		return ret;
1286 
1287 	pci_set_master(pdev);
1288 
1289 	plat->bsp_priv = intel_priv;
1290 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1291 	intel_priv->crossts_adj = 1;
1292 
1293 	/* Initialize all MSI vectors to invalid so that they can be set
1294 	 * according to the platform data settings below.
1295 	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX).
1296 	 */
1297 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1298 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1299 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1300 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1301 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1302 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1303 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1304 
1305 	ret = info->setup(pdev, plat);
1306 	if (ret)
1307 		return ret;
1308 
1309 	memset(&res, 0, sizeof(res));
1310 	res.addr = pcim_iomap_table(pdev)[0];
1311 
1312 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1313 	if (ret) {
1314 		ret = stmmac_config_single_msi(pdev, plat, &res);
1315 		if (ret) {
1316 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1317 				__func__);
1318 			goto err_alloc_irq;
1319 		}
1320 	}
1321 
1322 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1323 	if (ret)
1324 		goto err_alloc_irq;
1326 
1327 	return 0;
1328 
1329 err_alloc_irq:
1330 	clk_disable_unprepare(plat->stmmac_clk);
1331 	clk_unregister_fixed_rate(plat->stmmac_clk);
1332 	return ret;
1333 }
1334 
1335 /**
1336  * intel_eth_pci_remove
1337  *
1338  * @pdev: pci device pointer
1339  * Description: this function calls the main driver remove routine to free
1340  * the net resources and releases the PCI resources.
1341  */
1342 static void intel_eth_pci_remove(struct pci_dev *pdev)
1343 {
1344 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1345 	struct stmmac_priv *priv = netdev_priv(ndev);
1346 
1347 	stmmac_dvr_remove(&pdev->dev);
1348 
1349 	clk_disable_unprepare(priv->plat->stmmac_clk);
1350 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1351 }
1352 
1353 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1354 {
1355 	struct pci_dev *pdev = to_pci_dev(dev);
1356 	int ret;
1357 
1358 	ret = stmmac_suspend(dev);
1359 	if (ret)
1360 		return ret;
1361 
1362 	ret = pci_save_state(pdev);
1363 	if (ret)
1364 		return ret;
1365 
1366 	pci_wake_from_d3(pdev, true);
1367 	pci_set_power_state(pdev, PCI_D3hot);
1368 	return 0;
1369 }
1370 
1371 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1372 {
1373 	struct pci_dev *pdev = to_pci_dev(dev);
1374 	int ret;
1375 
1376 	pci_restore_state(pdev);
1377 	pci_set_power_state(pdev, PCI_D0);
1378 
1379 	ret = pcim_enable_device(pdev);
1380 	if (ret)
1381 		return ret;
1382 
1383 	pci_set_master(pdev);
1384 
1385 	return stmmac_resume(dev);
1386 }
1387 
1388 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1389 			 intel_eth_pci_resume);
1390 
1391 #define PCI_DEVICE_ID_INTEL_QUARK		0x0937
1392 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
1393 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
1394 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
1395 /* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2
1396  * MACs, named PSE0 and PSE1.
1397  */
1398 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
1399 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
1400 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
1401 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
1402 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
1403 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
1404 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
1405 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
1406 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
1407 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
1408 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
1409 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
1410 #define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac
1411 
1412 static const struct pci_device_id intel_eth_pci_id_table[] = {
1413 	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1414 	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1415 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1416 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1417 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1418 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1419 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1420 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1421 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1422 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1423 	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1424 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1425 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1426 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1427 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1428 	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
1429 	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
1430 	{}
1431 };
1432 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1433 
1434 static struct pci_driver intel_eth_pci_driver = {
1435 	.name = "intel-eth-pci",
1436 	.id_table = intel_eth_pci_id_table,
1437 	.probe = intel_eth_pci_probe,
1438 	.remove = intel_eth_pci_remove,
1439 	.driver         = {
1440 		.pm     = &intel_eth_pm_ops,
1441 	},
1442 };
1443 
1444 module_pci_driver(intel_eth_pci_driver);
1445 
1446 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1447 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1448 MODULE_LICENSE("GPL v2");
1449