xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision bca5cfbb694d66a1c482d0c347eee80f6afbc870)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include <linux/platform_data/x86/intel_pmc_ipc.h>
9 #include "dwmac-intel.h"
10 #include "dwmac4.h"
11 #include "stmmac.h"
12 #include "stmmac_ptp.h"
13 
14 struct pmc_serdes_regs {
15 	u8 index;
16 	u32 val;
17 };
18 
19 struct pmc_serdes_reg_info {
20 	const struct pmc_serdes_regs *regs;
21 	u8 num_regs;
22 };
23 
24 struct intel_priv_data {
25 	int mdio_adhoc_addr;	/* MDIO address for SerDes, etc. */
26 	unsigned long crossts_adj;
27 	bool is_pse;
28 	const int *tsn_lane_regs;
29 	int max_tsn_lane_regs;
30 	struct pmc_serdes_reg_info pid_1g;
31 	struct pmc_serdes_reg_info pid_2p5g;
32 };
33 
34 /* This struct associates the PCI function of a MAC controller on a board,
35  * discovered via DMI, with the address of the PHY connected to that MAC.
36  * A negative address means that the MAC controller is not connected to
37  * a PHY.
38  */
39 struct stmmac_pci_func_data {
40 	unsigned int func;
41 	int phy_addr;
42 };
43 
44 struct stmmac_pci_dmi_data {
45 	const struct stmmac_pci_func_data *func;
46 	size_t nfuncs;
47 };
48 
49 struct stmmac_pci_info {
50 	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
51 };
52 
53 static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
54 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
55 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
56 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
57 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
58 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
59 	{}
60 };
61 
62 static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
63 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
64 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
65 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
66 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
67 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
68 	{}
69 };
70 
71 static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
72 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
73 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
74 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
75 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
76 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
77 	{}
78 };
79 
80 static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
81 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
82 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
83 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
84 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
85 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
86 	{}
87 };
88 
89 static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
90 static const int adln_tsn_lane_regs[] = {6};
91 
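/* Look up the PHY address for this PCI function on boards identified via
 * DMI. Returns the address from the matching table entry, or -ENODEV if
 * the board or the function is unknown.
 */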
92 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
93 				    const struct dmi_system_id *dmi_list)
94 {
95 	const struct stmmac_pci_func_data *func_data;
96 	const struct stmmac_pci_dmi_data *dmi_data;
97 	const struct dmi_system_id *dmi_id;
98 	int func = PCI_FUNC(pdev->devfn);
99 	size_t n;
100 
101 	dmi_id = dmi_first_match(dmi_list);
102 	if (!dmi_id)
103 		return -ENODEV;
104 
105 	dmi_data = dmi_id->driver_data;
106 	func_data = dmi_data->func;
107 
108 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
109 		if (func_data->func == func)
110 			return func_data->phy_addr;
111 
112 	return -ENODEV;
113 }
114 
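/* Poll a SerDes status register over the ad-hoc MDIO address until the
 * bits in @mask match @val, waiting POLL_DELAY_US between reads. Returns
 * 0 on a match or -ETIMEDOUT after 10 attempts.
 */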
115 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
116 			      int phyreg, u32 mask, u32 val)
117 {
118 	unsigned int retries = 10;
119 	int val_rd;
120 
121 	do {
122 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
123 		if ((val_rd & mask) == (val & mask))
124 			return 0;
125 		udelay(POLL_DELAY_US);
126 	} while (--retries);
127 
128 	return -ETIMEDOUT;
129 }
130 
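/* Bring the SerDes lane up through SERDES_GCR0: program the rate and PCLK
 * for the selected interface, request the PLL clock, assert the lane reset
 * and move the lane to power state P0, polling SERDES_GSR0 for each step
 * to complete. For PSE ports the SGMII PHY Rx clock is also ungated.
 */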
131 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
132 {
133 	struct intel_priv_data *intel_priv = priv_data;
134 	struct stmmac_priv *priv = netdev_priv(ndev);
135 	int serdes_phy_addr = 0;
136 	u32 data = 0;
137 
138 	if (!intel_priv->mdio_adhoc_addr)
139 		return 0;
140 
141 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
142 
143 	/* Set the serdes rate and the PCLK rate */
144 	data = mdiobus_read(priv->mii, serdes_phy_addr,
145 			    SERDES_GCR0);
146 
147 	data &= ~SERDES_RATE_MASK;
148 	data &= ~SERDES_PCLK_MASK;
149 
150 	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
151 		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
152 			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
153 	else
154 		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
155 			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
156 
157 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
158 
159 	/* assert clk_req */
160 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
161 	data |= SERDES_PLL_CLK;
162 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
163 
164 	/* check for clk_ack assertion */
165 	data = serdes_status_poll(priv, serdes_phy_addr,
166 				  SERDES_GSR0,
167 				  SERDES_PLL_CLK,
168 				  SERDES_PLL_CLK);
169 
170 	if (data) {
171 		dev_err(priv->device, "Serdes PLL clk request timeout\n");
172 		return data;
173 	}
174 
175 	/* assert lane reset */
176 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
177 	data |= SERDES_RST;
178 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
179 
180 	/* check for assert lane reset reflection */
181 	data = serdes_status_poll(priv, serdes_phy_addr,
182 				  SERDES_GSR0,
183 				  SERDES_RST,
184 				  SERDES_RST);
185 
186 	if (data) {
187 		dev_err(priv->device, "Serdes assert lane reset timeout\n");
188 		return data;
189 	}
190 
191 	/* Move power state to P0 */
192 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
193 
194 	data &= ~SERDES_PWR_ST_MASK;
195 	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
196 
197 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
198 
199 	/* Check for P0 state */
200 	data = serdes_status_poll(priv, serdes_phy_addr,
201 				  SERDES_GSR0,
202 				  SERDES_PWR_ST_MASK,
203 				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
204 
205 	if (data) {
206 		dev_err(priv->device, "Serdes power state P0 timeout.\n");
207 		return data;
208 	}
209 
210 	/* PSE only - ungate SGMII PHY Rx Clock */
211 	if (intel_priv->is_pse)
212 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
213 			       0, SERDES_PHY_RX_CLK);
214 
215 	return 0;
216 }
217 
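/* Reverse of intel_serdes_powerup(): gate the PSE SGMII PHY Rx clock if
 * applicable, move the lane to power state P3, release the PLL clock
 * request and de-assert the lane reset, polling SERDES_GSR0 after each
 * step.
 */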
218 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
219 {
220 	struct intel_priv_data *intel_priv = intel_data;
221 	struct stmmac_priv *priv = netdev_priv(ndev);
222 	int serdes_phy_addr = 0;
223 	u32 data = 0;
224 
225 	if (!intel_priv->mdio_adhoc_addr)
226 		return;
227 
228 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
229 
230 	/* PSE only - gate SGMII PHY Rx Clock */
231 	if (intel_priv->is_pse)
232 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
233 			       SERDES_PHY_RX_CLK, 0);
234 
235 	/* Move power state to P3 */
236 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
237 
238 	data &= ~SERDES_PWR_ST_MASK;
239 	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
240 
241 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
242 
243 	/* Check for P3 state */
244 	data = serdes_status_poll(priv, serdes_phy_addr,
245 				  SERDES_GSR0,
246 				  SERDES_PWR_ST_MASK,
247 				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
248 
249 	if (data) {
250 		dev_err(priv->device, "Serdes power state P3 timeout\n");
251 		return;
252 	}
253 
254 	/* de-assert clk_req */
255 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
256 	data &= ~SERDES_PLL_CLK;
257 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
258 
259 	/* check for clk_ack de-assert */
260 	data = serdes_status_poll(priv, serdes_phy_addr,
261 				  SERDES_GSR0,
262 				  SERDES_PLL_CLK,
263 				  (u32)~SERDES_PLL_CLK);
264 
265 	if (data) {
266 		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
267 		return;
268 	}
269 
270 	/* de-assert lane reset */
271 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
272 	data &= ~SERDES_RST;
273 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
274 
275 	/* check for de-assert lane reset reflection */
276 	data = serdes_status_poll(priv, serdes_phy_addr,
277 				  SERDES_GSR0,
278 				  SERDES_RST,
279 				  (u32)~SERDES_RST);
280 
281 	if (data) {
282 		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
283 		return;
284 	}
285 }
286 
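/* Read the link speed mode strap from SERDES_GCR and report the single
 * supported interface: 2500base-X (with in-band AN disabled) when the
 * SerDes is configured for 2.5Gbps, SGMII otherwise.
 */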
287 static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
288 			       unsigned long *interfaces)
289 {
290 	struct intel_priv_data *intel_priv = bsp_priv;
291 	phy_interface_t interface;
292 	int data;
293 
294 	/* Determine the link speed mode: 2.5Gbps/1Gbps */
295 	data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
296 	if (data < 0)
297 		return;
298 
299 	if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
300 		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
301 		priv->plat->mdio_bus_data->default_an_inband = false;
302 		interface = PHY_INTERFACE_MODE_2500BASEX;
303 	} else {
304 		interface = PHY_INTERFACE_MODE_SGMII;
305 	}
306 
307 	__set_bit(interface, interfaces);
308 	priv->plat->phy_interface = interface;
309 }
310 
311 /* Program the PTP clock frequency for the different variants of
312  * Intel mGBE, which have slightly different GPO mappings
313  */
314 static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
315 {
316 	struct intel_priv_data *intel_priv;
317 	u32 gpio_value;
318 
319 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
320 
321 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
322 
323 	if (intel_priv->is_pse) {
324 		/* For PSE GbE, use 200MHz */
325 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
326 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
327 	} else {
328 		/* For PCH GbE, use 200MHz */
329 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
330 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
331 	}
332 
333 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
334 }
335 
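/* Assemble the 64-bit ART timestamp from the PMC_ART_VALUE3..0 registers,
 * read most-significant word first over the ad-hoc MDIO address.
 */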
336 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
337 			u64 *art_time)
338 {
339 	u64 ns;
340 
341 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
342 	ns <<= GMAC4_ART_TIME_SHIFT;
343 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
344 	ns <<= GMAC4_ART_TIME_SHIFT;
345 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
346 	ns <<= GMAC4_ART_TIME_SHIFT;
347 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
348 
349 	*art_time = ns;
350 }
351 
352 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
353 {
354 	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
355 }
356 
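/* Produce a device/system cross-timestamp: arm the selected auxiliary
 * snapshot, clear its FIFO, toggle GPO1 to trigger an internal snapshot,
 * wait for the time sync interrupt, then read the captured PTP time and
 * the matching ART time. The ART value is scaled by crossts_adj, which is
 * 1 except on PSE instances.
 */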
357 static int intel_crosststamp(ktime_t *device,
358 			     struct system_counterval_t *system,
359 			     void *ctx)
360 {
361 	struct intel_priv_data *intel_priv;
362 
363 	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
364 	void __iomem *ptpaddr = priv->ptpaddr;
365 	void __iomem *ioaddr = priv->hw->pcsr;
366 	unsigned long flags;
367 	u64 art_time = 0;
368 	u64 ptp_time = 0;
369 	u32 num_snapshot;
370 	u32 gpio_value;
371 	u32 acr_value;
372 	int i;
373 
374 	if (!boot_cpu_has(X86_FEATURE_ART))
375 		return -EOPNOTSUPP;
376 
377 	intel_priv = priv->plat->bsp_priv;
378 
379 	/* Internal cross-timestamping and externally triggered event
380 	 * timestamping cannot run concurrently.
381 	 */
382 	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
383 		return -EBUSY;
384 
385 	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
386 
387 	mutex_lock(&priv->aux_ts_lock);
388 	/* Enable Internal snapshot trigger */
389 	acr_value = readl(ptpaddr + PTP_ACR);
390 	acr_value &= ~PTP_ACR_MASK;
391 	switch (priv->plat->int_snapshot_num) {
392 	case AUX_SNAPSHOT0:
393 		acr_value |= PTP_ACR_ATSEN0;
394 		break;
395 	case AUX_SNAPSHOT1:
396 		acr_value |= PTP_ACR_ATSEN1;
397 		break;
398 	case AUX_SNAPSHOT2:
399 		acr_value |= PTP_ACR_ATSEN2;
400 		break;
401 	case AUX_SNAPSHOT3:
402 		acr_value |= PTP_ACR_ATSEN3;
403 		break;
404 	default:
405 		mutex_unlock(&priv->aux_ts_lock);
406 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
407 		return -EINVAL;
408 	}
409 	writel(acr_value, ptpaddr + PTP_ACR);
410 
411 	/* Clear FIFO */
412 	acr_value = readl(ptpaddr + PTP_ACR);
413 	acr_value |= PTP_ACR_ATSFC;
414 	writel(acr_value, ptpaddr + PTP_ACR);
415 	/* Release the mutex */
416 	mutex_unlock(&priv->aux_ts_lock);
417 
418 	/* Trigger Internal snapshot signal
419 	 * Create a rising edge by toggling GPO1 to low
420 	 * and then back to high.
421 	 */
422 	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
423 	gpio_value &= ~GMAC_GPO1;
424 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
425 	gpio_value |= GMAC_GPO1;
426 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
427 
428 	/* Time sync done Indication - Interrupt method */
429 	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
430 					      stmmac_cross_ts_isr(priv),
431 					      HZ / 100)) {
432 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
433 		return -ETIMEDOUT;
434 	}
435 
436 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
437 			GMAC_TIMESTAMP_ATSNS_MASK) >>
438 			GMAC_TIMESTAMP_ATSNS_SHIFT;
439 
440 	/* Repeat until the timestamps are from the last segment of the FIFO */
441 	for (i = 0; i < num_snapshot; i++) {
442 		read_lock_irqsave(&priv->ptp_lock, flags);
443 		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
444 		*device = ns_to_ktime(ptp_time);
445 		read_unlock_irqrestore(&priv->ptp_lock, flags);
446 		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
447 		system->cycles = art_time;
448 	}
449 
450 	system->cycles *= intel_priv->crossts_adj;
451 	system->cs_id = CSID_X86_ART;
452 	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
453 
454 	return 0;
455 }
456 
457 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
458 				       int base)
459 {
460 	if (boot_cpu_has(X86_FEATURE_ART)) {
461 		unsigned int art_freq;
462 
463 		/* On systems that support ART, the ART frequency can be obtained
464 		 * from the ECX register of CPUID leaf (0x15).
465 		 */
466 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
467 		do_div(art_freq, base);
468 		intel_priv->crossts_adj = art_freq;
469 	}
470 }
471 
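/* Query the PMC over IPC for the FIA lane ownership registers and return
 * 0 as soon as one of the platform's TSN lanes reports the ownership
 * value 0xB, or -EINVAL if no TSN lane is available.
 */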
472 static int intel_tsn_lane_is_available(struct net_device *ndev,
473 				       struct intel_priv_data *intel_priv)
474 {
475 	struct stmmac_priv *priv = netdev_priv(ndev);
476 	struct pmc_ipc_cmd tmp = {};
477 	struct pmc_ipc_rbuf rbuf = {};
478 	int ret = 0, i, j;
479 	const int max_fia_regs = 5;
480 
481 	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
482 	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
483 
484 	for (i = 0; i < max_fia_regs; i++) {
485 		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
486 
487 		ret = intel_pmc_ipc(&tmp, &rbuf);
488 		if (ret < 0) {
489 			netdev_info(priv->dev, "Failed to read from PMC.\n");
490 			return ret;
491 		}
492 
493 		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
494 			if ((rbuf.buf[0] >>
495 				(4 * (intel_priv->tsn_lane_regs[j] % 8)) &
496 					B_PCH_FIA_PCR_L0O) == 0xB)
497 				return 0;
498 	}
499 
500 	return -EINVAL;
501 }
502 
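/* Write each index/value pair in @regs to the PMC through the SoC
 * register access IPC command.
 */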
503 static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
504 {
505 	int ret = 0, i;
506 
507 	for (i = 0; i < max_regs; i++) {
508 		struct pmc_ipc_cmd tmp = {};
509 		struct pmc_ipc_rbuf rbuf = {};
510 
511 		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
512 		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
513 		tmp.wbuf[0] = (u32)regs[i].index;
514 		tmp.wbuf[1] = regs[i].val;
515 
516 		ret = intel_pmc_ipc(&tmp, &rbuf);
517 		if (ret < 0)
518 			return ret;
519 	}
520 
521 	return ret;
522 }
523 
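/* Applied when the MAC configuration is finalized: if a TSN lane is
 * available, reprogram the ModPHY PLL registers via the PMC for the
 * requested interface (2.5G vs 1G) and power-cycle the SerDes so the new
 * settings take effect.
 */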
524 static int intel_mac_finish(struct net_device *ndev,
525 			    void *intel_data,
526 			    unsigned int mode,
527 			    phy_interface_t interface)
528 {
529 	struct intel_priv_data *intel_priv = intel_data;
530 	struct stmmac_priv *priv = netdev_priv(ndev);
531 	const struct pmc_serdes_regs *regs;
532 	int max_regs = 0;
533 	int ret = 0;
534 
535 	ret = intel_tsn_lane_is_available(ndev, intel_priv);
536 	if (ret < 0) {
537 		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
538 		return ret;
539 	}
540 
541 	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
542 		regs = intel_priv->pid_2p5g.regs;
543 		max_regs = intel_priv->pid_2p5g.num_regs;
544 	} else {
545 		regs = intel_priv->pid_1g.regs;
546 		max_regs = intel_priv->pid_1g.num_regs;
547 	}
548 
549 	ret = intel_set_reg_access(regs, max_regs);
550 	if (ret < 0)
551 		return ret;
552 
553 	priv->plat->phy_interface = interface;
554 
555 	intel_serdes_powerdown(ndev, intel_priv);
556 	intel_serdes_powerup(ndev, intel_priv);
557 
558 	return ret;
559 }
560 
561 static void common_default_data(struct plat_stmmacenet_data *plat)
562 {
563 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
564 	plat->has_gmac = 1;
565 	plat->force_sf_dma_mode = 1;
566 
567 	plat->mdio_bus_data->needs_reset = true;
568 
569 	/* Set default value for multicast hash bins */
570 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
571 
572 	/* Set default value for unicast filter entries */
573 	plat->unicast_filter_entries = 1;
574 
575 	/* Set the maxmtu to a default of JUMBO_LEN */
576 	plat->maxmtu = JUMBO_LEN;
577 
578 	/* Set default number of RX and TX queues to use */
579 	plat->tx_queues_to_use = 1;
580 	plat->rx_queues_to_use = 1;
581 
582 	/* Disable Priority config by default */
583 	plat->tx_queues_cfg[0].use_prio = false;
584 	plat->rx_queues_cfg[0].use_prio = false;
585 
586 	/* Disable RX queues routing by default */
587 	plat->rx_queues_cfg[0].pkt_route = 0x0;
588 }
589 
590 static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
591 						 phy_interface_t interface)
592 {
593 	/* plat->mdio_bus_data->has_xpcs has been set true, so there
594 	 * should always be an XPCS. The original code would always
595 	 * return this if present.
596 	 */
597 	return xpcs_to_phylink_pcs(priv->hw->xpcs);
598 }
599 
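/* Defaults shared by all Intel mGbE instances: queue, DMA and AXI setup,
 * a fixed-rate "stmmac" clock registered at clk_ptp_rate, XPCS selection
 * for SGMII/1000base-X, cross-timestamping support and the MSI vector
 * layout.
 */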
600 static int intel_mgbe_common_data(struct pci_dev *pdev,
601 				  struct plat_stmmacenet_data *plat)
602 {
603 	struct fwnode_handle *fwnode;
604 	char clk_name[20];
605 	int ret;
606 	int i;
607 
608 	plat->pdev = pdev;
609 	plat->phy_addr = -1;
610 	plat->clk_csr = 5;
611 	plat->has_gmac = 0;
612 	plat->has_gmac4 = 1;
613 	plat->force_sf_dma_mode = 0;
614 	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
615 
616 	/* Multiplying factor to the clk_eee_i clock time
617 	 * period to make it closer to 100 ns. This value
618 	 * should be programmed such that the clk_eee_time_period *
619 	 * (MULT_FACT_100NS + 1) falls within 80 ns to 120 ns:
620 	 * clk_eee frequency is 19.2 MHz,
621 	 * clk_eee_time_period is 52ns
622 	 * 52ns * (1 + 1) = 104ns
623 	 * MULT_FACT_100NS = 1
624 	 */
625 	plat->mult_fact_100ns = 1;
626 
627 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
628 
629 	for (i = 0; i < plat->rx_queues_to_use; i++) {
630 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
631 		plat->rx_queues_cfg[i].chan = i;
632 
633 		/* Disable Priority config by default */
634 		plat->rx_queues_cfg[i].use_prio = false;
635 
636 		/* Disable RX queues routing by default */
637 		plat->rx_queues_cfg[i].pkt_route = 0x0;
638 	}
639 
640 	for (i = 0; i < plat->tx_queues_to_use; i++) {
641 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
642 
643 		/* Disable Priority config by default */
644 		plat->tx_queues_cfg[i].use_prio = false;
645 		/* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
646 		if (i > 0)
647 			plat->tx_queues_cfg[i].tbs_en = 1;
648 	}
649 
650 	/* FIFO size is 4096 bytes for 1 tx/rx queue */
651 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
652 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
653 
654 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
655 	plat->tx_queues_cfg[0].weight = 0x09;
656 	plat->tx_queues_cfg[1].weight = 0x0A;
657 	plat->tx_queues_cfg[2].weight = 0x0B;
658 	plat->tx_queues_cfg[3].weight = 0x0C;
659 	plat->tx_queues_cfg[4].weight = 0x0D;
660 	plat->tx_queues_cfg[5].weight = 0x0E;
661 	plat->tx_queues_cfg[6].weight = 0x0F;
662 	plat->tx_queues_cfg[7].weight = 0x10;
663 
664 	plat->dma_cfg->pbl = 32;
665 	plat->dma_cfg->pblx8 = true;
666 	plat->dma_cfg->fixed_burst = 0;
667 	plat->dma_cfg->mixed_burst = 0;
668 	plat->dma_cfg->aal = 0;
669 	plat->dma_cfg->dche = true;
670 
671 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
672 				 GFP_KERNEL);
673 	if (!plat->axi)
674 		return -ENOMEM;
675 
676 	plat->axi->axi_lpi_en = 0;
677 	plat->axi->axi_xit_frm = 0;
678 	plat->axi->axi_wr_osr_lmt = 1;
679 	plat->axi->axi_rd_osr_lmt = 1;
680 	plat->axi->axi_blen[0] = 4;
681 	plat->axi->axi_blen[1] = 8;
682 	plat->axi->axi_blen[2] = 16;
683 
684 	plat->ptp_max_adj = plat->clk_ptp_rate;
685 
686 	/* Set system clock */
687 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
688 
689 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
690 						   clk_name, NULL, 0,
691 						   plat->clk_ptp_rate);
692 
693 	if (IS_ERR(plat->stmmac_clk)) {
694 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
695 		plat->stmmac_clk = NULL;
696 	}
697 
698 	ret = clk_prepare_enable(plat->stmmac_clk);
699 	if (ret) {
700 		clk_unregister_fixed_rate(plat->stmmac_clk);
701 		return ret;
702 	}
703 
704 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
705 
706 	/* Set default value for multicast hash bins */
707 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
708 
709 	/* Set default value for unicast filter entries */
710 	plat->unicast_filter_entries = 1;
711 
712 	/* Set the maxmtu to a default of JUMBO_LEN */
713 	plat->maxmtu = JUMBO_LEN;
714 
715 	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
716 
717 	/* Use the last Rx queue */
718 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
719 
720 	/* For fixed-link setup, we allow phy-mode setting */
721 	fwnode = dev_fwnode(&pdev->dev);
722 	if (fwnode) {
723 		int phy_mode;
724 
725 		/* "phy-mode" setting is optional. If it is set,
726 		 *  we allow either sgmii or 1000base-x for now.
727 		 */
728 		phy_mode = fwnode_get_phy_mode(fwnode);
729 		if (phy_mode >= 0) {
730 			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
731 			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
732 				plat->phy_interface = phy_mode;
733 			else
734 				dev_warn(&pdev->dev, "Invalid phy-mode\n");
735 		}
736 	}
737 
738 	/* Intel mGbE SGMII interface uses pcs-xpcs */
739 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
740 	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
741 		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
742 		plat->mdio_bus_data->default_an_inband = true;
743 		plat->select_pcs = intel_mgbe_select_pcs;
744 	}
745 
746 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
747 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
748 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
749 
750 	plat->int_snapshot_num = AUX_SNAPSHOT1;
751 
752 	plat->crosststamp = intel_crosststamp;
753 	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
754 
755 	/* Set up the MSI vector offsets specific to the Intel mGbE controller */
756 	plat->msi_mac_vec = 29;
757 	plat->msi_lpi_vec = 28;
758 	plat->msi_sfty_ce_vec = 27;
759 	plat->msi_sfty_ue_vec = 26;
760 	plat->msi_rx_base_vec = 0;
761 	plat->msi_tx_base_vec = 1;
762 
763 	return 0;
764 }
765 
766 static int ehl_common_data(struct pci_dev *pdev,
767 			   struct plat_stmmacenet_data *plat)
768 {
769 	struct intel_priv_data *intel_priv = plat->bsp_priv;
770 
771 	plat->rx_queues_to_use = 8;
772 	plat->tx_queues_to_use = 8;
773 	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
774 	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
775 
776 	plat->safety_feat_cfg->tsoee = 1;
777 	plat->safety_feat_cfg->mrxpee = 1;
778 	plat->safety_feat_cfg->mestee = 1;
779 	plat->safety_feat_cfg->mrxee = 1;
780 	plat->safety_feat_cfg->mtxee = 1;
781 	plat->safety_feat_cfg->epsi = 0;
782 	plat->safety_feat_cfg->edpp = 0;
783 	plat->safety_feat_cfg->prtyen = 0;
784 	plat->safety_feat_cfg->tmouten = 0;
785 
786 	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
787 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
788 
789 	return intel_mgbe_common_data(pdev, plat);
790 }
791 
792 static int ehl_sgmii_data(struct pci_dev *pdev,
793 			  struct plat_stmmacenet_data *plat)
794 {
795 	struct intel_priv_data *intel_priv = plat->bsp_priv;
796 
797 	plat->bus_id = 1;
798 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
799 	plat->serdes_powerup = intel_serdes_powerup;
800 	plat->serdes_powerdown = intel_serdes_powerdown;
801 	plat->mac_finish = intel_mac_finish;
802 	plat->clk_ptp_rate = 204800000;
803 
804 	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
805 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
806 	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
807 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
808 
809 	return ehl_common_data(pdev, plat);
810 }
811 
812 static struct stmmac_pci_info ehl_sgmii1g_info = {
813 	.setup = ehl_sgmii_data,
814 };
815 
816 static int ehl_rgmii_data(struct pci_dev *pdev,
817 			  struct plat_stmmacenet_data *plat)
818 {
819 	plat->bus_id = 1;
820 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
821 
822 	plat->clk_ptp_rate = 204800000;
823 
824 	return ehl_common_data(pdev, plat);
825 }
826 
827 static struct stmmac_pci_info ehl_rgmii1g_info = {
828 	.setup = ehl_rgmii_data,
829 };
830 
831 static int ehl_pse0_common_data(struct pci_dev *pdev,
832 				struct plat_stmmacenet_data *plat)
833 {
834 	struct intel_priv_data *intel_priv = plat->bsp_priv;
835 
836 	intel_priv->is_pse = true;
837 	plat->bus_id = 2;
838 	plat->host_dma_width = 32;
839 
840 	plat->clk_ptp_rate = 200000000;
841 
842 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
843 
844 	return ehl_common_data(pdev, plat);
845 }
846 
847 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
848 				 struct plat_stmmacenet_data *plat)
849 {
850 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
851 	return ehl_pse0_common_data(pdev, plat);
852 }
853 
854 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
855 	.setup = ehl_pse0_rgmii1g_data,
856 };
857 
858 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
859 				 struct plat_stmmacenet_data *plat)
860 {
861 	struct intel_priv_data *intel_priv = plat->bsp_priv;
862 
863 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
864 	plat->serdes_powerup = intel_serdes_powerup;
865 	plat->serdes_powerdown = intel_serdes_powerdown;
866 	plat->mac_finish = intel_mac_finish;
867 
868 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
869 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
870 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
871 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
872 
873 	return ehl_pse0_common_data(pdev, plat);
874 }
875 
876 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
877 	.setup = ehl_pse0_sgmii1g_data,
878 };
879 
880 static int ehl_pse1_common_data(struct pci_dev *pdev,
881 				struct plat_stmmacenet_data *plat)
882 {
883 	struct intel_priv_data *intel_priv = plat->bsp_priv;
884 
885 	intel_priv->is_pse = true;
886 	plat->bus_id = 3;
887 	plat->host_dma_width = 32;
888 
889 	plat->clk_ptp_rate = 200000000;
890 
891 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
892 
893 	return ehl_common_data(pdev, plat);
894 }
895 
896 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
897 				 struct plat_stmmacenet_data *plat)
898 {
899 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
900 	return ehl_pse1_common_data(pdev, plat);
901 }
902 
903 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
904 	.setup = ehl_pse1_rgmii1g_data,
905 };
906 
907 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
908 				 struct plat_stmmacenet_data *plat)
909 {
910 	struct intel_priv_data *intel_priv = plat->bsp_priv;
911 
912 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
913 	plat->serdes_powerup = intel_serdes_powerup;
914 	plat->serdes_powerdown = intel_serdes_powerdown;
915 	plat->mac_finish = intel_mac_finish;
916 
917 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
918 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
919 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
920 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
921 
922 	return ehl_pse1_common_data(pdev, plat);
923 }
924 
925 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
926 	.setup = ehl_pse1_sgmii1g_data,
927 };
928 
929 static int tgl_common_data(struct pci_dev *pdev,
930 			   struct plat_stmmacenet_data *plat)
931 {
932 	plat->rx_queues_to_use = 6;
933 	plat->tx_queues_to_use = 4;
934 	plat->clk_ptp_rate = 204800000;
935 	plat->get_interfaces = tgl_get_interfaces;
936 
937 	plat->safety_feat_cfg->tsoee = 1;
938 	plat->safety_feat_cfg->mrxpee = 0;
939 	plat->safety_feat_cfg->mestee = 1;
940 	plat->safety_feat_cfg->mrxee = 1;
941 	plat->safety_feat_cfg->mtxee = 1;
942 	plat->safety_feat_cfg->epsi = 0;
943 	plat->safety_feat_cfg->edpp = 0;
944 	plat->safety_feat_cfg->prtyen = 0;
945 	plat->safety_feat_cfg->tmouten = 0;
946 
947 	return intel_mgbe_common_data(pdev, plat);
948 }
949 
950 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
951 			       struct plat_stmmacenet_data *plat)
952 {
953 	plat->bus_id = 1;
954 	plat->serdes_powerup = intel_serdes_powerup;
955 	plat->serdes_powerdown = intel_serdes_powerdown;
956 	return tgl_common_data(pdev, plat);
957 }
958 
959 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
960 	.setup = tgl_sgmii_phy0_data,
961 };
962 
963 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
964 			       struct plat_stmmacenet_data *plat)
965 {
966 	plat->bus_id = 2;
967 	plat->serdes_powerup = intel_serdes_powerup;
968 	plat->serdes_powerdown = intel_serdes_powerdown;
969 	return tgl_common_data(pdev, plat);
970 }
971 
972 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
973 	.setup = tgl_sgmii_phy1_data,
974 };
975 
976 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
977 				struct plat_stmmacenet_data *plat)
978 {
979 	plat->bus_id = 1;
980 
981 	/* SerDes power up and power down are done in BIOS for ADL */
982 
983 	return tgl_common_data(pdev, plat);
984 }
985 
986 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
987 	.setup = adls_sgmii_phy0_data,
988 };
989 
990 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
991 				struct plat_stmmacenet_data *plat)
992 {
993 	plat->bus_id = 2;
994 
995 	/* SerDes power up and power down are done in BIOS for ADL */
996 
997 	return tgl_common_data(pdev, plat);
998 }
999 
1000 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
1001 	.setup = adls_sgmii_phy1_data,
1002 };
1003 
1004 static int adln_common_data(struct pci_dev *pdev,
1005 			    struct plat_stmmacenet_data *plat)
1006 {
1007 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1008 
1009 	plat->rx_queues_to_use = 6;
1010 	plat->tx_queues_to_use = 4;
1011 	plat->clk_ptp_rate = 204800000;
1012 
1013 	plat->safety_feat_cfg->tsoee = 1;
1014 	plat->safety_feat_cfg->mrxpee = 0;
1015 	plat->safety_feat_cfg->mestee = 1;
1016 	plat->safety_feat_cfg->mrxee = 1;
1017 	plat->safety_feat_cfg->mtxee = 1;
1018 	plat->safety_feat_cfg->epsi = 0;
1019 	plat->safety_feat_cfg->edpp = 0;
1020 	plat->safety_feat_cfg->prtyen = 0;
1021 	plat->safety_feat_cfg->tmouten = 0;
1022 
1023 	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
1024 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
1025 
1026 	return intel_mgbe_common_data(pdev, plat);
1027 }
1028 
1029 static int adln_sgmii_phy0_data(struct pci_dev *pdev,
1030 				struct plat_stmmacenet_data *plat)
1031 {
1032 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1033 
1034 	plat->bus_id = 1;
1035 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
1036 	plat->serdes_powerup = intel_serdes_powerup;
1037 	plat->serdes_powerdown = intel_serdes_powerdown;
1038 	plat->mac_finish = intel_mac_finish;
1039 
1040 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
1041 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
1042 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
1043 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
1044 
1045 	return adln_common_data(pdev, plat);
1046 }
1047 
1048 static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
1049 	.setup = adln_sgmii_phy0_data,
1050 };
1051 
1052 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
1053 	{
1054 		.func = 6,
1055 		.phy_addr = 1,
1056 	},
1057 };
1058 
1059 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
1060 	.func = galileo_stmmac_func_data,
1061 	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
1062 };
1063 
1064 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
1065 	{
1066 		.func = 6,
1067 		.phy_addr = 1,
1068 	},
1069 	{
1070 		.func = 7,
1071 		.phy_addr = 1,
1072 	},
1073 };
1074 
1075 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
1076 	.func = iot2040_stmmac_func_data,
1077 	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
1078 };
1079 
1080 static const struct dmi_system_id quark_pci_dmi[] = {
1081 	{
1082 		.matches = {
1083 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
1084 		},
1085 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1086 	},
1087 	{
1088 		.matches = {
1089 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
1090 		},
1091 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1092 	},
1093 	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
1094 	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
1095 	 * has only one PCI network device, while the other asset tags are
1096 	 * for the IOT2040, which has two.
1097 	 */
1098 	{
1099 		.matches = {
1100 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1101 			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
1102 					"6ES7647-0AA00-0YA2"),
1103 		},
1104 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1105 	},
1106 	{
1107 		.matches = {
1108 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1109 		},
1110 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
1111 	},
1112 	{}
1113 };
1114 
1115 static int quark_default_data(struct pci_dev *pdev,
1116 			      struct plat_stmmacenet_data *plat)
1117 {
1118 	int ret;
1119 
1120 	/* Set common default data first */
1121 	common_default_data(plat);
1122 
1123 	/* Refuse to load the driver and register the net device if the MAC
1124 	 * controller is not connected to any PHY interface.
1125 	 */
1126 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
1127 	if (ret < 0) {
1128 		/* Return error to the caller on DMI enabled boards. */
1129 		if (dmi_get_system_info(DMI_BOARD_NAME))
1130 			return ret;
1131 
1132 		/* Galileo boards with old firmware don't support DMI. We always
1133 		 * use 1 as the PHY address here, so at least the first MAC
1134 		 * controller found will be probed.
1135 		 */
1136 		ret = 1;
1137 	}
1138 
1139 	plat->bus_id = pci_dev_id(pdev);
1140 	plat->phy_addr = ret;
1141 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
1142 
1143 	plat->dma_cfg->pbl = 16;
1144 	plat->dma_cfg->pblx8 = true;
1145 	plat->dma_cfg->fixed_burst = 1;
1146 	/* AXI (TODO) */
1147 
1148 	return 0;
1149 }
1150 
1151 static const struct stmmac_pci_info quark_info = {
1152 	.setup = quark_default_data,
1153 };
1154 
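/* Fall back to a single interrupt vector: the MAC and WoL interrupts share
 * it, and STMMAC_FLAG_MULTI_MSI_EN is cleared so per-queue vectors are not
 * used.
 */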
1155 static int stmmac_config_single_msi(struct pci_dev *pdev,
1156 				    struct plat_stmmacenet_data *plat,
1157 				    struct stmmac_resources *res)
1158 {
1159 	int ret;
1160 
1161 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1162 	if (ret < 0) {
1163 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
1164 			 __func__);
1165 		return ret;
1166 	}
1167 
1168 	res->irq = pci_irq_vector(pdev, 0);
1169 	res->wol_irq = res->irq;
1170 	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
1171 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
1172 		 __func__);
1173 
1174 	return 0;
1175 }
1176 
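/* Allocate up to STMMAC_MSI_VEC_MAX MSI/MSI-X vectors and map them to the
 * stmmac resources: RX queues take every other vector starting at
 * msi_rx_base_vec, TX queues every other vector starting at
 * msi_tx_base_vec, and the MAC, WoL, LPI and safety interrupts use their
 * dedicated vectors when one is configured.
 */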
1177 static int stmmac_config_multi_msi(struct pci_dev *pdev,
1178 				   struct plat_stmmacenet_data *plat,
1179 				   struct stmmac_resources *res)
1180 {
1181 	int ret;
1182 	int i;
1183 
1184 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
1185 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
1186 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
1187 			 __func__);
1188 		return -1;
1189 	}
1190 
1191 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
1192 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
1193 	if (ret < 0) {
1194 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
1195 			 __func__);
1196 		return ret;
1197 	}
1198 
1199 	/* For RX MSI */
1200 	for (i = 0; i < plat->rx_queues_to_use; i++) {
1201 		res->rx_irq[i] = pci_irq_vector(pdev,
1202 						plat->msi_rx_base_vec + i * 2);
1203 	}
1204 
1205 	/* For TX MSI */
1206 	for (i = 0; i < plat->tx_queues_to_use; i++) {
1207 		res->tx_irq[i] = pci_irq_vector(pdev,
1208 						plat->msi_tx_base_vec + i * 2);
1209 	}
1210 
1211 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
1212 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
1213 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
1214 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
1215 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
1216 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
1217 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
1218 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
1219 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
1220 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
1221 
1222 	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
1223 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
1224 
1225 	return 0;
1226 }
1227 
1228 /**
1229  * intel_eth_pci_probe
1230  *
1231  * @pdev: pci device pointer
1232  * @id: pointer to table of device id/id's.
1233  *
1234  * Description: This probing function gets called for all PCI devices which
1235  * match the ID table and are not "owned" by another driver yet. This function
1236  * gets passed a "struct pci_dev *" for each device whose entry in the ID table
1237  * matches the device. The probe function returns zero when the driver chooses
1238  * to take "ownership" of the device, or a negative error code otherwise.
1239  */
1240 static int intel_eth_pci_probe(struct pci_dev *pdev,
1241 			       const struct pci_device_id *id)
1242 {
1243 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1244 	struct intel_priv_data *intel_priv;
1245 	struct plat_stmmacenet_data *plat;
1246 	struct stmmac_resources res;
1247 	int ret;
1248 
1249 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1250 	if (!intel_priv)
1251 		return -ENOMEM;
1252 
1253 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1254 	if (!plat)
1255 		return -ENOMEM;
1256 
1257 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1258 					   sizeof(*plat->mdio_bus_data),
1259 					   GFP_KERNEL);
1260 	if (!plat->mdio_bus_data)
1261 		return -ENOMEM;
1262 
1263 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1264 				     GFP_KERNEL);
1265 	if (!plat->dma_cfg)
1266 		return -ENOMEM;
1267 
1268 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1269 					     sizeof(*plat->safety_feat_cfg),
1270 					     GFP_KERNEL);
1271 	if (!plat->safety_feat_cfg)
1272 		return -ENOMEM;
1273 
1274 	/* Enable pci device */
1275 	ret = pcim_enable_device(pdev);
1276 	if (ret) {
1277 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1278 			__func__);
1279 		return ret;
1280 	}
1281 
1282 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1283 	if (ret)
1284 		return ret;
1285 
1286 	pci_set_master(pdev);
1287 
1288 	plat->bsp_priv = intel_priv;
1289 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1290 	intel_priv->crossts_adj = 1;
1291 
1292 	/* Initialize all MSI vectors to invalid so that they can be set
1293 	 * according to the platform data settings below.
1294 	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
1295 	 */
1296 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1297 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1298 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1299 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1300 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1301 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1302 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1303 
1304 	ret = info->setup(pdev, plat);
1305 	if (ret)
1306 		return ret;
1307 
1308 	memset(&res, 0, sizeof(res));
1309 	res.addr = pcim_iomap_table(pdev)[0];
1310 
1311 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1312 	if (ret) {
1313 		ret = stmmac_config_single_msi(pdev, plat, &res);
1314 		if (ret) {
1315 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1316 				__func__);
1317 			goto err_alloc_irq;
1318 		}
1319 	}
1320 
1321 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1322 	if (ret)
1323 		goto err_alloc_irq;
1324 
1325 
1326 	return 0;
1327 
1328 err_alloc_irq:
1329 	clk_disable_unprepare(plat->stmmac_clk);
1330 	clk_unregister_fixed_rate(plat->stmmac_clk);
1331 	return ret;
1332 }
1333 
1334 /**
1335  * intel_eth_pci_remove
1336  *
1337  * @pdev: pci device pointer
1338  * Description: this function calls the main to free the net resources
1339  * and releases the PCI resources.
1340  */
1341 static void intel_eth_pci_remove(struct pci_dev *pdev)
1342 {
1343 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1344 	struct stmmac_priv *priv = netdev_priv(ndev);
1345 
1346 	stmmac_dvr_remove(&pdev->dev);
1347 
1348 	clk_disable_unprepare(priv->plat->stmmac_clk);
1349 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1350 }
1351 
1352 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1353 {
1354 	struct pci_dev *pdev = to_pci_dev(dev);
1355 	int ret;
1356 
1357 	ret = stmmac_suspend(dev);
1358 	if (ret)
1359 		return ret;
1360 
1361 	ret = pci_save_state(pdev);
1362 	if (ret)
1363 		return ret;
1364 
1365 	pci_wake_from_d3(pdev, true);
1366 	pci_set_power_state(pdev, PCI_D3hot);
1367 	return 0;
1368 }
1369 
1370 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1371 {
1372 	struct pci_dev *pdev = to_pci_dev(dev);
1373 	int ret;
1374 
1375 	pci_restore_state(pdev);
1376 	pci_set_power_state(pdev, PCI_D0);
1377 
1378 	ret = pcim_enable_device(pdev);
1379 	if (ret)
1380 		return ret;
1381 
1382 	pci_set_master(pdev);
1383 
1384 	return stmmac_resume(dev);
1385 }
1386 
1387 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1388 			 intel_eth_pci_resume);
1389 
1390 #define PCI_DEVICE_ID_INTEL_QUARK		0x0937
1391 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
1392 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
1393 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
1394 /* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
1395  * which are named PSE0 and PSE1
1396  */
1397 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
1398 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
1399 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
1400 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
1401 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
1402 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
1403 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
1404 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
1405 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
1406 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
1407 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
1408 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
1409 #define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac
1410 
1411 static const struct pci_device_id intel_eth_pci_id_table[] = {
1412 	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1413 	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1414 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1415 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1416 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1417 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1418 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1419 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1420 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1421 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1422 	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1423 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1424 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1425 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1426 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1427 	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
1428 	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
1429 	{}
1430 };
1431 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1432 
1433 static struct pci_driver intel_eth_pci_driver = {
1434 	.name = "intel-eth-pci",
1435 	.id_table = intel_eth_pci_id_table,
1436 	.probe = intel_eth_pci_probe,
1437 	.remove = intel_eth_pci_remove,
1438 	.driver         = {
1439 		.pm     = &intel_eth_pm_ops,
1440 	},
1441 };
1442 
1443 module_pci_driver(intel_eth_pci_driver);
1444 
1445 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1446 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1447 MODULE_LICENSE("GPL v2");
1448