xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include <linux/platform_data/x86/intel_pmc_ipc.h>
9 #include "dwmac-intel.h"
10 #include "dwmac4.h"
11 #include "stmmac.h"
12 #include "stmmac_ptp.h"
13 
/* A single PMC SerDes register write: @index selects the register
 * (the PID_MODPHY* offsets below) and @val is the value programmed
 * through the PMC IPC mailbox.
 */
struct pmc_serdes_regs {
	u8 index;
	u32 val;
};

/* A table of PMC SerDes register writes plus its length, so the
 * link-speed dependent ModPHY PLL tables can be passed around as one
 * unit.
 */
struct pmc_serdes_reg_info {
	const struct pmc_serdes_regs *regs;
	u8 num_regs;
};

/* Driver-private state, hung off plat->bsp_priv. */
struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;	/* scale factor applied to ART readings in intel_crosststamp() */
	bool is_pse;		/* true for PSE GbE instances, false for PCH GbE */
	const int *tsn_lane_regs;	/* candidate FIA lane-owner register indices for TSN */
	int max_tsn_lane_regs;	/* number of entries in tsn_lane_regs */
	struct pmc_serdes_reg_info pid_1g;	/* ModPHY PLL table for 1G link speed */
	struct pmc_serdes_reg_info pid_2p5g;	/* ModPHY PLL table for 2.5G link speed */
};
33 
/* This struct is used to associate PCI Function of MAC controller on a board,
 * discovered via DMI, with the address of PHY connected to the MAC. The
 * negative value of the address means that MAC controller is not connected
 * with PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;	/* PCI function number of the MAC */
	int phy_addr;		/* MDIO address of the attached PHY, or negative if none */
};

/* Per-board table of function -> PHY address mappings, selected via DMI. */
struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;		/* number of entries in func[] */
};

/* Per-PCI-device-ID platform setup hook. */
struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
52 
/* ModPHY3 PLL register values for a 1G link.
 *
 * NOTE(review): every table below ends with an empty sentinel entry
 * that is also counted by the ARRAY_SIZE() users assigning num_regs —
 * confirm that writing index 0 / value 0 to the PMC is intentional.
 */
static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

/* ModPHY3 PLL register values for a 2.5G link */
static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

/* ModPHY1 PLL register values for a 1G link */
static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
	{}
};

/* ModPHY1 PLL register values for a 2.5G link */
static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
	{}
};

/* Per-SoC lists of FIA lane-owner register indices checked by
 * intel_tsn_lane_is_available()
 */
static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
static const int adln_tsn_lane_regs[] = {6};
91 
92 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
93 				    const struct dmi_system_id *dmi_list)
94 {
95 	const struct stmmac_pci_func_data *func_data;
96 	const struct stmmac_pci_dmi_data *dmi_data;
97 	const struct dmi_system_id *dmi_id;
98 	int func = PCI_FUNC(pdev->devfn);
99 	size_t n;
100 
101 	dmi_id = dmi_first_match(dmi_list);
102 	if (!dmi_id)
103 		return -ENODEV;
104 
105 	dmi_data = dmi_id->driver_data;
106 	func_data = dmi_data->func;
107 
108 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
109 		if (func_data->func == func)
110 			return func_data->phy_addr;
111 
112 	return -ENODEV;
113 }
114 
115 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
116 			      int phyreg, u32 mask, u32 val)
117 {
118 	unsigned int retries = 10;
119 	int val_rd;
120 
121 	do {
122 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
123 		if ((val_rd & mask) == (val & mask))
124 			return 0;
125 		udelay(POLL_DELAY_US);
126 	} while (--retries);
127 
128 	return -ETIMEDOUT;
129 }
130 
/* Bring the SerDes lane up over the ad-hoc MDIO channel: program the
 * lane rate and PCLK for the selected interface, assert the PLL clock
 * request, assert the lane reset, and finally move the lane to power
 * state P0, polling SERDES_GSR0 for the hardware acknowledgment after
 * each step. For PSE instances the SGMII PHY Rx clock is ungated last.
 *
 * Returns 0 on success (or immediately when no ad-hoc MDIO address is
 * configured, i.e. nothing to do), otherwise the negative error code
 * from the failed status poll.
 */
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	/* 2500base-X runs the lane at PCIe Gen2 rate with a 37.5MHz
	 * PCLK; all other modes use Gen1 rate with a 70MHz PCLK.
	 */
	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/*  move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}
217 
/* Reverse of intel_serdes_powerup(): gate the PSE SGMII PHY Rx clock,
 * move the lane to power state P3, release the PLL clock request and
 * de-assert the lane reset, polling SERDES_GSR0 after each step. A
 * failed poll is logged and aborts the remaining steps; errors cannot
 * be reported to the caller (void return).
 */
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/*  move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}
286 
/* get_interfaces callback for TGL-class parts: read the SerDes link
 * speed mode strap and advertise either 2500base-X (2.5Gbps, in-band
 * AN disabled) or SGMII (1Gbps) in @interfaces, mirroring the choice
 * into plat->phy_interface.
 *
 * An MDIO read failure returns silently, leaving @interfaces and the
 * configured phy_interface untouched.
 */
static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
			       unsigned long *interfaces)
{
	struct intel_priv_data *intel_priv = bsp_priv;
	phy_interface_t interface;
	int data;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
	if (data < 0)
		return;

	if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->mdio_bus_data->default_an_inband = false;
		interface = PHY_INTERFACE_MODE_2500BASEX;
	} else {
		interface = PHY_INTERFACE_MODE_SGMII;
	}

	__set_bit(interface, interfaces);
	priv->plat->phy_interface = interface;
}
310 
311 /* Program PTP Clock Frequency for different variant of
312  * Intel mGBE that has slightly different GPO mapping
313  */
314 static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
315 {
316 	struct intel_priv_data *intel_priv;
317 	u32 gpio_value;
318 
319 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
320 
321 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
322 
323 	if (intel_priv->is_pse) {
324 		/* For PSE GbE, use 200MHz */
325 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
326 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
327 	} else {
328 		/* For PCH GbE, use 200MHz */
329 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
330 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
331 	}
332 
333 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
334 }
335 
336 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
337 			u64 *art_time)
338 {
339 	u64 ns;
340 
341 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
342 	ns <<= GMAC4_ART_TIME_SHIFT;
343 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
344 	ns <<= GMAC4_ART_TIME_SHIFT;
345 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
346 	ns <<= GMAC4_ART_TIME_SHIFT;
347 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
348 
349 	*art_time = ns;
350 }
351 
352 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
353 {
354 	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
355 }
356 
/* Cross-timestamp callback pairing the MAC's PTP clock with the x86
 * ART counter.
 *
 * Arms the auxiliary snapshot selected by plat->int_snapshot_num,
 * generates a rising edge on GPO1 to make the hardware latch ART and
 * PTP time, waits for the time-sync interrupt, then drains the
 * snapshot FIFO so that the last entry provides the matched PTP/ART
 * pair. The ART reading is scaled by crossts_adj before being handed
 * back in @system.
 *
 * Returns 0 on success, -EBUSY while external-trigger timestamping is
 * active (the two modes are mutually exclusive), -EINVAL for an
 * unknown snapshot index, or -ETIMEDOUT when the interrupt never
 * arrives.
 */
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int i;

	intel_priv = priv->plat->bsp_priv;

	/* Both internal crosstimestamping and external triggered event
	 * timestamping cannot be run concurrently.
	 */
	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
		return -EBUSY;

	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by just toggle the GPO1 to low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Time sync done Indication - Interrupt method */
	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
					      stmmac_cross_ts_isr(priv),
					      HZ / 100)) {
		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
		return -ETIMEDOUT;
	}

	/* The ART value is reported as raw cycles of the CSID_X86_ART
	 * clocksource; .cycles is filled in from the FIFO below.
	 */
	*system = (struct system_counterval_t) {
		.cycles = 0,
		.cs_id = CSID_X86_ART,
		.use_nsecs = false,
	};

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		system->cycles = art_time;
	}

	system->cycles *= intel_priv->crossts_adj;

	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;

	return 0;
}
459 
460 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
461 				       int base)
462 {
463 	if (boot_cpu_has(X86_FEATURE_ART)) {
464 		unsigned int art_freq;
465 
466 		/* On systems that support ART, ART frequency can be obtained
467 		 * from ECX register of CPUID leaf (0x15).
468 		 */
469 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
470 		do_div(art_freq, base);
471 		intel_priv->crossts_adj = art_freq;
472 	}
473 }
474 
475 static int intel_tsn_lane_is_available(struct net_device *ndev,
476 				       struct intel_priv_data *intel_priv)
477 {
478 	struct stmmac_priv *priv = netdev_priv(ndev);
479 	struct pmc_ipc_cmd tmp = {};
480 	struct pmc_ipc_rbuf rbuf = {};
481 	int ret = 0, i, j;
482 	const int max_fia_regs = 5;
483 
484 	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
485 	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
486 
487 	for (i = 0; i < max_fia_regs; i++) {
488 		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
489 
490 		ret = intel_pmc_ipc(&tmp, &rbuf);
491 		if (ret < 0) {
492 			netdev_info(priv->dev, "Failed to read from PMC.\n");
493 			return ret;
494 		}
495 
496 		for (j = 0; j <= intel_priv->max_tsn_lane_regs; j++)
497 			if ((rbuf.buf[0] >>
498 				(4 * (intel_priv->tsn_lane_regs[j] % 8)) &
499 					B_PCH_FIA_PCR_L0O) == 0xB)
500 				return 0;
501 	}
502 
503 	return -EINVAL;
504 }
505 
506 static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
507 {
508 	int ret = 0, i;
509 
510 	for (i = 0; i < max_regs; i++) {
511 		struct pmc_ipc_cmd tmp = {};
512 		struct pmc_ipc_rbuf rbuf = {};
513 
514 		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
515 		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
516 		tmp.wbuf[0] = (u32)regs[i].index;
517 		tmp.wbuf[1] = regs[i].val;
518 
519 		ret = intel_pmc_ipc(&tmp, &rbuf);
520 		if (ret < 0)
521 			return ret;
522 	}
523 
524 	return ret;
525 }
526 
527 static int intel_mac_finish(struct net_device *ndev,
528 			    void *intel_data,
529 			    unsigned int mode,
530 			    phy_interface_t interface)
531 {
532 	struct intel_priv_data *intel_priv = intel_data;
533 	struct stmmac_priv *priv = netdev_priv(ndev);
534 	const struct pmc_serdes_regs *regs;
535 	int max_regs = 0;
536 	int ret = 0;
537 
538 	ret = intel_tsn_lane_is_available(ndev, intel_priv);
539 	if (ret < 0) {
540 		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
541 		return ret;
542 	}
543 
544 	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
545 		regs = intel_priv->pid_2p5g.regs;
546 		max_regs = intel_priv->pid_2p5g.num_regs;
547 	} else {
548 		regs = intel_priv->pid_1g.regs;
549 		max_regs = intel_priv->pid_1g.num_regs;
550 	}
551 
552 	ret = intel_set_reg_access(regs, max_regs);
553 	if (ret < 0)
554 		return ret;
555 
556 	priv->plat->phy_interface = interface;
557 
558 	intel_serdes_powerdown(ndev, intel_priv);
559 	intel_serdes_powerup(ndev, intel_priv);
560 
561 	return ret;
562 }
563 
564 static void common_default_data(struct plat_stmmacenet_data *plat)
565 {
566 	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
567 	plat->clk_csr = STMMAC_CSR_20_35M;
568 	plat->core_type = DWMAC_CORE_GMAC;
569 	plat->force_sf_dma_mode = 1;
570 
571 	plat->mdio_bus_data->needs_reset = true;
572 }
573 
/* select_pcs callback: hand phylink the integrated XPCS regardless of
 * @interface.
 *
 * plat->mdio_bus_data->pcs_mask has been set whenever this callback is
 * installed (see intel_mgbe_common_data()), so there should always be
 * an XPCS. The original code would always return this if present.
 */
static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
						 phy_interface_t interface)
{
	return xpcs_to_phylink_pcs(priv->hw->xpcs);
}
583 
584 static int intel_mgbe_common_data(struct pci_dev *pdev,
585 				  struct plat_stmmacenet_data *plat)
586 {
587 	struct fwnode_handle *fwnode;
588 	char clk_name[20];
589 	int ret;
590 	int i;
591 
592 	plat->pdev = pdev;
593 	plat->phy_addr = -1;
594 	plat->clk_csr = STMMAC_CSR_250_300M;
595 	plat->core_type = DWMAC_CORE_GMAC4;
596 	plat->force_sf_dma_mode = 0;
597 	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
598 
599 	/* Multiplying factor to the clk_eee_i clock time
600 	 * period to make it closer to 100 ns. This value
601 	 * should be programmed such that the clk_eee_time_period *
602 	 * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns
603 	 * clk_eee frequency is 19.2Mhz
604 	 * clk_eee_time_period is 52ns
605 	 * 52ns * (1 + 1) = 104ns
606 	 * MULT_FACT_100NS = 1
607 	 */
608 	plat->mult_fact_100ns = 1;
609 
610 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
611 
612 	for (i = 0; i < plat->rx_queues_to_use; i++)
613 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
614 
615 	for (i = 0; i < plat->tx_queues_to_use; i++) {
616 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
617 
618 		/* Default TX Q0 to use TSO and rest TXQ for TBS */
619 		if (i > 0)
620 			plat->tx_queues_cfg[i].tbs_en = 1;
621 	}
622 
623 	/* FIFO size is 4096 bytes for 1 tx/rx queue */
624 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
625 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
626 
627 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
628 	plat->tx_queues_cfg[0].weight = 0x09;
629 	plat->tx_queues_cfg[1].weight = 0x0A;
630 	plat->tx_queues_cfg[2].weight = 0x0B;
631 	plat->tx_queues_cfg[3].weight = 0x0C;
632 	plat->tx_queues_cfg[4].weight = 0x0D;
633 	plat->tx_queues_cfg[5].weight = 0x0E;
634 	plat->tx_queues_cfg[6].weight = 0x0F;
635 	plat->tx_queues_cfg[7].weight = 0x10;
636 
637 	plat->dma_cfg->pbl = 32;
638 	plat->dma_cfg->pblx8 = true;
639 	plat->dma_cfg->fixed_burst = 0;
640 	plat->dma_cfg->mixed_burst = 0;
641 	plat->dma_cfg->aal = 0;
642 	plat->dma_cfg->dche = true;
643 
644 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
645 				 GFP_KERNEL);
646 	if (!plat->axi)
647 		return -ENOMEM;
648 
649 	plat->axi->axi_lpi_en = 0;
650 	plat->axi->axi_xit_frm = 0;
651 	plat->axi->axi_wr_osr_lmt = 1;
652 	plat->axi->axi_rd_osr_lmt = 1;
653 	plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
654 				     DMA_AXI_BLEN16;
655 
656 	plat->ptp_max_adj = plat->clk_ptp_rate;
657 
658 	/* Set system clock */
659 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
660 
661 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
662 						   clk_name, NULL, 0,
663 						   plat->clk_ptp_rate);
664 
665 	if (IS_ERR(plat->stmmac_clk)) {
666 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
667 		plat->stmmac_clk = NULL;
668 	}
669 
670 	ret = clk_prepare_enable(plat->stmmac_clk);
671 	if (ret) {
672 		clk_unregister_fixed_rate(plat->stmmac_clk);
673 		return ret;
674 	}
675 
676 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
677 
678 	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
679 
680 	/* Use the last Rx queue */
681 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
682 
683 	/* For fixed-link setup, we allow phy-mode setting */
684 	fwnode = dev_fwnode(&pdev->dev);
685 	if (fwnode) {
686 		int phy_mode;
687 
688 		/* "phy-mode" setting is optional. If it is set,
689 		 *  we allow either sgmii or 1000base-x for now.
690 		 */
691 		phy_mode = fwnode_get_phy_mode(fwnode);
692 		if (phy_mode >= 0) {
693 			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
694 			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
695 				plat->phy_interface = phy_mode;
696 			else
697 				dev_warn(&pdev->dev, "Invalid phy-mode\n");
698 		}
699 	}
700 
701 	/* Intel mgbe SGMII interface uses pcs-xcps */
702 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
703 	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
704 		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
705 		plat->mdio_bus_data->default_an_inband = true;
706 		plat->select_pcs = intel_mgbe_select_pcs;
707 	}
708 
709 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
710 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
711 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
712 
713 	plat->int_snapshot_num = AUX_SNAPSHOT1;
714 
715 	if (boot_cpu_has(X86_FEATURE_ART))
716 		plat->crosststamp = intel_crosststamp;
717 
718 	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
719 
720 	/* Setup MSI vector offset specific to Intel mGbE controller */
721 	plat->msi_mac_vec = 29;
722 	plat->msi_sfty_ce_vec = 27;
723 	plat->msi_sfty_ue_vec = 26;
724 	plat->msi_rx_base_vec = 0;
725 	plat->msi_tx_base_vec = 1;
726 
727 	return 0;
728 }
729 
730 static int ehl_common_data(struct pci_dev *pdev,
731 			   struct plat_stmmacenet_data *plat)
732 {
733 	struct intel_priv_data *intel_priv = plat->bsp_priv;
734 
735 	plat->rx_queues_to_use = 8;
736 	plat->tx_queues_to_use = 8;
737 	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
738 	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
739 
740 	plat->safety_feat_cfg->tsoee = 1;
741 	plat->safety_feat_cfg->mrxpee = 1;
742 	plat->safety_feat_cfg->mestee = 1;
743 	plat->safety_feat_cfg->mrxee = 1;
744 	plat->safety_feat_cfg->mtxee = 1;
745 	plat->safety_feat_cfg->epsi = 0;
746 	plat->safety_feat_cfg->edpp = 0;
747 	plat->safety_feat_cfg->prtyen = 0;
748 	plat->safety_feat_cfg->tmouten = 0;
749 
750 	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
751 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
752 
753 	return intel_mgbe_common_data(pdev, plat);
754 }
755 
756 static int ehl_sgmii_data(struct pci_dev *pdev,
757 			  struct plat_stmmacenet_data *plat)
758 {
759 	struct intel_priv_data *intel_priv = plat->bsp_priv;
760 
761 	plat->bus_id = 1;
762 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
763 	plat->serdes_powerup = intel_serdes_powerup;
764 	plat->serdes_powerdown = intel_serdes_powerdown;
765 	plat->mac_finish = intel_mac_finish;
766 	plat->clk_ptp_rate = 204800000;
767 
768 	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
769 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
770 	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
771 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
772 
773 	return ehl_common_data(pdev, plat);
774 }
775 
776 static struct stmmac_pci_info ehl_sgmii1g_info = {
777 	.setup = ehl_sgmii_data,
778 };
779 
780 static int ehl_rgmii_data(struct pci_dev *pdev,
781 			  struct plat_stmmacenet_data *plat)
782 {
783 	plat->bus_id = 1;
784 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
785 
786 	plat->clk_ptp_rate = 204800000;
787 
788 	return ehl_common_data(pdev, plat);
789 }
790 
791 static struct stmmac_pci_info ehl_rgmii1g_info = {
792 	.setup = ehl_rgmii_data,
793 };
794 
795 static int ehl_pse0_common_data(struct pci_dev *pdev,
796 				struct plat_stmmacenet_data *plat)
797 {
798 	struct intel_priv_data *intel_priv = plat->bsp_priv;
799 
800 	intel_priv->is_pse = true;
801 	plat->bus_id = 2;
802 	plat->host_dma_width = 32;
803 
804 	plat->clk_ptp_rate = 200000000;
805 
806 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
807 
808 	return ehl_common_data(pdev, plat);
809 }
810 
811 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
812 				 struct plat_stmmacenet_data *plat)
813 {
814 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
815 	return ehl_pse0_common_data(pdev, plat);
816 }
817 
818 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
819 	.setup = ehl_pse0_rgmii1g_data,
820 };
821 
822 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
823 				 struct plat_stmmacenet_data *plat)
824 {
825 	struct intel_priv_data *intel_priv = plat->bsp_priv;
826 
827 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
828 	plat->serdes_powerup = intel_serdes_powerup;
829 	plat->serdes_powerdown = intel_serdes_powerdown;
830 	plat->mac_finish = intel_mac_finish;
831 
832 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
833 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
834 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
835 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
836 
837 	return ehl_pse0_common_data(pdev, plat);
838 }
839 
840 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
841 	.setup = ehl_pse0_sgmii1g_data,
842 };
843 
844 static int ehl_pse1_common_data(struct pci_dev *pdev,
845 				struct plat_stmmacenet_data *plat)
846 {
847 	struct intel_priv_data *intel_priv = plat->bsp_priv;
848 
849 	intel_priv->is_pse = true;
850 	plat->bus_id = 3;
851 	plat->host_dma_width = 32;
852 
853 	plat->clk_ptp_rate = 200000000;
854 
855 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
856 
857 	return ehl_common_data(pdev, plat);
858 }
859 
860 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
861 				 struct plat_stmmacenet_data *plat)
862 {
863 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
864 	return ehl_pse1_common_data(pdev, plat);
865 }
866 
867 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
868 	.setup = ehl_pse1_rgmii1g_data,
869 };
870 
871 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
872 				 struct plat_stmmacenet_data *plat)
873 {
874 	struct intel_priv_data *intel_priv = plat->bsp_priv;
875 
876 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
877 	plat->serdes_powerup = intel_serdes_powerup;
878 	plat->serdes_powerdown = intel_serdes_powerdown;
879 	plat->mac_finish = intel_mac_finish;
880 
881 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
882 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
883 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
884 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
885 
886 	return ehl_pse1_common_data(pdev, plat);
887 }
888 
889 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
890 	.setup = ehl_pse1_sgmii1g_data,
891 };
892 
893 static int tgl_common_data(struct pci_dev *pdev,
894 			   struct plat_stmmacenet_data *plat)
895 {
896 	plat->rx_queues_to_use = 6;
897 	plat->tx_queues_to_use = 4;
898 	plat->clk_ptp_rate = 204800000;
899 	plat->get_interfaces = tgl_get_interfaces;
900 
901 	plat->safety_feat_cfg->tsoee = 1;
902 	plat->safety_feat_cfg->mrxpee = 0;
903 	plat->safety_feat_cfg->mestee = 1;
904 	plat->safety_feat_cfg->mrxee = 1;
905 	plat->safety_feat_cfg->mtxee = 1;
906 	plat->safety_feat_cfg->epsi = 0;
907 	plat->safety_feat_cfg->edpp = 0;
908 	plat->safety_feat_cfg->prtyen = 0;
909 	plat->safety_feat_cfg->tmouten = 0;
910 
911 	return intel_mgbe_common_data(pdev, plat);
912 }
913 
914 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
915 			       struct plat_stmmacenet_data *plat)
916 {
917 	plat->bus_id = 1;
918 	plat->serdes_powerup = intel_serdes_powerup;
919 	plat->serdes_powerdown = intel_serdes_powerdown;
920 	return tgl_common_data(pdev, plat);
921 }
922 
923 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
924 	.setup = tgl_sgmii_phy0_data,
925 };
926 
927 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
928 			       struct plat_stmmacenet_data *plat)
929 {
930 	plat->bus_id = 2;
931 	plat->serdes_powerup = intel_serdes_powerup;
932 	plat->serdes_powerdown = intel_serdes_powerdown;
933 	return tgl_common_data(pdev, plat);
934 }
935 
936 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
937 	.setup = tgl_sgmii_phy1_data,
938 };
939 
940 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
941 				struct plat_stmmacenet_data *plat)
942 {
943 	plat->bus_id = 1;
944 
945 	/* SerDes power up and power down are done in BIOS for ADL */
946 
947 	return tgl_common_data(pdev, plat);
948 }
949 
950 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
951 	.setup = adls_sgmii_phy0_data,
952 };
953 
954 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
955 				struct plat_stmmacenet_data *plat)
956 {
957 	plat->bus_id = 2;
958 
959 	/* SerDes power up and power down are done in BIOS for ADL */
960 
961 	return tgl_common_data(pdev, plat);
962 }
963 
964 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
965 	.setup = adls_sgmii_phy1_data,
966 };
967 
968 static int adln_common_data(struct pci_dev *pdev,
969 			    struct plat_stmmacenet_data *plat)
970 {
971 	struct intel_priv_data *intel_priv = plat->bsp_priv;
972 
973 	plat->rx_queues_to_use = 6;
974 	plat->tx_queues_to_use = 4;
975 	plat->clk_ptp_rate = 204800000;
976 
977 	plat->safety_feat_cfg->tsoee = 1;
978 	plat->safety_feat_cfg->mrxpee = 0;
979 	plat->safety_feat_cfg->mestee = 1;
980 	plat->safety_feat_cfg->mrxee = 1;
981 	plat->safety_feat_cfg->mtxee = 1;
982 	plat->safety_feat_cfg->epsi = 0;
983 	plat->safety_feat_cfg->edpp = 0;
984 	plat->safety_feat_cfg->prtyen = 0;
985 	plat->safety_feat_cfg->tmouten = 0;
986 
987 	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
988 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
989 
990 	return intel_mgbe_common_data(pdev, plat);
991 }
992 
993 static int adln_sgmii_phy0_data(struct pci_dev *pdev,
994 				struct plat_stmmacenet_data *plat)
995 {
996 	struct intel_priv_data *intel_priv = plat->bsp_priv;
997 
998 	plat->bus_id = 1;
999 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
1000 	plat->serdes_powerup = intel_serdes_powerup;
1001 	plat->serdes_powerdown = intel_serdes_powerdown;
1002 	plat->mac_finish = intel_mac_finish;
1003 
1004 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
1005 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
1006 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
1007 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
1008 
1009 	return adln_common_data(pdev, plat);
1010 }
1011 
1012 static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
1013 	.setup = adln_sgmii_phy0_data,
1014 };
1015 
/* Intel Galileo: single MAC at PCI function 6, PHY at address 1 */
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};
1022 
/* DMI payload for Galileo-class boards (one MAC/PHY pairing) */
static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};
1027 
/* SIMATIC IOT2040: two MACs at PCI functions 6 and 7, each with its
 * PHY at address 1.
 */
static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};
1038 
/* DMI payload for IOT2040 boards (two MAC/PHY pairings) */
static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};
1043 
/* DMI match table: maps Quark-based boards to their MAC/PHY wiring.
 * Entries are matched in order; the terminating empty entry is required.
 */
static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};
1078 
1079 static int quark_default_data(struct pci_dev *pdev,
1080 			      struct plat_stmmacenet_data *plat)
1081 {
1082 	int ret;
1083 
1084 	/* Set common default data first */
1085 	common_default_data(plat);
1086 
1087 	/* Refuse to load the driver and register net device if MAC controller
1088 	 * does not connect to any PHY interface.
1089 	 */
1090 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
1091 	if (ret < 0) {
1092 		/* Return error to the caller on DMI enabled boards. */
1093 		if (dmi_get_system_info(DMI_BOARD_NAME))
1094 			return ret;
1095 
1096 		/* Galileo boards with old firmware don't support DMI. We always
1097 		 * use 1 here as PHY address, so at least the first found MAC
1098 		 * controller would be probed.
1099 		 */
1100 		ret = 1;
1101 	}
1102 
1103 	plat->bus_id = pci_dev_id(pdev);
1104 	plat->phy_addr = ret;
1105 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
1106 
1107 	plat->dma_cfg->pbl = 16;
1108 	plat->dma_cfg->pblx8 = true;
1109 	plat->dma_cfg->fixed_burst = 1;
1110 	/* AXI (TODO) */
1111 
1112 	return 0;
1113 }
1114 
/* Setup descriptor for Intel Quark SoCs */
static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};
1118 
1119 static int stmmac_config_single_msi(struct pci_dev *pdev,
1120 				    struct plat_stmmacenet_data *plat,
1121 				    struct stmmac_resources *res)
1122 {
1123 	int ret;
1124 
1125 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1126 	if (ret < 0) {
1127 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
1128 			 __func__);
1129 		return ret;
1130 	}
1131 
1132 	res->irq = pci_irq_vector(pdev, 0);
1133 	res->wol_irq = res->irq;
1134 	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
1135 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
1136 		 __func__);
1137 
1138 	return 0;
1139 }
1140 
1141 static int stmmac_config_multi_msi(struct pci_dev *pdev,
1142 				   struct plat_stmmacenet_data *plat,
1143 				   struct stmmac_resources *res)
1144 {
1145 	int ret;
1146 	int i;
1147 
1148 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
1149 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
1150 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
1151 			 __func__);
1152 		return -1;
1153 	}
1154 
1155 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
1156 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
1157 	if (ret < 0) {
1158 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
1159 			 __func__);
1160 		return ret;
1161 	}
1162 
1163 	/* For RX MSI */
1164 	for (i = 0; i < plat->rx_queues_to_use; i++) {
1165 		res->rx_irq[i] = pci_irq_vector(pdev,
1166 						plat->msi_rx_base_vec + i * 2);
1167 	}
1168 
1169 	/* For TX MSI */
1170 	for (i = 0; i < plat->tx_queues_to_use; i++) {
1171 		res->tx_irq[i] = pci_irq_vector(pdev,
1172 						plat->msi_tx_base_vec + i * 2);
1173 	}
1174 
1175 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
1176 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
1177 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
1178 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
1179 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
1180 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
1181 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
1182 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
1183 
1184 	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
1185 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
1186 
1187 	return 0;
1188 }
1189 
1190 static int intel_eth_pci_suspend(struct device *dev, void *bsp_priv)
1191 {
1192 	struct pci_dev *pdev = to_pci_dev(dev);
1193 	int ret;
1194 
1195 	ret = pci_save_state(pdev);
1196 	if (ret)
1197 		return ret;
1198 
1199 	pci_wake_from_d3(pdev, true);
1200 	pci_set_power_state(pdev, PCI_D3hot);
1201 	return 0;
1202 }
1203 
1204 static int intel_eth_pci_resume(struct device *dev, void *bsp_priv)
1205 {
1206 	struct pci_dev *pdev = to_pci_dev(dev);
1207 	int ret;
1208 
1209 	pci_restore_state(pdev);
1210 	pci_set_power_state(pdev, PCI_D0);
1211 
1212 	ret = pcim_enable_device(pdev);
1213 	if (ret)
1214 		return ret;
1215 
1216 	pci_set_master(pdev);
1217 
1218 	return 0;
1219 }
1220 
1221 /**
1222  * intel_eth_pci_probe
1223  *
1224  * @pdev: pci device pointer
1225  * @id: pointer to table of device id/id's.
1226  *
1227  * Description: This probing function gets called for all PCI devices which
1228  * match the ID table and are not "owned" by other driver yet. This function
1229  * gets passed a "struct pci_dev *" for each device whose entry in the ID table
1230  * matches the device. The probe functions returns zero when the driver choose
1231  * to take "ownership" of the device or an error code(-ve no) otherwise.
1232  */
1233 static int intel_eth_pci_probe(struct pci_dev *pdev,
1234 			       const struct pci_device_id *id)
1235 {
1236 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1237 	struct intel_priv_data *intel_priv;
1238 	struct plat_stmmacenet_data *plat;
1239 	struct stmmac_resources res;
1240 	int ret;
1241 
1242 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1243 	if (!intel_priv)
1244 		return -ENOMEM;
1245 
1246 	plat = stmmac_plat_dat_alloc(&pdev->dev);
1247 	if (!plat)
1248 		return -ENOMEM;
1249 
1250 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1251 					   sizeof(*plat->mdio_bus_data),
1252 					   GFP_KERNEL);
1253 	if (!plat->mdio_bus_data)
1254 		return -ENOMEM;
1255 
1256 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1257 				     GFP_KERNEL);
1258 	if (!plat->dma_cfg)
1259 		return -ENOMEM;
1260 
1261 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1262 					     sizeof(*plat->safety_feat_cfg),
1263 					     GFP_KERNEL);
1264 	if (!plat->safety_feat_cfg)
1265 		return -ENOMEM;
1266 
1267 	/* Enable pci device */
1268 	ret = pcim_enable_device(pdev);
1269 	if (ret) {
1270 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1271 			__func__);
1272 		return ret;
1273 	}
1274 
1275 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1276 	if (ret)
1277 		return ret;
1278 
1279 	pci_set_master(pdev);
1280 
1281 	plat->bsp_priv = intel_priv;
1282 	plat->suspend = intel_eth_pci_suspend;
1283 	plat->resume = intel_eth_pci_resume;
1284 
1285 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1286 	intel_priv->crossts_adj = 1;
1287 
1288 	/* Initialize all MSI vectors to invalid so that it can be set
1289 	 * according to platform data settings below.
1290 	 * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX)
1291 	 */
1292 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1293 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1294 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1295 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1296 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1297 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1298 
1299 	ret = info->setup(pdev, plat);
1300 	if (ret)
1301 		return ret;
1302 
1303 	memset(&res, 0, sizeof(res));
1304 	res.addr = pcim_iomap_table(pdev)[0];
1305 
1306 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1307 	if (ret) {
1308 		ret = stmmac_config_single_msi(pdev, plat, &res);
1309 		if (ret) {
1310 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1311 				__func__);
1312 			goto err_alloc_irq;
1313 		}
1314 	}
1315 
1316 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1317 	if (ret) {
1318 		goto err_alloc_irq;
1319 	}
1320 
1321 	return 0;
1322 
1323 err_alloc_irq:
1324 	clk_disable_unprepare(plat->stmmac_clk);
1325 	clk_unregister_fixed_rate(plat->stmmac_clk);
1326 	return ret;
1327 }
1328 
1329 /**
1330  * intel_eth_pci_remove
1331  *
1332  * @pdev: pci device pointer
1333  * Description: this function calls the main to free the net resources
1334  * and releases the PCI resources.
1335  */
1336 static void intel_eth_pci_remove(struct pci_dev *pdev)
1337 {
1338 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1339 	struct stmmac_priv *priv = netdev_priv(ndev);
1340 
1341 	stmmac_dvr_remove(&pdev->dev);
1342 
1343 	clk_disable_unprepare(priv->plat->stmmac_clk);
1344 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1345 }
1346 
/* PCI device IDs of the Intel Ethernet controllers handled by this driver */
#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac
1367 
/* Map each device ID to its platform setup descriptor. Note that the
 * EHL 2.5G SKUs reuse the corresponding 1G setup info, and RPL-P reuses
 * the ADL-N setup.
 */
static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1389 
/* PCI driver glue; power management is delegated to the generic
 * stmmac simple PM ops (which invoke plat->suspend/resume above).
 */
static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver         = {
		.pm     = &stmmac_simple_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");
1405