xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include "dwmac-intel.h"
9 #include "dwmac4.h"
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 
/* Fixed MDIO addresses on the mGbE internal bus: the ad-hoc device
 * (SerDes etc.) and the pcs-xpcs device. Both are masked out of the
 * PHY bus scan in intel_mgbe_common_data().
 */
#define INTEL_MGBE_ADHOC_ADDR	0x15
#define INTEL_MGBE_XPCS_ADDR	0x16

/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */
#define PSE_PTP_CLK_FREQ_MASK		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PSE_PTP_CLK_FREQ_200MHZ		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_256MHZ		(0)
#define PCH_PTP_CLK_FREQ_MASK		(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ		(0)

/* Cross-timestamping defines */
#define ART_CPUID_LEAF		0x15
/* NOTE(review): value is 19200000, i.e. 19.2 MHz expressed in Hz despite
 * the _MHZ suffix — confirm against PSE ART documentation.
 */
#define EHL_PSE_ART_MHZ		19200000
28 
/* Driver-private state, attached to plat->bsp_priv */
struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;	/* ART cross-timestamp scaling factor */
	bool is_pse;		/* true for PSE (not PCH) MAC instances */
};

/* This struct is used to associate PCI Function of MAC controller on a board,
 * discovered via DMI, with the address of PHY connected to the MAC. The
 * negative value of the address means that MAC controller is not connected
 * with PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;	/* PCI function number of the MAC */
	int phy_addr;		/* MDIO address of the attached PHY, or < 0 */
};

/* Per-board table of function -> PHY address mappings */
struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

/* Board-specific platform-data setup hook, selected via the PCI ID table */
struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
53 
54 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
55 				    const struct dmi_system_id *dmi_list)
56 {
57 	const struct stmmac_pci_func_data *func_data;
58 	const struct stmmac_pci_dmi_data *dmi_data;
59 	const struct dmi_system_id *dmi_id;
60 	int func = PCI_FUNC(pdev->devfn);
61 	size_t n;
62 
63 	dmi_id = dmi_first_match(dmi_list);
64 	if (!dmi_id)
65 		return -ENODEV;
66 
67 	dmi_data = dmi_id->driver_data;
68 	func_data = dmi_data->func;
69 
70 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
71 		if (func_data->func == func)
72 			return func_data->phy_addr;
73 
74 	return -ENODEV;
75 }
76 
77 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
78 			      int phyreg, u32 mask, u32 val)
79 {
80 	unsigned int retries = 10;
81 	int val_rd;
82 
83 	do {
84 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
85 		if ((val_rd & mask) == (val & mask))
86 			return 0;
87 		udelay(POLL_DELAY_US);
88 	} while (--retries);
89 
90 	return -ETIMEDOUT;
91 }
92 
/* Bring the Intel mGbE SerDes lane up: request the PLL clock, assert
 * lane reset, then move the lane to power state P0. Each step is issued
 * through the ad-hoc MDIO address and confirmed by polling the matching
 * status register. Called through plat->serdes_powerup.
 *
 * Returns 0 on success or -ETIMEDOUT if any step is not acknowledged.
 */
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	/* Nothing to do on variants without an ad-hoc SerDes device */
	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/*  move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	return 0;
}
158 
/* Power the SerDes lane down: move to power state P3, release the PLL
 * clock request, then de-assert lane reset — the reverse of
 * intel_serdes_powerup(). Each step is confirmed by polling the status
 * register; on timeout the function logs and gives up (void return, no
 * error propagation). Called through plat->serdes_powerdown.
 */
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	/* Nothing to do on variants without an ad-hoc SerDes device */
	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/*  move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}
222 
/* Program PTP Clock Frequency for different variant of
 * Intel mGBE that has slightly different GPO mapping
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	/* Read-modify-write only the GPO bits that select the PTP clock */
	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}
248 
249 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
250 			u64 *art_time)
251 {
252 	u64 ns;
253 
254 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
255 	ns <<= GMAC4_ART_TIME_SHIFT;
256 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
257 	ns <<= GMAC4_ART_TIME_SHIFT;
258 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
259 	ns <<= GMAC4_ART_TIME_SHIFT;
260 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
261 
262 	*art_time = ns;
263 }
264 
/* Hardware cross-timestamp callback (plat->crosststamp), backing
 * PTP_SYS_OFFSET_PRECISE: arms the configured auxiliary snapshot
 * channel, triggers an internal snapshot by toggling GPO1, then reads
 * back the captured device (PTP) time and the matching ART value.
 *
 * Returns 0 on success, -EOPNOTSUPP when the CPU lacks ART, -EINVAL for
 * an unknown snapshot channel, or -ETIMEDOUT when the time-sync
 * interrupt status never asserts.
 */
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int ret;
	u32 v;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Trigger Internal snapshot signal
	 * Create a rising edge by just toggle the GPO1 to low
	 * and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Poll for time sync operation done */
	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
				 (v & GMAC_INT_TSIE), 100, 10000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: Wait for time sync operation timeout\n", __func__);
		return ret;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the FIFO last segment */
	for (i = 0; i < num_snapshot; i++) {
		/* ptp_lock guards the PTP time read against concurrent
		 * clock adjustments.
		 */
		spin_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		spin_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system = convert_art_to_tsc(art_time);
	}

	/* Scale the ART cycles by the adjustment factor computed in
	 * intel_mgbe_pse_crossts_adj() (1 for PCH ports).
	 */
	system->cycles *= intel_priv->crossts_adj;

	return 0;
}
352 
/* Compute the cross-timestamp scaling factor for PSE ports as the ratio
 * of the CPU's ART frequency to the PSE ART base frequency @base.
 */
static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		/* do_div() divides art_freq in place, leaving the quotient */
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}
367 
368 static void common_default_data(struct plat_stmmacenet_data *plat)
369 {
370 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
371 	plat->has_gmac = 1;
372 	plat->force_sf_dma_mode = 1;
373 
374 	plat->mdio_bus_data->needs_reset = true;
375 
376 	/* Set default value for multicast hash bins */
377 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
378 
379 	/* Set default value for unicast filter entries */
380 	plat->unicast_filter_entries = 1;
381 
382 	/* Set the maxmtu to a default of JUMBO_LEN */
383 	plat->maxmtu = JUMBO_LEN;
384 
385 	/* Set default number of RX and TX queues to use */
386 	plat->tx_queues_to_use = 1;
387 	plat->rx_queues_to_use = 1;
388 
389 	/* Disable Priority config by default */
390 	plat->tx_queues_cfg[0].use_prio = false;
391 	plat->rx_queues_cfg[0].use_prio = false;
392 
393 	/* Disable RX queues routing by default */
394 	plat->rx_queues_cfg[0].pkt_route = 0x0;
395 }
396 
397 static int intel_mgbe_common_data(struct pci_dev *pdev,
398 				  struct plat_stmmacenet_data *plat)
399 {
400 	char clk_name[20];
401 	int ret;
402 	int i;
403 
404 	plat->pdev = pdev;
405 	plat->phy_addr = -1;
406 	plat->clk_csr = 5;
407 	plat->has_gmac = 0;
408 	plat->has_gmac4 = 1;
409 	plat->force_sf_dma_mode = 0;
410 	plat->tso_en = 1;
411 
412 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
413 
414 	for (i = 0; i < plat->rx_queues_to_use; i++) {
415 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
416 		plat->rx_queues_cfg[i].chan = i;
417 
418 		/* Disable Priority config by default */
419 		plat->rx_queues_cfg[i].use_prio = false;
420 
421 		/* Disable RX queues routing by default */
422 		plat->rx_queues_cfg[i].pkt_route = 0x0;
423 	}
424 
425 	for (i = 0; i < plat->tx_queues_to_use; i++) {
426 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
427 
428 		/* Disable Priority config by default */
429 		plat->tx_queues_cfg[i].use_prio = false;
430 	}
431 
432 	/* FIFO size is 4096 bytes for 1 tx/rx queue */
433 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
434 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
435 
436 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
437 	plat->tx_queues_cfg[0].weight = 0x09;
438 	plat->tx_queues_cfg[1].weight = 0x0A;
439 	plat->tx_queues_cfg[2].weight = 0x0B;
440 	plat->tx_queues_cfg[3].weight = 0x0C;
441 	plat->tx_queues_cfg[4].weight = 0x0D;
442 	plat->tx_queues_cfg[5].weight = 0x0E;
443 	plat->tx_queues_cfg[6].weight = 0x0F;
444 	plat->tx_queues_cfg[7].weight = 0x10;
445 
446 	plat->dma_cfg->pbl = 32;
447 	plat->dma_cfg->pblx8 = true;
448 	plat->dma_cfg->fixed_burst = 0;
449 	plat->dma_cfg->mixed_burst = 0;
450 	plat->dma_cfg->aal = 0;
451 
452 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
453 				 GFP_KERNEL);
454 	if (!plat->axi)
455 		return -ENOMEM;
456 
457 	plat->axi->axi_lpi_en = 0;
458 	plat->axi->axi_xit_frm = 0;
459 	plat->axi->axi_wr_osr_lmt = 1;
460 	plat->axi->axi_rd_osr_lmt = 1;
461 	plat->axi->axi_blen[0] = 4;
462 	plat->axi->axi_blen[1] = 8;
463 	plat->axi->axi_blen[2] = 16;
464 
465 	plat->ptp_max_adj = plat->clk_ptp_rate;
466 	plat->eee_usecs_rate = plat->clk_ptp_rate;
467 
468 	/* Set system clock */
469 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
470 
471 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
472 						   clk_name, NULL, 0,
473 						   plat->clk_ptp_rate);
474 
475 	if (IS_ERR(plat->stmmac_clk)) {
476 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
477 		plat->stmmac_clk = NULL;
478 	}
479 
480 	ret = clk_prepare_enable(plat->stmmac_clk);
481 	if (ret) {
482 		clk_unregister_fixed_rate(plat->stmmac_clk);
483 		return ret;
484 	}
485 
486 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
487 
488 	/* Set default value for multicast hash bins */
489 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
490 
491 	/* Set default value for unicast filter entries */
492 	plat->unicast_filter_entries = 1;
493 
494 	/* Set the maxmtu to a default of JUMBO_LEN */
495 	plat->maxmtu = JUMBO_LEN;
496 
497 	plat->vlan_fail_q_en = true;
498 
499 	/* Use the last Rx queue */
500 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
501 
502 	/* Intel mgbe SGMII interface uses pcs-xcps */
503 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
504 		plat->mdio_bus_data->has_xpcs = true;
505 		plat->mdio_bus_data->xpcs_an_inband = true;
506 	}
507 
508 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
509 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
510 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
511 
512 	plat->int_snapshot_num = AUX_SNAPSHOT1;
513 
514 	plat->has_crossts = true;
515 	plat->crosststamp = intel_crosststamp;
516 
517 	/* Setup MSI vector offset specific to Intel mGbE controller */
518 	plat->msi_mac_vec = 29;
519 	plat->msi_lpi_vec = 28;
520 	plat->msi_sfty_ce_vec = 27;
521 	plat->msi_sfty_ue_vec = 26;
522 	plat->msi_rx_base_vec = 0;
523 	plat->msi_tx_base_vec = 1;
524 
525 	return 0;
526 }
527 
528 static int ehl_common_data(struct pci_dev *pdev,
529 			   struct plat_stmmacenet_data *plat)
530 {
531 	plat->rx_queues_to_use = 8;
532 	plat->tx_queues_to_use = 8;
533 	plat->clk_ptp_rate = 200000000;
534 
535 	return intel_mgbe_common_data(pdev, plat);
536 }
537 
538 static int ehl_sgmii_data(struct pci_dev *pdev,
539 			  struct plat_stmmacenet_data *plat)
540 {
541 	plat->bus_id = 1;
542 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
543 
544 	plat->serdes_powerup = intel_serdes_powerup;
545 	plat->serdes_powerdown = intel_serdes_powerdown;
546 
547 	return ehl_common_data(pdev, plat);
548 }
549 
550 static struct stmmac_pci_info ehl_sgmii1g_info = {
551 	.setup = ehl_sgmii_data,
552 };
553 
554 static int ehl_rgmii_data(struct pci_dev *pdev,
555 			  struct plat_stmmacenet_data *plat)
556 {
557 	plat->bus_id = 1;
558 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
559 
560 	return ehl_common_data(pdev, plat);
561 }
562 
563 static struct stmmac_pci_info ehl_rgmii1g_info = {
564 	.setup = ehl_rgmii_data,
565 };
566 
567 static int ehl_pse0_common_data(struct pci_dev *pdev,
568 				struct plat_stmmacenet_data *plat)
569 {
570 	struct intel_priv_data *intel_priv = plat->bsp_priv;
571 
572 	intel_priv->is_pse = true;
573 	plat->bus_id = 2;
574 	plat->addr64 = 32;
575 
576 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
577 
578 	return ehl_common_data(pdev, plat);
579 }
580 
581 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
582 				 struct plat_stmmacenet_data *plat)
583 {
584 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
585 	return ehl_pse0_common_data(pdev, plat);
586 }
587 
588 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
589 	.setup = ehl_pse0_rgmii1g_data,
590 };
591 
592 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
593 				 struct plat_stmmacenet_data *plat)
594 {
595 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
596 	plat->serdes_powerup = intel_serdes_powerup;
597 	plat->serdes_powerdown = intel_serdes_powerdown;
598 	return ehl_pse0_common_data(pdev, plat);
599 }
600 
601 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
602 	.setup = ehl_pse0_sgmii1g_data,
603 };
604 
605 static int ehl_pse1_common_data(struct pci_dev *pdev,
606 				struct plat_stmmacenet_data *plat)
607 {
608 	struct intel_priv_data *intel_priv = plat->bsp_priv;
609 
610 	intel_priv->is_pse = true;
611 	plat->bus_id = 3;
612 	plat->addr64 = 32;
613 
614 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
615 
616 	return ehl_common_data(pdev, plat);
617 }
618 
619 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
620 				 struct plat_stmmacenet_data *plat)
621 {
622 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
623 	return ehl_pse1_common_data(pdev, plat);
624 }
625 
626 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
627 	.setup = ehl_pse1_rgmii1g_data,
628 };
629 
630 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
631 				 struct plat_stmmacenet_data *plat)
632 {
633 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
634 	plat->serdes_powerup = intel_serdes_powerup;
635 	plat->serdes_powerdown = intel_serdes_powerdown;
636 	return ehl_pse1_common_data(pdev, plat);
637 }
638 
639 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
640 	.setup = ehl_pse1_sgmii1g_data,
641 };
642 
643 static int tgl_common_data(struct pci_dev *pdev,
644 			   struct plat_stmmacenet_data *plat)
645 {
646 	plat->rx_queues_to_use = 6;
647 	plat->tx_queues_to_use = 4;
648 	plat->clk_ptp_rate = 200000000;
649 
650 	return intel_mgbe_common_data(pdev, plat);
651 }
652 
653 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
654 			       struct plat_stmmacenet_data *plat)
655 {
656 	plat->bus_id = 1;
657 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
658 	plat->serdes_powerup = intel_serdes_powerup;
659 	plat->serdes_powerdown = intel_serdes_powerdown;
660 	return tgl_common_data(pdev, plat);
661 }
662 
663 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
664 	.setup = tgl_sgmii_phy0_data,
665 };
666 
667 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
668 			       struct plat_stmmacenet_data *plat)
669 {
670 	plat->bus_id = 2;
671 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
672 	plat->serdes_powerup = intel_serdes_powerup;
673 	plat->serdes_powerdown = intel_serdes_powerdown;
674 	return tgl_common_data(pdev, plat);
675 }
676 
677 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
678 	.setup = tgl_sgmii_phy1_data,
679 };
680 
681 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
682 				struct plat_stmmacenet_data *plat)
683 {
684 	plat->bus_id = 1;
685 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
686 
687 	/* SerDes power up and power down are done in BIOS for ADL */
688 
689 	return tgl_common_data(pdev, plat);
690 }
691 
692 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
693 	.setup = adls_sgmii_phy0_data,
694 };
695 
696 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
697 				struct plat_stmmacenet_data *plat)
698 {
699 	plat->bus_id = 2;
700 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
701 
702 	/* SerDes power up and power down are done in BIOS for ADL */
703 
704 	return tgl_common_data(pdev, plat);
705 }
706 
707 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
708 	.setup = adls_sgmii_phy1_data,
709 };
/* Galileo/IOT2020: a single MAC on PCI function 6, PHY at address 1 */
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

/* IOT2040: two MACs on PCI functions 6 and 7, both with PHY address 1 */
static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};
737 
/* DMI board matches used to pick the function -> PHY mapping table */
static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};
772 
773 static int quark_default_data(struct pci_dev *pdev,
774 			      struct plat_stmmacenet_data *plat)
775 {
776 	int ret;
777 
778 	/* Set common default data first */
779 	common_default_data(plat);
780 
781 	/* Refuse to load the driver and register net device if MAC controller
782 	 * does not connect to any PHY interface.
783 	 */
784 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
785 	if (ret < 0) {
786 		/* Return error to the caller on DMI enabled boards. */
787 		if (dmi_get_system_info(DMI_BOARD_NAME))
788 			return ret;
789 
790 		/* Galileo boards with old firmware don't support DMI. We always
791 		 * use 1 here as PHY address, so at least the first found MAC
792 		 * controller would be probed.
793 		 */
794 		ret = 1;
795 	}
796 
797 	plat->bus_id = pci_dev_id(pdev);
798 	plat->phy_addr = ret;
799 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
800 
801 	plat->dma_cfg->pbl = 16;
802 	plat->dma_cfg->pblx8 = true;
803 	plat->dma_cfg->fixed_burst = 1;
804 	/* AXI (TODO) */
805 
806 	return 0;
807 }
808 
809 static const struct stmmac_pci_info quark_info = {
810 	.setup = quark_default_data,
811 };
812 
813 static int stmmac_config_single_msi(struct pci_dev *pdev,
814 				    struct plat_stmmacenet_data *plat,
815 				    struct stmmac_resources *res)
816 {
817 	int ret;
818 
819 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
820 	if (ret < 0) {
821 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
822 			 __func__);
823 		return ret;
824 	}
825 
826 	res->irq = pci_irq_vector(pdev, 0);
827 	res->wol_irq = res->irq;
828 	plat->multi_msi_en = 0;
829 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
830 		 __func__);
831 
832 	return 0;
833 }
834 
835 static int stmmac_config_multi_msi(struct pci_dev *pdev,
836 				   struct plat_stmmacenet_data *plat,
837 				   struct stmmac_resources *res)
838 {
839 	int ret;
840 	int i;
841 
842 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
843 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
844 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
845 			 __func__);
846 		return -1;
847 	}
848 
849 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
850 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
851 	if (ret < 0) {
852 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
853 			 __func__);
854 		return ret;
855 	}
856 
857 	/* For RX MSI */
858 	for (i = 0; i < plat->rx_queues_to_use; i++) {
859 		res->rx_irq[i] = pci_irq_vector(pdev,
860 						plat->msi_rx_base_vec + i * 2);
861 	}
862 
863 	/* For TX MSI */
864 	for (i = 0; i < plat->tx_queues_to_use; i++) {
865 		res->tx_irq[i] = pci_irq_vector(pdev,
866 						plat->msi_tx_base_vec + i * 2);
867 	}
868 
869 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
870 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
871 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
872 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
873 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
874 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
875 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
876 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
877 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
878 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
879 
880 	plat->multi_msi_en = 1;
881 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
882 
883 	return 0;
884 }
885 
886 /**
887  * intel_eth_pci_probe
888  *
889  * @pdev: pci device pointer
890  * @id: pointer to table of device id/id's.
891  *
892  * Description: This probing function gets called for all PCI devices which
893  * match the ID table and are not "owned" by other driver yet. This function
894  * gets passed a "struct pci_dev *" for each device whose entry in the ID table
895  * matches the device. The probe functions returns zero when the driver choose
896  * to take "ownership" of the device or an error code(-ve no) otherwise.
897  */
898 static int intel_eth_pci_probe(struct pci_dev *pdev,
899 			       const struct pci_device_id *id)
900 {
901 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
902 	struct intel_priv_data *intel_priv;
903 	struct plat_stmmacenet_data *plat;
904 	struct stmmac_resources res;
905 	int ret;
906 
907 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
908 	if (!intel_priv)
909 		return -ENOMEM;
910 
911 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
912 	if (!plat)
913 		return -ENOMEM;
914 
915 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
916 					   sizeof(*plat->mdio_bus_data),
917 					   GFP_KERNEL);
918 	if (!plat->mdio_bus_data)
919 		return -ENOMEM;
920 
921 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
922 				     GFP_KERNEL);
923 	if (!plat->dma_cfg)
924 		return -ENOMEM;
925 
926 	/* Enable pci device */
927 	ret = pci_enable_device(pdev);
928 	if (ret) {
929 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
930 			__func__);
931 		return ret;
932 	}
933 
934 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
935 	if (ret)
936 		return ret;
937 
938 	pci_set_master(pdev);
939 
940 	plat->bsp_priv = intel_priv;
941 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
942 	intel_priv->crossts_adj = 1;
943 
944 	/* Initialize all MSI vectors to invalid so that it can be set
945 	 * according to platform data settings below.
946 	 * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX)
947 	 */
948 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
949 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
950 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
951 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
952 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
953 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
954 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
955 
956 	ret = info->setup(pdev, plat);
957 	if (ret)
958 		return ret;
959 
960 	memset(&res, 0, sizeof(res));
961 	res.addr = pcim_iomap_table(pdev)[0];
962 
963 	if (plat->eee_usecs_rate > 0) {
964 		u32 tx_lpi_usec;
965 
966 		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
967 		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
968 	}
969 
970 	ret = stmmac_config_multi_msi(pdev, plat, &res);
971 	if (ret) {
972 		ret = stmmac_config_single_msi(pdev, plat, &res);
973 		if (ret) {
974 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
975 				__func__);
976 			goto err_alloc_irq;
977 		}
978 	}
979 
980 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
981 	if (ret) {
982 		goto err_dvr_probe;
983 	}
984 
985 	return 0;
986 
987 err_dvr_probe:
988 	pci_free_irq_vectors(pdev);
989 err_alloc_irq:
990 	clk_disable_unprepare(plat->stmmac_clk);
991 	clk_unregister_fixed_rate(plat->stmmac_clk);
992 	return ret;
993 }
994 
/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main to free the net resources
 * and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	pci_free_irq_vectors(pdev);

	/* Drop the fixed-rate clock registered at probe time.
	 * NOTE(review): no clk_disable_unprepare() here — verify that
	 * stmmac_dvr_remove() drops the prepare/enable count first.
	 */
	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));

	pci_disable_device(pdev);
}
1017 
/* PM suspend: quiesce the stmmac core first, then save PCI config space,
 * disable the device and leave it wake-capable from D3.
 */
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}
1035 
/* PM resume: restore PCI state and power, re-enable the device and bus
 * mastering, then bring the stmmac core back up.
 */
static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}
1052 
static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

/* PCI device IDs of all supported Quark and Intel mGbE controllers */
#define PCI_DEVICE_ID_INTEL_QUARK_ID			0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID		0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID		0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID		0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID		0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID		0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID		0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID		0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID		0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID		0x7aad

/* Map each device ID to its board-specific setup callbacks */
static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver         = {
		.pm     = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");
1110