xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision e83332842a46c091992ad06145b5c1b65a08ab05)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include "dwmac-intel.h"
9 #include "dwmac4.h"
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 
13 #define INTEL_MGBE_ADHOC_ADDR	0x15
14 #define INTEL_MGBE_XPCS_ADDR	0x16
15 
16 /* PTP clock frequency selection for PSE and PCH GbE */
17 #define PSE_PTP_CLK_FREQ_MASK		(GMAC_GPO0 | GMAC_GPO3)
18 #define PSE_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
19 #define PSE_PTP_CLK_FREQ_200MHZ		(GMAC_GPO0 | GMAC_GPO3)
20 #define PSE_PTP_CLK_FREQ_256MHZ		(0)
21 #define PCH_PTP_CLK_FREQ_MASK		(GMAC_GPO0)
22 #define PCH_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
23 #define PCH_PTP_CLK_FREQ_200MHZ		(0)
24 
25 /* Cross-timestamping defines */
26 #define ART_CPUID_LEAF		0x15
27 #define EHL_PSE_ART_MHZ		19200000
28 
29 struct intel_priv_data {
30 	int mdio_adhoc_addr;	/* MDIO address for SerDes etc. */
31 	unsigned long crossts_adj;
32 	bool is_pse;
33 };
34 
35 /* This struct is used to associate the PCI function of a MAC controller on a
36  * board, discovered via DMI, with the address of the PHY connected to that
37  * MAC. A negative address value means the MAC controller is not connected
38  * to a PHY.
39  */
40 struct stmmac_pci_func_data {
41 	unsigned int func;
42 	int phy_addr;
43 };
44 
45 struct stmmac_pci_dmi_data {
46 	const struct stmmac_pci_func_data *func;
47 	size_t nfuncs;
48 };
49 
50 struct stmmac_pci_info {
51 	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
52 };
53 
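/* Look up the PHY address for the given PCI function on DMI-identified
 * boards. Returns the PHY address from the matching table entry, or
 * -ENODEV if the board or the function is not listed.
 */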
54 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
55 				    const struct dmi_system_id *dmi_list)
56 {
57 	const struct stmmac_pci_func_data *func_data;
58 	const struct stmmac_pci_dmi_data *dmi_data;
59 	const struct dmi_system_id *dmi_id;
60 	int func = PCI_FUNC(pdev->devfn);
61 	size_t n;
62 
63 	dmi_id = dmi_first_match(dmi_list);
64 	if (!dmi_id)
65 		return -ENODEV;
66 
67 	dmi_data = dmi_id->driver_data;
68 	func_data = dmi_data->func;
69 
70 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
71 		if (func_data->func == func)
72 			return func_data->phy_addr;
73 
74 	return -ENODEV;
75 }
76 
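/* Poll a SerDes register over the ad-hoc MDIO address until the masked
 * value matches the expected one. Retries a bounded number of times with
 * a short delay between reads and returns -ETIMEDOUT on failure.
 */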
77 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
78 			      int phyreg, u32 mask, u32 val)
79 {
80 	unsigned int retries = 10;
81 	int val_rd;
82 
83 	do {
84 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
85 		if ((val_rd & mask) == (val & mask))
86 			return 0;
87 		udelay(POLL_DELAY_US);
88 	} while (--retries);
89 
90 	return -ETIMEDOUT;
91 }
92 
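/* Power up the SerDes lane: program the rate and PCLK for the negotiated
 * speed, request the PLL clock, assert the lane reset and move the lane
 * to the P0 power state, polling SERDES_GSR0 to confirm each step.
 */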
93 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
94 {
95 	struct intel_priv_data *intel_priv = priv_data;
96 	struct stmmac_priv *priv = netdev_priv(ndev);
97 	int serdes_phy_addr = 0;
98 	u32 data = 0;
99 
100 	if (!intel_priv->mdio_adhoc_addr)
101 		return 0;
102 
103 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
104 
105 	/* Set the serdes rate and the PCLK rate */
106 	data = mdiobus_read(priv->mii, serdes_phy_addr,
107 			    SERDES_GCR0);
108 
109 	data &= ~SERDES_RATE_MASK;
110 	data &= ~SERDES_PCLK_MASK;
111 
112 	if (priv->plat->max_speed == 2500)
113 		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
114 			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
115 	else
116 		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
117 			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
118 
119 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
120 
121 	/* assert clk_req */
122 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
123 	data |= SERDES_PLL_CLK;
124 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
125 
126 	/* check for clk_ack assertion */
127 	data = serdes_status_poll(priv, serdes_phy_addr,
128 				  SERDES_GSR0,
129 				  SERDES_PLL_CLK,
130 				  SERDES_PLL_CLK);
131 
132 	if (data) {
133 		dev_err(priv->device, "Serdes PLL clk request timeout\n");
134 		return data;
135 	}
136 
137 	/* assert lane reset */
138 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
139 	data |= SERDES_RST;
140 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
141 
142 	/* check for assert lane reset reflection */
143 	data = serdes_status_poll(priv, serdes_phy_addr,
144 				  SERDES_GSR0,
145 				  SERDES_RST,
146 				  SERDES_RST);
147 
148 	if (data) {
149 		dev_err(priv->device, "Serdes assert lane reset timeout\n");
150 		return data;
151 	}
152 
153 	/*  move power state to P0 */
154 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
155 
156 	data &= ~SERDES_PWR_ST_MASK;
157 	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
158 
159 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
160 
161 	/* Check for P0 state */
162 	data = serdes_status_poll(priv, serdes_phy_addr,
163 				  SERDES_GSR0,
164 				  SERDES_PWR_ST_MASK,
165 				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
166 
167 	if (data) {
168 		dev_err(priv->device, "Serdes power state P0 timeout.\n");
169 		return data;
170 	}
171 
172 	/* PSE only - ungate SGMII PHY Rx Clock */
173 	if (intel_priv->is_pse)
174 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
175 			       0, SERDES_PHY_RX_CLK);
176 
177 	return 0;
178 }
179 
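/* Reverse of intel_serdes_powerup(): gate the PSE SGMII PHY Rx clock,
 * move the lane to the P3 power state, release the PLL clock request and
 * de-assert the lane reset, polling SERDES_GSR0 after each step.
 */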
180 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
181 {
182 	struct intel_priv_data *intel_priv = intel_data;
183 	struct stmmac_priv *priv = netdev_priv(ndev);
184 	int serdes_phy_addr = 0;
185 	u32 data = 0;
186 
187 	if (!intel_priv->mdio_adhoc_addr)
188 		return;
189 
190 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
191 
192 	/* PSE only - gate SGMII PHY Rx Clock */
193 	if (intel_priv->is_pse)
194 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
195 			       SERDES_PHY_RX_CLK, 0);
196 
197 	/*  move power state to P3 */
198 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
199 
200 	data &= ~SERDES_PWR_ST_MASK;
201 	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
202 
203 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
204 
205 	/* Check for P3 state */
206 	data = serdes_status_poll(priv, serdes_phy_addr,
207 				  SERDES_GSR0,
208 				  SERDES_PWR_ST_MASK,
209 				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
210 
211 	if (data) {
212 		dev_err(priv->device, "Serdes power state P3 timeout\n");
213 		return;
214 	}
215 
216 	/* de-assert clk_req */
217 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
218 	data &= ~SERDES_PLL_CLK;
219 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
220 
221 	/* check for clk_ack de-assert */
222 	data = serdes_status_poll(priv, serdes_phy_addr,
223 				  SERDES_GSR0,
224 				  SERDES_PLL_CLK,
225 				  (u32)~SERDES_PLL_CLK);
226 
227 	if (data) {
228 		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
229 		return;
230 	}
231 
232 	/* de-assert lane reset */
233 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
234 	data &= ~SERDES_RST;
235 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
236 
237 	/* check for de-assert lane reset reflection */
238 	data = serdes_status_poll(priv, serdes_phy_addr,
239 				  SERDES_GSR0,
240 				  SERDES_RST,
241 				  (u32)~SERDES_RST);
242 
243 	if (data) {
244 		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
245 		return;
246 	}
247 }
248 
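/* Read the SerDes link mode to decide between 2.5G and 1G operation:
 * 2.5G uses 2500base-X without in-band AN, while 1G uses SGMII with
 * in-band AN handled by the XPCS.
 */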
249 static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
250 {
251 	struct intel_priv_data *intel_priv = intel_data;
252 	struct stmmac_priv *priv = netdev_priv(ndev);
253 	int serdes_phy_addr = 0;
254 	u32 data = 0;
255 
256 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
257 
258 	/* Determine the link speed mode: 2.5Gbps/1Gbps */
259 	data = mdiobus_read(priv->mii, serdes_phy_addr,
260 			    SERDES_GCR);
261 
262 	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
263 	    SERDES_LINK_MODE_2G5) {
264 		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
265 		priv->plat->max_speed = 2500;
266 		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
267 		priv->plat->mdio_bus_data->xpcs_an_inband = false;
268 	} else {
269 		priv->plat->max_speed = 1000;
270 		priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
271 		priv->plat->mdio_bus_data->xpcs_an_inband = true;
272 	}
273 }
274 
275 /* Program the PTP clock frequency for the different variants of
276  * Intel mGBE, which have slightly different GPO mappings.
277  */
278 static void intel_mgbe_ptp_clk_freq_config(void *npriv)
279 {
280 	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
281 	struct intel_priv_data *intel_priv;
282 	u32 gpio_value;
283 
284 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
285 
286 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
287 
288 	if (intel_priv->is_pse) {
289 		/* For PSE GbE, use 200MHz */
290 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
291 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
292 	} else {
293 		/* For PCH GbE, use 200MHz */
294 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
295 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
296 	}
297 
298 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
299 }
300 
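/* Reassemble the 64-bit ART timestamp from the PMC_ART_VALUE3..0
 * registers read over the ad-hoc MDIO address, most significant part
 * first.
 */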
301 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
302 			u64 *art_time)
303 {
304 	u64 ns;
305 
306 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
307 	ns <<= GMAC4_ART_TIME_SHIFT;
308 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
309 	ns <<= GMAC4_ART_TIME_SHIFT;
310 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
311 	ns <<= GMAC4_ART_TIME_SHIFT;
312 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
313 
314 	*art_time = ns;
315 }
316 
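/* Cross-timestamp the PTP clock against the Always Running Timer (ART):
 * arm an internal auxiliary snapshot, toggle GPO1 to trigger it, then
 * read back the captured PTP time and the ART value so the core can
 * correlate device time with system (TSC) time.
 */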
317 static int intel_crosststamp(ktime_t *device,
318 			     struct system_counterval_t *system,
319 			     void *ctx)
320 {
321 	struct intel_priv_data *intel_priv;
322 
323 	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
324 	void __iomem *ptpaddr = priv->ptpaddr;
325 	void __iomem *ioaddr = priv->hw->pcsr;
326 	unsigned long flags;
327 	u64 art_time = 0;
328 	u64 ptp_time = 0;
329 	u32 num_snapshot;
330 	u32 gpio_value;
331 	u32 acr_value;
332 	int ret;
333 	u32 v;
334 	int i;
335 
336 	if (!boot_cpu_has(X86_FEATURE_ART))
337 		return -EOPNOTSUPP;
338 
339 	intel_priv = priv->plat->bsp_priv;
340 
341 	/* Internal cross-timestamping and externally triggered event
342 	 * timestamping cannot run concurrently.
343 	 */
344 	if (priv->plat->ext_snapshot_en)
345 		return -EBUSY;
346 
347 	mutex_lock(&priv->aux_ts_lock);
348 	/* Enable Internal snapshot trigger */
349 	acr_value = readl(ptpaddr + PTP_ACR);
350 	acr_value &= ~PTP_ACR_MASK;
351 	switch (priv->plat->int_snapshot_num) {
352 	case AUX_SNAPSHOT0:
353 		acr_value |= PTP_ACR_ATSEN0;
354 		break;
355 	case AUX_SNAPSHOT1:
356 		acr_value |= PTP_ACR_ATSEN1;
357 		break;
358 	case AUX_SNAPSHOT2:
359 		acr_value |= PTP_ACR_ATSEN2;
360 		break;
361 	case AUX_SNAPSHOT3:
362 		acr_value |= PTP_ACR_ATSEN3;
363 		break;
364 	default:
365 		mutex_unlock(&priv->aux_ts_lock);
366 		return -EINVAL;
367 	}
368 	writel(acr_value, ptpaddr + PTP_ACR);
369 
370 	/* Clear FIFO */
371 	acr_value = readl(ptpaddr + PTP_ACR);
372 	acr_value |= PTP_ACR_ATSFC;
373 	writel(acr_value, ptpaddr + PTP_ACR);
374 	/* Release the mutex */
375 	mutex_unlock(&priv->aux_ts_lock);
376 
377 	/* Trigger the internal snapshot signal.
378 	 * Create a rising edge by toggling GPO1 low and then
379 	 * back to high.
380 	 */
381 	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
382 	gpio_value &= ~GMAC_GPO1;
383 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
384 	gpio_value |= GMAC_GPO1;
385 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
386 
387 	/* Poll for time sync operation done */
388 	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
389 				 (v & GMAC_INT_TSIE), 100, 10000);
390 
391 	if (ret == -ETIMEDOUT) {
392 		pr_err("%s: Wait for time sync operation timeout\n", __func__);
393 		return ret;
394 	}
395 
396 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
397 			GMAC_TIMESTAMP_ATSNS_MASK) >>
398 			GMAC_TIMESTAMP_ATSNS_SHIFT;
399 
400 	/* Repeat until the timestamps are from the last FIFO segment */
401 	for (i = 0; i < num_snapshot; i++) {
402 		spin_lock_irqsave(&priv->ptp_lock, flags);
403 		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
404 		*device = ns_to_ktime(ptp_time);
405 		spin_unlock_irqrestore(&priv->ptp_lock, flags);
406 		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
407 		*system = convert_art_to_tsc(art_time);
408 	}
409 
410 	system->cycles *= intel_priv->crossts_adj;
411 
412 	return 0;
413 }
414 
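/* The PSE ART runs at a different rate than the CPU ART. Compute the
 * ratio between the CPU ART frequency (from CPUID leaf 0x15) and the
 * PSE ART base rate; intel_crosststamp() uses it to scale the captured
 * cycle count.
 */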
415 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
416 				       int base)
417 {
418 	if (boot_cpu_has(X86_FEATURE_ART)) {
419 		unsigned int art_freq;
420 
421 		/* On systems that support ART, the ART frequency can be
422 		 * obtained from the ECX register of CPUID leaf 0x15.
423 		 */
424 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
425 		do_div(art_freq, base);
426 		intel_priv->crossts_adj = art_freq;
427 	}
428 }
429 
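/* Defaults shared by the legacy GMAC-based (Quark) platforms; the mGbE
 * platforms use intel_mgbe_common_data() below instead.
 */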
430 static void common_default_data(struct plat_stmmacenet_data *plat)
431 {
432 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
433 	plat->has_gmac = 1;
434 	plat->force_sf_dma_mode = 1;
435 
436 	plat->mdio_bus_data->needs_reset = true;
437 
438 	/* Set default value for multicast hash bins */
439 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
440 
441 	/* Set default value for unicast filter entries */
442 	plat->unicast_filter_entries = 1;
443 
444 	/* Set the maxmtu to a default of JUMBO_LEN */
445 	plat->maxmtu = JUMBO_LEN;
446 
447 	/* Set default number of RX and TX queues to use */
448 	plat->tx_queues_to_use = 1;
449 	plat->rx_queues_to_use = 1;
450 
451 	/* Disable Priority config by default */
452 	plat->tx_queues_cfg[0].use_prio = false;
453 	plat->rx_queues_cfg[0].use_prio = false;
454 
455 	/* Disable RX queues routing by default */
456 	plat->rx_queues_cfg[0].pkt_route = 0x0;
457 }
458 
459 static int intel_mgbe_common_data(struct pci_dev *pdev,
460 				  struct plat_stmmacenet_data *plat)
461 {
462 	char clk_name[20];
463 	int ret;
464 	int i;
465 
466 	plat->pdev = pdev;
467 	plat->phy_addr = -1;
468 	plat->clk_csr = 5;
469 	plat->has_gmac = 0;
470 	plat->has_gmac4 = 1;
471 	plat->force_sf_dma_mode = 0;
472 	plat->tso_en = 1;
473 
474 	/* Multiplying factor applied to the clk_eee_i clock time
475 	 * period to bring it closer to 100 ns. The value should be
476 	 * programmed such that clk_eee_time_period * (MULT_FACT_100NS + 1)
477 	 * falls within 80 ns to 120 ns:
478 	 * clk_eee frequency is 19.2 MHz
479 	 * clk_eee_time_period is 52 ns
480 	 * 52 ns * (1 + 1) = 104 ns
481 	 * MULT_FACT_100NS = 1
482 	 */
483 	plat->mult_fact_100ns = 1;
484 
485 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
486 
487 	for (i = 0; i < plat->rx_queues_to_use; i++) {
488 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
489 		plat->rx_queues_cfg[i].chan = i;
490 
491 		/* Disable Priority config by default */
492 		plat->rx_queues_cfg[i].use_prio = false;
493 
494 		/* Disable RX queues routing by default */
495 		plat->rx_queues_cfg[i].pkt_route = 0x0;
496 	}
497 
498 	for (i = 0; i < plat->tx_queues_to_use; i++) {
499 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
500 
501 		/* Disable Priority config by default */
502 		plat->tx_queues_cfg[i].use_prio = false;
503 		/* Default TX Q0 to use TSO; the remaining TX queues use TBS */
504 		if (i > 0)
505 			plat->tx_queues_cfg[i].tbs_en = 1;
506 	}
507 
508 	/* FIFO size is 4096 bytes per TX/RX queue */
509 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
510 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
511 
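	/* Use weighted round-robin TX scheduling with increasing weights so
	 * higher-numbered TX queues get a proportionally larger share.
	 */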
512 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
513 	plat->tx_queues_cfg[0].weight = 0x09;
514 	plat->tx_queues_cfg[1].weight = 0x0A;
515 	plat->tx_queues_cfg[2].weight = 0x0B;
516 	plat->tx_queues_cfg[3].weight = 0x0C;
517 	plat->tx_queues_cfg[4].weight = 0x0D;
518 	plat->tx_queues_cfg[5].weight = 0x0E;
519 	plat->tx_queues_cfg[6].weight = 0x0F;
520 	plat->tx_queues_cfg[7].weight = 0x10;
521 
522 	plat->dma_cfg->pbl = 32;
523 	plat->dma_cfg->pblx8 = true;
524 	plat->dma_cfg->fixed_burst = 0;
525 	plat->dma_cfg->mixed_burst = 0;
526 	plat->dma_cfg->aal = 0;
527 	plat->dma_cfg->dche = true;
528 
529 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
530 				 GFP_KERNEL);
531 	if (!plat->axi)
532 		return -ENOMEM;
533 
534 	plat->axi->axi_lpi_en = 0;
535 	plat->axi->axi_xit_frm = 0;
536 	plat->axi->axi_wr_osr_lmt = 1;
537 	plat->axi->axi_rd_osr_lmt = 1;
538 	plat->axi->axi_blen[0] = 4;
539 	plat->axi->axi_blen[1] = 8;
540 	plat->axi->axi_blen[2] = 16;
541 
542 	plat->ptp_max_adj = plat->clk_ptp_rate;
543 	plat->eee_usecs_rate = plat->clk_ptp_rate;
544 
545 	/* Set system clock */
546 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
547 
548 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
549 						   clk_name, NULL, 0,
550 						   plat->clk_ptp_rate);
551 
552 	if (IS_ERR(plat->stmmac_clk)) {
553 		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
554 		plat->stmmac_clk = NULL;
555 	}
556 
557 	ret = clk_prepare_enable(plat->stmmac_clk);
558 	if (ret) {
559 		clk_unregister_fixed_rate(plat->stmmac_clk);
560 		return ret;
561 	}
562 
563 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
564 
565 	/* Set default value for multicast hash bins */
566 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
567 
568 	/* Set default value for unicast filter entries */
569 	plat->unicast_filter_entries = 1;
570 
571 	/* Set the maxmtu to a default of JUMBO_LEN */
572 	plat->maxmtu = JUMBO_LEN;
573 
574 	plat->vlan_fail_q_en = true;
575 
576 	/* Use the last Rx queue */
577 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
578 
579 	/* Intel mGbE SGMII interface uses pcs-xpcs */
580 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
581 		plat->mdio_bus_data->has_xpcs = true;
582 		plat->mdio_bus_data->xpcs_an_inband = true;
583 	}
584 
585 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
586 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
587 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
588 
589 	plat->int_snapshot_num = AUX_SNAPSHOT1;
590 	plat->ext_snapshot_num = AUX_SNAPSHOT0;
591 
592 	plat->has_crossts = true;
593 	plat->crosststamp = intel_crosststamp;
594 
595 	/* Set up the MSI vector offsets specific to the Intel mGbE controller */
596 	plat->msi_mac_vec = 29;
597 	plat->msi_lpi_vec = 28;
598 	plat->msi_sfty_ce_vec = 27;
599 	plat->msi_sfty_ue_vec = 26;
600 	plat->msi_rx_base_vec = 0;
601 	plat->msi_tx_base_vec = 1;
602 
603 	return 0;
604 }
605 
606 static int ehl_common_data(struct pci_dev *pdev,
607 			   struct plat_stmmacenet_data *plat)
608 {
609 	plat->rx_queues_to_use = 8;
610 	plat->tx_queues_to_use = 8;
611 	plat->clk_ptp_rate = 200000000;
612 
613 	plat->safety_feat_cfg->tsoee = 1;
614 	plat->safety_feat_cfg->mrxpee = 1;
615 	plat->safety_feat_cfg->mestee = 1;
616 	plat->safety_feat_cfg->mrxee = 1;
617 	plat->safety_feat_cfg->mtxee = 1;
618 	plat->safety_feat_cfg->epsi = 0;
619 	plat->safety_feat_cfg->edpp = 0;
620 	plat->safety_feat_cfg->prtyen = 0;
621 	plat->safety_feat_cfg->tmouten = 0;
622 
623 	return intel_mgbe_common_data(pdev, plat);
624 }
625 
626 static int ehl_sgmii_data(struct pci_dev *pdev,
627 			  struct plat_stmmacenet_data *plat)
628 {
629 	plat->bus_id = 1;
630 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
631 	plat->speed_mode_2500 = intel_speed_mode_2500;
632 	plat->serdes_powerup = intel_serdes_powerup;
633 	plat->serdes_powerdown = intel_serdes_powerdown;
634 
635 	return ehl_common_data(pdev, plat);
636 }
637 
638 static struct stmmac_pci_info ehl_sgmii1g_info = {
639 	.setup = ehl_sgmii_data,
640 };
641 
642 static int ehl_rgmii_data(struct pci_dev *pdev,
643 			  struct plat_stmmacenet_data *plat)
644 {
645 	plat->bus_id = 1;
646 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
647 
648 	return ehl_common_data(pdev, plat);
649 }
650 
651 static struct stmmac_pci_info ehl_rgmii1g_info = {
652 	.setup = ehl_rgmii_data,
653 };
654 
655 static int ehl_pse0_common_data(struct pci_dev *pdev,
656 				struct plat_stmmacenet_data *plat)
657 {
658 	struct intel_priv_data *intel_priv = plat->bsp_priv;
659 
660 	intel_priv->is_pse = true;
661 	plat->bus_id = 2;
662 	plat->addr64 = 32;
663 
664 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
665 
666 	return ehl_common_data(pdev, plat);
667 }
668 
669 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
670 				 struct plat_stmmacenet_data *plat)
671 {
672 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
673 	return ehl_pse0_common_data(pdev, plat);
674 }
675 
676 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
677 	.setup = ehl_pse0_rgmii1g_data,
678 };
679 
680 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
681 				 struct plat_stmmacenet_data *plat)
682 {
683 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
684 	plat->speed_mode_2500 = intel_speed_mode_2500;
685 	plat->serdes_powerup = intel_serdes_powerup;
686 	plat->serdes_powerdown = intel_serdes_powerdown;
687 	return ehl_pse0_common_data(pdev, plat);
688 }
689 
690 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
691 	.setup = ehl_pse0_sgmii1g_data,
692 };
693 
694 static int ehl_pse1_common_data(struct pci_dev *pdev,
695 				struct plat_stmmacenet_data *plat)
696 {
697 	struct intel_priv_data *intel_priv = plat->bsp_priv;
698 
699 	intel_priv->is_pse = true;
700 	plat->bus_id = 3;
701 	plat->addr64 = 32;
702 
703 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
704 
705 	return ehl_common_data(pdev, plat);
706 }
707 
708 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
709 				 struct plat_stmmacenet_data *plat)
710 {
711 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
712 	return ehl_pse1_common_data(pdev, plat);
713 }
714 
715 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
716 	.setup = ehl_pse1_rgmii1g_data,
717 };
718 
719 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
720 				 struct plat_stmmacenet_data *plat)
721 {
722 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
723 	plat->speed_mode_2500 = intel_speed_mode_2500;
724 	plat->serdes_powerup = intel_serdes_powerup;
725 	plat->serdes_powerdown = intel_serdes_powerdown;
726 	return ehl_pse1_common_data(pdev, plat);
727 }
728 
729 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
730 	.setup = ehl_pse1_sgmii1g_data,
731 };
732 
733 static int tgl_common_data(struct pci_dev *pdev,
734 			   struct plat_stmmacenet_data *plat)
735 {
736 	plat->rx_queues_to_use = 6;
737 	plat->tx_queues_to_use = 4;
738 	plat->clk_ptp_rate = 200000000;
739 
740 	plat->safety_feat_cfg->tsoee = 1;
741 	plat->safety_feat_cfg->mrxpee = 0;
742 	plat->safety_feat_cfg->mestee = 1;
743 	plat->safety_feat_cfg->mrxee = 1;
744 	plat->safety_feat_cfg->mtxee = 1;
745 	plat->safety_feat_cfg->epsi = 0;
746 	plat->safety_feat_cfg->edpp = 0;
747 	plat->safety_feat_cfg->prtyen = 0;
748 	plat->safety_feat_cfg->tmouten = 0;
749 
750 	return intel_mgbe_common_data(pdev, plat);
751 }
752 
753 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
754 			       struct plat_stmmacenet_data *plat)
755 {
756 	plat->bus_id = 1;
757 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
758 	plat->speed_mode_2500 = intel_speed_mode_2500;
759 	plat->serdes_powerup = intel_serdes_powerup;
760 	plat->serdes_powerdown = intel_serdes_powerdown;
761 	return tgl_common_data(pdev, plat);
762 }
763 
764 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
765 	.setup = tgl_sgmii_phy0_data,
766 };
767 
768 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
769 			       struct plat_stmmacenet_data *plat)
770 {
771 	plat->bus_id = 2;
772 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
773 	plat->speed_mode_2500 = intel_speed_mode_2500;
774 	plat->serdes_powerup = intel_serdes_powerup;
775 	plat->serdes_powerdown = intel_serdes_powerdown;
776 	return tgl_common_data(pdev, plat);
777 }
778 
779 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
780 	.setup = tgl_sgmii_phy1_data,
781 };
782 
783 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
784 				struct plat_stmmacenet_data *plat)
785 {
786 	plat->bus_id = 1;
787 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
788 
789 	/* SerDes power up and power down are done in BIOS for ADL */
790 
791 	return tgl_common_data(pdev, plat);
792 }
793 
794 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
795 	.setup = adls_sgmii_phy0_data,
796 };
797 
798 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
799 				struct plat_stmmacenet_data *plat)
800 {
801 	plat->bus_id = 2;
802 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
803 
804 	/* SerDes power up and power down are done in BIOS for ADL */
805 
806 	return tgl_common_data(pdev, plat);
807 }
808 
809 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
810 	.setup = adls_sgmii_phy1_data,
811 };
812 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
813 	{
814 		.func = 6,
815 		.phy_addr = 1,
816 	},
817 };
818 
819 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
820 	.func = galileo_stmmac_func_data,
821 	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
822 };
823 
824 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
825 	{
826 		.func = 6,
827 		.phy_addr = 1,
828 	},
829 	{
830 		.func = 7,
831 		.phy_addr = 1,
832 	},
833 };
834 
835 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
836 	.func = iot2040_stmmac_func_data,
837 	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
838 };
839 
840 static const struct dmi_system_id quark_pci_dmi[] = {
841 	{
842 		.matches = {
843 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
844 		},
845 		.driver_data = (void *)&galileo_stmmac_dmi_data,
846 	},
847 	{
848 		.matches = {
849 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
850 		},
851 		.driver_data = (void *)&galileo_stmmac_dmi_data,
852 	},
853 	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
854 	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
855 	 * has only one PCI network device, while the other asset tags are
856 	 * for the IOT2040, which has two.
857 	 */
858 	{
859 		.matches = {
860 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
861 			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
862 					"6ES7647-0AA00-0YA2"),
863 		},
864 		.driver_data = (void *)&galileo_stmmac_dmi_data,
865 	},
866 	{
867 		.matches = {
868 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
869 		},
870 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
871 	},
872 	{}
873 };
874 
875 static int quark_default_data(struct pci_dev *pdev,
876 			      struct plat_stmmacenet_data *plat)
877 {
878 	int ret;
879 
880 	/* Set common default data first */
881 	common_default_data(plat);
882 
883 	/* Refuse to load the driver and register the net device if the MAC
884 	 * controller is not connected to any PHY interface.
885 	 */
886 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
887 	if (ret < 0) {
888 		/* Return error to the caller on DMI enabled boards. */
889 		if (dmi_get_system_info(DMI_BOARD_NAME))
890 			return ret;
891 
892 		/* Galileo boards with old firmware don't support DMI. We always
893 		 * use 1 here as the PHY address, so at least the first MAC
894 		 * controller found gets probed.
895 		 */
896 		ret = 1;
897 	}
898 
899 	plat->bus_id = pci_dev_id(pdev);
900 	plat->phy_addr = ret;
901 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
902 
903 	plat->dma_cfg->pbl = 16;
904 	plat->dma_cfg->pblx8 = true;
905 	plat->dma_cfg->fixed_burst = 1;
906 	/* AXI (TODO) */
907 
908 	return 0;
909 }
910 
911 static const struct stmmac_pci_info quark_info = {
912 	.setup = quark_default_data,
913 };
914 
915 static int stmmac_config_single_msi(struct pci_dev *pdev,
916 				    struct plat_stmmacenet_data *plat,
917 				    struct stmmac_resources *res)
918 {
919 	int ret;
920 
921 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
922 	if (ret < 0) {
923 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
924 			 __func__);
925 		return ret;
926 	}
927 
928 	res->irq = pci_irq_vector(pdev, 0);
929 	res->wol_irq = res->irq;
930 	plat->multi_msi_en = 0;
931 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
932 		 __func__);
933 
934 	return 0;
935 }
936 
937 static int stmmac_config_multi_msi(struct pci_dev *pdev,
938 				   struct plat_stmmacenet_data *plat,
939 				   struct stmmac_resources *res)
940 {
941 	int ret;
942 	int i;
943 
944 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
945 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
946 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
947 			 __func__);
948 		return -1;
949 	}
950 
951 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
952 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
953 	if (ret < 0) {
954 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
955 			 __func__);
956 		return ret;
957 	}
958 
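	/* RX and TX vectors are interleaved: with msi_rx_base_vec = 0 and
	 * msi_tx_base_vec = 1 (set in intel_mgbe_common_data()), RX queues
	 * land on even vectors and TX queues on odd vectors.
	 */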
959 	/* For RX MSI */
960 	for (i = 0; i < plat->rx_queues_to_use; i++) {
961 		res->rx_irq[i] = pci_irq_vector(pdev,
962 						plat->msi_rx_base_vec + i * 2);
963 	}
964 
965 	/* For TX MSI */
966 	for (i = 0; i < plat->tx_queues_to_use; i++) {
967 		res->tx_irq[i] = pci_irq_vector(pdev,
968 						plat->msi_tx_base_vec + i * 2);
969 	}
970 
971 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
972 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
973 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
974 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
975 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
976 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
977 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
978 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
979 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
980 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
981 
982 	plat->multi_msi_en = 1;
983 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
984 
985 	return 0;
986 }
987 
988 /**
989  * intel_eth_pci_probe
990  *
991  * @pdev: pci device pointer
992  * @id: pointer to the table of device IDs
993  *
994  * Description: This probing function gets called for all PCI devices which
995  * match the ID table and are not "owned" by another driver yet. This function
996  * gets passed a "struct pci_dev *" for each device whose entry in the ID table
997  * matches the device. The probe function returns zero when the driver chooses
998  * to take "ownership" of the device or a negative error code otherwise.
999  */
1000 static int intel_eth_pci_probe(struct pci_dev *pdev,
1001 			       const struct pci_device_id *id)
1002 {
1003 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1004 	struct intel_priv_data *intel_priv;
1005 	struct plat_stmmacenet_data *plat;
1006 	struct stmmac_resources res;
1007 	int ret;
1008 
1009 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1010 	if (!intel_priv)
1011 		return -ENOMEM;
1012 
1013 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1014 	if (!plat)
1015 		return -ENOMEM;
1016 
1017 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1018 					   sizeof(*plat->mdio_bus_data),
1019 					   GFP_KERNEL);
1020 	if (!plat->mdio_bus_data)
1021 		return -ENOMEM;
1022 
1023 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1024 				     GFP_KERNEL);
1025 	if (!plat->dma_cfg)
1026 		return -ENOMEM;
1027 
1028 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1029 					     sizeof(*plat->safety_feat_cfg),
1030 					     GFP_KERNEL);
1031 	if (!plat->safety_feat_cfg)
1032 		return -ENOMEM;
1033 
1034 	/* Enable pci device */
1035 	ret = pcim_enable_device(pdev);
1036 	if (ret) {
1037 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1038 			__func__);
1039 		return ret;
1040 	}
1041 
1042 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1043 	if (ret)
1044 		return ret;
1045 
1046 	pci_set_master(pdev);
1047 
1048 	plat->bsp_priv = intel_priv;
1049 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1050 	intel_priv->crossts_adj = 1;
1051 
1052 	/* Initialize all MSI vectors to invalid so that they can be set
1053 	 * according to the platform data settings below.
1054 	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
1055 	 */
1056 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1057 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1058 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1059 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1060 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1061 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1062 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1063 
1064 	ret = info->setup(pdev, plat);
1065 	if (ret)
1066 		return ret;
1067 
1068 	memset(&res, 0, sizeof(res));
1069 	res.addr = pcim_iomap_table(pdev)[0];
1070 
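	/* Program the 1 us tick counter (clock cycles per microsecond minus
	 * one) from eee_usecs_rate, which the setup callbacks set to the PTP
	 * clock rate; the MAC uses it for its microsecond-based LPI timers.
	 */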
1071 	if (plat->eee_usecs_rate > 0) {
1072 		u32 tx_lpi_usec;
1073 
1074 		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
1075 		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
1076 	}
1077 
1078 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1079 	if (ret) {
1080 		ret = stmmac_config_single_msi(pdev, plat, &res);
1081 		if (ret) {
1082 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1083 				__func__);
1084 			goto err_alloc_irq;
1085 		}
1086 	}
1087 
1088 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1089 	if (ret) {
1090 		goto err_dvr_probe;
1091 	}
1092 
1093 	return 0;
1094 
1095 err_dvr_probe:
1096 	pci_free_irq_vectors(pdev);
1097 err_alloc_irq:
1098 	clk_disable_unprepare(plat->stmmac_clk);
1099 	clk_unregister_fixed_rate(plat->stmmac_clk);
1100 	return ret;
1101 }
1102 
1103 /**
1104  * intel_eth_pci_remove
1105  *
1106  * @pdev: pci device pointer
1107  * Description: this function calls stmmac_dvr_remove() to free the net
1108  * resources and then releases the PCI resources.
1109  */
1110 static void intel_eth_pci_remove(struct pci_dev *pdev)
1111 {
1112 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1113 	struct stmmac_priv *priv = netdev_priv(ndev);
1114 
1115 	stmmac_dvr_remove(&pdev->dev);
1116 
1117 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1118 
1119 	pcim_iounmap_regions(pdev, BIT(0));
1120 }
1121 
1122 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1123 {
1124 	struct pci_dev *pdev = to_pci_dev(dev);
1125 	int ret;
1126 
1127 	ret = stmmac_suspend(dev);
1128 	if (ret)
1129 		return ret;
1130 
1131 	ret = pci_save_state(pdev);
1132 	if (ret)
1133 		return ret;
1134 
1135 	pci_wake_from_d3(pdev, true);
1136 	return 0;
1137 }
1138 
1139 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1140 {
1141 	struct pci_dev *pdev = to_pci_dev(dev);
1142 	int ret;
1143 
1144 	pci_restore_state(pdev);
1145 	pci_set_power_state(pdev, PCI_D0);
1146 
1147 	ret = pcim_enable_device(pdev);
1148 	if (ret)
1149 		return ret;
1150 
1151 	pci_set_master(pdev);
1152 
1153 	return stmmac_resume(dev);
1154 }
1155 
1156 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1157 			 intel_eth_pci_resume);
1158 
1159 #define PCI_DEVICE_ID_INTEL_QUARK		0x0937
1160 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
1161 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
1162 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
1163 /* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of
1164  * 2 MACs, which are named PSE0 and PSE1.
1165  */
1166 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
1167 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
1168 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
1169 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
1170 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
1171 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
1172 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
1173 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
1174 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
1175 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
1176 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
1177 
1178 static const struct pci_device_id intel_eth_pci_id_table[] = {
1179 	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1180 	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1181 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1182 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1183 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1184 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1185 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1186 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1187 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1188 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1189 	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1190 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1191 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1192 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1193 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1194 	{}
1195 };
1196 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1197 
1198 static struct pci_driver intel_eth_pci_driver = {
1199 	.name = "intel-eth-pci",
1200 	.id_table = intel_eth_pci_id_table,
1201 	.probe = intel_eth_pci_probe,
1202 	.remove = intel_eth_pci_remove,
1203 	.driver         = {
1204 		.pm     = &intel_eth_pm_ops,
1205 	},
1206 };
1207 
1208 module_pci_driver(intel_eth_pci_driver);
1209 
1210 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1211 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1212 MODULE_LICENSE("GPL v2");
1213