xref: /linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c (revision 6832a9317eee280117cd695fa885b2b7a7a38daf)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Intel Corporation
3  */
4 
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include <linux/platform_data/x86/intel_pmc_ipc.h>
9 #include "dwmac-intel.h"
10 #include "dwmac4.h"
11 #include "stmmac.h"
12 #include "stmmac_ptp.h"
13 
14 struct pmc_serdes_regs {
15 	u8 index;
16 	u32 val;
17 };
18 
19 struct pmc_serdes_reg_info {
20 	const struct pmc_serdes_regs *regs;
21 	u8 num_regs;
22 };
23 
24 struct intel_priv_data {
25 	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
26 	unsigned long crossts_adj;
27 	bool is_pse;
28 	const int *tsn_lane_regs;
29 	int max_tsn_lane_regs;
30 	struct pmc_serdes_reg_info pid_1g;
31 	struct pmc_serdes_reg_info pid_2p5g;
32 };
33 
34 /* This struct is used to associate the PCI function of a MAC controller on a
35  * board, discovered via DMI, with the address of the PHY connected to that
36  * MAC. A negative address means the MAC controller is not connected to a
37  * PHY.
38  */
39 struct stmmac_pci_func_data {
40 	unsigned int func;
41 	int phy_addr;
42 };
43 
44 struct stmmac_pci_dmi_data {
45 	const struct stmmac_pci_func_data *func;
46 	size_t nfuncs;
47 };
48 
49 struct stmmac_pci_info {
50 	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
51 };
52 
53 static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
54 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
55 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
56 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
57 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
58 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
59 	{}
60 };
61 
62 static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
63 	{ PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
64 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
65 	{ PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
66 	{ PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
67 	{ PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
68 	{}
69 };
70 
71 static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
72 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_1G },
73 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_1G },
74 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_1G },
75 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_1G },
76 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
77 	{}
78 };
79 
80 static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
81 	{ PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0,	B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
82 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2,	N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
83 	{ PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7,	N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
84 	{ PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10,	N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
85 	{ PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30,	N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
86 	{}
87 };
88 
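/* FIA lane numbers that intel_tsn_lane_is_available() checks against the PCH
 * lane-ownership registers: EHL mGbE may be assigned any of lanes 7-11, while
 * ADL-N uses the single lane 6.
 */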
89 static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
90 static const int adln_tsn_lane_regs[] = {6};
91 
92 static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
93 				    const struct dmi_system_id *dmi_list)
94 {
95 	const struct stmmac_pci_func_data *func_data;
96 	const struct stmmac_pci_dmi_data *dmi_data;
97 	const struct dmi_system_id *dmi_id;
98 	int func = PCI_FUNC(pdev->devfn);
99 	size_t n;
100 
101 	dmi_id = dmi_first_match(dmi_list);
102 	if (!dmi_id)
103 		return -ENODEV;
104 
105 	dmi_data = dmi_id->driver_data;
106 	func_data = dmi_data->func;
107 
108 	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
109 		if (func_data->func == func)
110 			return func_data->phy_addr;
111 
112 	return -ENODEV;
113 }
114 
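/* Poll a SerDes status register at the ad-hoc MDIO address until
 * (reg & mask) == (val & mask), retrying up to 10 times with a POLL_DELAY_US
 * pause between reads; returns -ETIMEDOUT on failure. For example (as used
 * below), serdes_status_poll(priv, addr, SERDES_GSR0, SERDES_PLL_CLK,
 * SERDES_PLL_CLK) waits for the PLL clock ack to assert.
 */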
115 static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
116 			      int phyreg, u32 mask, u32 val)
117 {
118 	unsigned int retries = 10;
119 	int val_rd;
120 
121 	do {
122 		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
123 		if ((val_rd & mask) == (val & mask))
124 			return 0;
125 		udelay(POLL_DELAY_US);
126 	} while (--retries);
127 
128 	return -ETIMEDOUT;
129 }
130 
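/* SerDes bring-up, driven through the ad-hoc MDIO address: program the line
 * rate and PCLK for the selected PHY interface (2500BASE-X vs. SGMII), request
 * the PLL clock, assert the lane reset, then move the lane to power state P0,
 * polling GSR0 for each acknowledgement.
 */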
131 static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
132 {
133 	struct intel_priv_data *intel_priv = priv_data;
134 	struct stmmac_priv *priv = netdev_priv(ndev);
135 	int serdes_phy_addr = 0;
136 	u32 data = 0;
137 
138 	if (!intel_priv->mdio_adhoc_addr)
139 		return 0;
140 
141 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
142 
143 	/* Set the serdes rate and the PCLK rate */
144 	data = mdiobus_read(priv->mii, serdes_phy_addr,
145 			    SERDES_GCR0);
146 
147 	data &= ~SERDES_RATE_MASK;
148 	data &= ~SERDES_PCLK_MASK;
149 
150 	if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
151 		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
152 			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
153 	else
154 		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
155 			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
156 
157 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
158 
159 	/* assert clk_req */
160 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
161 	data |= SERDES_PLL_CLK;
162 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
163 
164 	/* check for clk_ack assertion */
165 	data = serdes_status_poll(priv, serdes_phy_addr,
166 				  SERDES_GSR0,
167 				  SERDES_PLL_CLK,
168 				  SERDES_PLL_CLK);
169 
170 	if (data) {
171 		dev_err(priv->device, "Serdes PLL clk request timeout\n");
172 		return data;
173 	}
174 
175 	/* assert lane reset */
176 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
177 	data |= SERDES_RST;
178 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
179 
180 	/* check for assert lane reset reflection */
181 	data = serdes_status_poll(priv, serdes_phy_addr,
182 				  SERDES_GSR0,
183 				  SERDES_RST,
184 				  SERDES_RST);
185 
186 	if (data) {
187 		dev_err(priv->device, "Serdes assert lane reset timeout\n");
188 		return data;
189 	}
190 
191 	/* move power state to P0 */
192 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
193 
194 	data &= ~SERDES_PWR_ST_MASK;
195 	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
196 
197 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
198 
199 	/* Check for P0 state */
200 	data = serdes_status_poll(priv, serdes_phy_addr,
201 				  SERDES_GSR0,
202 				  SERDES_PWR_ST_MASK,
203 				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
204 
205 	if (data) {
206 		dev_err(priv->device, "Serdes power state P0 timeout.\n");
207 		return data;
208 	}
209 
210 	/* PSE only - ungate SGMII PHY Rx Clock */
211 	if (intel_priv->is_pse)
212 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
213 			       0, SERDES_PHY_RX_CLK);
214 
215 	return 0;
216 }
217 
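/* SerDes power-down, the reverse of intel_serdes_powerup(): gate the PSE SGMII
 * PHY Rx clock if applicable, move the lane to power state P3, then de-assert
 * clk_req and the lane reset, polling GSR0 for each acknowledgement.
 */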
218 static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
219 {
220 	struct intel_priv_data *intel_priv = intel_data;
221 	struct stmmac_priv *priv = netdev_priv(ndev);
222 	int serdes_phy_addr = 0;
223 	u32 data = 0;
224 
225 	if (!intel_priv->mdio_adhoc_addr)
226 		return;
227 
228 	serdes_phy_addr = intel_priv->mdio_adhoc_addr;
229 
230 	/* PSE only - gate SGMII PHY Rx Clock */
231 	if (intel_priv->is_pse)
232 		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
233 			       SERDES_PHY_RX_CLK, 0);
234 
235 	/* move power state to P3 */
236 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
237 
238 	data &= ~SERDES_PWR_ST_MASK;
239 	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
240 
241 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
242 
243 	/* Check for P3 state */
244 	data = serdes_status_poll(priv, serdes_phy_addr,
245 				  SERDES_GSR0,
246 				  SERDES_PWR_ST_MASK,
247 				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
248 
249 	if (data) {
250 		dev_err(priv->device, "Serdes power state P3 timeout\n");
251 		return;
252 	}
253 
254 	/* de-assert clk_req */
255 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
256 	data &= ~SERDES_PLL_CLK;
257 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
258 
259 	/* check for clk_ack de-assert */
260 	data = serdes_status_poll(priv, serdes_phy_addr,
261 				  SERDES_GSR0,
262 				  SERDES_PLL_CLK,
263 				  (u32)~SERDES_PLL_CLK);
264 
265 	if (data) {
266 		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
267 		return;
268 	}
269 
270 	/* de-assert lane reset */
271 	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
272 	data &= ~SERDES_RST;
273 	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
274 
275 	/* check for de-assert lane reset reflection */
276 	data = serdes_status_poll(priv, serdes_phy_addr,
277 				  SERDES_GSR0,
278 				  SERDES_RST,
279 				  (u32)~SERDES_RST);
280 
281 	if (data) {
282 		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
283 		return;
284 	}
285 }
286 
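/* Read the SerDes link mode field to decide whether the port runs at 2.5Gbps
 * (2500BASE-X, in-band AN disabled) or 1Gbps (SGMII), and record the resulting
 * PHY interface in the platform data and the supported-interfaces bitmap.
 */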
287 static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
288 			       unsigned long *interfaces)
289 {
290 	struct intel_priv_data *intel_priv = bsp_priv;
291 	phy_interface_t interface;
292 	int data;
293 
294 	/* Determine the link speed mode: 2.5Gbps/1Gbps */
295 	data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
296 	if (data < 0)
297 		return;
298 
299 	if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
300 		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
301 		priv->plat->mdio_bus_data->default_an_inband = false;
302 		interface = PHY_INTERFACE_MODE_2500BASEX;
303 	} else {
304 		interface = PHY_INTERFACE_MODE_SGMII;
305 	}
306 
307 	__set_bit(interface, interfaces);
308 	priv->plat->phy_interface = interface;
309 }
310 
311 /* Program the PTP clock frequency for the different variants of
312  * Intel mGBE, which have slightly different GPO mappings.
313  */
314 static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
315 {
316 	struct intel_priv_data *intel_priv;
317 	u32 gpio_value;
318 
319 	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
320 
321 	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
322 
323 	if (intel_priv->is_pse) {
324 		/* For PSE GbE, use 200MHz */
325 		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
326 		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
327 	} else {
328 		/* For PCH GbE, use 200MHz */
329 		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
330 		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
331 	}
332 
333 	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
334 }
335 
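/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE* registers
 * read over MDIO, most significant part first.
 */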
336 static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
337 			u64 *art_time)
338 {
339 	u64 ns;
340 
341 	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
342 	ns <<= GMAC4_ART_TIME_SHIFT;
343 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
344 	ns <<= GMAC4_ART_TIME_SHIFT;
345 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
346 	ns <<= GMAC4_ART_TIME_SHIFT;
347 	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
348 
349 	*art_time = ns;
350 }
351 
352 static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
353 {
354 	return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
355 }
356 
357 static int intel_crosststamp(ktime_t *device,
358 			     struct system_counterval_t *system,
359 			     void *ctx)
360 {
361 	struct intel_priv_data *intel_priv;
362 
363 	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
364 	void __iomem *ptpaddr = priv->ptpaddr;
365 	void __iomem *ioaddr = priv->hw->pcsr;
366 	unsigned long flags;
367 	u64 art_time = 0;
368 	u64 ptp_time = 0;
369 	u32 num_snapshot;
370 	u32 gpio_value;
371 	u32 acr_value;
372 	int i;
373 
374 	if (!boot_cpu_has(X86_FEATURE_ART))
375 		return -EOPNOTSUPP;
376 
377 	intel_priv = priv->plat->bsp_priv;
378 
379 	/* Internal cross-timestamping and externally triggered event
380 	 * timestamping cannot run concurrently.
381 	 */
382 	if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
383 		return -EBUSY;
384 
385 	priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
386 
387 	mutex_lock(&priv->aux_ts_lock);
388 	/* Enable Internal snapshot trigger */
389 	acr_value = readl(ptpaddr + PTP_ACR);
390 	acr_value &= ~PTP_ACR_MASK;
391 	switch (priv->plat->int_snapshot_num) {
392 	case AUX_SNAPSHOT0:
393 		acr_value |= PTP_ACR_ATSEN0;
394 		break;
395 	case AUX_SNAPSHOT1:
396 		acr_value |= PTP_ACR_ATSEN1;
397 		break;
398 	case AUX_SNAPSHOT2:
399 		acr_value |= PTP_ACR_ATSEN2;
400 		break;
401 	case AUX_SNAPSHOT3:
402 		acr_value |= PTP_ACR_ATSEN3;
403 		break;
404 	default:
405 		mutex_unlock(&priv->aux_ts_lock);
406 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
407 		return -EINVAL;
408 	}
409 	writel(acr_value, ptpaddr + PTP_ACR);
410 
411 	/* Clear FIFO */
412 	acr_value = readl(ptpaddr + PTP_ACR);
413 	acr_value |= PTP_ACR_ATSFC;
414 	writel(acr_value, ptpaddr + PTP_ACR);
415 	/* Release the mutex */
416 	mutex_unlock(&priv->aux_ts_lock);
417 
418 	/* Trigger the internal snapshot signal:
419 	 * create a rising edge by toggling GPO1 low
420 	 * and then back to high.
421 	 */
422 	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
423 	gpio_value &= ~GMAC_GPO1;
424 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
425 	gpio_value |= GMAC_GPO1;
426 	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
427 
428 	/* Time sync done Indication - Interrupt method */
429 	if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
430 					      stmmac_cross_ts_isr(priv),
431 					      HZ / 100)) {
432 		priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
433 		return -ETIMEDOUT;
434 	}
435 
436 	*system = (struct system_counterval_t) {
437 		.cycles = 0,
438 		.cs_id = CSID_X86_ART,
439 		.use_nsecs = false,
440 	};
441 
442 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
443 			GMAC_TIMESTAMP_ATSNS_MASK) >>
444 			GMAC_TIMESTAMP_ATSNS_SHIFT;
445 
446 	/* Repeat until the timestamps come from the last segment of the FIFO */
447 	for (i = 0; i < num_snapshot; i++) {
448 		read_lock_irqsave(&priv->ptp_lock, flags);
449 		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
450 		*device = ns_to_ktime(ptp_time);
451 		read_unlock_irqrestore(&priv->ptp_lock, flags);
452 		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
453 		system->cycles = art_time;
454 	}
455 
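	/* crossts_adj is 1 except on the PSE instances, where it rescales the
	 * PSE ART count into the CPU ART domain (see
	 * intel_mgbe_pse_crossts_adj()).
	 */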
456 	system->cycles *= intel_priv->crossts_adj;
457 
458 	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
459 
460 	return 0;
461 }
462 
463 static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
464 				       int base)
465 {
466 	if (boot_cpu_has(X86_FEATURE_ART)) {
467 		unsigned int art_freq;
468 
469 		/* On systems that support ART, the ART frequency can be
470 		 * obtained from the ECX register of CPUID leaf 0x15.
471 		 */
472 		art_freq = cpuid_ecx(ART_CPUID_LEAF);
473 		do_div(art_freq, base);
474 		intel_priv->crossts_adj = art_freq;
475 	}
476 }
477 
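/* Query the PMC over IPC for the PCH FIA lane ownership registers and return
 * 0 if any of this controller's TSN lanes (tsn_lane_regs[]) reports the
 * ownership value the driver expects (0xB), or -EINVAL otherwise.
 */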
478 static int intel_tsn_lane_is_available(struct net_device *ndev,
479 				       struct intel_priv_data *intel_priv)
480 {
481 	struct stmmac_priv *priv = netdev_priv(ndev);
482 	struct pmc_ipc_cmd tmp = {};
483 	struct pmc_ipc_rbuf rbuf = {};
484 	int ret = 0, i, j;
485 	const int max_fia_regs = 5;
486 
487 	tmp.cmd = IPC_SOC_REGISTER_ACCESS;
488 	tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
489 
490 	for (i = 0; i < max_fia_regs; i++) {
491 		tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
492 
493 		ret = intel_pmc_ipc(&tmp, &rbuf);
494 		if (ret < 0) {
495 			netdev_info(priv->dev, "Failed to read from PMC.\n");
496 			return ret;
497 		}
498 
499 		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
500 			if ((rbuf.buf[0] >>
501 				(4 * (intel_priv->tsn_lane_regs[j] % 8)) &
502 					B_PCH_FIA_PCR_L0O) == 0xB)
503 				return 0;
504 	}
505 
506 	return -EINVAL;
507 }
508 
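/* Write each { index, val } pair to the PMC through IPC_SOC_REGISTER_ACCESS
 * write sub-commands; used to reprogram the ModPHY PLL register sets.
 */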
509 static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
510 {
511 	int ret = 0, i;
512 
513 	for (i = 0; i < max_regs; i++) {
514 		struct pmc_ipc_cmd tmp = {};
515 		struct pmc_ipc_rbuf rbuf = {};
516 
517 		tmp.cmd = IPC_SOC_REGISTER_ACCESS;
518 		tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
519 		tmp.wbuf[0] = (u32)regs[i].index;
520 		tmp.wbuf[1] = regs[i].val;
521 
522 		ret = intel_pmc_ipc(&tmp, &rbuf);
523 		if (ret < 0)
524 			return ret;
525 	}
526 
527 	return ret;
528 }
529 
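/* mac_finish callback: verify that a TSN lane is available, program the 1G or
 * 2.5G ModPHY register set through the PMC for the requested interface, then
 * cycle SerDes power so the new line rate takes effect.
 */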
530 static int intel_mac_finish(struct net_device *ndev,
531 			    void *intel_data,
532 			    unsigned int mode,
533 			    phy_interface_t interface)
534 {
535 	struct intel_priv_data *intel_priv = intel_data;
536 	struct stmmac_priv *priv = netdev_priv(ndev);
537 	const struct pmc_serdes_regs *regs;
538 	int max_regs = 0;
539 	int ret = 0;
540 
541 	ret = intel_tsn_lane_is_available(ndev, intel_priv);
542 	if (ret < 0) {
543 		netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
544 		return ret;
545 	}
546 
547 	if (interface == PHY_INTERFACE_MODE_2500BASEX) {
548 		regs = intel_priv->pid_2p5g.regs;
549 		max_regs = intel_priv->pid_2p5g.num_regs;
550 	} else {
551 		regs = intel_priv->pid_1g.regs;
552 		max_regs = intel_priv->pid_1g.num_regs;
553 	}
554 
555 	ret = intel_set_reg_access(regs, max_regs);
556 	if (ret < 0)
557 		return ret;
558 
559 	priv->plat->phy_interface = interface;
560 
561 	intel_serdes_powerdown(ndev, intel_priv);
562 	intel_serdes_powerup(ndev, intel_priv);
563 
564 	return ret;
565 }
566 
567 static void common_default_data(struct plat_stmmacenet_data *plat)
568 {
569 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
570 	plat->has_gmac = 1;
571 	plat->force_sf_dma_mode = 1;
572 
573 	plat->mdio_bus_data->needs_reset = true;
574 
575 	/* Set default value for multicast hash bins */
576 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
577 
578 	/* Set default value for unicast filter entries */
579 	plat->unicast_filter_entries = 1;
580 
581 	/* Set the maxmtu to a default of JUMBO_LEN */
582 	plat->maxmtu = JUMBO_LEN;
583 
584 	/* Set default number of RX and TX queues to use */
585 	plat->tx_queues_to_use = 1;
586 	plat->rx_queues_to_use = 1;
587 
588 	/* Disable Priority config by default */
589 	plat->tx_queues_cfg[0].use_prio = false;
590 	plat->rx_queues_cfg[0].use_prio = false;
591 
592 	/* Disable RX queues routing by default */
593 	plat->rx_queues_cfg[0].pkt_route = 0x0;
594 }
595 
596 static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
597 						 phy_interface_t interface)
598 {
599 	/* plat->mdio_bus_data->has_xpcs has been set true, so there
600 	 * should always be an XPCS. The original code would always
601 	 * return this if present.
602 	 */
603 	return xpcs_to_phylink_pcs(priv->hw->xpcs);
604 }
605 
606 static int intel_mgbe_common_data(struct pci_dev *pdev,
607 				  struct plat_stmmacenet_data *plat)
608 {
609 	struct fwnode_handle *fwnode;
610 	char clk_name[20];
611 	int ret;
612 	int i;
613 
614 	plat->pdev = pdev;
615 	plat->phy_addr = -1;
616 	plat->clk_csr = 5;
617 	plat->has_gmac = 0;
618 	plat->has_gmac4 = 1;
619 	plat->force_sf_dma_mode = 0;
620 	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
621 
622 	/* Multiplying factor for the clk_eee_i clock time
623 	 * period to bring it closer to 100 ns. Program this value
624 	 * such that clk_eee_time_period *
625 	 * (MULT_FACT_100NS + 1) falls within 80 ns to 120 ns:
626 	 * clk_eee frequency is 19.2 MHz
627 	 * clk_eee_time_period is 52 ns
628 	 * 52 ns * (1 + 1) = 104 ns
629 	 * MULT_FACT_100NS = 1
630 	 */
631 	plat->mult_fact_100ns = 1;
632 
633 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
634 
635 	for (i = 0; i < plat->rx_queues_to_use; i++) {
636 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
637 		plat->rx_queues_cfg[i].chan = i;
638 
639 		/* Disable Priority config by default */
640 		plat->rx_queues_cfg[i].use_prio = false;
641 
642 		/* Disable RX queues routing by default */
643 		plat->rx_queues_cfg[i].pkt_route = 0x0;
644 	}
645 
646 	for (i = 0; i < plat->tx_queues_to_use; i++) {
647 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
648 
649 		/* Disable Priority config by default */
650 		plat->tx_queues_cfg[i].use_prio = false;
651 		/* Default TX Q0 to use TSO and rest TXQ for TBS */
652 		if (i > 0)
653 			plat->tx_queues_cfg[i].tbs_en = 1;
654 	}
655 
656 	/* FIFO size is 4096 bytes for 1 tx/rx queue */
657 	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
658 	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
659 
660 	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
661 	plat->tx_queues_cfg[0].weight = 0x09;
662 	plat->tx_queues_cfg[1].weight = 0x0A;
663 	plat->tx_queues_cfg[2].weight = 0x0B;
664 	plat->tx_queues_cfg[3].weight = 0x0C;
665 	plat->tx_queues_cfg[4].weight = 0x0D;
666 	plat->tx_queues_cfg[5].weight = 0x0E;
667 	plat->tx_queues_cfg[6].weight = 0x0F;
668 	plat->tx_queues_cfg[7].weight = 0x10;
669 
670 	plat->dma_cfg->pbl = 32;
671 	plat->dma_cfg->pblx8 = true;
672 	plat->dma_cfg->fixed_burst = 0;
673 	plat->dma_cfg->mixed_burst = 0;
674 	plat->dma_cfg->aal = 0;
675 	plat->dma_cfg->dche = true;
676 
677 	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
678 				 GFP_KERNEL);
679 	if (!plat->axi)
680 		return -ENOMEM;
681 
682 	plat->axi->axi_lpi_en = 0;
683 	plat->axi->axi_xit_frm = 0;
684 	plat->axi->axi_wr_osr_lmt = 1;
685 	plat->axi->axi_rd_osr_lmt = 1;
686 	plat->axi->axi_blen[0] = 4;
687 	plat->axi->axi_blen[1] = 8;
688 	plat->axi->axi_blen[2] = 16;
689 
690 	plat->ptp_max_adj = plat->clk_ptp_rate;
691 
692 	/* Set system clock */
693 	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
694 
695 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
696 						   clk_name, NULL, 0,
697 						   plat->clk_ptp_rate);
698 
699 	if (IS_ERR(plat->stmmac_clk)) {
700 		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
701 		plat->stmmac_clk = NULL;
702 	}
703 
704 	ret = clk_prepare_enable(plat->stmmac_clk);
705 	if (ret) {
706 		clk_unregister_fixed_rate(plat->stmmac_clk);
707 		return ret;
708 	}
709 
710 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
711 
712 	/* Set default value for multicast hash bins */
713 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
714 
715 	/* Set default value for unicast filter entries */
716 	plat->unicast_filter_entries = 1;
717 
718 	/* Set the maxmtu to a default of JUMBO_LEN */
719 	plat->maxmtu = JUMBO_LEN;
720 
721 	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
722 
723 	/* Use the last Rx queue */
724 	plat->vlan_fail_q = plat->rx_queues_to_use - 1;
725 
726 	/* For fixed-link setup, we allow phy-mode setting */
727 	fwnode = dev_fwnode(&pdev->dev);
728 	if (fwnode) {
729 		int phy_mode;
730 
731 		/* "phy-mode" setting is optional. If it is set,
732 		 *  we allow either sgmii or 1000base-x for now.
733 		 */
734 		phy_mode = fwnode_get_phy_mode(fwnode);
735 		if (phy_mode >= 0) {
736 			if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
737 			    phy_mode == PHY_INTERFACE_MODE_1000BASEX)
738 				plat->phy_interface = phy_mode;
739 			else
740 				dev_warn(&pdev->dev, "Invalid phy-mode\n");
741 		}
742 	}
743 
744 	/* Intel mGbE SGMII interface uses pcs-xpcs */
745 	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
746 	    plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
747 		plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
748 		plat->mdio_bus_data->default_an_inband = true;
749 		plat->select_pcs = intel_mgbe_select_pcs;
750 	}
751 
752 	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
753 	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
754 	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
755 
756 	plat->int_snapshot_num = AUX_SNAPSHOT1;
757 
758 	plat->crosststamp = intel_crosststamp;
759 	plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
760 
761 	/* Setup MSI vector offset specific to Intel mGbE controller */
762 	plat->msi_mac_vec = 29;
763 	plat->msi_lpi_vec = 28;
764 	plat->msi_sfty_ce_vec = 27;
765 	plat->msi_sfty_ue_vec = 26;
766 	plat->msi_rx_base_vec = 0;
767 	plat->msi_tx_base_vec = 1;
768 
769 	return 0;
770 }
771 
772 static int ehl_common_data(struct pci_dev *pdev,
773 			   struct plat_stmmacenet_data *plat)
774 {
775 	struct intel_priv_data *intel_priv = plat->bsp_priv;
776 
777 	plat->rx_queues_to_use = 8;
778 	plat->tx_queues_to_use = 8;
779 	plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
780 	plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
781 
782 	plat->safety_feat_cfg->tsoee = 1;
783 	plat->safety_feat_cfg->mrxpee = 1;
784 	plat->safety_feat_cfg->mestee = 1;
785 	plat->safety_feat_cfg->mrxee = 1;
786 	plat->safety_feat_cfg->mtxee = 1;
787 	plat->safety_feat_cfg->epsi = 0;
788 	plat->safety_feat_cfg->edpp = 0;
789 	plat->safety_feat_cfg->prtyen = 0;
790 	plat->safety_feat_cfg->tmouten = 0;
791 
792 	intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
793 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
794 
795 	return intel_mgbe_common_data(pdev, plat);
796 }
797 
798 static int ehl_sgmii_data(struct pci_dev *pdev,
799 			  struct plat_stmmacenet_data *plat)
800 {
801 	struct intel_priv_data *intel_priv = plat->bsp_priv;
802 
803 	plat->bus_id = 1;
804 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
805 	plat->serdes_powerup = intel_serdes_powerup;
806 	plat->serdes_powerdown = intel_serdes_powerdown;
807 	plat->mac_finish = intel_mac_finish;
808 	plat->clk_ptp_rate = 204800000;
809 
810 	intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
811 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
812 	intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
813 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
814 
815 	return ehl_common_data(pdev, plat);
816 }
817 
818 static struct stmmac_pci_info ehl_sgmii1g_info = {
819 	.setup = ehl_sgmii_data,
820 };
821 
822 static int ehl_rgmii_data(struct pci_dev *pdev,
823 			  struct plat_stmmacenet_data *plat)
824 {
825 	plat->bus_id = 1;
826 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
827 
828 	plat->clk_ptp_rate = 204800000;
829 
830 	return ehl_common_data(pdev, plat);
831 }
832 
833 static struct stmmac_pci_info ehl_rgmii1g_info = {
834 	.setup = ehl_rgmii_data,
835 };
836 
837 static int ehl_pse0_common_data(struct pci_dev *pdev,
838 				struct plat_stmmacenet_data *plat)
839 {
840 	struct intel_priv_data *intel_priv = plat->bsp_priv;
841 
842 	intel_priv->is_pse = true;
843 	plat->bus_id = 2;
844 	plat->host_dma_width = 32;
845 
846 	plat->clk_ptp_rate = 200000000;
847 
848 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
849 
850 	return ehl_common_data(pdev, plat);
851 }
852 
853 static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
854 				 struct plat_stmmacenet_data *plat)
855 {
856 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
857 	return ehl_pse0_common_data(pdev, plat);
858 }
859 
860 static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
861 	.setup = ehl_pse0_rgmii1g_data,
862 };
863 
864 static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
865 				 struct plat_stmmacenet_data *plat)
866 {
867 	struct intel_priv_data *intel_priv = plat->bsp_priv;
868 
869 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
870 	plat->serdes_powerup = intel_serdes_powerup;
871 	plat->serdes_powerdown = intel_serdes_powerdown;
872 	plat->mac_finish = intel_mac_finish;
873 
874 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
875 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
876 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
877 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
878 
879 	return ehl_pse0_common_data(pdev, plat);
880 }
881 
882 static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
883 	.setup = ehl_pse0_sgmii1g_data,
884 };
885 
886 static int ehl_pse1_common_data(struct pci_dev *pdev,
887 				struct plat_stmmacenet_data *plat)
888 {
889 	struct intel_priv_data *intel_priv = plat->bsp_priv;
890 
891 	intel_priv->is_pse = true;
892 	plat->bus_id = 3;
893 	plat->host_dma_width = 32;
894 
895 	plat->clk_ptp_rate = 200000000;
896 
897 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
898 
899 	return ehl_common_data(pdev, plat);
900 }
901 
902 static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
903 				 struct plat_stmmacenet_data *plat)
904 {
905 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
906 	return ehl_pse1_common_data(pdev, plat);
907 }
908 
909 static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
910 	.setup = ehl_pse1_rgmii1g_data,
911 };
912 
913 static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
914 				 struct plat_stmmacenet_data *plat)
915 {
916 	struct intel_priv_data *intel_priv = plat->bsp_priv;
917 
918 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
919 	plat->serdes_powerup = intel_serdes_powerup;
920 	plat->serdes_powerdown = intel_serdes_powerdown;
921 	plat->mac_finish = intel_mac_finish;
922 
923 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
924 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
925 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
926 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
927 
928 	return ehl_pse1_common_data(pdev, plat);
929 }
930 
931 static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
932 	.setup = ehl_pse1_sgmii1g_data,
933 };
934 
935 static int tgl_common_data(struct pci_dev *pdev,
936 			   struct plat_stmmacenet_data *plat)
937 {
938 	plat->rx_queues_to_use = 6;
939 	plat->tx_queues_to_use = 4;
940 	plat->clk_ptp_rate = 204800000;
941 	plat->get_interfaces = tgl_get_interfaces;
942 
943 	plat->safety_feat_cfg->tsoee = 1;
944 	plat->safety_feat_cfg->mrxpee = 0;
945 	plat->safety_feat_cfg->mestee = 1;
946 	plat->safety_feat_cfg->mrxee = 1;
947 	plat->safety_feat_cfg->mtxee = 1;
948 	plat->safety_feat_cfg->epsi = 0;
949 	plat->safety_feat_cfg->edpp = 0;
950 	plat->safety_feat_cfg->prtyen = 0;
951 	plat->safety_feat_cfg->tmouten = 0;
952 
953 	return intel_mgbe_common_data(pdev, plat);
954 }
955 
956 static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
957 			       struct plat_stmmacenet_data *plat)
958 {
959 	plat->bus_id = 1;
960 	plat->serdes_powerup = intel_serdes_powerup;
961 	plat->serdes_powerdown = intel_serdes_powerdown;
962 	return tgl_common_data(pdev, plat);
963 }
964 
965 static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
966 	.setup = tgl_sgmii_phy0_data,
967 };
968 
969 static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
970 			       struct plat_stmmacenet_data *plat)
971 {
972 	plat->bus_id = 2;
973 	plat->serdes_powerup = intel_serdes_powerup;
974 	plat->serdes_powerdown = intel_serdes_powerdown;
975 	return tgl_common_data(pdev, plat);
976 }
977 
978 static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
979 	.setup = tgl_sgmii_phy1_data,
980 };
981 
982 static int adls_sgmii_phy0_data(struct pci_dev *pdev,
983 				struct plat_stmmacenet_data *plat)
984 {
985 	plat->bus_id = 1;
986 
987 	/* SerDes power up and power down are done in BIOS for ADL */
988 
989 	return tgl_common_data(pdev, plat);
990 }
991 
992 static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
993 	.setup = adls_sgmii_phy0_data,
994 };
995 
996 static int adls_sgmii_phy1_data(struct pci_dev *pdev,
997 				struct plat_stmmacenet_data *plat)
998 {
999 	plat->bus_id = 2;
1000 
1001 	/* SerDes power up and power down are done in BIOS for ADL */
1002 
1003 	return tgl_common_data(pdev, plat);
1004 }
1005 
1006 static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
1007 	.setup = adls_sgmii_phy1_data,
1008 };
1009 
1010 static int adln_common_data(struct pci_dev *pdev,
1011 			    struct plat_stmmacenet_data *plat)
1012 {
1013 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1014 
1015 	plat->rx_queues_to_use = 6;
1016 	plat->tx_queues_to_use = 4;
1017 	plat->clk_ptp_rate = 204800000;
1018 
1019 	plat->safety_feat_cfg->tsoee = 1;
1020 	plat->safety_feat_cfg->mrxpee = 0;
1021 	plat->safety_feat_cfg->mestee = 1;
1022 	plat->safety_feat_cfg->mrxee = 1;
1023 	plat->safety_feat_cfg->mtxee = 1;
1024 	plat->safety_feat_cfg->epsi = 0;
1025 	plat->safety_feat_cfg->edpp = 0;
1026 	plat->safety_feat_cfg->prtyen = 0;
1027 	plat->safety_feat_cfg->tmouten = 0;
1028 
1029 	intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
1030 	intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
1031 
1032 	return intel_mgbe_common_data(pdev, plat);
1033 }
1034 
1035 static int adln_sgmii_phy0_data(struct pci_dev *pdev,
1036 				struct plat_stmmacenet_data *plat)
1037 {
1038 	struct intel_priv_data *intel_priv = plat->bsp_priv;
1039 
1040 	plat->bus_id = 1;
1041 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
1042 	plat->serdes_powerup = intel_serdes_powerup;
1043 	plat->serdes_powerdown = intel_serdes_powerdown;
1044 	plat->mac_finish = intel_mac_finish;
1045 
1046 	intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
1047 	intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
1048 	intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
1049 	intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
1050 
1051 	return adln_common_data(pdev, plat);
1052 }
1053 
1054 static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
1055 	.setup = adln_sgmii_phy0_data,
1056 };
1057 
1058 static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
1059 	{
1060 		.func = 6,
1061 		.phy_addr = 1,
1062 	},
1063 };
1064 
1065 static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
1066 	.func = galileo_stmmac_func_data,
1067 	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
1068 };
1069 
1070 static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
1071 	{
1072 		.func = 6,
1073 		.phy_addr = 1,
1074 	},
1075 	{
1076 		.func = 7,
1077 		.phy_addr = 1,
1078 	},
1079 };
1080 
1081 static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
1082 	.func = iot2040_stmmac_func_data,
1083 	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
1084 };
1085 
1086 static const struct dmi_system_id quark_pci_dmi[] = {
1087 	{
1088 		.matches = {
1089 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
1090 		},
1091 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1092 	},
1093 	{
1094 		.matches = {
1095 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
1096 		},
1097 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1098 	},
1099 	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
1100 	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
1101 	 * has only one PCI network device, while the other asset tags are
1102 	 * for the IOT2040, which has two.
1103 	 */
1104 	{
1105 		.matches = {
1106 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1107 			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
1108 					"6ES7647-0AA00-0YA2"),
1109 		},
1110 		.driver_data = (void *)&galileo_stmmac_dmi_data,
1111 	},
1112 	{
1113 		.matches = {
1114 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1115 		},
1116 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
1117 	},
1118 	{}
1119 };
1120 
1121 static int quark_default_data(struct pci_dev *pdev,
1122 			      struct plat_stmmacenet_data *plat)
1123 {
1124 	int ret;
1125 
1126 	/* Set common default data first */
1127 	common_default_data(plat);
1128 
1129 	/* Refuse to load the driver and register the net device if the MAC
1130 	 * controller is not connected to any PHY interface.
1131 	 */
1132 	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
1133 	if (ret < 0) {
1134 		/* Return error to the caller on DMI enabled boards. */
1135 		if (dmi_get_system_info(DMI_BOARD_NAME))
1136 			return ret;
1137 
1138 		/* Galileo boards with old firmware don't support DMI. We always
1139 		 * use 1 here as the PHY address, so at least the first MAC
1140 		 * controller found gets probed.
1141 		 */
1142 		ret = 1;
1143 	}
1144 
1145 	plat->bus_id = pci_dev_id(pdev);
1146 	plat->phy_addr = ret;
1147 	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
1148 
1149 	plat->dma_cfg->pbl = 16;
1150 	plat->dma_cfg->pblx8 = true;
1151 	plat->dma_cfg->fixed_burst = 1;
1152 	/* AXI (TODO) */
1153 
1154 	return 0;
1155 }
1156 
1157 static const struct stmmac_pci_info quark_info = {
1158 	.setup = quark_default_data,
1159 };
1160 
1161 static int stmmac_config_single_msi(struct pci_dev *pdev,
1162 				    struct plat_stmmacenet_data *plat,
1163 				    struct stmmac_resources *res)
1164 {
1165 	int ret;
1166 
1167 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1168 	if (ret < 0) {
1169 		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
1170 			 __func__);
1171 		return ret;
1172 	}
1173 
1174 	res->irq = pci_irq_vector(pdev, 0);
1175 	res->wol_irq = res->irq;
1176 	plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
1177 	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
1178 		 __func__);
1179 
1180 	return 0;
1181 }
1182 
1183 static int stmmac_config_multi_msi(struct pci_dev *pdev,
1184 				   struct plat_stmmacenet_data *plat,
1185 				   struct stmmac_resources *res)
1186 {
1187 	int ret;
1188 	int i;
1189 
1190 	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
1191 	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
1192 		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
1193 			 __func__);
1194 		return -1;
1195 	}
1196 
1197 	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
1198 				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
1199 	if (ret < 0) {
1200 		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
1201 			 __func__);
1202 		return ret;
1203 	}
1204 
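	/* RX and TX queue vectors are interleaved: RX queue i uses vector
	 * msi_rx_base_vec + 2 * i, TX queue i uses msi_tx_base_vec + 2 * i.
	 */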
1205 	/* For RX MSI */
1206 	for (i = 0; i < plat->rx_queues_to_use; i++) {
1207 		res->rx_irq[i] = pci_irq_vector(pdev,
1208 						plat->msi_rx_base_vec + i * 2);
1209 	}
1210 
1211 	/* For TX MSI */
1212 	for (i = 0; i < plat->tx_queues_to_use; i++) {
1213 		res->tx_irq[i] = pci_irq_vector(pdev,
1214 						plat->msi_tx_base_vec + i * 2);
1215 	}
1216 
1217 	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
1218 		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
1219 	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
1220 		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
1221 	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
1222 		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
1223 	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
1224 		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
1225 	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
1226 		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
1227 
1228 	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
1229 	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
1230 
1231 	return 0;
1232 }
1233 
1234 /**
1235  * intel_eth_pci_probe
1236  *
1237  * @pdev: pci device pointer
1238  * @id: pointer to table of device id/id's.
1239  *
1240  * Description: This probing function gets called for all PCI devices that
1241  * match the ID table and are not "owned" by another driver yet. It is
1242  * passed a "struct pci_dev *" for each device whose entry in the ID table
1243  * matches the device. The probe function returns zero when the driver chooses
1244  * to take "ownership" of the device, or a negative error code otherwise.
1245  */
1246 static int intel_eth_pci_probe(struct pci_dev *pdev,
1247 			       const struct pci_device_id *id)
1248 {
1249 	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
1250 	struct intel_priv_data *intel_priv;
1251 	struct plat_stmmacenet_data *plat;
1252 	struct stmmac_resources res;
1253 	int ret;
1254 
1255 	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
1256 	if (!intel_priv)
1257 		return -ENOMEM;
1258 
1259 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
1260 	if (!plat)
1261 		return -ENOMEM;
1262 
1263 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
1264 					   sizeof(*plat->mdio_bus_data),
1265 					   GFP_KERNEL);
1266 	if (!plat->mdio_bus_data)
1267 		return -ENOMEM;
1268 
1269 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
1270 				     GFP_KERNEL);
1271 	if (!plat->dma_cfg)
1272 		return -ENOMEM;
1273 
1274 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
1275 					     sizeof(*plat->safety_feat_cfg),
1276 					     GFP_KERNEL);
1277 	if (!plat->safety_feat_cfg)
1278 		return -ENOMEM;
1279 
1280 	/* Enable pci device */
1281 	ret = pcim_enable_device(pdev);
1282 	if (ret) {
1283 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
1284 			__func__);
1285 		return ret;
1286 	}
1287 
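	/* Map BAR 0 only; res.addr below points at this mapping. */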
1288 	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
1289 	if (ret)
1290 		return ret;
1291 
1292 	pci_set_master(pdev);
1293 
1294 	plat->bsp_priv = intel_priv;
1295 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
1296 	intel_priv->crossts_adj = 1;
1297 
1298 	/* Initialize all MSI vectors to invalid so that they can be set
1299 	 * according to the platform data settings below.
1300 	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX).
1301 	 */
1302 	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
1303 	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
1304 	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
1305 	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
1306 	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
1307 	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
1308 	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
1309 
1310 	ret = info->setup(pdev, plat);
1311 	if (ret)
1312 		return ret;
1313 
1314 	memset(&res, 0, sizeof(res));
1315 	res.addr = pcim_iomap_table(pdev)[0];
1316 
1317 	ret = stmmac_config_multi_msi(pdev, plat, &res);
1318 	if (ret) {
1319 		ret = stmmac_config_single_msi(pdev, plat, &res);
1320 		if (ret) {
1321 			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
1322 				__func__);
1323 			goto err_alloc_irq;
1324 		}
1325 	}
1326 
1327 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
1328 	if (ret) {
1329 		goto err_alloc_irq;
1330 	}
1331 
1332 	return 0;
1333 
1334 err_alloc_irq:
1335 	clk_disable_unprepare(plat->stmmac_clk);
1336 	clk_unregister_fixed_rate(plat->stmmac_clk);
1337 	return ret;
1338 }
1339 
1340 /**
1341  * intel_eth_pci_remove
1342  *
1343  * @pdev: pci device pointer
1344  * Description: this function calls stmmac_dvr_remove() to free the net
1345  * resources and then disables and unregisters the fixed-rate stmmac clock.
1346  */
1347 static void intel_eth_pci_remove(struct pci_dev *pdev)
1348 {
1349 	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
1350 	struct stmmac_priv *priv = netdev_priv(ndev);
1351 
1352 	stmmac_dvr_remove(&pdev->dev);
1353 
1354 	clk_disable_unprepare(priv->plat->stmmac_clk);
1355 	clk_unregister_fixed_rate(priv->plat->stmmac_clk);
1356 }
1357 
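/* Let the stmmac core quiesce the device, then save the PCI config state,
 * arm wake from D3 and put the device into D3hot.
 */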
1358 static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1359 {
1360 	struct pci_dev *pdev = to_pci_dev(dev);
1361 	int ret;
1362 
1363 	ret = stmmac_suspend(dev);
1364 	if (ret)
1365 		return ret;
1366 
1367 	ret = pci_save_state(pdev);
1368 	if (ret)
1369 		return ret;
1370 
1371 	pci_wake_from_d3(pdev, true);
1372 	pci_set_power_state(pdev, PCI_D3hot);
1373 	return 0;
1374 }
1375 
1376 static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1377 {
1378 	struct pci_dev *pdev = to_pci_dev(dev);
1379 	int ret;
1380 
1381 	pci_restore_state(pdev);
1382 	pci_set_power_state(pdev, PCI_D0);
1383 
1384 	ret = pcim_enable_device(pdev);
1385 	if (ret)
1386 		return ret;
1387 
1388 	pci_set_master(pdev);
1389 
1390 	return stmmac_resume(dev);
1391 }
1392 
1393 static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
1394 			 intel_eth_pci_resume);
1395 
1396 #define PCI_DEVICE_ID_INTEL_QUARK		0x0937
1397 #define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
1398 #define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
1399 #define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
1400 /* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs,
1401  * named PSE0 and PSE1.
1402  */
1403 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
1404 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
1405 #define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
1406 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
1407 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
1408 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
1409 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
1410 #define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
1411 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
1412 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
1413 #define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad
1414 #define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G	0x54ac
1415 #define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G	0x51ac
1416 
1417 static const struct pci_device_id intel_eth_pci_id_table[] = {
1418 	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
1419 	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
1420 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
1421 	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
1422 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
1423 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
1424 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
1425 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
1426 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
1427 	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
1428 	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
1429 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
1430 	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
1431 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
1432 	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
1433 	{ PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
1434 	{ PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
1435 	{}
1436 };
1437 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
1438 
1439 static struct pci_driver intel_eth_pci_driver = {
1440 	.name = "intel-eth-pci",
1441 	.id_table = intel_eth_pci_id_table,
1442 	.probe = intel_eth_pci_probe,
1443 	.remove = intel_eth_pci_remove,
1444 	.driver         = {
1445 		.pm     = &intel_eth_pm_ops,
1446 	},
1447 };
1448 
1449 module_pci_driver(intel_eth_pci_driver);
1450 
1451 MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1452 MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1453 MODULE_LICENSE("GPL v2");
1454