// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2024 NXP */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/unaligned.h>

#include "enetc_pf_common.h"

#define ENETC_SI_MAX_RING_NUM	8

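/* Read the port capability registers to discover how many virtual SIs (VSIs),
 * MSI-X vectors and Rx/Tx BD rings the port provides, and whether the MAC
 * supports half-duplex operation.
 */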
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_ECAPR1);
	pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
	pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;

	val = enetc_port_rd(hw, ENETC4_ECAPR2);
	pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
	pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;

	val = enetc_port_rd(hw, ENETC4_PMCAPR);
	pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
}

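/* The primary MAC address of an SI is split across two registers: the high
 * 32 bits and the low 16 bits. SI 0 (the PF itself) uses the port PMAR0/1
 * registers, while the VSIs use their per-SI PSIPMAR0/1 registers.
 */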
static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
					 const u8 *addr)
{
	u16 lower = get_unaligned_le16(addr + 4);
	u32 upper = get_unaligned_le32(addr);

	if (si != 0) {
		__raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
		__raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
	} else {
		__raw_writel(upper, hw->port + ENETC4_PMAR0);
		__raw_writew(lower, hw->port + ENETC4_PMAR1);
	}
}

static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
					 u8 *addr)
{
	u32 upper;
	u16 lower;

	upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
	lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));

	put_unaligned_le32(upper, addr);
	put_unaligned_le16(lower, addr + 4);
}

static const struct enetc_pf_ops enetc4_pf_ops = {
	.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
	.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
};

static int enetc4_pf_struct_init(struct enetc_si *si)
{
	struct enetc_pf *pf = enetc_si_priv(si);

	pf->si = si;
	pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
	pf->ops = &enetc4_pf_ops;

	enetc4_get_port_caps(pf);

	return 0;
}

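/* Build the PSICFGR0 value for one SI: the number of Tx/Rx BD rings assigned
 * to it and the accepted VLAN types; SIs that back a VF additionally get the
 * VTE and SIVIE bits set.
 */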
static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
{
	u32 val;

	val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr);
	val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr);
	val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);

	if (is_vf)
		val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;

	return val;
}

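/* Give the PF SI up to ENETC_SI_MAX_RING_NUM rings (leaving at least one per
 * VSI) and split the remaining rings evenly across the VSIs, handing the
 * remainder out one ring at a time to the lowest-numbered VSIs.
 *
 * For example (hypothetical capability values): with 24 Rx BD rings and
 * 3 VSIs, the PF gets 8 rings and the 16 left over are split 6/5/5.
 */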
static void enetc4_default_rings_allocation(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	u32 num_rx_bdr, num_tx_bdr, val;
	u32 vf_tx_bdr, vf_rx_bdr;
	int i, rx_rem, tx_rem;

	if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
		num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
	else
		num_rx_bdr = ENETC_SI_MAX_RING_NUM;

	if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
		num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
	else
		num_tx_bdr = ENETC_SI_MAX_RING_NUM;

	val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
	enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);

	num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
	rx_rem = num_rx_bdr % pf->caps.num_vsi;
	num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;

	num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
	tx_rem = num_tx_bdr % pf->caps.num_vsi;
	num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;

	for (i = 0; i < pf->caps.num_vsi; i++) {
		vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
		vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
		val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
		enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
	}
}

static void enetc4_allocate_si_rings(struct enetc_pf *pf)
{
	enetc4_default_rings_allocation(pf);
}

static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
{
	u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);

	if (en)
		val |= BIT(si);
	else
		val &= ~BIT(si);

	enetc_port_wr(hw, ENETC4_PSIPVMR, val);
}

static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	int num_si = pf->caps.num_vsi + 1;
	int i;

	/* enforce VLAN promiscuous mode for all SIs */
	for (i = 0; i < num_si; i++)
		enetc4_pf_set_si_vlan_promisc(hw, i, true);
}

/* Assign the number of MSI-X vectors to each SI. */
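/* Each SI gets an equal share of pf->caps.num_msix, with any remainder going
 * to the PF SI; the value written to PSICFGR2 is the share minus one. For
 * example (hypothetical numbers), with 32 vectors and one VSI the PF SI is
 * programmed with 32 / 2 + 32 % 2 - 1 = 15 and the VSI with 32 / 2 - 1 = 15.
 */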
static void enetc4_set_si_msix_num(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	int i, num_msix, total_si;
	u32 val;

	total_si = pf->caps.num_vsi + 1;

	num_msix = pf->caps.num_msix / total_si +
		   pf->caps.num_msix % total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);

	num_msix = pf->caps.num_msix / total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	for (i = 0; i < pf->caps.num_vsi; i++)
		enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
}

static void enetc4_enable_all_si(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	int num_si = pf->caps.num_vsi + 1;
	u32 si_bitmap = 0;
	int i;

	/* Master enable for all SIs */
	for (i = 0; i < num_si; i++)
		si_bitmap |= PMR_SI_EN(i);

	enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
}

static void enetc4_configure_port_si(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	enetc4_allocate_si_rings(pf);

	/* Outer VLAN tag will be used for VLAN filtering */
	enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);

	enetc4_set_default_si_vlan_promisc(pf);

	/* Disable SI MAC multicast & unicast promiscuous */
	enetc_port_wr(hw, ENETC4_PSIPMMR, 0);

	enetc4_set_si_msix_num(pf);

	enetc4_enable_all_si(pf);
}

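/* Reset the maximum SDU of every traffic class to the default MAC frame
 * size, expressed as an MPDU length.
 */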
static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
{
	u32 val = ENETC_MAC_MAXFRM_SIZE;
	int tc;

	val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);

	for (tc = 0; tc < ENETC_NUM_TC; tc++)
		enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
}

static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
{
	struct enetc_si *si = pf->si;

	enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
			  ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));

	enetc4_pf_reset_tc_msdu(&si->hw);
}

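/* Program the RSS hash key into the port registers, one 32-bit word at a
 * time.
 */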
static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
{
	int i;

	for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
		enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]);
}

static void enetc4_set_default_rss_key(struct enetc_pf *pf)
{
	u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
	struct enetc_hw *hw = &pf->si->hw;

	/* set up hash key */
	get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
	enetc4_set_rss_key(hw, hash_key);
}

static void enetc4_enable_trx(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	/* Enable port transmit/receive */
	enetc_port_wr(hw, ENETC4_POR, 0);
}

static void enetc4_configure_port(struct enetc_pf *pf)
{
	enetc4_configure_port_si(pf);
	enetc4_set_trx_frame_size(pf);
	enetc4_set_default_rss_key(pf);
	enetc4_enable_trx(pf);
}

static int enetc4_pf_init(struct enetc_pf *pf)
{
	struct device *dev = &pf->si->pdev->dev;
	int err;

	/* Initialize the MAC address for PF and VFs */
	err = enetc_setup_mac_addresses(dev->of_node, pf);
	if (err) {
		dev_err(dev, "Failed to set MAC addresses\n");
		return err;
	}

	enetc4_configure_port(pf);

	return 0;
}

static const struct net_device_ops enetc4_ndev_ops = {
	.ndo_open		= enetc_open,
	.ndo_stop		= enetc_close,
	.ndo_start_xmit		= enetc_xmit,
	.ndo_get_stats		= enetc_get_stats,
	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
};

static struct phylink_pcs *
enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	return pf->pcs;
}

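/* Select the MAC interface mode (RGMII, RMII, SGMII/2500BASE-X or XGMII
 * variants) in PM_IF_MODE according to the PHY interface in use.
 */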
static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
			      phy_interface_t phy_mode)
{
	struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		val |= IFMODE_RGMII;
		/* We need to enable auto-negotiation for the MAC
		 * if its RGMII interface supports in-band status.
		 */
		if (phylink_autoneg_inband(mode))
			val |= PM_IF_MODE_ENA;
		break;
	case PHY_INTERFACE_MODE_RMII:
		val |= IFMODE_RMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		val |= IFMODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XGMII:
	case PHY_INTERFACE_MODE_USXGMII:
		val |= IFMODE_XGMII;
		break;
	default:
		dev_err(priv->dev,
			"Unsupported PHY mode:%d\n", phy_mode);
		return;
	}

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}

static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
				 const struct phylink_link_state *state)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_mac_config(pf, mode, state->interface);
}

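/* Update the port speed field in PCR when the link speed changes; speeds not
 * recognized by the switch statement fall back to 10 Mbps.
 */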
static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
{
	u32 old_speed = priv->speed;
	u32 val;

	if (speed == old_speed)
		return;

	val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
	val &= ~PCR_PSPEED;

	switch (speed) {
	case SPEED_100:
	case SPEED_1000:
	case SPEED_2500:
	case SPEED_10000:
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
		break;
	case SPEED_10:
	default:
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
}

static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);

	switch (speed) {
	case SPEED_1000:
		val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
		break;
	case SPEED_100:
		val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
		break;
	case SPEED_10:
		val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}

static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);

	switch (speed) {
	case SPEED_100:
		val &= ~PM_IF_MODE_M10;
		break;
	case SPEED_10:
		val |= PM_IF_MODE_M10;
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}

static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	if (!pf->caps.half_duplex)
		return;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}

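/* PM_CMD_CFG_PAUSE_IGN tells the MAC to ignore received PAUSE frames, so it
 * is programmed with the inverse of rx_pause.
 */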
static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}

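/* Propagate the Tx PAUSE setting to every Rx BD ring (via the RBMR CM bit)
 * and program the PAUSE quanta and FIFO on/off thresholds; when tx_pause is
 * false, everything is cleared back to zero.
 */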
static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
{
	u32 pause_off_thresh = 0, pause_on_thresh = 0;
	u32 init_quanta = 0, refresh_quanta = 0;
	struct enetc_hw *hw = &pf->si->hw;
	u32 rbmr, old_rbmr;
	int i;

	for (i = 0; i < num_rxbdr; i++) {
		old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
		rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
		if (rbmr == old_rbmr)
			continue;

		enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
	}

	if (tx_pause) {
		/* When the port first enters congestion, send a PAUSE request
		 * with the maximum number of quanta. When the port exits
		 * congestion, it will automatically send a PAUSE frame with
		 * zero quanta.
		 */
		init_quanta = 0xffff;

		/* Also, set up the refresh timer to send follow-up PAUSE
		 * frames at half the quanta value, in case the congestion
		 * condition persists.
		 */
		refresh_quanta = 0xffff / 2;

		/* Start emitting PAUSE frames when 3 large frames (or more
		 * smaller frames) have accumulated in the FIFO waiting to be
		 * DMAed to the RX ring.
		 */
		pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
		pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
	}

	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
	enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
	enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
}

static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
{
	struct enetc_si *si = pf->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);
	val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}

static void enetc4_pl_mac_link_up(struct phylink_config *config,
				  struct phy_device *phy, unsigned int mode,
				  phy_interface_t interface, int speed,
				  int duplex, bool tx_pause, bool rx_pause)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;
	bool hd_fc = false;

	priv = netdev_priv(si->ndev);
	enetc4_set_port_speed(priv, speed);

	if (!phylink_autoneg_inband(mode) &&
	    phy_interface_mode_is_rgmii(interface))
		enetc4_set_rgmii_mac(pf, speed, duplex);

	if (interface == PHY_INTERFACE_MODE_RMII)
		enetc4_set_rmii_mac(pf, speed, duplex);

	if (duplex == DUPLEX_FULL) {
		/* When preemption is enabled, generation of PAUSE frames
		 * must be disabled, as stated in the IEEE 802.3 standard.
		 */
		if (priv->active_offloads & ENETC_F_QBU)
			tx_pause = false;
	} else { /* DUPLEX_HALF */
		if (tx_pause || rx_pause)
			hd_fc = true;

		/* As per 802.3 annex 31B, PAUSE frames are only supported
		 * when the link is configured for full duplex operation.
		 */
		tx_pause = false;
		rx_pause = false;
	}

	enetc4_set_hd_flow_control(pf, hd_fc);
	enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
	enetc4_set_rx_pause(pf, rx_pause);
	enetc4_enable_mac(pf, true);
}

static void enetc4_pl_mac_link_down(struct phylink_config *config,
				    unsigned int mode,
				    phy_interface_t interface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_enable_mac(pf, false);
}

static const struct phylink_mac_ops enetc_pl_mac_ops = {
	.mac_select_pcs = enetc4_pl_mac_select_pcs,
	.mac_config = enetc4_pl_mac_config,
	.mac_link_up = enetc4_pl_mac_link_up,
	.mac_link_down = enetc4_pl_mac_link_down,
};

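/* devm action callback: undoes enetc_pci_probe() when the device is released
 * or a later probe step fails.
 */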
static void enetc4_pci_remove(void *data)
{
	struct pci_dev *pdev = data;

	enetc_pci_remove(pdev);
}

static int enetc4_link_init(struct enetc_ndev_priv *priv,
			    struct device_node *node)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct device *dev = priv->dev;
	int err;

	err = of_get_phy_mode(node, &pf->if_mode);
	if (err) {
		dev_err(dev, "Failed to get PHY mode\n");
		return err;
	}

	err = enetc_mdiobus_create(pf, node);
	if (err) {
		dev_err(dev, "Failed to create MDIO bus\n");
		return err;
	}

	err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
	if (err) {
		dev_err(dev, "Failed to create phylink\n");
		goto err_phylink_create;
	}

	return 0;

err_phylink_create:
	enetc_mdiobus_destroy(pf);

	return err;
}

static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);

	enetc_phylink_destroy(priv);
	enetc_mdiobus_destroy(pf);
}

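/* Allocate the PF net_device, set up its rings and MSI-X vectors, bring up
 * the MDIO bus and phylink, and register it; each step is unwound in reverse
 * order on failure.
 */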
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
	struct device *dev = &si->pdev->dev;
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
				  si->num_tx_rings, si->num_rx_rings);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(priv->ref_clk)) {
		dev_err(dev, "Get reference clock failed\n");
		err = PTR_ERR(priv->ref_clk);
		goto err_clk_get;
	}

	enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);

	enetc_init_si_rings_params(priv);

	err = enetc_configure_si(priv);
	if (err) {
		dev_err(dev, "Failed to configure SI\n");
		goto err_config_si;
	}

	err = enetc_alloc_msix(priv);
	if (err) {
		dev_err(dev, "Failed to alloc MSI-X\n");
		goto err_alloc_msix;
	}

	err = enetc4_link_init(priv, dev->of_node);
	if (err)
		goto err_link_init;

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
	enetc4_link_deinit(priv);
err_link_init:
	enetc_free_msix(priv);
err_alloc_msix:
err_config_si:
err_clk_get:
	mutex_destroy(&priv->mm_lock);
	free_netdev(ndev);

	return err;
}

static void enetc4_pf_netdev_destroy(struct enetc_si *si)
{
	struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
	struct net_device *ndev = si->ndev;

	unregister_netdev(ndev);
	enetc_free_msix(priv);
	free_netdev(ndev);
}

static int enetc4_pf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct enetc_si *si;
	struct enetc_pf *pf;
	int err;

	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
	if (err)
		return dev_err_probe(dev, err, "PCIe probing failed\n");

	err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
	if (err)
		return dev_err_probe(dev, err,
				     "Add enetc4_pci_remove() action failed\n");

	/* si is the private data. */
	si = pci_get_drvdata(pdev);
	if (!si->hw.port || !si->hw.global)
		return dev_err_probe(dev, -ENODEV,
				     "Couldn't map PF only space\n");

	si->revision = enetc_get_ip_revision(&si->hw);
	err = enetc_get_driver_data(si);
	if (err)
		return dev_err_probe(dev, err,
				     "Could not get PF driver data\n");
	err = enetc4_pf_struct_init(si);
	if (err)
		return err;

	pf = enetc_si_priv(si);
	err = enetc4_pf_init(pf);
	if (err)
		return err;

	enetc_get_si_caps(si);

	return enetc4_pf_netdev_create(si);
}

static void enetc4_pf_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);

	enetc4_pf_netdev_destroy(si);
}

static const struct pci_device_id enetc4_pf_id_table[] = {
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
	{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);

static struct pci_driver enetc4_pf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = enetc4_pf_id_table,
	.probe = enetc4_pf_probe,
	.remove = enetc4_pf_remove,
};
module_pci_driver(enetc4_pf_driver);

MODULE_DESCRIPTION("ENETC4 PF Driver");
MODULE_LICENSE("Dual BSD/GPL");