xref: /linux/drivers/net/ethernet/freescale/enetc/enetc4_pf.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2024 NXP */
3 
4 #include <linux/clk.h>
5 #include <linux/module.h>
6 #include <linux/of_net.h>
7 #include <linux/of_platform.h>
8 #include <linux/unaligned.h>
9 
10 #include "enetc_pf_common.h"
11 #include "enetc4_debugfs.h"
12 
13 #define ENETC_SI_MAX_RING_NUM	8
14 
15 #define ENETC_MAC_FILTER_TYPE_UC	BIT(0)
16 #define ENETC_MAC_FILTER_TYPE_MC	BIT(1)
17 #define ENETC_MAC_FILTER_TYPE_ALL	(ENETC_MAC_FILTER_TYPE_UC | \
18 					 ENETC_MAC_FILTER_TYPE_MC)
19 
20 struct enetc_mac_addr {
21 	u8 addr[ETH_ALEN];
22 };
23 
/* Read the port capability registers once at init time and cache the
 * results (number of VSIs, MSI-X vectors, Rx/Tx BD rings, half-duplex
 * support and MAC filter table size) in pf->caps.
 */
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC4_ECAPR1);
	pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
	/* the +1 suggests the register field encodes "count - 1" —
	 * NOTE(review): confirm against the ECAPR1 register layout
	 */
	pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;

	val = enetc_port_rd(hw, ENETC4_ECAPR2);
	pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
	pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;

	val = enetc_port_rd(hw, ENETC4_PMCAPR);
	pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;

	/* number of MAC address filter table (MAFT) entries available */
	val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
	pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
43 
enetc4_get_psi_hw_features(struct enetc_si * si)44 static void enetc4_get_psi_hw_features(struct enetc_si *si)
45 {
46 	struct enetc_hw *hw = &si->hw;
47 	u32 val;
48 
49 	val = enetc_port_rd(hw, ENETC4_PCAPR);
50 	if (val & PCAPR_LINK_TYPE)
51 		si->hw_features |= ENETC_SI_F_PPM;
52 }
53 
/* Program the primary MAC address of a station interface. SI 0 (the PF
 * itself) is written through the port MAC address registers (PMAR0/1),
 * while the VSIs use their per-SI PSIPMAR0/1 registers.
 */
static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
					 const u8 *addr)
{
	/* 6-byte address split: first 4 bytes into the "upper" register,
	 * last 2 bytes into the "lower" one, both little-endian
	 */
	u16 lower = get_unaligned_le16(addr + 4);
	u32 upper = get_unaligned_le32(addr);

	if (si != 0) {
		__raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
		__raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si));
	} else {
		__raw_writel(upper, hw->port + ENETC4_PMAR0);
		__raw_writel(lower, hw->port + ENETC4_PMAR1);
	}
}
68 
/* Read back the primary MAC address of a station interface from the
 * per-SI PSIPMAR0/1 registers into @addr (6 bytes, unaligned-safe).
 */
static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
					 u8 *addr)
{
	u32 upper;
	u16 lower;

	upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
	lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si));

	/* inverse of enetc4_pf_set_si_primary_mac(): 4 + 2 bytes, LE */
	put_unaligned_le32(upper, addr);
	put_unaligned_le16(lower, addr + 4);
}
81 
/* Update the per-SI unicast/multicast MAC promiscuous bits in the
 * PSIPMMR register, leaving the bits of the other SIs untouched.
 */
static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
					 bool uc_promisc, bool mc_promisc)
{
	u32 reg = enetc_port_rd(hw, ENETC4_PSIPMMR);

	/* clear both bits for this SI, then set the requested ones */
	reg &= ~(PSIPMMR_SI_MAC_UP(si) | PSIPMMR_SI_MAC_MP(si));
	if (uc_promisc)
		reg |= PSIPMMR_SI_MAC_UP(si);
	if (mc_promisc)
		reg |= PSIPMMR_SI_MAC_MP(si);

	enetc_port_wr(hw, ENETC4_PSIPMMR, reg);
}
99 
/* Program the 64-bit unicast MAC hash filter of a SI (0 disables it). */
static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
}
106 
/* Program the 64-bit multicast MAC hash filter of a SI (0 disables it). */
static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
					    u64 hash)
{
	enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
	enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
}
113 
/* Enable or disable MAC-level loopback for the port, used to service
 * the NETIF_F_LOOPBACK netdev feature.
 */
static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;
	u32 val;

	val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
	/* Default to select MAC level loopback mode if loopback is enabled. */
	val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
			       PM_CMD_CFG_LPBK_MODE);

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
128 
enetc4_pf_clear_maft_entries(struct enetc_pf * pf)129 static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
130 {
131 	int i;
132 
133 	for (i = 0; i < pf->num_mfe; i++)
134 		ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
135 
136 	pf->num_mfe = 0;
137 }
138 
enetc4_pf_add_maft_entries(struct enetc_pf * pf,struct enetc_mac_addr * mac,int mac_cnt)139 static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
140 				      struct enetc_mac_addr *mac,
141 				      int mac_cnt)
142 {
143 	struct maft_entry_data maft = {};
144 	u16 si_bit = BIT(0);
145 	int i, err;
146 
147 	maft.cfge.si_bitmap = cpu_to_le16(si_bit);
148 	for (i = 0; i < mac_cnt; i++) {
149 		ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
150 		err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
151 		if (unlikely(err)) {
152 			pf->num_mfe = i;
153 			goto clear_maft_entries;
154 		}
155 	}
156 
157 	pf->num_mfe = mac_cnt;
158 
159 	return 0;
160 
161 clear_maft_entries:
162 	enetc4_pf_clear_maft_entries(pf);
163 
164 	return  err;
165 }
166 
/* Sync the netdev's unicast address list into the MAC address filter
 * table (exact matching). Returns 0 on success (including the empty-list
 * case, where both the hash and exact filters are cleared), -ENOSPC when
 * the list does not fit in the MAFT, or -ENOMEM on allocation failure.
 *
 * NOTE(review): if enetc4_pf_add_maft_entries() fails below, 0 is still
 * returned and the temporary hash filter stays active as a fallback —
 * presumably intentional; confirm with the caller's fallback logic.
 */
static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
{
	int max_num_mfe = pf->caps.mac_filter_num;
	struct enetc_mac_filter mac_filter = {};
	struct net_device *ndev = pf->si->ndev;
	struct enetc_hw *hw = &pf->si->hw;
	struct enetc_mac_addr *mac_tbl;
	struct netdev_hw_addr *ha;
	int i = 0, err;
	int mac_cnt;

	/* the uc list may change under us; snapshot it under the addr lock */
	netif_addr_lock_bh(ndev);

	mac_cnt = netdev_uc_count(ndev);
	if (!mac_cnt) {
		netif_addr_unlock_bh(ndev);
		/* clear both MAC hash and exact filters */
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);

		return 0;
	}

	if (mac_cnt > max_num_mfe) {
		err = -ENOSPC;
		goto unlock_netif_addr;
	}

	/* GFP_ATOMIC: we are inside the BH-disabled addr lock */
	mac_tbl = kzalloc_objs(*mac_tbl, mac_cnt, GFP_ATOMIC);
	if (!mac_tbl) {
		err = -ENOMEM;
		goto unlock_netif_addr;
	}

	/* build both the hash filter and the exact-match table in one pass */
	netdev_for_each_uc_addr(ha, ndev) {
		enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
		ether_addr_copy(mac_tbl[i++].addr, ha->addr);
	}

	netif_addr_unlock_bh(ndev);

	/* Set temporary unicast hash filters in case of Rx loss when
	 * updating MAC address filter table
	 */
	enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
	enetc4_pf_clear_maft_entries(pf);

	/* exact filter installed: the temporary hash filter can go away */
	if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);

	kfree(mac_tbl);

	return 0;

unlock_netif_addr:
	netif_addr_unlock_bh(ndev);

	return err;
}
226 
/* Rebuild the unicast and/or multicast MAC hash filters (selected by
 * @type, a mask of ENETC_MAC_FILTER_TYPE_*) from the netdev's current
 * address lists and program them into SI 0.
 */
static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
{
	struct net_device *ndev = pf->si->ndev;
	struct enetc_mac_filter *mac_filter;
	struct enetc_hw *hw = &pf->si->hw;
	struct netdev_hw_addr *ha;

	/* hold the addr lock so the uc/mc lists are stable while hashing */
	netif_addr_lock_bh(ndev);
	if (type & ENETC_MAC_FILTER_TYPE_UC) {
		mac_filter = &pf->mac_filter[UC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_uc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_uc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}

	if (type & ENETC_MAC_FILTER_TYPE_MC) {
		mac_filter = &pf->mac_filter[MC];
		enetc_reset_mac_addr_filter(mac_filter);
		netdev_for_each_mc_addr(ha, ndev)
			enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);

		enetc4_pf_set_si_mc_hash_filter(hw, 0,
						*mac_filter->mac_hash_table);
	}
	netif_addr_unlock_bh(ndev);
}
256 
/* Apply the MAC filters selected by @type: exact matching (MAFT) for
 * unicast where possible, hash filtering otherwise and for multicast.
 */
static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
{
	/* Currently, the MAC address filter table (MAFT) only has 4 entries,
	 * and multiple multicast addresses for filtering will be configured
	 * in the default network configuration, so MAFT is only suitable for
	 * unicast filtering. If the number of unicast addresses exceeds the
	 * table capacity, the MAC hash filter will be used.
	 */
	if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
		/* Fall back to the MAC hash filter */
		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
		/* Clear the old MAC exact filter */
		enetc4_pf_clear_maft_entries(pf);
	}

	if (type & ENETC_MAC_FILTER_TYPE_MC)
		enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
}
275 
/* Callbacks handed to the common ENETC PF code for MAC address access */
static const struct enetc_pf_ops enetc4_pf_ops = {
	.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
	.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
};
280 
/* First-stage PF initialization: link the PF private data to its SI,
 * record the SR-IOV VF budget, install the PF ops and cache the port
 * capabilities. Always returns 0.
 */
static int enetc4_pf_struct_init(struct enetc_si *si)
{
	struct enetc_pf *pf = enetc_si_priv(si);

	pf->si = si;
	pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
	pf->ops = &enetc4_pf_ops;

	enetc4_get_port_caps(pf);
	enetc4_get_psi_hw_features(si);

	return 0;
}
294 
/* Build a PSICFGR0 register value describing a SI's Tx/Rx ring counts
 * and VLAN handling; VSIs additionally get VLAN tag extraction/insertion
 * enabled.
 */
static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
{
	u32 cfg = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr) |
		  ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr) |
		  ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);

	if (is_vf)
		cfg |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;

	return cfg;
}
308 
enetc4_default_rings_allocation(struct enetc_pf * pf)309 static void enetc4_default_rings_allocation(struct enetc_pf *pf)
310 {
311 	struct enetc_hw *hw = &pf->si->hw;
312 	u32 num_rx_bdr, num_tx_bdr, val;
313 	u32 vf_tx_bdr, vf_rx_bdr;
314 	int i, rx_rem, tx_rem;
315 
316 	if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
317 		num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
318 	else
319 		num_rx_bdr = ENETC_SI_MAX_RING_NUM;
320 
321 	if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
322 		num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
323 	else
324 		num_tx_bdr = ENETC_SI_MAX_RING_NUM;
325 
326 	val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
327 	enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
328 
329 	num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
330 	rx_rem = num_rx_bdr % pf->caps.num_vsi;
331 	num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
332 
333 	num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
334 	tx_rem = num_tx_bdr % pf->caps.num_vsi;
335 	num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
336 
337 	for (i = 0; i < pf->caps.num_vsi; i++) {
338 		vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
339 		vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
340 		val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
341 		enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
342 	}
343 }
344 
/* Allocate BD rings to all SIs; currently only the default even split
 * policy is implemented.
 */
static void enetc4_allocate_si_rings(struct enetc_pf *pf)
{
	enetc4_default_rings_allocation(pf);
}
349 
/* Toggle VLAN promiscuous mode for one SI via its bit in PSIPVMR. */
static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
{
	u32 reg = enetc_port_rd(hw, ENETC4_PSIPVMR);

	/* one bit per SI: set enables VLAN promiscuous, clear disables it */
	reg = en ? (reg | BIT(si)) : (reg & ~BIT(si));

	enetc_port_wr(hw, ENETC4_PSIPVMR, reg);
}
361 
enetc4_set_default_si_vlan_promisc(struct enetc_pf * pf)362 static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
363 {
364 	struct enetc_hw *hw = &pf->si->hw;
365 	int num_si = pf->caps.num_vsi + 1;
366 	int i;
367 
368 	/* enforce VLAN promiscuous mode for all SIs */
369 	for (i = 0; i < num_si; i++)
370 		enetc4_pf_set_si_vlan_promisc(hw, i, true);
371 }
372 
/* Allocate the number of MSI-X vectors for per SI.
 *
 * Each SI receives an even share of the port's MSI-X vectors; the PF
 * (SI 0) additionally absorbs the remainder of the division.
 */
static void enetc4_set_si_msix_num(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;
	int i, num_msix, total_si;
	u32 val;

	total_si = pf->caps.num_vsi + 1;

	/* the -1 suggests PSICFGR2 encodes "vector count - 1" —
	 * NOTE(review): confirm against the register description
	 */
	num_msix = pf->caps.num_msix / total_si +
		   pf->caps.num_msix % total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);

	num_msix = pf->caps.num_msix / total_si - 1;
	val = num_msix & PSICFGR2_NUM_MSIX;
	for (i = 0; i < pf->caps.num_vsi; i++)
		enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
}
392 
enetc4_enable_all_si(struct enetc_pf * pf)393 static void enetc4_enable_all_si(struct enetc_pf *pf)
394 {
395 	struct enetc_hw *hw = &pf->si->hw;
396 	int num_si = pf->caps.num_vsi + 1;
397 	u32 si_bitmap = 0;
398 	int i;
399 
400 	/* Master enable for all SIs */
401 	for (i = 0; i < num_si; i++)
402 		si_bitmap |= PMR_SI_EN(i);
403 
404 	enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
405 }
406 
/* One-time SI-level port setup: ring allocation, VLAN handling defaults,
 * MAC promiscuous defaults, MSI-X distribution and SI enablement.
 */
static void enetc4_configure_port_si(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	enetc4_allocate_si_rings(pf);

	/* Outer VLAN tag will be used for VLAN filtering */
	enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);

	enetc4_set_default_si_vlan_promisc(pf);

	/* Disable SI MAC multicast & unicast promiscuous */
	enetc_port_wr(hw, ENETC4_PSIPMMR, 0);

	enetc4_set_si_msix_num(pf);

	enetc4_enable_all_si(pf);
}
425 
/* Reset the maximum SDU of every traffic class to the MAC maximum frame
 * size, with the SDU type set to MPDU.
 */
static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
{
	u32 val = ENETC_MAC_MAXFRM_SIZE;
	int tc;

	val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);

	/* same limit for all traffic classes */
	for (tc = 0; tc < ENETC_NUM_TC; tc++)
		enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
}
436 
/* Program the MAC maximum frame size and reset the per-TC max SDU
 * limits accordingly.
 */
static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
{
	struct enetc_si *si = pf->si;

	enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
			  ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));

	enetc4_pf_reset_tc_msdu(&si->hw);
}
446 
/* Enable port-level transmit/receive by clearing the port operation
 * register (writing 0 releases the Tx/Rx disable bits).
 */
static void enetc4_enable_trx(struct enetc_pf *pf)
{
	struct enetc_hw *hw = &pf->si->hw;

	/* Enable port transmit/receive */
	enetc_port_wr(hw, ENETC4_POR, 0);
}
454 
/* Full port bring-up sequence: SI configuration, frame sizes, RSS key,
 * then finally enabling Tx/Rx.
 */
static void enetc4_configure_port(struct enetc_pf *pf)
{
	enetc4_configure_port_si(pf);
	enetc4_set_trx_frame_size(pf);
	enetc_set_default_rss_key(pf);
	enetc4_enable_trx(pf);
}
462 
/* Initialize the NTMP (NETC table management protocol) user context and
 * set up the control BD ring used to access the hardware tables.
 * Returns 0 or the error from enetc4_setup_cbdr().
 */
static int enetc4_init_ntmp_user(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;

	/* For ENETC 4.1, all table versions are 0 */
	memset(&user->tbl, 0, sizeof(user->tbl));

	return enetc4_setup_cbdr(si);
}
472 
/* Counterpart of enetc4_init_ntmp_user(): tear down the control BD ring. */
static void enetc4_free_ntmp_user(struct enetc_si *si)
{
	enetc4_teardown_cbdr(si);
}
477 
/* Second-stage PF initialization: MAC addresses from the device tree,
 * NTMP/CBDR setup, then full port configuration. Returns 0 or a
 * negative errno; on failure no resources are left allocated.
 */
static int enetc4_pf_init(struct enetc_pf *pf)
{
	struct device *dev = &pf->si->pdev->dev;
	int err;

	/* Initialize the MAC address for PF and VFs */
	err = enetc_setup_mac_addresses(dev->of_node, pf);
	if (err) {
		dev_err(dev, "Failed to set MAC addresses\n");
		return err;
	}

	err = enetc4_init_ntmp_user(pf->si);
	if (err) {
		dev_err(dev, "Failed to init CBDR\n");
		return err;
	}

	enetc4_configure_port(pf);

	return 0;
}
500 
/* Counterpart of enetc4_pf_init(): release the NTMP/CBDR resources. */
static void enetc4_pf_free(struct enetc_pf *pf)
{
	enetc4_free_ntmp_user(pf->si);
}
505 
/* Deferred Rx-mode update, run from the SI workqueue (scheduled by
 * enetc4_pf_set_rx_mode()). Translates the netdev flags into hardware
 * promiscuous bits and MAC filters for SI 0:
 *   IFF_PROMISC   -> uc+mc promiscuous, all filters cleared
 *   IFF_ALLMULTI  -> mc promiscuous, uc filtering kept
 *   otherwise     -> uc+mc filtering from the address lists
 */
static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
{
	struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
	struct enetc_pf *pf = enetc_si_priv(si);
	struct net_device *ndev = si->ndev;
	struct enetc_hw *hw = &si->hw;
	bool uc_promisc = false;
	bool mc_promisc = false;
	int type = 0;

	/* serialize against other netdev reconfiguration */
	rtnl_lock();

	if (ndev->flags & IFF_PROMISC) {
		uc_promisc = true;
		mc_promisc = true;
	} else if (ndev->flags & IFF_ALLMULTI) {
		mc_promisc = true;
		type = ENETC_MAC_FILTER_TYPE_UC;
	} else {
		type = ENETC_MAC_FILTER_TYPE_ALL;
	}

	enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);

	/* filters are pointless while the matching promiscuous bit is set */
	if (uc_promisc) {
		enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
		enetc4_pf_clear_maft_entries(pf);
	}

	if (mc_promisc)
		enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);

	/* Set new MAC filter */
	enetc4_pf_set_mac_filter(pf, type);

	rtnl_unlock();
}
543 
/* .ndo_set_rx_mode callback. Runs in atomic context, so the actual
 * reconfiguration is deferred to the SI workqueue.
 */
static void enetc4_pf_set_rx_mode(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_si *si = priv->si;

	queue_work(si->workqueue, &si->rx_mode_task);
}
551 
/* .ndo_set_features callback: apply changes to VLAN filtering and
 * loopback, then hand the rest to the common helper. Always returns 0.
 */
static int enetc4_pf_set_features(struct net_device *ndev,
				  netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		/* no VLAN filtering means VLAN promiscuous */
		bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);

		enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
	}

	if (changed & NETIF_F_LOOPBACK)
		enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));

	enetc_set_features(ndev, features);

	return 0;
}
572 
/* Netdev ops for the ENETC v4 PF; mostly shared with the common ENETC
 * code, with v4-specific Rx-mode and feature handling.
 */
static const struct net_device_ops enetc4_ndev_ops = {
	.ndo_open		= enetc_open,
	.ndo_stop		= enetc_close,
	.ndo_start_xmit		= enetc_xmit,
	.ndo_get_stats		= enetc_get_stats,
	.ndo_set_mac_address	= enetc_pf_set_mac_addr,
	.ndo_set_rx_mode	= enetc4_pf_set_rx_mode,
	.ndo_set_features	= enetc4_pf_set_features,
	.ndo_vlan_rx_add_vid	= enetc_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enetc_vlan_rx_del_vid,
	.ndo_eth_ioctl		= enetc_ioctl,
	.ndo_hwtstamp_get	= enetc_hwtstamp_get,
	.ndo_hwtstamp_set	= enetc_hwtstamp_set,
};
587 
/* phylink .mac_select_pcs: return the PCS created for this PF. */
static struct phylink_pcs *
enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	return pf->pcs;
}
595 
/* Program the MAC interface mode (RGMII/RMII/SGMII/XGMII families) into
 * PM_IF_MODE. A no-op for pseudo MACs; unsupported PHY modes are only
 * logged and leave the register untouched.
 */
static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
			      phy_interface_t phy_mode)
{
	struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
	struct enetc_si *si = pf->si;
	u32 val;

	if (enetc_is_pseudo_mac(si))
		return;

	val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		val |= IFMODE_RGMII;
		/* We need to enable auto-negotiation for the MAC
		 * if its RGMII interface support In-Band status.
		 */
		if (phylink_autoneg_inband(mode))
			val |= PM_IF_MODE_ENA;
		break;
	case PHY_INTERFACE_MODE_RMII:
		val |= IFMODE_RMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		val |= IFMODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XGMII:
	case PHY_INTERFACE_MODE_USXGMII:
		val |= IFMODE_XGMII;
		break;
	default:
		dev_err(priv->dev,
			"Unsupported PHY mode:%d\n", phy_mode);
		return;
	}

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
641 
/* phylink .mac_config: forward the negotiated interface to the MAC. */
static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
				 const struct phylink_link_state *state)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_mac_config(pf, mode, state->interface);
}
649 
/* Update the port speed field in PCR, skipping the register write when
 * the speed is unchanged. Unknown speeds fall back to 10 Mbps.
 */
static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
{
	u32 old_speed = priv->speed;
	u32 val;

	if (speed == old_speed)
		return;

	val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
	val &= ~PCR_PSPEED;

	switch (speed) {
	case SPEED_100:
	case SPEED_1000:
	case SPEED_2500:
	case SPEED_10000:
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
		break;
	case SPEED_10:
	default:
		/* fallthrough into the 10 Mbps encoding for SPEED_10 and
		 * any unrecognized speed
		 */
		val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
}
676 
/* Configure PM_IF_MODE for RGMII operation at the given speed/duplex.
 * The write is skipped when the register value would not change.
 * Speeds other than 10/100/1000 leave the SSP field untouched.
 */
static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);

	switch (speed) {
	case SPEED_1000:
		val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
		break;
	case SPEED_100:
		val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
		break;
	case SPEED_10:
		val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
704 
/* Configure PM_IF_MODE for RMII operation at the given speed/duplex.
 * The write is skipped when the register value would not change.
 */
static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
	val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);

	switch (speed) {
	case SPEED_100:
		val &= ~PM_IF_MODE_M10;
		break;
	case SPEED_10:
		/* M10 selects the 10 Mbps RMII mode */
		val |= PM_IF_MODE_M10;
	}

	val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
			       PM_IF_MODE_HD);

	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
}
729 
/* Enable/disable half-duplex (backpressure) flow control in PM_CMD_CFG.
 * A no-op on ports without half-duplex capability; the write is skipped
 * when the register value would not change.
 */
static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	if (!pf->caps.half_duplex)
		return;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
745 
/* Honor or ignore received PAUSE frames. The hardware bit has inverted
 * polarity (PAUSE_IGN = "ignore"), hence rx_pause ? 0 : 1. The write is
 * skipped when the register value would not change.
 */
static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
{
	struct enetc_si *si = pf->si;
	u32 old_val, val;

	old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
	if (val == old_val)
		return;

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
}
758 
/* Configure PAUSE frame transmission: switch the first @num_rxbdr Rx
 * rings to congestion-management mode, and program the PAUSE quanta and
 * FIFO on/off thresholds (all zeroed when tx_pause is disabled).
 */
static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
{
	u32 pause_off_thresh = 0, pause_on_thresh = 0;
	u32 init_quanta = 0, refresh_quanta = 0;
	struct enetc_hw *hw = &pf->si->hw;
	u32 rbmr, old_rbmr;
	int i;

	/* toggle the congestion-management bit per ring, skipping
	 * unchanged rings to avoid redundant register writes
	 */
	for (i = 0; i < num_rxbdr; i++) {
		old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
		rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
		if (rbmr == old_rbmr)
			continue;

		enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
	}

	if (tx_pause) {
		/* When the port first enters congestion, send a PAUSE request
		 * with the maximum number of quanta. When the port exits
		 * congestion, it will automatically send a PAUSE frame with
		 * zero quanta.
		 */
		init_quanta = 0xffff;

		/* Also, set up the refresh timer to send follow-up PAUSE
		 * frames at half the quanta value, in case the congestion
		 * condition persists.
		 */
		refresh_quanta = 0xffff / 2;

		/* Start emitting PAUSE frames when 3 large frames (or more
		 * smaller frames) have accumulated in the FIFO waiting to be
		 * DMAed to the RX ring.
		 */
		pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
		pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
	}

	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
	enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
	enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
	enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
}
803 
/* Enable or disable both the Tx and Rx paths of the port MAC. */
static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
{
	struct enetc_si *si = pf->si;
	u32 cmd;

	cmd = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
	if (en)
		cmd |= PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN;
	else
		cmd &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);

	enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), cmd);
}
815 
/* phylink .mac_link_up: apply the resolved speed, duplex and pause
 * settings, then enable the MAC. PAUSE generation is suppressed when
 * frame preemption is active or the link is half duplex (per 802.3).
 */
static void enetc4_pl_mac_link_up(struct phylink_config *config,
				  struct phy_device *phy, unsigned int mode,
				  phy_interface_t interface, int speed,
				  int duplex, bool tx_pause, bool rx_pause)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);
	struct enetc_si *si = pf->si;
	struct enetc_ndev_priv *priv;
	bool hd_fc = false;

	priv = netdev_priv(si->ndev);
	enetc4_set_port_speed(priv, speed);

	/* in in-band mode the RGMII MAC follows the PHY's in-band status,
	 * so only program it explicitly for out-of-band operation
	 */
	if (!phylink_autoneg_inband(mode) &&
	    phy_interface_mode_is_rgmii(interface))
		enetc4_set_rgmii_mac(pf, speed, duplex);

	if (interface == PHY_INTERFACE_MODE_RMII)
		enetc4_set_rmii_mac(pf, speed, duplex);

	if (duplex == DUPLEX_FULL) {
		/* When preemption is enabled, generation of PAUSE frames
		 * must be disabled, as stated in the IEEE 802.3 standard.
		 */
		if (priv->active_offloads & ENETC_F_QBU)
			tx_pause = false;
	} else { /* DUPLEX_HALF */
		if (tx_pause || rx_pause)
			hd_fc = true;

		/* As per 802.3 annex 31B, PAUSE frames are only supported
		 * when the link is configured for full duplex operation.
		 */
		tx_pause = false;
		rx_pause = false;
	}

	enetc4_set_hd_flow_control(pf, hd_fc);
	enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
	enetc4_set_rx_pause(pf, rx_pause);
	enetc4_enable_mac(pf, true);
}
858 
/* phylink .mac_link_down: quiesce the MAC Tx/Rx paths. */
static void enetc4_pl_mac_link_down(struct phylink_config *config,
				    unsigned int mode,
				    phy_interface_t interface)
{
	struct enetc_pf *pf = phylink_to_enetc_pf(config);

	enetc4_enable_mac(pf, false);
}
867 
/* phylink MAC operations for the ENETC v4 PF */
static const struct phylink_mac_ops enetc_pl_mac_ops = {
	.mac_select_pcs = enetc4_pl_mac_select_pcs,
	.mac_config = enetc4_pl_mac_config,
	.mac_link_up = enetc4_pl_mac_link_up,
	.mac_link_down = enetc4_pl_mac_link_down,
};
874 
/* devm action callback: undo enetc_pci_probe() on driver detach or on a
 * probe error path (registered via devm_add_action_or_reset()).
 */
static void enetc4_pci_remove(void *data)
{
	struct pci_dev *pdev = data;

	enetc_pci_remove(pdev);
}
881 
/* Set up the link layer: read the PHY mode from the device tree, create
 * the MDIO bus and the phylink instance. On phylink failure the MDIO
 * bus is destroyed again. Returns 0 or a negative errno.
 */
static int enetc4_link_init(struct enetc_ndev_priv *priv,
			    struct device_node *node)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);
	struct device *dev = priv->dev;
	int err;

	err = of_get_phy_mode(node, &pf->if_mode);
	if (err) {
		dev_err(dev, "Failed to get PHY mode\n");
		return err;
	}

	err = enetc_mdiobus_create(pf, node);
	if (err) {
		dev_err(dev, "Failed to create MDIO bus\n");
		return err;
	}

	err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
	if (err) {
		dev_err(dev, "Failed to create phylink\n");
		goto err_phylink_create;
	}

	return 0;

err_phylink_create:
	enetc_mdiobus_destroy(pf);

	return err;
}
914 
/* Counterpart of enetc4_link_init(): destroy phylink then the MDIO bus. */
static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
{
	struct enetc_pf *pf = enetc_si_priv(priv->si);

	enetc_phylink_destroy(priv);
	enetc_mdiobus_destroy(pf);
}
922 
/* Create the per-SI single-threaded workqueue and the deferred Rx-mode
 * work it runs. Returns 0 or -ENOMEM.
 */
static int enetc4_psi_wq_task_init(struct enetc_si *si)
{
	char wq_name[24];

	INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
	/* one workqueue per PCI function, named after it */
	snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
	si->workqueue = create_singlethread_workqueue(wq_name);
	if (!si->workqueue)
		return -ENOMEM;

	return 0;
}
935 
/* Allocate and register the PF's net_device: etherdev allocation,
 * optional reference clock lookup, SI configuration, MSI-X, link layer
 * and Rx-mode workqueue. On any failure everything set up so far is
 * unwound in reverse order via the goto chain. Returns 0 or a negative
 * errno.
 */
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
	struct device *dev = &si->pdev->dev;
	struct enetc_ndev_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
				  si->num_tx_rings, si->num_rx_rings);
	if (!ndev)
		return  -ENOMEM;

	priv = netdev_priv(ndev);
	/* optional: absent "ref" clock yields a NULL (no-op) clk handle */
	priv->ref_clk = devm_clk_get_optional(dev, "ref");
	if (IS_ERR(priv->ref_clk)) {
		dev_err(dev, "Get reference clock failed\n");
		err = PTR_ERR(priv->ref_clk);
		goto err_clk_get;
	}

	enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);

	enetc_init_si_rings_params(priv);

	err = enetc_configure_si(priv);
	if (err) {
		dev_err(dev, "Failed to configure SI\n");
		goto err_config_si;
	}

	err = enetc_alloc_msix(priv);
	if (err) {
		dev_err(dev, "Failed to alloc MSI-X\n");
		goto err_alloc_msix;
	}

	err = enetc4_link_init(priv, dev->of_node);
	if (err)
		goto err_link_init;

	err = enetc4_psi_wq_task_init(si);
	if (err) {
		dev_err(dev, "Failed to init workqueue\n");
		goto err_wq_init;
	}

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
	destroy_workqueue(si->workqueue);
err_wq_init:
	enetc4_link_deinit(priv);
err_link_init:
	enetc_free_msix(priv);
err_alloc_msix:
err_config_si:
err_clk_get:
	free_netdev(ndev);

	return err;
}
1003 
/* Counterpart of enetc4_pf_netdev_create(): unregister the netdev and
 * release its resources in reverse order of creation.
 */
static void enetc4_pf_netdev_destroy(struct enetc_si *si)
{
	struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
	struct net_device *ndev = si->ndev;

	unregister_netdev(ndev);
	/* stop any pending Rx-mode update before killing its workqueue */
	cancel_work(&si->rx_mode_task);
	destroy_workqueue(si->workqueue);
	enetc4_link_deinit(priv);
	enetc_free_msix(priv);
	free_netdev(ndev);
}
1016 
/* SI-level callbacks (RSS table access) for the v4 PF */
static const struct enetc_si_ops enetc4_psi_ops = {
	.get_rss_table = enetc4_get_rss_table,
	.set_rss_table = enetc4_set_rss_table,
};
1021 
/* PCI probe: bring up the PCI function, validate the register spaces,
 * initialize the PF, create the netdev and the debugfs entries. PCI
 * teardown is handled automatically via the devm action; later failures
 * only need to undo enetc4_pf_init() (enetc4_pf_free()).
 */
static int enetc4_pf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct enetc_si *si;
	struct enetc_pf *pf;
	int err;

	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
	if (err)
		return dev_err_probe(dev, err, "PCIe probing failed\n");

	/* guarantees enetc_pci_remove() runs on detach and error paths */
	err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
	if (err)
		return err;

	/* si is the private data. */
	si = pci_get_drvdata(pdev);
	if (!si->hw.port || !si->hw.global)
		return dev_err_probe(dev, -ENODEV,
				     "Couldn't map PF only space\n");

	si->revision = enetc_get_ip_revision(&si->hw);
	si->ops = &enetc4_psi_ops;
	err = enetc_get_driver_data(si);
	if (err)
		return dev_err_probe(dev, err,
				     "Could not get PF driver data\n");

	err = enetc4_pf_struct_init(si);
	if (err)
		return err;

	pf = enetc_si_priv(si);
	err = enetc4_pf_init(pf);
	if (err)
		return err;

	enetc_get_si_caps(si);

	err = enetc4_pf_netdev_create(si);
	if (err)
		goto err_netdev_create;

	enetc_create_debugfs(si);

	return 0;

err_netdev_create:
	enetc4_pf_free(pf);

	return err;
}
1075 
/* PCI remove: tear down in reverse probe order; PCI-level cleanup then
 * runs via the devm action registered in probe.
 */
static void enetc4_pf_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_pf *pf = enetc_si_priv(si);

	enetc_remove_debugfs(si);
	enetc4_pf_netdev_destroy(si);
	enetc4_pf_free(pf);
}
1085 
/* PCI IDs served by this driver: the ENETC v4 PF and its PPM variant */
static const struct pci_device_id enetc4_pf_id_table[] = {
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
	{ PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) },
	{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
1092 
/* PCI driver glue; module init/exit are generated by module_pci_driver() */
static struct pci_driver enetc4_pf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = enetc4_pf_id_table,
	.probe = enetc4_pf_probe,
	.remove = enetc4_pf_remove,
};
module_pci_driver(enetc4_pf_driver);
1100 
1101 MODULE_DESCRIPTION("ENETC4 PF Driver");
1102 MODULE_LICENSE("Dual BSD/GPL");
1103