// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_mbx.h"
#include "wx_sriov.h"

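/*
 * wx_vf_configuration() below clears the stored MAC of a VF when it is
 * (re)created. The event_mask packs an enable flag together with the VF
 * index (decoded via WX_VF_ENABLE_CHECK()/WX_VF_NUM_GET()); callers
 * further down pass (i | WX_VF_ENABLE) so that VF i starts out with a
 * zeroed MAC address.
 */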
static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
{
	bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
	struct wx *wx = pci_get_drvdata(pdev);
	u32 vfn = WX_VF_NUM_GET(event_mask);

	if (enable)
		eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
}

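/*
 * The RAR entries left over after the PF MACVLANs, the PF's own address
 * and the per-VF addresses become a shared pool of VF macvlan slots.
 * Worked example with illustrative values only: 128 RAR entries,
 * WX_MAX_PF_MACVLANS == 15 and num_vfs == 7 would leave
 * 128 - (15 + 1 + 7) = 105 slots, each starting out free and unowned
 * (vf == -1).
 */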
static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
{
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	/* Initialize list of VF macvlans */
	INIT_LIST_HEAD(&wx->vf_mvs.mvlist);

	num_vf_macvlans = wx->mac.num_rar_entries -
			  (WX_MAX_PF_MACVLANS + 1 + num_vfs);
	if (!num_vf_macvlans)
		return -EINVAL;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (!mv_list)
		return -ENOMEM;

	for (i = 0; i < num_vf_macvlans; i++) {
		mv_list[i].vf = -1;
		mv_list[i].free = true;
		list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
	}
	wx->mv_list = mv_list;

	return 0;
}

static void wx_sriov_clear_data(struct wx *wx)
{
	/* set num VFs to 0 to prevent access to vfinfo */
	wx->num_vfs = 0;

	/* free VF control structures */
	kfree(wx->vfinfo);
	wx->vfinfo = NULL;

	/* free macvlan list */
	kfree(wx->mv_list);
	wx->mv_list = NULL;

	/* set default pool back to 0 */
	wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
	wx->ring_feature[RING_F_VMDQ].offset = 0;

	clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
	clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	/* Disable the VMDq flag so the device is set back to normal mode */
	if (wx->ring_feature[RING_F_VMDQ].limit == 1)
		clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
}

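/*
 * __wx_enable_sriov() allocates the per-VF state and switches the part
 * into virtualization mode. The NUM_VT field written at the end picks
 * the pool layout: parts without WX_FLAG_MULTI_64_FUNC always use 8
 * pools; larger parts use 32 pools for fewer than 32 VFs and 64 pools
 * otherwise.
 */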
static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
{
	int i, ret = 0;
	u32 value = 0;

	set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);

	if (num_vfs == 7 && wx->mac.type == wx_mac_em)
		set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);

	/* Enable VMDq flag so device will be set in VM mode */
	set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
	if (!wx->ring_feature[RING_F_VMDQ].limit)
		wx->ring_feature[RING_F_VMDQ].limit = 1;
	wx->ring_feature[RING_F_VMDQ].offset = num_vfs;

	wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
			     GFP_KERNEL);
	if (!wx->vfinfo)
		return -ENOMEM;

	ret = wx_alloc_vf_macvlans(wx, num_vfs);
	if (ret)
		return ret;

	/* Initialize default switching mode VEB */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		wx->vfinfo[i].spoofchk_enabled = true;
		wx->vfinfo[i].link_enable = true;
		/* untrust all VFs */
		wx->vfinfo[i].trusted = false;
		/* set the default xcast mode */
		wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
	}

	if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		value = WX_CFG_PORT_CTL_NUM_VT_8;
	} else {
		if (num_vfs < 32)
			value = WX_CFG_PORT_CTL_NUM_VT_32;
		else
			value = WX_CFG_PORT_CTL_NUM_VT_64;
	}
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_NUM_VT_MASK,
	      value);

	return ret;
}

static void wx_sriov_reinit(struct wx *wx)
{
	rtnl_lock();
	wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
	rtnl_unlock();
}

void wx_disable_sriov(struct wx *wx)
{
	if (!pci_vfs_assigned(wx->pdev))
		pci_disable_sriov(wx->pdev);
	else
		wx_err(wx, "Unloading driver while VFs are assigned.\n");

	/* clear flags and free allocated data */
	wx_sriov_clear_data(wx);
}
EXPORT_SYMBOL(wx_disable_sriov);

static int wx_pci_sriov_enable(struct pci_dev *dev,
			       int num_vfs)
{
	struct wx *wx = pci_get_drvdata(dev);
	int err = 0, i;

	err = __wx_enable_sriov(wx, num_vfs);
	if (err)
		return err;

	wx->num_vfs = num_vfs;
	for (i = 0; i < wx->num_vfs; i++)
		wx_vf_configuration(dev, (i | WX_VF_ENABLE));

	/* reset before enabling SRIOV to avoid mailbox issues */
	wx_sriov_reinit(wx);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
		goto err_out;
	}

	return num_vfs;
err_out:
	wx_sriov_clear_data(wx);
	return err;
}

static void wx_pci_sriov_disable(struct pci_dev *dev)
{
	struct wx *wx = pci_get_drvdata(dev);

	wx_disable_sriov(wx);
	wx_sriov_reinit(wx);
}

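/*
 * wx_pci_sriov_configure() below is the driver's sriov_configure hook;
 * the PCI core calls it via the standard sriov_numvfs sysfs attribute
 * (the device address here is illustrative):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   # 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs   # tear down
 *
 * Tear-down returns -EBUSY while any VF is still assigned to a guest.
 */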
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct wx *wx = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			wx_pci_sriov_disable(pdev);
			return 0;
		}

		wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = wx_pci_sriov_enable(pdev, num_vfs);
	if (err)
		return err;

	return num_vfs;
}
EXPORT_SYMBOL(wx_pci_sriov_configure);

static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
{
	u8 hw_addr[ETH_ALEN];
	int ret = 0;

	ether_addr_copy(hw_addr, mac_addr);
	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	ret = wx_add_mac_filter(wx, hw_addr, vf);
	if (ret >= 0)
		ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
	else
		eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);

	return ret;
}

static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
{
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));

	vmolr |= WX_PSR_VM_L2CTL_BAM;
	if (aupe)
		vmolr |= WX_PSR_VM_L2CTL_AUPE;
	else
		vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

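/*
 * The default tag written by wx_set_vmvir() follows the 802.1Q TCI
 * layout: VID in bits 11:0 and priority in bits 15:13
 * (VLAN_PRIO_SHIFT == 13). For example, vid = 100 with qos = 5 programs
 * 100 | (5 << 13), plus the VLANA default-insert enable.
 */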
static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
{
	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
		    WX_TDM_VLAN_INS_VLANA_DEFAULT;

	wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
}

static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
{
	if (!vid && !add)
		return 0;

	return wx_set_vfta(wx, vid, vf, (bool)add);
}

static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 pfvfspoof;

	pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
	if (enable)
		pfvfspoof |= BIT(vf_bit);
	else
		pfvfspoof &= ~BIT(vf_bit);
	wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
}

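/*
 * Each WX_RDM_PF_QDE register holds drop-enable bits for 32 queues; n
 * selects the register covering this VF's pool and the loop walks only
 * the bits that belong to it. E.g. with 4 queues per pool, VF 9 owns
 * absolute queue bits 36..39, i.e. bits 4..7 of register n = 1.
 */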
static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u32 reg = 0, n = vf * q_per_pool / 32;
	u32 i = vf * q_per_pool;

	reg = rd32(wx, WX_RDM_PF_QDE(n));
	for (i = (vf * q_per_pool - n * 32);
	     i < ((vf + 1) * q_per_pool - n * 32);
	     i++) {
		if (qde == 1)
			reg |= qde << i;
		else
			reg &= qde << i;
	}

	wr32(wx, WX_RDM_PF_QDE(n), reg);
}

static void wx_clear_vmvir(struct wx *wx, u32 vf)
{
	wr32(wx, WX_TDM_VLAN_INS(vf), 0);
}

static void wx_ping_vf(struct wx *wx, int vf)
{
	u32 ping = WX_PF_CONTROL_MSG;

	if (wx->vfinfo[vf].clear_to_send)
		ping |= WX_VT_MSGTYPE_CTS;
	wx_write_mbx_pf(wx, &ping, 1, vf);
}

static void wx_set_vf_rx_tx(struct wx *wx, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;

	reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
	reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));

	if (wx->vfinfo[vf].link_enable) {
		reg_req_tx = reg_cur_tx | BIT(vf_bit);
		reg_req_rx = reg_cur_rx | BIT(vf_bit);
		/* Enable particular VF */
		if (reg_cur_tx != reg_req_tx)
			wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
		if (reg_cur_rx != reg_req_rx)
			wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
	} else {
		reg_req_tx = BIT(vf_bit);
		reg_req_rx = BIT(vf_bit);
		/* Disable particular VF */
		if (reg_cur_tx & reg_req_tx)
			wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
		if (reg_cur_rx & reg_req_rx)
			wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
	}
}

static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;

	msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
		msgbuf[WX_VF_TRANS_VLAN] = 1;
	else
		msgbuf[WX_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[WX_VF_DEF_QUEUE] = default_tc;

	return 0;
}

static void wx_vf_reset_event(struct wx *wx, u16 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(wx->netdev);

	/* add PF assigned VLAN */
	wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		wx_clear_vmvir(wx, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     vfinfo->pf_qos, vf);
		else
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     wx->default_up, vf);
	}

	/* reset multicast table array for vf */
	wx->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	wx_set_rx_mode(wx->netdev);

	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	/* reset VF api back to unknown */
	wx->vfinfo[vf].vf_api = wx_mbox_api_null;
}

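/*
 * Reply layout produced below for a VF reset (WX_VF_PERMADDR_MSG_LEN
 * words):
 *   msgbuf[0]    WX_VF_RESET | ACK (NACK when no MAC is assigned)
 *   msgbuf[1..2] the VF's permanent MAC address
 *   msgbuf[3]    the multicast filter type the VF must mirror
 */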
static void wx_vf_reset_msg(struct wx *wx, u16 vf)
{
	const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
	struct net_device *dev = wx->netdev;
	u32 msgbuf[5] = {0, 0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 reg = 0, index, vf_bit;
	int pf_max_frame;

	/* reset the filters for the device */
	wx_vf_reset_event(wx, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		wx_set_vf_mac(wx, vf, vf_mac);

	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	/* force drop enable for all VF Rx queues */
	wx_write_qde(wx, vf, 1);

	/* set transmit and receive for vf */
	wx_set_vf_rx_tx(wx, vf);

	pf_max_frame = dev->mtu + ETH_HLEN;

	if (pf_max_frame > ETH_FRAME_LEN)
		reg = BIT(vf_bit);
	wr32(wx, WX_RDM_VFRE_CLR(index), reg);

	/* enable VF mailbox for further messages */
	wx->vfinfo[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = WX_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_err(wx, "VF %d has no MAC address assigned", vf);
	}

	msgbuf[3] = wx->mac.mc_filter_type;
	wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
}

static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
{
	const u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int ret;

	if (!is_valid_ether_addr(new_mac)) {
		wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
		return -EINVAL;
	}

	if (wx->vfinfo[vf].pf_set_mac &&
	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
		wx_err(wx,
		       "VF %d attempted to set a MAC but one is already set.",
		       vf);
		return -EBUSY;
	}

	ret = wx_set_vf_mac(wx, vf, new_mac);
	if (ret < 0)
		return ret;

	return 0;
}

static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
		      >> WX_VT_MSGINFO_SHIFT;
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	u32 vector_bit, vector_reg, mta_reg, i;
	u16 *hash_list = (u16 *)&msgbuf[1];

	/* only so many hash values supported */
	entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
	vfinfo->num_vf_mc_hashes = entries;

	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
		vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
		mta_reg = wx->mac.mta_shadow[vector_reg];
		mta_reg |= BIT(vector_bit);
		wx->mac.mta_shadow[vector_reg] = mta_reg;
		wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
	}
	vmolr |= WX_PSR_VM_L2CTL_ROMPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
{
	u32 index, vf_bit, vfre;
	u32 max_frs, reg_val;

	/* determine VF receive enable location */
	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	vfre = rd32(wx, WX_RDM_VF_RE(index));
	vfre |= BIT(vf_bit);
	wr32(wx, WX_RDM_VF_RE(index), vfre);

	/* pull current max frame size from hardware */
	max_frs = DIV_ROUND_UP(max_frame, 1024);
	reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
	if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
		wr32(wx, WX_MAC_WDG_TIMEOUT,
		     max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
}

static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
{
	int regindex;
	u32 vlvf;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries */
	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	/* Return a negative value if not found */
	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
		regindex = -EINVAL;

	return regindex;
}

static int wx_set_vf_macvlan(struct wx *wx,
			     u16 vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	struct list_head *pos;
	int retval = 0;

	if (index <= 1) {
		list_for_each(pos, &wx->vf_mvs.mvlist) {
			entry = list_entry(pos, struct vf_macvlans, mvlist);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				wx_del_mac_filter(wx, entry->vf_macvlan, vf);
			}
		}
	}

	if (!index)
		return 0;

	entry = NULL;
	list_for_each(pos, &wx->vf_mvs.mvlist) {
		entry = list_entry(pos, struct vf_macvlans, mvlist);
		if (entry->free)
			break;
	}

	if (!entry || !entry->free)
		return -ENOSPC;

	retval = wx_add_mac_filter(wx, mac_addr, vf);
	if (retval >= 0) {
		entry->free = false;
		entry->is_macvlan = true;
		entry->vf = vf;
		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
	}

	return retval;
}

static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
	int ret;

	if (add)
		wx->vfinfo[vf].vlan_count++;
	else if (wx->vfinfo[vf].vlan_count)
		wx->vfinfo[vf].vlan_count--;

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && wx->netdev->flags & IFF_PROMISC)
		wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));

	ret = wx_set_vf_vlan(wx, add, vid, vf);
	if (!ret && wx->vfinfo[vf].spoofchk_enabled)
		wx_set_vlan_anti_spoofing(wx, true, vf);

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && wx->netdev->flags & IFF_PROMISC) {
		u32 bits = 0, vlvf;
		int reg_ndx;

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return -ENOSPC;
		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		if (VMDQ_P(0) < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~BIT(VMDQ_P(0));
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~BIT(VMDQ_P(0) % 32);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
	}

	return 0;
}

static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
		    WX_VT_MSGINFO_SHIFT;
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int err;

	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
		wx_err(wx, "VF %d requested a MACVLAN filter but was denied\n", vf);
		return -EINVAL;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
			return -EINVAL;
		}
		/* If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (wx->vfinfo[vf].spoofchk_enabled)
			wx_set_vf_spoofchk(wx->netdev, vf, false);
	}

	err = wx_set_vf_macvlan(wx, vf, index, new_mac);
	if (err == -ENOSPC)
		wx_err(wx,
		       "VF %d requested a MACVLAN filter but there is no space\n",
		       vf);
	if (err < 0)
		return err;

	return 0;
}

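/*
 * API negotiation sketch (VF side shown for illustration): right after
 * reset a VF sends
 *
 *   u32 msgbuf[2] = { WX_VF_API_NEGOTIATE, wx_mbox_api_13 };
 *
 * and the handler below records vf_api on a match; any other version is
 * rejected with -EINVAL and NACKed.
 */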
static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case wx_mbox_api_13:
		wx->vfinfo[vf].vf_api = api;
		return 0;
	default:
		wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
		return -EINVAL;
	}
}

static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
{
	msgbuf[1] = wx->vfinfo[vf].link_enable;

	return 0;
}

static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
{
	unsigned long fw_version = 0ULL;
	int ret = 0;

	ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
	if (ret)
		return -EOPNOTSUPP;
	msgbuf[1] = fw_version;

	return 0;
}

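/*
 * Each xcast mode handled below is a strict superset of the previous
 * one:
 *   NONE     - BAM/ROMPE/MPE/UPE/VPE all cleared
 *   MULTI    - BAM + ROMPE (filtered multicast)
 *   ALLMULTI - adds MPE (all multicast)
 *   PROMISC  - adds UPE + VPE (unicast/VLAN promiscuous)
 */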
static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int xcast_mode = msgbuf[1];
	u32 vmolr, disable, enable;

	if (wx->vfinfo[vf].xcast_mode == xcast_mode)
		return 0;

	switch (xcast_mode) {
	case WXVF_XCAST_MODE_NONE:
		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = 0;
		break;
	case WXVF_XCAST_MODE_MULTI:
		disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
		break;
	case WXVF_XCAST_MODE_ALLMULTI:
		disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE;
		break;
	case WXVF_XCAST_MODE_PROMISC:
		disable = 0;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			 WX_PSR_VM_L2CTL_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);

	wx->vfinfo[vf].xcast_mode = xcast_mode;
	msgbuf[1] = xcast_mode;

	return 0;
}

static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
{
	u16 mbx_size = WX_VXMAILBOX_SIZE;
	u32 msgbuf[WX_VXMAILBOX_SIZE];
	int retval;

	retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
	if (retval) {
		wx_err(wx, "Error receiving message from VF\n");
		return;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
		return;

	if (msgbuf[0] == WX_VF_RESET) {
		wx_vf_reset_msg(wx, vf);
		return;
	}

	/* until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!wx->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
		return;
	}

	switch ((msgbuf[0] & U16_MAX)) {
	case WX_VF_SET_MAC_ADDR:
		retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
		break;
	case WX_VF_SET_MULTICAST:
		wx_set_vf_multicasts(wx, msgbuf, vf);
		retval = 0;
		break;
	case WX_VF_SET_VLAN:
		retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_SET_LPE:
		wx_set_vf_lpe(wx, msgbuf[1], vf);
		retval = 0;
		break;
	case WX_VF_SET_MACVLAN:
		retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_API_NEGOTIATE:
		retval = wx_negotiate_vf_api(wx, msgbuf, vf);
		break;
	case WX_VF_GET_QUEUES:
		retval = wx_get_vf_queues(wx, msgbuf, vf);
		break;
	case WX_VF_GET_LINK_STATE:
		retval = wx_get_vf_link_state(wx, msgbuf, vf);
		break;
	case WX_VF_GET_FW_VERSION:
		retval = wx_get_fw_version(wx, msgbuf, vf);
		break;
	case WX_VF_UPDATE_XCAST_MODE:
		retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
		break;
	case WX_VF_BACKUP:
		break;
	default:
		wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;

	msgbuf[0] |= WX_VT_MSGTYPE_CTS;

	wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
}

static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
{
	u32 msg = WX_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!wx->vfinfo[vf].clear_to_send)
		wx_write_mbx_pf(wx, &msg, 1, vf);
}

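/*
 * wx_msg_task() polls the mailbox state of every VF and dispatches
 * pending reset requests, messages and acks to the handlers above. It
 * is expected to run from the PF's mailbox interrupt or service path
 * whenever a mailbox event is signalled (the exact call site is up to
 * the consuming driver).
 */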
void wx_msg_task(struct wx *wx)
{
	u16 vf;

	for (vf = 0; vf < wx->num_vfs; vf++) {
		/* process any reset requests */
		if (!wx_check_for_rst_pf(wx, vf))
			wx_vf_reset_event(wx, vf);

		/* process any messages pending */
		if (!wx_check_for_msg_pf(wx, vf))
			wx_rcv_msg_from_vf(wx, vf);

		/* process any acks */
		if (!wx_check_for_ack_pf(wx, vf))
			wx_rcv_ack_from_vf(wx, vf);
	}
}
EXPORT_SYMBOL(wx_msg_task);

void wx_disable_vf_rx_tx(struct wx *wx)
{
	wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
	wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
		wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
	}
}
EXPORT_SYMBOL(wx_disable_vf_rx_tx);

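/*
 * In wx_ping_all_vfs_with_link_status() below, msgbuf[1] packs the link
 * speed into bits 31:1 with bit 0 as the link-up flag; e.g. link up at
 * wx->speed == SPEED_10000 (assuming ethtool speed values here) encodes
 * as (10000 << 1) | 1. WX_PF_NOFITY_VF_NET_NOT_RUNNING additionally
 * tells the VF that the PF netdev is down.
 */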
void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
{
	u32 msgbuf[2] = {0, 0};
	u16 i;

	if (!wx->num_vfs)
		return;
	msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
	if (link_up)
		msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
	if (wx->notify_down)
		msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
	for (i = 0; i < wx->num_vfs; i++) {
		if (wx->vfinfo[i].clear_to_send)
			msgbuf[0] |= WX_VT_MSGTYPE_CTS;
		wx_write_mbx_pf(wx, msgbuf, 2, i);
	}
}
EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);

static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
{
	wx->vfinfo[vf].link_state = state;
	switch (state) {
	case IFLA_VF_LINK_STATE_AUTO:
		if (netif_running(wx->netdev))
			wx->vfinfo[vf].link_enable = true;
		else
			wx->vfinfo[vf].link_enable = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		wx->vfinfo[vf].link_enable = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		wx->vfinfo[vf].link_enable = false;
		break;
	}
	/* restart the VF */
	wx->vfinfo[vf].clear_to_send = false;
	wx_ping_vf(wx, vf);

	wx_set_vf_rx_tx(wx, vf);
}

void wx_set_all_vfs(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_vfs; i++)
		wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
}
EXPORT_SYMBOL(wx_set_all_vfs);
914