xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_sriov.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_mbx.h"
#include "wx_sriov.h"

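/* Per-VF hook run while SR-IOV is being brought up: on a VF enable
 * event, clear the stored VF MAC address so the VF starts out
 * unassigned until the PF or the VF itself programs one.
 */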
static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
{
	bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
	struct wx *wx = pci_get_drvdata(pdev);
	u32 vfn = WX_VF_NUM_GET(event_mask);

	if (enable)
		eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
}

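/* Carve the RAR entries left over after the PF MACVLAN filters and one
 * MAC filter per VF into a free list of MACVLAN slots that VFs can
 * claim later via mailbox requests.
 */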
static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
{
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	/* Initialize list of VF macvlans */
	INIT_LIST_HEAD(&wx->vf_mvs.mvlist);

	num_vf_macvlans = wx->mac.num_rar_entries -
			  (WX_MAX_PF_MACVLANS + 1 + num_vfs);
	if (!num_vf_macvlans)
		return -EINVAL;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (!mv_list)
		return -ENOMEM;

	for (i = 0; i < num_vf_macvlans; i++) {
		mv_list[i].vf = -1;
		mv_list[i].free = true;
		list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
	}
	wx->mv_list = mv_list;

	return 0;
}

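/* Tear down all PF-side SR-IOV state: drop vfinfo and the MACVLAN free
 * list, restore the default pool and clear the SR-IOV related flags.
 */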
static void wx_sriov_clear_data(struct wx *wx)
{
	/* set num VFs to 0 to prevent access to vfinfo */
	wx->num_vfs = 0;

	/* free VF control structures */
	kfree(wx->vfinfo);
	wx->vfinfo = NULL;

	/* free macvlan list */
	kfree(wx->mv_list);
	wx->mv_list = NULL;

	/* set default pool back to 0 */
	wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
	wx->ring_feature[RING_F_VMDQ].offset = 0;

	clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
	clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	/* Disable the VMDq flag so the device returns to normal (non-VMDq) mode */
	if (wx->ring_feature[RING_F_VMDQ].limit == 1)
		clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
}

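/* Allocate per-VF state and switch the hardware into SR-IOV operation:
 * VEB switching on, per-VF defaults applied, and the number of virtual
 * function pools selected in the port control register.
 */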
static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
{
	int i, ret = 0;
	u32 value = 0;

	set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);

	if (num_vfs == 7 && wx->mac.type == wx_mac_em)
		set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);

	/* Enable VMDq flag so device will be set in VM mode */
	set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
	if (!wx->ring_feature[RING_F_VMDQ].limit)
		wx->ring_feature[RING_F_VMDQ].limit = 1;
	wx->ring_feature[RING_F_VMDQ].offset = num_vfs;

	wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
			     GFP_KERNEL);
	if (!wx->vfinfo)
		return -ENOMEM;

	ret = wx_alloc_vf_macvlans(wx, num_vfs);
	if (ret)
		return ret;

	/* Initialize default switching mode VEB */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		wx->vfinfo[i].spoofchk_enabled = true;
		wx->vfinfo[i].link_enable = true;
		/* untrust all VFs */
		wx->vfinfo[i].trusted = false;
		/* set the default xcast mode */
		wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
	}

	if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		value = WX_CFG_PORT_CTL_NUM_VT_8;
	} else {
		if (num_vfs < 32)
			value = WX_CFG_PORT_CTL_NUM_VT_32;
		else
			value = WX_CFG_PORT_CTL_NUM_VT_64;
	}
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_NUM_VT_MASK,
	      value);

	return ret;
}

static void wx_sriov_reinit(struct wx *wx)
{
	rtnl_lock();
	wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
	rtnl_unlock();
}

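/* Disable SR-IOV at the PCI level, unless VFs are still assigned to
 * guests, and release all PF-side SR-IOV state either way.
 */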
void wx_disable_sriov(struct wx *wx)
{
	if (!pci_vfs_assigned(wx->pdev))
		pci_disable_sriov(wx->pdev);
	else
		wx_err(wx, "Unloading driver while VFs are assigned.\n");

	/* clear flags and free allocated data */
	wx_sriov_clear_data(wx);
}
EXPORT_SYMBOL(wx_disable_sriov);

static int wx_pci_sriov_enable(struct pci_dev *dev,
			       int num_vfs)
{
	struct wx *wx = pci_get_drvdata(dev);
	int err = 0, i;

	if (netif_is_rxfh_configured(wx->netdev)) {
		wx_err(wx, "Cannot enable SR-IOV while RXFH is configured\n");
		wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
		return -EBUSY;
	}

	err = __wx_enable_sriov(wx, num_vfs);
	if (err)
		return err;

	wx->num_vfs = num_vfs;
	for (i = 0; i < wx->num_vfs; i++)
		wx_vf_configuration(dev, (i | WX_VF_ENABLE));

	/* reset before enabling SRIOV to avoid mailbox issues */
	wx_sriov_reinit(wx);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
		goto err_out;
	}

	return num_vfs;
err_out:
	wx_sriov_clear_data(wx);
	return err;
}

static int wx_pci_sriov_disable(struct pci_dev *dev)
{
	struct wx *wx = pci_get_drvdata(dev);

	if (netif_is_rxfh_configured(wx->netdev)) {
		wx_err(wx, "Cannot disable SR-IOV while RXFH is configured\n");
		wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
		return -EBUSY;
	}

	wx_disable_sriov(wx);
	wx_sriov_reinit(wx);

	return 0;
}

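/* PCI .sriov_configure() entry point, normally reached through the
 * standard sysfs knob (the interface name below is illustrative):
 *
 *   echo 4 > /sys/class/net/eth0/device/sriov_numvfs    create 4 VFs
 *   echo 0 > /sys/class/net/eth0/device/sriov_numvfs    destroy them
 *
 * Returns the number of VFs enabled, 0 on disable, or a negative errno.
 */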
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct wx *wx = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev))
			return wx_pci_sriov_disable(pdev);

		wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = wx_pci_sriov_enable(pdev, num_vfs);
	if (err)
		return err;

	return num_vfs;
}
EXPORT_SYMBOL(wx_pci_sriov_configure);

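/* Replace the MAC filter for @vf: drop the old entry, add the new one,
 * and keep vfinfo in sync (zeroed again if the add fails).
 */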
static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
{
	u8 hw_addr[ETH_ALEN];
	int ret = 0;

	ether_addr_copy(hw_addr, mac_addr);
	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	ret = wx_add_mac_filter(wx, hw_addr, vf);
	if (ret >= 0)
		ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
	else
		eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);

	return ret;
}

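/* Program the per-VF L2 control register: broadcast accept (BAM) is
 * always set, while AUPE, which by its name gates untagged packets,
 * follows @aupe.
 */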
static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
{
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));

	vmolr |= WX_PSR_VM_L2CTL_BAM;
	if (aupe)
		vmolr |= WX_PSR_VM_L2CTL_AUPE;
	else
		vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
{
	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
		    WX_TDM_VLAN_INS_VLANA_DEFAULT;

	wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
}

static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
{
	if (!vid && !add)
		return 0;

	return wx_set_vfta(wx, vid, vf, (bool)add);
}

static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 pfvfspoof;

	pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
	if (enable)
		pfvfspoof |= BIT(vf_bit);
	else
		pfvfspoof &= ~BIT(vf_bit);
	wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
}

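/* Update the per-queue drop-enable (QDE) bits covering every queue in
 * @vf's pool; the bits live 32 to a register, so locate the register
 * first and then walk the pool's bit range.
 */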
static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u32 reg = 0, n = vf * q_per_pool / 32;
	u32 i = vf * q_per_pool;

	reg = rd32(wx, WX_RDM_PF_QDE(n));
	for (i = (vf * q_per_pool - n * 32);
	     i < ((vf + 1) * q_per_pool - n * 32);
	     i++) {
		if (qde == 1)
			reg |= qde << i;
		else
			reg &= qde << i;
	}

	wr32(wx, WX_RDM_PF_QDE(n), reg);
}

static void wx_clear_vmvir(struct wx *wx, u32 vf)
{
	wr32(wx, WX_TDM_VLAN_INS(vf), 0);
}

static void wx_ping_vf(struct wx *wx, int vf)
{
	u32 ping = WX_PF_CONTROL_MSG;

	if (wx->vfinfo[vf].clear_to_send)
		ping |= WX_VT_MSGTYPE_CTS;
	wx_write_mbx_pf(wx, &ping, 1, vf);
}

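/* Apply @vf's link_enable state to the transmit/receive enable
 * registers: the set registers enable the VF's bit, while the
 * dedicated clear registers disable only that bit.
 */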
static void wx_set_vf_rx_tx(struct wx *wx, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;

	reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
	reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));

	if (wx->vfinfo[vf].link_enable) {
		reg_req_tx = reg_cur_tx | BIT(vf_bit);
		reg_req_rx = reg_cur_rx | BIT(vf_bit);
		/* Enable particular VF */
		if (reg_cur_tx != reg_req_tx)
			wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
		if (reg_cur_rx != reg_req_rx)
			wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
	} else {
		reg_req_tx = BIT(vf_bit);
		reg_req_rx = BIT(vf_bit);
		/* Disable particular VF */
		if (reg_cur_tx & reg_req_tx)
			wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
		if (reg_cur_rx & reg_req_rx)
			wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
	}
}

static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;

	msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
		msgbuf[WX_VF_TRANS_VLAN] = 1;
	else
		msgbuf[WX_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[WX_VF_DEF_QUEUE] = default_tc;

	return 0;
}

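/* Return a VF to its post-reset defaults: reapply the PF-assigned
 * VLAN/QoS, clear multicast state and MAC filters, and mark the
 * mailbox API version as un-negotiated.
 */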
static void wx_vf_reset_event(struct wx *wx, u16 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(wx->netdev);

	/* add PF assigned VLAN */
	wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		wx_clear_vmvir(wx, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     vfinfo->pf_qos, vf);
		else
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     wx->default_up, vf);
	}

	/* reset multicast table array for vf */
	wx->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	wx_set_rx_mode(wx->netdev);

	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	/* reset VF api back to unknown */
	wx->vfinfo[vf].vf_api = wx_mbox_api_null;
}

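/* Handle a VF_RESET mailbox request: reset the VF's filters, re-enable
 * its queues, then reply with ACK plus the permanent MAC address, or
 * NACK if no address is assigned yet.
 */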
static void wx_vf_reset_msg(struct wx *wx, u16 vf)
{
	const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
	struct net_device *dev = wx->netdev;
	u32 msgbuf[5] = {0, 0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 reg = 0, index, vf_bit;
	int pf_max_frame;

	/* reset the filters for the device */
	wx_vf_reset_event(wx, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		wx_set_vf_mac(wx, vf, vf_mac);

	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	/* force drop enable for all VF Rx queues */
	wx_write_qde(wx, vf, 1);

	/* set transmit and receive for vf */
	wx_set_vf_rx_tx(wx, vf);

	pf_max_frame = dev->mtu + ETH_HLEN;

	if (pf_max_frame > ETH_FRAME_LEN)
		reg = BIT(vf_bit);
	wr32(wx, WX_RDM_VFRE_CLR(index), reg);

	/* enable VF mailbox for further messages */
	wx->vfinfo[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = WX_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_err(wx, "VF %d has no MAC address assigned\n", vf);
	}

	msgbuf[3] = wx->mac.mc_filter_type;
	wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
}

static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
{
	const u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int ret;

	if (!is_valid_ether_addr(new_mac)) {
		wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
		return -EINVAL;
	}

	if (wx->vfinfo[vf].pf_set_mac &&
	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
		wx_err(wx,
		       "VF %d attempted to set a MAC address, but it already has one\n",
		       vf);
		return -EBUSY;
	}

	ret = wx_set_vf_mac(wx, vf, new_mac);
	if (ret < 0)
		return ret;

	return 0;
}

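/* Install the multicast hash list a VF sent over the mailbox into the
 * shared multicast table, and set ROMPE so the VF receives packets
 * that hit those table entries.
 */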
static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
		      >> WX_VT_MSGINFO_SHIFT;
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	u32 vector_bit, vector_reg, mta_reg, i;
	u16 *hash_list = (u16 *)&msgbuf[1];

	/* only so many hash values supported */
	entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
	vfinfo->num_vf_mc_hashes = entries;

	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
		vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
		mta_reg = wx->mac.mta_shadow[vector_reg];
		mta_reg |= BIT(vector_bit);
		wx->mac.mta_shadow[vector_reg] = mta_reg;
		wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
	}
	vmolr |= WX_PSR_VM_L2CTL_ROMPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

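/* Handle a VF's max-frame-size (LPE) request: re-enable the VF's Rx
 * path and, if needed, raise the MAC watchdog timeout field, which
 * appears to bound the supported frame size.
 */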
static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
{
	u32 index, vf_bit, vfre;
	u32 max_frs, reg_val;

	/* determine VF receive enable location */
	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	vfre = rd32(wx, WX_RDM_VF_RE(index));
	vfre |= BIT(vf_bit);
	wr32(wx, WX_RDM_VF_RE(index), vfre);

	/* scale the request to KiB and compare it with the current WTO field */
	max_frs = DIV_ROUND_UP(max_frame, 1024);
	reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
	if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
		wr32(wx, WX_MAC_WDG_TIMEOUT,
		     max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
}

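/* Linearly search the VLAN switch control (VLVF) table for @vlan.
 * Returns the matching index, 0 for the special-cased VLAN 0, or
 * -EINVAL when no entry matches.
 */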
static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
{
	int regindex;
	u32 vlvf;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries */
	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	/* Return a negative value if not found */
	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
		regindex = -EINVAL;

	return regindex;
}

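/* Manage a VF's extra MACVLAN filters: an index <= 1 first releases
 * every slot owned by @vf; index 0 stops there, while a non-zero index
 * then claims a free slot and programs @mac_addr into it.
 */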
static int wx_set_vf_macvlan(struct wx *wx,
			     u16 vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	struct list_head *pos;
	int retval = 0;

	if (index <= 1) {
		list_for_each(pos, &wx->vf_mvs.mvlist) {
			entry = list_entry(pos, struct vf_macvlans, mvlist);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				wx_del_mac_filter(wx, entry->vf_macvlan, vf);
			}
		}
	}

	if (!index)
		return 0;

	entry = NULL;
	list_for_each(pos, &wx->vf_mvs.mvlist) {
		entry = list_entry(pos, struct vf_macvlans, mvlist);
		if (entry->free)
			break;
	}

	if (!entry || !entry->free)
		return -ENOSPC;

	retval = wx_add_mac_filter(wx, mac_addr, vf);
	if (retval >= 0) {
		entry->free = false;
		entry->is_macvlan = true;
		entry->vf = vf;
		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
	}

	return retval;
}

static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
	int ret;

	if (add)
		wx->vfinfo[vf].vlan_count++;
	else if (wx->vfinfo[vf].vlan_count)
		wx->vfinfo[vf].vlan_count--;

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && wx->netdev->flags & IFF_PROMISC)
		wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));

	ret = wx_set_vf_vlan(wx, add, vid, vf);
	if (!ret && wx->vfinfo[vf].spoofchk_enabled)
		wx_set_vlan_anti_spoofing(wx, true, vf);

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && wx->netdev->flags & IFF_PROMISC) {
		u32 bits = 0, vlvf;
		int reg_ndx;

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return -ENOSPC;
		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		if (VMDQ_P(0) < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~BIT(VMDQ_P(0));
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~BIT(VMDQ_P(0) % 32);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
	}

	return 0;
}

static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
		    WX_VT_MSGINFO_SHIFT;
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int err;

	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
		wx_err(wx, "VF %d requested a MACVLAN filter but is denied\n", vf);
		return -EINVAL;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
			return -EINVAL;
		}
		/* If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (wx->vfinfo[vf].spoofchk_enabled)
			wx_set_vf_spoofchk(wx->netdev, vf, false);
	}

	err = wx_set_vf_macvlan(wx, vf, index, new_mac);
	if (err == -ENOSPC)
		wx_err(wx,
		       "VF %d requested a MACVLAN filter but there is no space\n",
		       vf);
	if (err < 0)
		return err;

	return 0;
}

static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case wx_mbox_api_13:
		wx->vfinfo[vf].vf_api = api;
		return 0;
	default:
		wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
		return -EINVAL;
	}
}

static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
{
	msgbuf[1] = wx->vfinfo[vf].link_enable;

	return 0;
}

static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
{
	unsigned long fw_version = 0ULL;
	int ret = 0;

	ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
	if (ret)
		return -EOPNOTSUPP;
	msgbuf[1] = fw_version;

	return 0;
}

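/* Switch a VF between the none/multi/allmulti/promisc receive modes by
 * adjusting its L2 control bits, and echo the accepted mode back in
 * the reply.
 */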
static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int xcast_mode = msgbuf[1];
	u32 vmolr, disable, enable;

	if (wx->vfinfo[vf].xcast_mode == xcast_mode)
		return 0;

	switch (xcast_mode) {
	case WXVF_XCAST_MODE_NONE:
		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = 0;
		break;
	case WXVF_XCAST_MODE_MULTI:
		disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
		break;
	case WXVF_XCAST_MODE_ALLMULTI:
		disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE;
		break;
	case WXVF_XCAST_MODE_PROMISC:
		disable = 0;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			 WX_PSR_VM_L2CTL_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);

	wx->vfinfo[vf].xcast_mode = xcast_mode;
	msgbuf[1] = xcast_mode;

	return 0;
}

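/* Read one mailbox message from @vf and dispatch on the opcode held in
 * the low 16 bits of word 0, ACKing or NACKing the result back. Until
 * the VF completes a VF_RESET handshake, every other request is NACKed.
 */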
static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
{
	u16 mbx_size = WX_VXMAILBOX_SIZE;
	u32 msgbuf[WX_VXMAILBOX_SIZE];
	int retval;

	retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
	if (retval) {
		wx_err(wx, "Error receiving message from VF\n");
		return;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
		return;

	if (msgbuf[0] == WX_VF_RESET) {
		wx_vf_reset_msg(wx, vf);
		return;
	}

	/* until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!wx->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
		return;
	}

	switch (msgbuf[0] & U16_MAX) {
	case WX_VF_SET_MAC_ADDR:
		retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
		break;
	case WX_VF_SET_MULTICAST:
		wx_set_vf_multicasts(wx, msgbuf, vf);
		retval = 0;
		break;
	case WX_VF_SET_VLAN:
		retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_SET_LPE:
		wx_set_vf_lpe(wx, msgbuf[1], vf);
		retval = 0;
		break;
	case WX_VF_SET_MACVLAN:
		retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_API_NEGOTIATE:
		retval = wx_negotiate_vf_api(wx, msgbuf, vf);
		break;
	case WX_VF_GET_QUEUES:
		retval = wx_get_vf_queues(wx, msgbuf, vf);
		break;
	case WX_VF_GET_LINK_STATE:
		retval = wx_get_vf_link_state(wx, msgbuf, vf);
		break;
	case WX_VF_GET_FW_VERSION:
		retval = wx_get_fw_version(wx, msgbuf, vf);
		break;
	case WX_VF_UPDATE_XCAST_MODE:
		retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
		break;
	case WX_VF_BACKUP:
		break;
	default:
		wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;

	msgbuf[0] |= WX_VT_MSGTYPE_CTS;

	wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
}

static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
{
	u32 msg = WX_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!wx->vfinfo[vf].clear_to_send)
		wx_write_mbx_pf(wx, &msg, 1, vf);
}

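/* Mailbox poll loop, expected to run periodically on the PF (e.g. from
 * the driver's service task): handle reset requests, pending messages
 * and acks for every VF.
 */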
void wx_msg_task(struct wx *wx)
{
	u16 vf;

	for (vf = 0; vf < wx->num_vfs; vf++) {
		/* process any reset requests */
		if (!wx_check_for_rst_pf(wx, vf))
			wx_vf_reset_event(wx, vf);

		/* process any messages pending */
		if (!wx_check_for_msg_pf(wx, vf))
			wx_rcv_msg_from_vf(wx, vf);

		/* process any acks */
		if (!wx_check_for_ack_pf(wx, vf))
			wx_rcv_ack_from_vf(wx, vf);
	}
}
EXPORT_SYMBOL(wx_msg_task);

void wx_disable_vf_rx_tx(struct wx *wx)
{
	wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
	wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
		wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
	}
}
EXPORT_SYMBOL(wx_disable_vf_rx_tx);

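/* Broadcast a link status message to every VF: word 1 carries link_up
 * in bit 0 and the PF link speed in the upper bits, plus a flag when
 * the PF netdev is not running.
 */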
void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
{
	u32 msgbuf[2] = {0, 0};
	u16 i;

	if (!wx->num_vfs)
		return;
	msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
	if (link_up)
		msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
	if (wx->notify_down)
		msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
	for (i = 0; i < wx->num_vfs; i++) {
		if (wx->vfinfo[i].clear_to_send)
			msgbuf[0] |= WX_VT_MSGTYPE_CTS;
		wx_write_mbx_pf(wx, msgbuf, 2, i);
	}
}
EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);

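/* Apply an ndo_set_vf_link_state style request: auto follows the PF
 * netdev state, enable/disable force it; then restart the mailbox
 * handshake and reprogram the VF's Rx/Tx enables.
 */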
static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
{
	wx->vfinfo[vf].link_state = state;
	switch (state) {
	case IFLA_VF_LINK_STATE_AUTO:
		if (netif_running(wx->netdev))
			wx->vfinfo[vf].link_enable = true;
		else
			wx->vfinfo[vf].link_enable = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		wx->vfinfo[vf].link_enable = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		wx->vfinfo[vf].link_enable = false;
		break;
	}
	/* restart the VF */
	wx->vfinfo[vf].clear_to_send = false;
	wx_ping_vf(wx, vf);

	wx_set_vf_rx_tx(wx, vf);
}

void wx_set_all_vfs(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_vfs; i++)
		wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
}
EXPORT_SYMBOL(wx_set_all_vfs);
926