// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_hw.h"
#include "wx_mbx.h"
#include "wx_sriov.h"

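/* Per-VF (de)configuration hook invoked while VFs are being brought up.
 * On enable, the stored MAC is cleared so the VF starts without a stale
 * address; a fresh one is assigned later through the mailbox (see
 * wx_set_vf_mac_addr() below).
 */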
static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
{
	bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
	struct wx *wx = pci_get_drvdata(pdev);
	u32 vfn = WX_VF_NUM_GET(event_mask);

	if (enable)
		eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
}

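/* Carve up the leftover RAR table entries for VF macvlan filters: whatever
 * remains after reserving WX_MAX_PF_MACVLANS entries for the PF, one more
 * entry (presumably the PF's own station address) and one per VF goes onto
 * the free list built here.
 */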
static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
{
	struct vf_macvlans *mv_list;
	int num_vf_macvlans, i;

	/* Initialize list of VF macvlans */
	INIT_LIST_HEAD(&wx->vf_mvs.mvlist);

	num_vf_macvlans = wx->mac.num_rar_entries -
			  (WX_MAX_PF_MACVLANS + 1 + num_vfs);
	if (num_vf_macvlans <= 0)
		return -EINVAL;

	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
			  GFP_KERNEL);
	if (!mv_list)
		return -ENOMEM;

	for (i = 0; i < num_vf_macvlans; i++) {
		mv_list[i].vf = -1;
		mv_list[i].free = true;
		list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
	}
	wx->mv_list = mv_list;

	return 0;
}

static void wx_sriov_clear_data(struct wx *wx)
{
	/* set num VFs to 0 to prevent access to vfinfo */
	wx->num_vfs = 0;

	/* free VF control structures */
	kfree(wx->vfinfo);
	wx->vfinfo = NULL;

	/* free macvlan list */
	kfree(wx->mv_list);
	wx->mv_list = NULL;

	/* set default pool back to 0 */
	wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
	wx->ring_feature[RING_F_VMDQ].offset = 0;

	clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	/* Disable the VMDq flag so the device drops back to normal (non-VMDq) mode */
	if (wx->ring_feature[RING_F_VMDQ].limit == 1)
		clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
}

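/* Core SR-IOV bring-up: allocate per-VF state, default every VF to
 * spoof-checked, untrusted and link-enabled, switch the PSR into VEB
 * switching mode, and size the virtualization pools. The NUM_VT field
 * selects how many pools the hardware carves out; parts without the
 * MULTI_64_FUNC capability only support the 8-pool layout, others use
 * 32 or 64 pools depending on the VF count.
 */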
static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
{
	int i, ret = 0;
	u32 value = 0;

	set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
	dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);

	/* Enable VMDq flag so device will be set in VM mode */
	set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
	if (!wx->ring_feature[RING_F_VMDQ].limit)
		wx->ring_feature[RING_F_VMDQ].limit = 1;
	wx->ring_feature[RING_F_VMDQ].offset = num_vfs;

	wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
			     GFP_KERNEL);
	if (!wx->vfinfo)
		return -ENOMEM;

	ret = wx_alloc_vf_macvlans(wx, num_vfs);
	if (ret)
		return ret;

	/* Initialize default switching mode VEB */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);

	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		wx->vfinfo[i].spoofchk_enabled = true;
		wx->vfinfo[i].link_enable = true;
		/* untrust all VFs */
		wx->vfinfo[i].trusted = false;
		/* set the default xcast mode */
		wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
	}

	if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		value = WX_CFG_PORT_CTL_NUM_VT_8;
	} else {
		if (num_vfs < 32)
			value = WX_CFG_PORT_CTL_NUM_VT_32;
		else
			value = WX_CFG_PORT_CTL_NUM_VT_64;
	}
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_NUM_VT_MASK,
	      value);

	return ret;
}

static void wx_sriov_reinit(struct wx *wx)
{
	rtnl_lock();
	wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
	rtnl_unlock();
}

void wx_disable_sriov(struct wx *wx)
{
	if (!pci_vfs_assigned(wx->pdev))
		pci_disable_sriov(wx->pdev);
	else
		wx_err(wx, "Unloading driver while VFs are assigned.\n");

	/* clear flags and free allocated data */
	wx_sriov_clear_data(wx);
}
EXPORT_SYMBOL(wx_disable_sriov);

static int wx_pci_sriov_enable(struct pci_dev *dev,
			       int num_vfs)
{
	struct wx *wx = pci_get_drvdata(dev);
	int err = 0, i;

	err = __wx_enable_sriov(wx, num_vfs);
	if (err)
		goto err_out;

	wx->num_vfs = num_vfs;
	for (i = 0; i < wx->num_vfs; i++)
		wx_vf_configuration(dev, (i | WX_VF_ENABLE));

	/* reset before enabling SR-IOV to avoid mailbox issues */
	wx_sriov_reinit(wx);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
		goto err_out;
	}

	return num_vfs;
err_out:
	wx_sriov_clear_data(wx);
	return err;
}

static void wx_pci_sriov_disable(struct pci_dev *dev)
{
	struct wx *wx = pci_get_drvdata(dev);

	wx_disable_sriov(wx);
	wx_sriov_reinit(wx);
}

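/* .sriov_configure hook wired into the PCI driver, normally reached from
 * user space via sysfs, e.g. (illustrative device address):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Writing 0 tears the VFs down, which is refused with -EBUSY while any
 * VF is still assigned to a guest.
 */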
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct wx *wx = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			wx_pci_sriov_disable(pdev);
			return 0;
		}

		wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = wx_pci_sriov_enable(pdev, num_vfs);
	if (err < 0)
		return err;

	return num_vfs;
}
EXPORT_SYMBOL(wx_pci_sriov_configure);

static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
{
	u8 hw_addr[ETH_ALEN];
	int ret = 0;

	ether_addr_copy(hw_addr, mac_addr);
	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	ret = wx_add_mac_filter(wx, hw_addr, vf);
	if (ret >= 0)
		ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
	else
		eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);

	return ret;
}

static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
{
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));

	vmolr |= WX_PSR_VM_L2CTL_BAM;
	if (aupe)
		vmolr |= WX_PSR_VM_L2CTL_AUPE;
	else
		vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
{
	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
		    WX_TDM_VLAN_INS_VLANA_DEFAULT;

	wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
}

static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
{
	if (!vid && !add)
		return 0;

	return wx_set_vfta(wx, vid, vf, (bool)add);
}

static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 pfvfspoof;

	pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
	if (enable)
		pfvfspoof |= BIT(vf_bit);
	else
		pfvfspoof &= ~BIT(vf_bit);
	wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
}

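/* Set or clear the per-queue drop enable (QDE) bits for one VF's pool.
 * Each WX_RDM_PF_QDE register packs 32 queue bits, so the VF's queues sit
 * in register n = vf * q_per_pool / 32 starting at bit
 * vf * q_per_pool - n * 32; with 4 queues per pool, for example, VF 9
 * maps to register 1, bits 4-7.
 */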
static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u32 reg = 0, n = vf * q_per_pool / 32;
	u32 i = vf * q_per_pool;

	reg = rd32(wx, WX_RDM_PF_QDE(n));
	for (i = (vf * q_per_pool - n * 32);
	     i < ((vf + 1) * q_per_pool - n * 32);
	     i++) {
		if (qde == 1)
			reg |= BIT(i);
		else
			reg &= ~BIT(i);
	}

	wr32(wx, WX_RDM_PF_QDE(n), reg);
}

static void wx_clear_vmvir(struct wx *wx, u32 vf)
{
	wr32(wx, WX_TDM_VLAN_INS(vf), 0);
}

static void wx_ping_vf(struct wx *wx, int vf)
{
	u32 ping = WX_PF_CONTROL_MSG;

	if (wx->vfinfo[vf].clear_to_send)
		ping |= WX_VT_MSGTYPE_CTS;
	wx_write_mbx_pf(wx, &ping, 1, vf);
}

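/* Apply the VF's link_enable state to the datapath. Note the asymmetry:
 * enabling sets bits in WX_TDM_VF_TE/WX_RDM_VF_RE directly, while
 * disabling goes through the dedicated WX_TDM_VFTE_CLR/WX_RDM_VFRE_CLR
 * clear registers.
 */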
static void wx_set_vf_rx_tx(struct wx *wx, int vf)
{
	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;

	reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
	reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));

	if (wx->vfinfo[vf].link_enable) {
		reg_req_tx = reg_cur_tx | BIT(vf_bit);
		reg_req_rx = reg_cur_rx | BIT(vf_bit);
		/* Enable particular VF */
		if (reg_cur_tx != reg_req_tx)
			wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
		if (reg_cur_rx != reg_req_rx)
			wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
	} else {
		reg_req_tx = BIT(vf_bit);
		reg_req_rx = BIT(vf_bit);
		/* Disable particular VF */
		if (reg_cur_tx & reg_req_tx)
			wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
		if (reg_cur_rx & reg_req_rx)
			wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
	}
}

static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
	unsigned int default_tc = 0;

	msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

	if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
		msgbuf[WX_VF_TRANS_VLAN] = 1;
	else
		msgbuf[WX_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[WX_VF_DEF_QUEUE] = default_tc;

	return 0;
}

static void wx_vf_reset_event(struct wx *wx, u16 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u8 num_tcs = netdev_get_num_tc(wx->netdev);

	/* add PF assigned VLAN */
	wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		wx_clear_vmvir(wx, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     vfinfo->pf_qos, vf);
		else
			wx_set_vmvir(wx, vfinfo->pf_vlan,
				     wx->default_up, vf);
	}

	/* reset multicast table array for vf */
	wx->vfinfo[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	wx_set_rx_mode(wx->netdev);

	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
	/* reset VF api back to unknown */
	wx->vfinfo[vf].vf_api = wx_mbox_api_null;
}

static void wx_vf_reset_msg(struct wx *wx, u16 vf)
{
	const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
	struct net_device *dev = wx->netdev;
	u32 msgbuf[5] = {0, 0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 reg = 0, index, vf_bit;
	int pf_max_frame;

	/* reset the filters for the device */
	wx_vf_reset_event(wx, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		wx_set_vf_mac(wx, vf, vf_mac);

	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	/* force drop enable for all VF Rx queues */
	wx_write_qde(wx, vf, 1);

	/* set transmit and receive for vf */
	wx_set_vf_rx_tx(wx, vf);

	pf_max_frame = dev->mtu + ETH_HLEN;

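	/* With a jumbo PF MTU, clear this VF's Rx enable again so traffic
	 * stays off until the VF negotiates a matching frame size via
	 * WX_VF_SET_LPE; with a standard MTU, reg stays zero and nothing is
	 * cleared. (Intent inferred from similar PF drivers.)
	 */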
	if (pf_max_frame > ETH_FRAME_LEN)
		reg = BIT(vf_bit);
	wr32(wx, WX_RDM_VFRE_CLR(index), reg);

	/* enable VF mailbox for further messages */
	wx->vfinfo[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = WX_VF_RESET;
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_err(wx, "VF %d has no MAC address assigned\n", vf);
	}

	msgbuf[3] = wx->mac.mc_filter_type;
	wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
}

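/* Handle WX_VF_SET_MAC_ADDR: the requested station address arrives in
 * msgbuf[1..2], right behind the opcode word. The request is refused when
 * the PF administrator has already pinned a different MAC on this VF.
 */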
static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
{
	const u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int ret;

	if (!is_valid_ether_addr(new_mac)) {
		wx_err(wx, "VF %d attempted to set an invalid MAC address\n", vf);
		return -EINVAL;
	}

	if (wx->vfinfo[vf].pf_set_mac &&
	    memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
		wx_err(wx,
		       "VF %d attempted to set a MAC address, but it already has one\n",
		       vf);
		return -EBUSY;
	}

	ret = wx_set_vf_mac(wx, vf, new_mac);
	if (ret < 0)
		return ret;

	return 0;
}

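/* Handle WX_VF_SET_MULTICAST: the entry count rides in the WX_VT_MSGINFO
 * field of msgbuf[0] and up to WX_MAX_VF_MC_ENTRIES 16-bit hash values
 * follow in msgbuf[1..]. Each hash is mirrored into the MTA shadow before
 * the hardware table is written, so later table rebuilds can preserve the
 * VF entries.
 */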
static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
	u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
		      >> WX_VT_MSGINFO_SHIFT;
	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	u32 vector_bit, vector_reg, mta_reg, i;
	u16 *hash_list = (u16 *)&msgbuf[1];

	/* only so many hash values supported */
	entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
	vfinfo->num_vf_mc_hashes = entries;

	for (i = 0; i < entries; i++)
		vfinfo->vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
		vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
		mta_reg = wx->mac.mta_shadow[vector_reg];
		mta_reg |= BIT(vector_bit);
		wx->mac.mta_shadow[vector_reg] = mta_reg;
		wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
	}
	vmolr |= WX_PSR_VM_L2CTL_ROMPE;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
}

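/* Handle WX_VF_SET_LPE: re-enable the VF's Rx path and, if the requested
 * max_frame (rounded up to 1 KiB units) exceeds what the current watchdog
 * timeout accommodates, grow WX_MAC_WDG_TIMEOUT to match.
 */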
static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
{
	u32 index, vf_bit, vfre;
	u32 max_frs, reg_val;

	/* determine VF receive enable location */
	index = WX_VF_REG_OFFSET(vf);
	vf_bit = WX_VF_IND_SHIFT(vf);

	vfre = rd32(wx, WX_RDM_VF_RE(index));
	vfre |= BIT(vf_bit);
	wr32(wx, WX_RDM_VF_RE(index), vfre);


	max_frs = DIV_ROUND_UP(max_frame, 1024);
	/* pull the current watchdog timeout from hardware */
	reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
	if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
		wr32(wx, WX_MAC_WDG_TIMEOUT,
		     max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
}

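/* Look up a VLAN ID in the VLVF pool filters. The table is accessed
 * indirectly: write the entry index to WX_PSR_VLAN_SWC_IDX, then read the
 * entry back through WX_PSR_VLAN_SWC. Entry 0 is reserved for VLAN 0.
 */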
static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
{
	int regindex;
	u32 vlvf;

	/* short-circuit the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries */
	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	/* Return a negative value if not found */
	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
		regindex = -EINVAL;

	return regindex;
}

static int wx_set_vf_macvlan(struct wx *wx,
			     u16 vf, int index, unsigned char *mac_addr)
{
	struct vf_macvlans *entry;
	struct list_head *pos;
	int retval = 0;

	if (index <= 1) {
		list_for_each(pos, &wx->vf_mvs.mvlist) {
			entry = list_entry(pos, struct vf_macvlans, mvlist);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				entry->is_macvlan = false;
				wx_del_mac_filter(wx, entry->vf_macvlan, vf);
			}
		}
	}

	if (!index)
		return 0;

	entry = NULL;
	list_for_each(pos, &wx->vf_mvs.mvlist) {
		entry = list_entry(pos, struct vf_macvlans, mvlist);
		if (entry->free)
			break;
	}

	if (!entry || !entry->free)
		return -ENOSPC;

	retval = wx_add_mac_filter(wx, mac_addr, vf);
	if (retval >= 0) {
		entry->free = false;
		entry->is_macvlan = true;
		entry->vf = vf;
		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
	}

	return retval;
}

static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
	int ret;

	if (add)
		wx->vfinfo[vf].vlan_count++;
	else if (wx->vfinfo[vf].vlan_count)
		wx->vfinfo[vf].vlan_count--;

	/* in case of promiscuous mode any VLAN filter set for a VF must
	 * also have the PF pool added to it.
	 */
	if (add && wx->netdev->flags & IFF_PROMISC)
		wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));

	ret = wx_set_vf_vlan(wx, add, vid, vf);
	if (!ret && wx->vfinfo[vf].spoofchk_enabled)
		wx_set_vlan_anti_spoofing(wx, true, vf);

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && wx->netdev->flags & IFF_PROMISC) {
		u32 bits = 0, vlvf;
		int reg_ndx;

		reg_ndx = wx_find_vlvf_entry(wx, vid);
		if (reg_ndx < 0)
			return -ENOSPC;
		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		if (VMDQ_P(0) < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~BIT(VMDQ_P(0));
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~BIT(VMDQ_P(0) % 32);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
	}

	return 0;
}

static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
{
	int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
		    WX_VT_MSGINFO_SHIFT;
	u8 *new_mac = ((u8 *)(&msgbuf[1]));
	int err;

	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
		wx_err(wx, "VF %d requested a MACVLAN filter but is denied\n", vf);
		return -EINVAL;
	}

	/* A non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			wx_err(wx, "VF %d attempted to set an invalid MAC address\n", vf);
			return -EINVAL;
		}
		/* If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (wx->vfinfo[vf].spoofchk_enabled)
			wx_set_vf_spoofchk(wx->netdev, vf, false);
	}

	err = wx_set_vf_macvlan(wx, vf, index, new_mac);
	if (err == -ENOSPC)
		wx_err(wx,
		       "VF %d requested a MACVLAN filter but there is no space\n",
		       vf);
	if (err < 0)
		return err;

	return 0;
}

static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case wx_mbox_api_13:
		wx->vfinfo[vf].vf_api = api;
		return 0;
	default:
		wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
		return -EINVAL;
	}
}

static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
{
	msgbuf[1] = wx->vfinfo[vf].link_enable;

	return 0;
}

static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
{
	unsigned long fw_version = 0ULL;
	int ret = 0;

	ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
	if (ret)
		return -EOPNOTSUPP;
	msgbuf[1] = fw_version;

	return 0;
}

static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
{
	int xcast_mode = msgbuf[1];
	u32 vmolr, disable, enable;

	if (wx->vfinfo[vf].xcast_mode == xcast_mode)
		return 0;

	switch (xcast_mode) {
	case WXVF_XCAST_MODE_NONE:
		disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			  WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = 0;
		break;
	case WXVF_XCAST_MODE_MULTI:
		disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			  WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
		break;
	case WXVF_XCAST_MODE_ALLMULTI:
		disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE;
		break;
	case WXVF_XCAST_MODE_PROMISC:
		disable = 0;
		enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
			 WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
			 WX_PSR_VM_L2CTL_VPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);

	wx->vfinfo[vf].xcast_mode = xcast_mode;
	msgbuf[1] = xcast_mode;

	return 0;
}

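/* Dispatch one mailbox message from a VF. The low 16 bits of msgbuf[0]
 * carry the opcode; WX_VT_MSGTYPE_ACK/NACK mark replies the PF itself
 * generated and are skipped here. Every handled request is answered with
 * the same buffer, stamped ACK or NACK plus CTS.
 */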
static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
{
	u16 mbx_size = WX_VXMAILBOX_SIZE;
	u32 msgbuf[WX_VXMAILBOX_SIZE];
	int retval;

	retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
	if (retval) {
		wx_err(wx, "Error receiving message from VF\n");
		return;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
		return;

	if (msgbuf[0] == WX_VF_RESET) {
		wx_vf_reset_msg(wx, vf);
		return;
	}

	/* until the VF completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!wx->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
		wx_write_mbx_pf(wx, msgbuf, 1, vf);
		return;
	}

	switch (msgbuf[0] & U16_MAX) {
	case WX_VF_SET_MAC_ADDR:
		retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
		break;
	case WX_VF_SET_MULTICAST:
		wx_set_vf_multicasts(wx, msgbuf, vf);
		retval = 0;
		break;
	case WX_VF_SET_VLAN:
		retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_SET_LPE:
		wx_set_vf_lpe(wx, msgbuf[1], vf);
		retval = 0;
		break;
	case WX_VF_SET_MACVLAN:
		retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
		break;
	case WX_VF_API_NEGOTIATE:
		retval = wx_negotiate_vf_api(wx, msgbuf, vf);
		break;
	case WX_VF_GET_QUEUES:
		retval = wx_get_vf_queues(wx, msgbuf, vf);
		break;
	case WX_VF_GET_LINK_STATE:
		retval = wx_get_vf_link_state(wx, msgbuf, vf);
		break;
	case WX_VF_GET_FW_VERSION:
		retval = wx_get_fw_version(wx, msgbuf, vf);
		break;
	case WX_VF_UPDATE_XCAST_MODE:
		retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
		break;
	case WX_VF_BACKUP:
		break;
	default:
		wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= WX_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= WX_VT_MSGTYPE_ACK;

	msgbuf[0] |= WX_VT_MSGTYPE_CTS;

	wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
}

static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
{
	u32 msg = WX_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!wx->vfinfo[vf].clear_to_send)
		wx_write_mbx_pf(wx, &msg, 1, vf);
}

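/* Poll every VF mailbox for reset requests, pending messages and acks.
 * Expected to be called periodically from the driver's service task
 * (the callers live outside libwx).
 */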
void wx_msg_task(struct wx *wx)
{
	u16 vf;

	for (vf = 0; vf < wx->num_vfs; vf++) {
		/* process any reset requests */
		if (!wx_check_for_rst_pf(wx, vf))
			wx_vf_reset_event(wx, vf);

		/* process any messages pending */
		if (!wx_check_for_msg_pf(wx, vf))
			wx_rcv_msg_from_vf(wx, vf);

		/* process any acks */
		if (!wx_check_for_ack_pf(wx, vf))
			wx_rcv_ack_from_vf(wx, vf);
	}
}
EXPORT_SYMBOL(wx_msg_task);

void wx_disable_vf_rx_tx(struct wx *wx)
{
	wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
	wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
		wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
		wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
	}
}
EXPORT_SYMBOL(wx_disable_vf_rx_tx);

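/* Broadcast a link status notification to all VFs: bit 0 of msgbuf[1]
 * carries link_up and bits 31:1 the PF link speed, with
 * WX_PF_NOFITY_VF_NET_NOT_RUNNING flagging that the PF netdev is not
 * running.
 */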
void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
{
	u32 msgbuf[2] = {0, 0};
	u16 i;

	if (!wx->num_vfs)
		return;
	msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
	if (link_up)
		msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
	if (wx->notify_down)
		msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
	for (i = 0; i < wx->num_vfs; i++) {
		if (wx->vfinfo[i].clear_to_send)
			msgbuf[0] |= WX_VT_MSGTYPE_CTS;
		wx_write_mbx_pf(wx, msgbuf, 2, i);
	}
}
EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);

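/* Apply an IFLA_VF_LINK_STATE policy: AUTO tracks the PF netdev's running
 * state, while ENABLE/DISABLE force the VF link on or off. clear_to_send
 * is dropped and the VF is pinged so it re-runs its reset handshake
 * before talking to the PF again.
 */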
static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
{
	wx->vfinfo[vf].link_state = state;
	switch (state) {
	case IFLA_VF_LINK_STATE_AUTO:
		if (netif_running(wx->netdev))
			wx->vfinfo[vf].link_enable = true;
		else
			wx->vfinfo[vf].link_enable = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		wx->vfinfo[vf].link_enable = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		wx->vfinfo[vf].link_enable = false;
		break;
	}
	/* restart the VF */
	wx->vfinfo[vf].clear_to_send = false;
	wx_ping_vf(wx, vf);

	wx_set_vf_rx_tx(wx, vf);
}

void wx_set_all_vfs(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_vfs; i++)
		wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
}
EXPORT_SYMBOL(wx_set_all_vfs);
910