xref: /linux/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/vmalloc.h>
9 #include <linux/string.h>
10 #include <linux/in.h>
11 #include <linux/ip.h>
12 #include <linux/tcp.h>
13 #include <linux/ipv6.h>
14 #include <linux/if_bridge.h>
15 #ifdef NETIF_F_HW_VLAN_CTAG_TX
16 #include <linux/if_vlan.h>
17 #endif
18 
19 #include "ixgbe.h"
20 #include "ixgbe_type.h"
21 #include "ixgbe_sriov.h"
22 
23 #ifdef CONFIG_PCI_IOV
24 static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
25 					   unsigned int num_vfs)
26 {
27 	struct ixgbe_hw *hw = &adapter->hw;
28 	struct vf_macvlans *mv_list;
29 	int num_vf_macvlans, i;
30 
31 	num_vf_macvlans = hw->mac.num_rar_entries -
32 			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
33 	if (!num_vf_macvlans)
34 		return;
35 
36 	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
37 			  GFP_KERNEL);
38 	if (mv_list) {
39 		/* Initialize list of VF macvlans */
40 		INIT_LIST_HEAD(&adapter->vf_mvs.l);
41 		for (i = 0; i < num_vf_macvlans; i++) {
42 			mv_list[i].vf = -1;
43 			mv_list[i].free = true;
44 			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
45 		}
46 		adapter->mv_list = mv_list;
47 	}
48 }
49 
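/* Shared worker for both the module-parameter and sysfs SR-IOV enable paths.
 * Allocates the per-VF vfinfo array and the VF macvlan list, switches the
 * device to VMDq/VEB operation, scales back DCB traffic classes and disables
 * RSC, and seeds per-VF defaults (spoof check on, link enabled, RSS query
 * off, untrusted).  Returns 0 or a negative errno (-EINVAL with an XDP
 * program attached, -ENOMEM on allocation failure).
 */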
50 static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
51 				unsigned int num_vfs)
52 {
53 	struct ixgbe_hw *hw = &adapter->hw;
54 	int i;
55 
56 	if (adapter->xdp_prog) {
57 		e_warn(probe, "SRIOV is not supported with XDP\n");
58 		return -EINVAL;
59 	}
60 
61 	/* Enable VMDq flag so device will be set in VM mode */
62 	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
63 			  IXGBE_FLAG_VMDQ_ENABLED;
64 
65 	/* Allocate memory for per VF control structures */
66 	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
67 				  GFP_KERNEL);
68 	if (!adapter->vfinfo)
69 		return -ENOMEM;
70 
71 	adapter->num_vfs = num_vfs;
72 
73 	ixgbe_alloc_vf_macvlans(adapter, num_vfs);
74 	adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
75 
76 	/* Initialize default switching mode VEB */
77 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
78 	adapter->bridge_mode = BRIDGE_MODE_VEB;
79 
80 	/* limit traffic classes based on VFs enabled */
81 	if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
82 		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
83 		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
84 	} else if (num_vfs < 32) {
85 		adapter->dcb_cfg.num_tcs.pg_tcs = 4;
86 		adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
87 	} else {
88 		adapter->dcb_cfg.num_tcs.pg_tcs = 1;
89 		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
90 	}
91 
92 	/* Disable RSC when in SR-IOV mode */
93 	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
94 			     IXGBE_FLAG2_RSC_ENABLED);
95 
96 	for (i = 0; i < num_vfs; i++) {
97 		/* enable spoof checking for all VFs */
98 		adapter->vfinfo[i].spoofchk_enabled = true;
99 		adapter->vfinfo[i].link_enable = true;
100 
101 		/* We support VF RSS querying only for 82599 and x540
102 		 * devices at the moment. These devices share RSS
103 		 * devices at the moment. These devices share the RSS
104 		 * indirection table and RSS hash key with the PF, so
105 		 */
106 		adapter->vfinfo[i].rss_query_enabled = false;
107 
108 		/* Untrust all VFs */
109 		adapter->vfinfo[i].trusted = false;
110 
111 		/* set the default xcast mode */
112 		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
113 	}
114 
115 	e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
116 	return 0;
117 }
118 
119 /**
120  * ixgbe_get_vfs - Find and take references to all vf devices
121  * @adapter: Pointer to adapter struct
122  */
123 static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
124 {
125 	struct pci_dev *pdev = adapter->pdev;
126 	u16 vendor = pdev->vendor;
127 	struct pci_dev *vfdev;
128 	int vf = 0;
129 	u16 vf_id;
130 	int pos;
131 
132 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
133 	if (!pos)
134 		return;
135 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
136 
137 	vfdev = pci_get_device(vendor, vf_id, NULL);
138 	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
139 		if (!vfdev->is_virtfn)
140 			continue;
141 		if (vfdev->physfn != pdev)
142 			continue;
143 		if (vf >= adapter->num_vfs)
144 			continue;
145 		pci_dev_get(vfdev);
146 		adapter->vfinfo[vf].vfdev = vfdev;
147 		++vf;
148 	}
149 }
150 
151 /* Note this function is called when the user wants to enable SR-IOV
152  * VFs using the now deprecated module parameter
153  */
154 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
155 {
156 	int pre_existing_vfs = 0;
157 	unsigned int num_vfs;
158 
159 	pre_existing_vfs = pci_num_vf(adapter->pdev);
160 	if (!pre_existing_vfs && !max_vfs)
161 		return;
162 
163 	/* If there are pre-existing VFs then we have to force
164 	 * use of that many - override any module parameter value.
165 	 * This may result from the user unloading the PF driver
166 	 * while VFs were assigned to guest VMs or because the VFs
167 	 * have been created via the new PCI SR-IOV sysfs interface.
168 	 */
169 	if (pre_existing_vfs) {
170 		num_vfs = pre_existing_vfs;
171 		dev_warn(&adapter->pdev->dev,
172 			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
173 	} else {
174 		int err;
175 		/*
176 		 * The 82599 supports up to 64 VFs per physical function
177 		 * but this implementation limits allocation to 63 so that
178 		 * basic networking resources are still available to the
179 		 * physical function.  Requests for more than 63 VFs are
180 		 * therefore capped at IXGBE_MAX_VFS_DRV_LIMIT (63).
181 		 */
182 		num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
183 
184 		err = pci_enable_sriov(adapter->pdev, num_vfs);
185 		if (err) {
186 			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
187 			return;
188 		}
189 	}
190 
191 	if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
192 		ixgbe_get_vfs(adapter);
193 		return;
194 	}
195 
196 	/* If we have gotten to this point then there is no memory available
197 	 * to manage the VF devices - print message and bail.
198 	 */
199 	e_err(probe, "Unable to allocate memory for VF Data Storage - "
200 	      "SRIOV disabled\n");
201 	ixgbe_disable_sriov(adapter);
202 }
203 
204 #endif /* #ifdef CONFIG_PCI_IOV */
205 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
206 {
207 	unsigned int num_vfs = adapter->num_vfs, vf;
208 	unsigned long flags;
209 	int rss;
210 
211 	spin_lock_irqsave(&adapter->vfs_lock, flags);
212 	/* set num VFs to 0 to prevent access to vfinfo */
213 	adapter->num_vfs = 0;
214 	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
215 
216 	/* put the reference to all of the vf devices */
217 	for (vf = 0; vf < num_vfs; ++vf) {
218 		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
219 
220 		if (!vfdev)
221 			continue;
222 		adapter->vfinfo[vf].vfdev = NULL;
223 		pci_dev_put(vfdev);
224 	}
225 
226 	/* free VF control structures */
227 	kfree(adapter->vfinfo);
228 	adapter->vfinfo = NULL;
229 
230 	/* free macvlan list */
231 	kfree(adapter->mv_list);
232 	adapter->mv_list = NULL;
233 
234 	/* if SR-IOV is already disabled then there is nothing to do */
235 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
236 		return 0;
237 
238 #ifdef CONFIG_PCI_IOV
239 	/*
240 	 * If our VFs are assigned we cannot shut down SR-IOV
241 	 * without causing issues, so just leave the hardware
242 	 * available but disabled
243 	 */
244 	if (pci_vfs_assigned(adapter->pdev)) {
245 		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
246 		return -EPERM;
247 	}
248 	/* disable iov and allow time for transactions to clear */
249 	pci_disable_sriov(adapter->pdev);
250 #endif
251 
252 	/* Clear the VMDq flag so the device drops out of VM mode */
253 	if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
254 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
255 		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
256 		rss = min_t(int, ixgbe_max_rss_indices(adapter),
257 			    num_online_cpus());
258 	} else {
259 		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
260 	}
261 
262 	adapter->ring_feature[RING_F_VMDQ].offset = 0;
263 	adapter->ring_feature[RING_F_RSS].limit = rss;
264 
265 	/* take a breather then clean up driver data */
266 	msleep(100);
267 	return 0;
268 }
269 
270 static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
271 {
272 #ifdef CONFIG_PCI_IOV
273 	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
274 	int pre_existing_vfs = pci_num_vf(dev);
275 	int err = 0, num_rx_pools, i, limit;
276 	u8 num_tc;
277 
278 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
279 		err = ixgbe_disable_sriov(adapter);
280 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
281 		return num_vfs;
282 
283 	if (err)
284 		return err;
285 
286 	/* While the SR-IOV capability structure reports total VFs to be 64,
287 	 * we limit the actual number allocated as below based on two factors.
288 	 *    Num_TCs	MAX_VFs
289 	 *	1	  63
290 	 *	<=4	  31
291 	 *	>4	  15
292 	 * First, we reserve some transmit/receive resources for the PF.
293 	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
294 	 * account for this, so that we don't accidentally allocate more VFs
295 	 * than we have available pools. The PCI bus driver already checks for
296 	 * other values out of range.
297 	 */
298 	num_tc = adapter->hw_tcs;
299 	num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
300 				     adapter->num_rx_pools);
301 	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
302 		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
303 
304 	if (num_vfs > (limit - num_rx_pools)) {
305 		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
306 			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
307 		return -EPERM;
308 	}
309 
310 	err = __ixgbe_enable_sriov(adapter, num_vfs);
311 	if (err)
312 		return  err;
313 
314 	for (i = 0; i < num_vfs; i++)
315 		ixgbe_vf_configuration(dev, (i | 0x10000000));
316 
317 	/* reset before enabling SRIOV to avoid mailbox issues */
318 	ixgbe_sriov_reinit(adapter);
319 
320 	err = pci_enable_sriov(dev, num_vfs);
321 	if (err) {
322 		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
323 		return err;
324 	}
325 	ixgbe_get_vfs(adapter);
326 
327 	return num_vfs;
328 #else
329 	return 0;
330 #endif
331 }
332 
333 static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
334 {
335 	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
336 	int err;
337 #ifdef CONFIG_PCI_IOV
338 	u32 current_flags = adapter->flags;
339 	int prev_num_vf = pci_num_vf(dev);
340 #endif
341 
342 	err = ixgbe_disable_sriov(adapter);
343 
344 	/* Only reinit if no error and state changed */
345 #ifdef CONFIG_PCI_IOV
346 	if (!err && (current_flags != adapter->flags ||
347 		     prev_num_vf != pci_num_vf(dev)))
348 		ixgbe_sriov_reinit(adapter);
349 #endif
350 
351 	return err;
352 }
353 
354 int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
355 {
356 	if (num_vfs == 0)
357 		return ixgbe_pci_sriov_disable(dev);
358 	else
359 		return ixgbe_pci_sriov_enable(dev, num_vfs);
360 }
361 
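/* Handle the IXGBE_VF_SET_MULTICAST mailbox message: cache up to
 * IXGBE_MAX_VF_MC_ENTRIES hash values from the VF so they can be replayed
 * when the PF multicast list changes, program them into the shared MTA hash
 * table, and set ROMPE in the VF's VMOLR so hash-matched frames are accepted.
 */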
362 static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
363 				   u32 *msgbuf, u32 vf)
364 {
365 	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
366 		       >> IXGBE_VT_MSGINFO_SHIFT;
367 	u16 *hash_list = (u16 *)&msgbuf[1];
368 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
369 	struct ixgbe_hw *hw = &adapter->hw;
370 	int i;
371 	u32 vector_bit;
372 	u32 vector_reg;
373 	u32 mta_reg;
374 	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
375 
376 	/* only so many hash values supported */
377 	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
378 
379 	/*
380 	 * salt away the number of multicast addresses assigned
381 	 * to this VF for later use to restore when the PF multicast
382 	 * list changes
383 	 */
384 	vfinfo->num_vf_mc_hashes = entries;
385 
386 	/*
387 	 * VFs are limited to using the MTA hash table for their multicast
388 	 * addresses
389 	 */
390 	for (i = 0; i < entries; i++) {
391 		vfinfo->vf_mc_hashes[i] = hash_list[i];
392 	}
393 
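	/* The low 12 bits of each hash select one bit in the 4096-bit MTA:
	 * bits 11:5 pick one of the 128 32-bit MTA registers and bits 4:0 the
	 * bit within it, e.g. hash 0x345 sets bit 5 of MTA register 26.
	 */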
394 	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
395 		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
396 		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
397 		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
398 		mta_reg |= BIT(vector_bit);
399 		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
400 	}
401 	vmolr |= IXGBE_VMOLR_ROMPE;
402 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
403 
404 	return 0;
405 }
406 
407 #ifdef CONFIG_PCI_IOV
408 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
409 {
410 	struct ixgbe_hw *hw = &adapter->hw;
411 	struct vf_data_storage *vfinfo;
412 	int i, j;
413 	u32 vector_bit;
414 	u32 vector_reg;
415 	u32 mta_reg;
416 
417 	for (i = 0; i < adapter->num_vfs; i++) {
418 		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
419 		vfinfo = &adapter->vfinfo[i];
420 		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
421 			hw->addr_ctrl.mta_in_use++;
422 			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
423 			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
424 			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
425 			mta_reg |= BIT(vector_bit);
426 			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
427 		}
428 
429 		if (vfinfo->num_vf_mc_hashes)
430 			vmolr |= IXGBE_VMOLR_ROMPE;
431 		else
432 			vmolr &= ~IXGBE_VMOLR_ROMPE;
433 		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
434 	}
435 
436 	/* Restore any VF macvlans */
437 	ixgbe_full_sync_mac_table(adapter);
438 }
439 #endif
440 
441 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
442 			     u32 vf)
443 {
444 	struct ixgbe_hw *hw = &adapter->hw;
445 	int err;
446 
447 	/* If the VLAN overlaps with one the PF is currently monitoring, make
448 	 * sure that we are able to allocate a VLVF entry.  This may be
449 	 * redundant but it guarantees PF will maintain visibility to
450 	 * the VLAN.
451 	 */
452 	if (add && test_bit(vid, adapter->active_vlans)) {
453 		err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
454 		if (err)
455 			return err;
456 	}
457 
458 	err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);
459 
460 	if (add && !err)
461 		return err;
462 
463 	/* If we failed to add the VF VLAN or we are removing the VF VLAN
464 	 * we may need to drop the PF pool bit in order to allow us to free
465 	 * up the VLVF resources.
466 	 */
467 	if (test_bit(vid, adapter->active_vlans) ||
468 	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
469 		ixgbe_update_pf_promisc_vlvf(adapter, vid);
470 
471 	return err;
472 }
473 
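/* Handle the IXGBE_VF_SET_LPE mailbox message: validate the requested
 * maximum frame size, on 82599 gate the VF's Rx enable on jumbo-frame
 * compatibility with the PF (see the switch below), and grow the global
 * MAXFRS value if the request exceeds the current maximum.
 */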
474 static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
475 {
476 	struct ixgbe_hw *hw = &adapter->hw;
477 	u32 max_frs;
478 
479 	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
480 		e_err(drv, "VF max_frame %d out of range\n", max_frame);
481 		return -EINVAL;
482 	}
483 
484 	/*
485 	 * For 82599EB we have to keep all PFs and VFs operating with
486 	 * the same max_frame value in order to avoid sending an oversize
487 	 * frame to a VF.  In order to guarantee this is handled correctly
488 	 * for all cases we have several special exceptions to take into
489 	 * account before we can enable the VF for receive
490 	 */
491 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
492 		struct net_device *dev = adapter->netdev;
493 		int pf_max_frame = dev->mtu + ETH_HLEN;
494 		u32 reg_offset, vf_shift, vfre;
495 		s32 err = 0;
496 
497 #ifdef CONFIG_FCOE
498 		if (dev->features & NETIF_F_FCOE_MTU)
499 			pf_max_frame = max_t(int, pf_max_frame,
500 					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
501 
502 #endif /* CONFIG_FCOE */
503 		switch (adapter->vfinfo[vf].vf_api) {
504 		case ixgbe_mbox_api_11:
505 		case ixgbe_mbox_api_12:
506 		case ixgbe_mbox_api_13:
507 		case ixgbe_mbox_api_14:
508 			/* Version 1.1 supports jumbo frames on VFs if PF has
509 			 * jumbo frames enabled which means legacy VFs are
510 			 * disabled
511 			 */
512 			if (pf_max_frame > ETH_FRAME_LEN)
513 				break;
514 			fallthrough;
515 		default:
516 			/* If the PF or VF are running w/ jumbo frames enabled
517 			 * we need to shut down the VF Rx path as we cannot
518 			 * support jumbo frames on legacy VFs
519 			 */
520 			if ((pf_max_frame > ETH_FRAME_LEN) ||
521 			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
522 				err = -EINVAL;
523 			break;
524 		}
525 
526 		/* determine VF receive enable location */
527 		vf_shift = vf % 32;
528 		reg_offset = vf / 32;
529 
530 		/* enable or disable receive depending on error */
531 		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
532 		if (err)
533 			vfre &= ~BIT(vf_shift);
534 		else
535 			vfre |= BIT(vf_shift);
536 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
537 
538 		if (err) {
539 			e_err(drv, "VF max_frame %d out of range\n", max_frame);
540 			return err;
541 		}
542 	}
543 
544 	/* pull current max frame size from hardware */
545 	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
546 	max_frs &= IXGBE_MHADD_MFS_MASK;
547 	max_frs >>= IXGBE_MHADD_MFS_SHIFT;
548 
549 	if (max_frs < max_frame) {
550 		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
551 		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
552 	}
553 
554 	e_info(hw, "VF requests change max MTU to %d\n", max_frame);
555 
556 	return 0;
557 }
558 
559 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
560 {
561 	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
562 	vmolr |= IXGBE_VMOLR_BAM;
563 	if (aupe)
564 		vmolr |= IXGBE_VMOLR_AUPE;
565 	else
566 		vmolr &= ~IXGBE_VMOLR_AUPE;
567 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
568 }
569 
570 static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
571 {
572 	struct ixgbe_hw *hw = &adapter->hw;
573 
574 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
575 }
576 
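/* Walk every VLVF entry and drop this VF's pool bit from the matching VLVFB
 * word.  Once no other pool (PF included) still references the VLAN, the
 * VLVF entry is released and the corresponding VFTA bit is cleared too.
 */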
577 static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
578 {
579 	struct ixgbe_hw *hw = &adapter->hw;
580 	u32 vlvfb_mask, pool_mask, i;
581 
582 	/* create mask for VF and other pools */
583 	pool_mask = ~BIT(VMDQ_P(0) % 32);
584 	vlvfb_mask = BIT(vf % 32);
585 
586 	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
587 	for (i = IXGBE_VLVF_ENTRIES; i--;) {
588 		u32 bits[2], vlvfb, vid, vfta, vlvf;
589 		u32 word = i * 2 + vf / 32;
590 		u32 mask;
591 
592 		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
593 
594 		/* if our bit isn't set we can skip it */
595 		if (!(vlvfb & vlvfb_mask))
596 			continue;
597 
598 		/* clear our bit from vlvfb */
599 		vlvfb ^= vlvfb_mask;
600 
601 		/* create 64b mask to check whether we should clear VLVF */
602 		bits[word % 2] = vlvfb;
603 		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
604 
605 		/* if other pools are present, just remove ourselves */
606 		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
607 		    (bits[VMDQ_P(0) / 32] & pool_mask))
608 			goto update_vlvfb;
609 
610 		/* if PF is present, leave VFTA */
611 		if (bits[0] || bits[1])
612 			goto update_vlvf;
613 
614 		/* if we cannot determine VLAN just remove ourselves */
615 		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
616 		if (!vlvf)
617 			goto update_vlvfb;
618 
619 		vid = vlvf & VLAN_VID_MASK;
620 		mask = BIT(vid % 32);
621 
622 		/* clear bit from VFTA */
623 		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
624 		if (vfta & mask)
625 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
626 update_vlvf:
627 		/* clear POOL selection enable */
628 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
629 
630 		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
631 			vlvfb = 0;
632 update_vlvfb:
633 		/* clear pool bits */
634 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
635 	}
636 }
637 
638 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
639 				int vf, int index, unsigned char *mac_addr)
640 {
641 	struct vf_macvlans *entry;
642 	struct list_head *pos;
643 	int retval = 0;
644 
645 	if (index <= 1) {
646 		list_for_each(pos, &adapter->vf_mvs.l) {
647 			entry = list_entry(pos, struct vf_macvlans, l);
648 			if (entry->vf == vf) {
649 				entry->vf = -1;
650 				entry->free = true;
651 				entry->is_macvlan = false;
652 				ixgbe_del_mac_filter(adapter,
653 						     entry->vf_macvlan, vf);
654 			}
655 		}
656 	}
657 
658 	/*
659 	 * If index was zero then we were asked to clear the uc list
660 	 * for the VF.  We're done.
661 	 */
662 	if (!index)
663 		return 0;
664 
665 	entry = NULL;
666 
667 	list_for_each(pos, &adapter->vf_mvs.l) {
668 		entry = list_entry(pos, struct vf_macvlans, l);
669 		if (entry->free)
670 			break;
671 	}
672 
673 	/*
674 	 * If we traversed the entire list and didn't find a free entry
675 	 * then we're out of space on the RAR table.  Also entry may
676 	 * be NULL because the original memory allocation for the list
677 	 * failed, which is not fatal but does mean we can't support
678 	 * VF requests for MACVLAN because we couldn't allocate
679 	 * memory for the list management required.
680 	 */
681 	if (!entry || !entry->free)
682 		return -ENOSPC;
683 
684 	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
685 	if (retval < 0)
686 		return retval;
687 
688 	entry->free = false;
689 	entry->is_macvlan = true;
690 	entry->vf = vf;
691 	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
692 
693 	return 0;
694 }
695 
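/* Return a VF to its post-reset state: clear its VLAN and MAC filters,
 * restore the PF-assigned VLAN (or VLAN 0), reset VMOLR/VMVIR and the
 * multicast and IPsec state, drop the negotiated mailbox API back to 1.0,
 * and pulse TXDCTL.ENABLE on every queue that was previously configured.
 */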
696 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
697 {
698 	struct ixgbe_hw *hw = &adapter->hw;
699 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
700 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
701 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
702 	u8 num_tcs = adapter->hw_tcs;
703 	u32 reg_val;
704 	u32 queue;
705 
706 	/* remove VLAN filters belonging to this VF */
707 	ixgbe_clear_vf_vlans(adapter, vf);
708 
709 	/* add back PF assigned VLAN or VLAN 0 */
710 	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
711 
712 	/* reset offloads to defaults */
713 	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
714 
715 	/* set outgoing tags for VFs */
716 	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
717 		ixgbe_clear_vmvir(adapter, vf);
718 	} else {
719 		if (vfinfo->pf_qos || !num_tcs)
720 			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
721 					vfinfo->pf_qos, vf);
722 		else
723 			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
724 					adapter->default_up, vf);
725 
726 		if (vfinfo->spoofchk_enabled) {
727 			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
728 			hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
729 		}
730 	}
731 
732 	/* reset multicast table array for vf */
733 	adapter->vfinfo[vf].num_vf_mc_hashes = 0;
734 
735 	/* clear any ipsec table info */
736 	ixgbe_ipsec_vf_clear(adapter, vf);
737 
738 	/* Flush and reset the mta with the new values */
739 	ixgbe_set_rx_mode(adapter->netdev);
740 
741 	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
742 	ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);
743 
744 	/* reset VF api back to unknown */
745 	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
746 
747 	/* Restart each queue for given VF */
748 	for (queue = 0; queue < q_per_pool; queue++) {
749 		unsigned int reg_idx = (vf * q_per_pool) + queue;
750 
751 		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
752 
753 		/* Re-enable only queues that were already configured */
754 		if (reg_val) {
755 			reg_val |= IXGBE_TXDCTL_ENABLE;
756 			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
757 			reg_val &= ~IXGBE_TXDCTL_ENABLE;
758 			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
759 		}
760 	}
761 
762 	IXGBE_WRITE_FLUSH(hw);
763 }
764 
765 static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
766 {
767 	struct ixgbe_hw *hw = &adapter->hw;
768 	u32 word;
769 
770 	/* Clear VF's mailbox memory */
771 	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
772 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
773 
774 	IXGBE_WRITE_FLUSH(hw);
775 }
776 
777 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
778 			    int vf, unsigned char *mac_addr)
779 {
780 	s32 retval;
781 
782 	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
783 	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
784 	if (retval >= 0)
785 		memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
786 		       ETH_ALEN);
787 	else
788 		eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses);
789 
790 	return retval;
791 }
792 
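/* event_mask encodes the VF number in its low 6 bits and an "enable" flag in
 * bit 28 (the 0x10000000 OR-ed in by ixgbe_pci_sriov_enable); on enable the
 * VF's MAC address is cleared, leaving it for the VF driver to assign.
 */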
793 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
794 {
795 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
796 	unsigned int vfn = (event_mask & 0x3f);
797 
798 	bool enable = ((event_mask & 0x10000000U) != 0);
799 
800 	if (enable)
801 		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
802 
803 	return 0;
804 }
805 
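/* Apply the given QDE (drop enable) setting to every queue in the VF's pool
 * by writing the indexed IXGBE_QDE register once per queue.
 */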
806 static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
807 				   u32 qde)
808 {
809 	struct ixgbe_hw *hw = &adapter->hw;
810 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
811 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
812 	int i;
813 
814 	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
815 		u32 reg;
816 
817 		/* flush previous write */
818 		IXGBE_WRITE_FLUSH(hw);
819 
820 		/* indicate to hardware that we want to set drop enable */
821 		reg = IXGBE_QDE_WRITE | qde;
822 		reg |= i <<  IXGBE_QDE_IDX_SHIFT;
823 		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
824 	}
825 }
826 
827 /**
828  * ixgbe_set_vf_rx_tx - Set VF Rx/Tx enable bits
829  * @adapter: Pointer to adapter struct
830  * @vf: VF identifier
831  *
832  * Enable or disable the VF's transmit and receive queues per its link state
833  **/
834 static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
835 {
836 	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
837 	struct ixgbe_hw *hw = &adapter->hw;
838 	u32 reg_offset, vf_shift;
839 
840 	vf_shift = vf % 32;
841 	reg_offset = vf / 32;
842 
843 	reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
844 	reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
845 
846 	if (adapter->vfinfo[vf].link_enable) {
847 		reg_req_tx = reg_cur_tx | 1 << vf_shift;
848 		reg_req_rx = reg_cur_rx | 1 << vf_shift;
849 	} else {
850 		reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
851 		reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
852 	}
853 
854 	/* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
855 	 * For more info take a look at ixgbe_set_vf_lpe
856 	 */
857 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
858 		struct net_device *dev = adapter->netdev;
859 		int pf_max_frame = dev->mtu + ETH_HLEN;
860 
861 #if IS_ENABLED(CONFIG_FCOE)
862 		if (dev->features & NETIF_F_FCOE_MTU)
863 			pf_max_frame = max_t(int, pf_max_frame,
864 					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
865 #endif /* CONFIG_FCOE */
866 
867 		if (pf_max_frame > ETH_FRAME_LEN)
868 			reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
869 	}
870 
871 	/* Enable/Disable particular VF */
872 	if (reg_cur_tx != reg_req_tx)
873 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
874 	if (reg_cur_rx != reg_req_rx)
875 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
876 }
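/* Handle the IXGBE_VF_RESET mailbox message: reset the VF's filters and
 * mailbox memory, force drop-enable on its Rx queues, re-enable its queues,
 * clear the Tx write-back address registers that an FLR does not touch, and
 * reply with ACK/NACK, the assigned MAC address when the PF set one, and the
 * multicast filter type.
 */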
877 
878 static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
879 {
880 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
881 	struct ixgbe_hw *hw = &adapter->hw;
882 	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
883 	u32 reg, reg_offset, vf_shift;
884 	u32 msgbuf[4] = {0, 0, 0, 0};
885 	u8 *addr = (u8 *)(&msgbuf[1]);
886 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
887 	int i;
888 
889 	e_info(probe, "VF Reset msg received from vf %d\n", vf);
890 
891 	/* reset the filters for the device */
892 	ixgbe_vf_reset_event(adapter, vf);
893 
894 	ixgbe_vf_clear_mbx(adapter, vf);
895 
896 	/* set vf mac address */
897 	if (!is_zero_ether_addr(vf_mac))
898 		ixgbe_set_vf_mac(adapter, vf, vf_mac);
899 
900 	vf_shift = vf % 32;
901 	reg_offset = vf / 32;
902 
903 	/* force drop enable for all VF Rx queues */
904 	reg = IXGBE_QDE_ENABLE;
905 	if (adapter->vfinfo[vf].pf_vlan)
906 		reg |= IXGBE_QDE_HIDE_VLAN;
907 
908 	ixgbe_write_qde(adapter, vf, reg);
909 
910 	ixgbe_set_vf_rx_tx(adapter, vf);
911 
912 	/* enable VF mailbox for further messages */
913 	adapter->vfinfo[vf].clear_to_send = true;
914 
915 	/* Enable counting of spoofed packets in the SSVPC register */
916 	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
917 	reg |= BIT(vf_shift);
918 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
919 
920 	/*
921 	 * Reset the VF's TDWBAL and TDWBAH registers
922 	 * which are not cleared by an FLR
923 	 */
924 	for (i = 0; i < q_per_pool; i++) {
925 		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
926 		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
927 	}
928 
929 	/* reply to reset with ack and vf mac address */
930 	msgbuf[0] = IXGBE_VF_RESET;
931 	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
932 		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
933 		memcpy(addr, vf_mac, ETH_ALEN);
934 	} else {
935 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
936 	}
937 
938 	/*
939 	 * Piggyback the multicast filter type so VF can compute the
940 	 * correct vectors
941 	 */
942 	msgbuf[3] = hw->mac.mc_filter_type;
943 	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
944 
945 	return 0;
946 }
947 
948 static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
949 				 u32 *msgbuf, u32 vf)
950 {
951 	u8 *new_mac = ((u8 *)(&msgbuf[1]));
952 
953 	if (!is_valid_ether_addr(new_mac)) {
954 		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
955 		return -1;
956 	}
957 
958 	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
959 	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
960 		e_warn(drv,
961 		       "VF %d attempted to override administratively set MAC address\n"
962 		       "Reload the VF driver to resume operations\n",
963 		       vf);
964 		return -1;
965 	}
966 
967 	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
968 }
969 
970 static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
971 				 u32 *msgbuf, u32 vf)
972 {
973 	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
974 	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
975 	u8 tcs = adapter->hw_tcs;
976 
977 	if (adapter->vfinfo[vf].pf_vlan || tcs) {
978 		e_warn(drv,
979 		       "VF %d attempted to override administratively set VLAN configuration\n"
980 		       "Reload the VF driver to resume operations\n",
981 		       vf);
982 		return -1;
983 	}
984 
985 	/* VLAN 0 is a special case, don't allow it to be removed */
986 	if (!vid && !add)
987 		return 0;
988 
989 	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
990 }
991 
992 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
993 				    u32 *msgbuf, u32 vf)
994 {
995 	u8 *new_mac = ((u8 *)(&msgbuf[1]));
996 	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
997 		    IXGBE_VT_MSGINFO_SHIFT;
998 	int err;
999 
1000 	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
1001 	    index > 0) {
1002 		e_warn(drv,
1003 		       "VF %d requested MACVLAN filter but is administratively denied\n",
1004 		       vf);
1005 		return -1;
1006 	}
1007 
1008 	/* A non-zero index indicates the VF is setting a filter */
1009 	if (index) {
1010 		if (!is_valid_ether_addr(new_mac)) {
1011 			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
1012 			return -1;
1013 		}
1014 
1015 		/*
1016 		 * If the VF is allowed to set MAC filters then turn off
1017 		 * anti-spoofing to avoid false positives.
1018 		 */
1019 		if (adapter->vfinfo[vf].spoofchk_enabled) {
1020 			struct ixgbe_hw *hw = &adapter->hw;
1021 
1022 			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
1023 			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1024 		}
1025 	}
1026 
1027 	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
1028 	if (err == -ENOSPC)
1029 		e_warn(drv,
1030 		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
1031 		       vf);
1032 
1033 	return err < 0;
1034 }
1035 
1036 static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
1037 				  u32 *msgbuf, u32 vf)
1038 {
1039 	int api = msgbuf[1];
1040 
1041 	switch (api) {
1042 	case ixgbe_mbox_api_10:
1043 	case ixgbe_mbox_api_11:
1044 	case ixgbe_mbox_api_12:
1045 	case ixgbe_mbox_api_13:
1046 	case ixgbe_mbox_api_14:
1047 		adapter->vfinfo[vf].vf_api = api;
1048 		return 0;
1049 	default:
1050 		break;
1051 	}
1052 
1053 	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
1054 
1055 	return -1;
1056 }
1057 
1058 static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
1059 			       u32 *msgbuf, u32 vf)
1060 {
1061 	struct net_device *dev = adapter->netdev;
1062 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1063 	unsigned int default_tc = 0;
1064 	u8 num_tcs = adapter->hw_tcs;
1065 
1066 	/* verify the PF is supporting the correct APIs */
1067 	switch (adapter->vfinfo[vf].vf_api) {
1068 	case ixgbe_mbox_api_20:
1069 	case ixgbe_mbox_api_11:
1070 	case ixgbe_mbox_api_12:
1071 	case ixgbe_mbox_api_13:
1072 	case ixgbe_mbox_api_14:
1073 		break;
1074 	default:
1075 		return -1;
1076 	}
1077 
1078 	/* only allow 1 Tx queue for bandwidth limiting */
1079 	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1080 	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
1081 
1082 	/* if TCs > 1 determine which TC belongs to default user priority */
1083 	if (num_tcs > 1)
1084 		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
1085 
1086 	/* notify VF of need for VLAN tag stripping, and correct queue */
1087 	if (num_tcs)
1088 		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
1089 	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
1090 		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
1091 	else
1092 		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
1093 
1094 	/* notify VF of default queue */
1095 	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
1096 
1097 	return 0;
1098 }
1099 
1100 static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
1101 {
1102 	u32 i, j;
1103 	u32 *out_buf = &msgbuf[1];
1104 	const u8 *reta = adapter->rss_indir_tbl;
1105 	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);
1106 
1107 	/* Check if operation is permitted */
1108 	if (!adapter->vfinfo[vf].rss_query_enabled)
1109 		return -EPERM;
1110 
1111 	/* verify the PF is supporting the correct API */
1112 	switch (adapter->vfinfo[vf].vf_api) {
1113 	case ixgbe_mbox_api_14:
1114 	case ixgbe_mbox_api_13:
1115 	case ixgbe_mbox_api_12:
1116 		break;
1117 	default:
1118 		return -EOPNOTSUPP;
1119 	}
1120 
1121 	/* This mailbox command is supported (required) only for 82599 and x540
1122 	 * VFs which support up to 4 RSS queues. Therefore we will compress the
1123 	 * RETA by saving only 2 bits from each entry. This way we will be able
1124 	 * to transfer the whole RETA in a single mailbox operation.
1125 	 */
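	/* With 4 RSS queues each RETA entry is 0-3, so 16 entries pack into a
	 * single u32 (entry j lands at bit 2 * j); e.g. the 128-entry table of
	 * an 82599/X540 fits in 8 mailbox words.
	 */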
1126 	for (i = 0; i < reta_size / 16; i++) {
1127 		out_buf[i] = 0;
1128 		for (j = 0; j < 16; j++)
1129 			out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
1130 	}
1131 
1132 	return 0;
1133 }
1134 
1135 static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
1136 				u32 *msgbuf, u32 vf)
1137 {
1138 	u32 *rss_key = &msgbuf[1];
1139 
1140 	/* Check if the operation is permitted */
1141 	if (!adapter->vfinfo[vf].rss_query_enabled)
1142 		return -EPERM;
1143 
1144 	/* verify the PF is supporting the correct API */
1145 	switch (adapter->vfinfo[vf].vf_api) {
1146 	case ixgbe_mbox_api_14:
1147 	case ixgbe_mbox_api_13:
1148 	case ixgbe_mbox_api_12:
1149 		break;
1150 	default:
1151 		return -EOPNOTSUPP;
1152 	}
1153 
1154 	memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
1155 
1156 	return 0;
1157 }
1158 
1159 static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
1160 				      u32 *msgbuf, u32 vf)
1161 {
1162 	struct ixgbe_hw *hw = &adapter->hw;
1163 	int xcast_mode = msgbuf[1];
1164 	u32 vmolr, fctrl, disable, enable;
1165 
1166 	/* verify the PF is supporting the correct APIs */
1167 	switch (adapter->vfinfo[vf].vf_api) {
1168 	case ixgbe_mbox_api_12:
1169 		/* promisc introduced in 1.3 version */
1170 		/* promisc mode was introduced in mailbox API version 1.3 */
1171 			return -EOPNOTSUPP;
1172 		fallthrough;
1173 	case ixgbe_mbox_api_13:
1174 	case ixgbe_mbox_api_14:
1175 		break;
1176 	default:
1177 		return -EOPNOTSUPP;
1178 	}
1179 
1180 	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
1181 	    !adapter->vfinfo[vf].trusted) {
1182 		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
1183 	}
1184 
1185 	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
1186 		goto out;
1187 
1188 	switch (xcast_mode) {
1189 	case IXGBEVF_XCAST_MODE_NONE:
1190 		disable = IXGBE_VMOLR_ROMPE |
1191 			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1192 		enable = IXGBE_VMOLR_BAM;
1193 		break;
1194 	case IXGBEVF_XCAST_MODE_MULTI:
1195 		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1196 		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
1197 		break;
1198 	case IXGBEVF_XCAST_MODE_ALLMULTI:
1199 		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
1200 		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
1201 		break;
1202 	case IXGBEVF_XCAST_MODE_PROMISC:
1203 		if (hw->mac.type <= ixgbe_mac_82599EB)
1204 			return -EOPNOTSUPP;
1205 
1206 		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1207 		if (!(fctrl & IXGBE_FCTRL_UPE)) {
1208 			/* VF promisc requires PF in promisc */
1209 			e_warn(drv,
1210 			       "Enabling VF promisc requires PF in promisc\n");
1211 			return -EPERM;
1212 		}
1213 
1214 		disable = IXGBE_VMOLR_VPE;
1215 		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
1216 			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
1217 		break;
1218 	default:
1219 		return -EOPNOTSUPP;
1220 	}
1221 
1222 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
1223 	vmolr &= ~disable;
1224 	vmolr |= enable;
1225 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
1226 
1227 	adapter->vfinfo[vf].xcast_mode = xcast_mode;
1228 
1229 out:
1230 	msgbuf[1] = xcast_mode;
1231 
1232 	return 0;
1233 }
1234 
1235 static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
1236 				   u32 *msgbuf, u32 vf)
1237 {
1238 	u32 *link_state = &msgbuf[1];
1239 
1240 	/* verify the PF is supporting the correct API */
1241 	switch (adapter->vfinfo[vf].vf_api) {
1242 	case ixgbe_mbox_api_12:
1243 	case ixgbe_mbox_api_13:
1244 	case ixgbe_mbox_api_14:
1245 		break;
1246 	default:
1247 		return -EOPNOTSUPP;
1248 	}
1249 
1250 	*link_state = adapter->vfinfo[vf].link_enable;
1251 
1252 	return 0;
1253 }
1254 
1255 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1256 {
1257 	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
1258 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
1259 	struct ixgbe_hw *hw = &adapter->hw;
1260 	s32 retval;
1261 
1262 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
1263 
1264 	if (retval) {
1265 		pr_err("Error receiving message from VF\n");
1266 		return retval;
1267 	}
1268 
1269 	/* this is a message we already processed, do nothing */
1270 	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
1271 		return 0;
1272 
1273 	/* flush the ack before we write any messages back */
1274 	IXGBE_WRITE_FLUSH(hw);
1275 
1276 	if (msgbuf[0] == IXGBE_VF_RESET)
1277 		return ixgbe_vf_reset_msg(adapter, vf);
1278 
1279 	/*
1280 	 * until the vf completes a virtual function reset it should not be
1281 	 * allowed to start any configuration.
1282 	 */
1283 	if (!adapter->vfinfo[vf].clear_to_send) {
1284 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1285 		ixgbe_write_mbx(hw, msgbuf, 1, vf);
1286 		return 0;
1287 	}
1288 
1289 	switch ((msgbuf[0] & 0xFFFF)) {
1290 	case IXGBE_VF_SET_MAC_ADDR:
1291 		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
1292 		break;
1293 	case IXGBE_VF_SET_MULTICAST:
1294 		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
1295 		break;
1296 	case IXGBE_VF_SET_VLAN:
1297 		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
1298 		break;
1299 	case IXGBE_VF_SET_LPE:
1300 		retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
1301 		break;
1302 	case IXGBE_VF_SET_MACVLAN:
1303 		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
1304 		break;
1305 	case IXGBE_VF_API_NEGOTIATE:
1306 		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
1307 		break;
1308 	case IXGBE_VF_GET_QUEUES:
1309 		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
1310 		break;
1311 	case IXGBE_VF_GET_RETA:
1312 		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
1313 		break;
1314 	case IXGBE_VF_GET_RSS_KEY:
1315 		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
1316 		break;
1317 	case IXGBE_VF_UPDATE_XCAST_MODE:
1318 		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
1319 		break;
1320 	case IXGBE_VF_GET_LINK_STATE:
1321 		retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
1322 		break;
1323 	case IXGBE_VF_IPSEC_ADD:
1324 		retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
1325 		break;
1326 	case IXGBE_VF_IPSEC_DEL:
1327 		retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
1328 		break;
1329 	default:
1330 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
1331 		retval = IXGBE_ERR_MBX;
1332 		break;
1333 	}
1334 
1335 	/* notify the VF of the results of what it sent us */
1336 	if (retval)
1337 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1338 	else
1339 		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
1340 
1341 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
1342 
1343 	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
1344 
1345 	return retval;
1346 }
1347 
1348 static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1349 {
1350 	struct ixgbe_hw *hw = &adapter->hw;
1351 	u32 msg = IXGBE_VT_MSGTYPE_NACK;
1352 
1353 	/* if device isn't clear to send it shouldn't be reading either */
1354 	if (!adapter->vfinfo[vf].clear_to_send)
1355 		ixgbe_write_mbx(hw, &msg, 1, vf);
1356 }
1357 
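/* Mailbox service routine: with vfs_lock held, poll every VF for a pending
 * reset request, mailbox message, or ack and dispatch it to the handlers
 * above.
 */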
1358 void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1359 {
1360 	struct ixgbe_hw *hw = &adapter->hw;
1361 	unsigned long flags;
1362 	u32 vf;
1363 
1364 	spin_lock_irqsave(&adapter->vfs_lock, flags);
1365 	for (vf = 0; vf < adapter->num_vfs; vf++) {
1366 		/* process any reset requests */
1367 		if (!ixgbe_check_for_rst(hw, vf))
1368 			ixgbe_vf_reset_event(adapter, vf);
1369 
1370 		/* process any messages pending */
1371 		if (!ixgbe_check_for_msg(hw, vf))
1372 			ixgbe_rcv_msg_from_vf(adapter, vf);
1373 
1374 		/* process any acks */
1375 		if (!ixgbe_check_for_ack(hw, vf))
1376 			ixgbe_rcv_ack_from_vf(adapter, vf);
1377 	}
1378 	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
1379 }
1380 
1381 static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
1382 {
1383 	struct ixgbe_hw *hw = &adapter->hw;
1384 	u32 ping;
1385 
1386 	ping = IXGBE_PF_CONTROL_MSG;
1387 	if (adapter->vfinfo[vf].clear_to_send)
1388 		ping |= IXGBE_VT_MSGTYPE_CTS;
1389 	ixgbe_write_mbx(hw, &ping, 1, vf);
1390 }
1391 
1392 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1393 {
1394 	struct ixgbe_hw *hw = &adapter->hw;
1395 	u32 ping;
1396 	int i;
1397 
1398 	for (i = 0 ; i < adapter->num_vfs; i++) {
1399 		ping = IXGBE_PF_CONTROL_MSG;
1400 		if (adapter->vfinfo[i].clear_to_send)
1401 			ping |= IXGBE_VT_MSGTYPE_CTS;
1402 		ixgbe_write_mbx(hw, &ping, 1, i);
1403 	}
1404 }
1405 
1406 /**
1407  * ixgbe_set_all_vfs - update all VFs' queue enables
1408  * @adapter: Pointer to adapter struct
1409  *
1410  * Reapply each VF's stored link state, updating its transmit and receive queues
1411  **/
1412 void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
1413 {
1414 	int i;
1415 
1416 	for (i = 0 ; i < adapter->num_vfs; i++)
1417 		ixgbe_set_vf_link_state(adapter, i,
1418 					adapter->vfinfo[i].link_state);
1419 }
1420 
1421 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1422 {
1423 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1424 	s32 retval;
1425 
1426 	if (vf >= adapter->num_vfs)
1427 		return -EINVAL;
1428 
1429 	if (is_valid_ether_addr(mac)) {
1430 		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
1431 			 mac, vf);
1432 		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.\n");
1433 
1434 		retval = ixgbe_set_vf_mac(adapter, vf, mac);
1435 		if (retval >= 0) {
1436 			adapter->vfinfo[vf].pf_set_mac = true;
1437 
1438 			if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1439 				dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
1440 				dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
1441 			}
1442 		} else {
1443 			dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
1444 		}
1445 	} else if (is_zero_ether_addr(mac)) {
1446 		unsigned char *vf_mac_addr =
1447 					   adapter->vfinfo[vf].vf_mac_addresses;
1448 
1449 		/* nothing to do */
1450 		if (is_zero_ether_addr(vf_mac_addr))
1451 			return 0;
1452 
1453 		dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
1454 
1455 		retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
1456 		if (retval >= 0) {
1457 			adapter->vfinfo[vf].pf_set_mac = false;
1458 			memcpy(vf_mac_addr, mac, ETH_ALEN);
1459 		} else {
1460 			dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
1461 		}
1462 	} else {
1463 		retval = -EINVAL;
1464 	}
1465 
1466 	return retval;
1467 }
1468 
1469 static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1470 				  u16 vlan, u8 qos)
1471 {
1472 	struct ixgbe_hw *hw = &adapter->hw;
1473 	int err;
1474 
1475 	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1476 	if (err)
1477 		goto out;
1478 
1479 	/* Revoke tagless access via VLAN 0 */
1480 	ixgbe_set_vf_vlan(adapter, false, 0, vf);
1481 
1482 	ixgbe_set_vmvir(adapter, vlan, qos, vf);
1483 	ixgbe_set_vmolr(hw, vf, false);
1484 
1485 	/* enable hide vlan on X550 */
1486 	if (hw->mac.type >= ixgbe_mac_X550)
1487 		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1488 				IXGBE_QDE_HIDE_VLAN);
1489 
1490 	adapter->vfinfo[vf].pf_vlan = vlan;
1491 	adapter->vfinfo[vf].pf_qos = qos;
1492 	dev_info(&adapter->pdev->dev,
1493 		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1494 	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1495 		dev_warn(&adapter->pdev->dev,
1496 			 "The VF VLAN has been set, but the PF device is not up.\n");
1497 		dev_warn(&adapter->pdev->dev,
1498 			 "Bring the PF device up before attempting to use the VF device.\n");
1499 	}
1500 
1501 out:
1502 	return err;
1503 }
1504 
1505 static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1506 {
1507 	struct ixgbe_hw *hw = &adapter->hw;
1508 	int err;
1509 
1510 	err = ixgbe_set_vf_vlan(adapter, false,
1511 				adapter->vfinfo[vf].pf_vlan, vf);
1512 	/* Restore tagless access via VLAN 0 */
1513 	ixgbe_set_vf_vlan(adapter, true, 0, vf);
1514 	ixgbe_clear_vmvir(adapter, vf);
1515 	ixgbe_set_vmolr(hw, vf, true);
1516 
1517 	/* disable hide VLAN on X550 */
1518 	if (hw->mac.type >= ixgbe_mac_X550)
1519 		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1520 
1521 	adapter->vfinfo[vf].pf_vlan = 0;
1522 	adapter->vfinfo[vf].pf_qos = 0;
1523 
1524 	return err;
1525 }
1526 
1527 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1528 			  u8 qos, __be16 vlan_proto)
1529 {
1530 	int err = 0;
1531 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1532 
1533 	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1534 		return -EINVAL;
1535 	if (vlan_proto != htons(ETH_P_8021Q))
1536 		return -EPROTONOSUPPORT;
1537 	if (vlan || qos) {
1538 		/* Check if there is already a port VLAN set, if so
1539 		 * we have to delete the old one first before we
1540 		 * can set the new one.  The usage model had
1541 		 * previously assumed the user would delete the
1542 		 * old port VLAN before setting a new one but this
1543 		 * is not necessarily the case.
1544 		 */
1545 		if (adapter->vfinfo[vf].pf_vlan)
1546 			err = ixgbe_disable_port_vlan(adapter, vf);
1547 		if (err)
1548 			goto out;
1549 		err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
1550 	} else {
1551 		err = ixgbe_disable_port_vlan(adapter, vf);
1552 	}
1553 
1554 out:
1555 	return err;
1556 }
1557 
1558 int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1559 {
1560 	switch (adapter->link_speed) {
1561 	case IXGBE_LINK_SPEED_100_FULL:
1562 		return 100;
1563 	case IXGBE_LINK_SPEED_1GB_FULL:
1564 		return 1000;
1565 	case IXGBE_LINK_SPEED_10GB_FULL:
1566 		return 10000;
1567 	default:
1568 		return 0;
1569 	}
1570 }
1571 
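/* Program the per-queue Tx rate limiter for one VF.  The RTTBCNRC rate
 * factor is the link speed divided by the requested rate in the register's
 * fixed-point format (e.g. a 1000 Mbps cap on a 10000 Mbps link gives a
 * factor of 10.0); it is written to every Tx queue in the VF's pool, and a
 * rate of 0 leaves the limiter disabled.
 */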
1572 static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
1573 {
1574 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1575 	struct ixgbe_hw *hw = &adapter->hw;
1576 	u32 bcnrc_val = 0;
1577 	u16 queue, queues_per_pool;
1578 	u16 tx_rate = adapter->vfinfo[vf].tx_rate;
1579 
1580 	if (tx_rate) {
1581 		/* start with base link speed value */
1582 		bcnrc_val = adapter->vf_rate_link_speed;
1583 
1584 		/* Calculate the rate factor values to set */
1585 		bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1586 		bcnrc_val /= tx_rate;
1587 
1588 		/* clear everything but the rate factor */
1589 		bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1590 			     IXGBE_RTTBCNRC_RF_DEC_MASK;
1591 
1592 		/* enable the rate scheduler */
1593 		bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1594 	}
1595 
1596 	/*
1597 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1598 	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1599 	 * and 0x004 otherwise.
1600 	 */
1601 	switch (hw->mac.type) {
1602 	case ixgbe_mac_82599EB:
1603 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1604 		break;
1605 	case ixgbe_mac_X540:
1606 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1607 		break;
1608 	default:
1609 		break;
1610 	}
1611 
1612 	/* determine how many queues per pool based on VMDq mask */
1613 	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1614 
1615 	/* write value for all Tx queues belonging to VF */
1616 	for (queue = 0; queue < queues_per_pool; queue++) {
1617 		unsigned int reg_idx = (vf * queues_per_pool) + queue;
1618 
1619 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1620 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1621 	}
1622 }
1623 
1624 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1625 {
1626 	int i;
1627 
1628 	/* VF Tx rate limit was not set */
1629 	if (!adapter->vf_rate_link_speed)
1630 		return;
1631 
1632 	if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1633 		adapter->vf_rate_link_speed = 0;
1634 		dev_info(&adapter->pdev->dev,
1635 			 "Link speed has been changed. VF Transmit rate is disabled\n");
1636 	}
1637 
1638 	for (i = 0; i < adapter->num_vfs; i++) {
1639 		if (!adapter->vf_rate_link_speed)
1640 			adapter->vfinfo[i].tx_rate = 0;
1641 
1642 		ixgbe_set_vf_rate_limit(adapter, i);
1643 	}
1644 }
1645 
1646 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1647 			int max_tx_rate)
1648 {
1649 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1650 	int link_speed;
1651 
1652 	/* verify VF is active */
1653 	if (vf >= adapter->num_vfs)
1654 		return -EINVAL;
1655 
1656 	/* verify link is up */
1657 	if (!adapter->link_up)
1658 		return -EINVAL;
1659 
1660 	/* verify we are linked at 10Gbps */
1661 	link_speed = ixgbe_link_mbps(adapter);
1662 	if (link_speed != 10000)
1663 		return -EINVAL;
1664 
1665 	if (min_tx_rate)
1666 		return -EINVAL;
1667 
1668 	/* rate limit cannot be less than 10Mbs or greater than link speed */
1669 	/* rate limit must be greater than 10 Mbps and no more than link speed */
1670 		return -EINVAL;
1671 
1672 	/* store values */
1673 	adapter->vf_rate_link_speed = link_speed;
1674 	adapter->vfinfo[vf].tx_rate = max_tx_rate;
1675 
1676 	/* update hardware configuration */
1677 	ixgbe_set_vf_rate_limit(adapter, vf);
1678 
1679 	return 0;
1680 }
1681 
1682 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1683 {
1684 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1685 	struct ixgbe_hw *hw = &adapter->hw;
1686 
1687 	if (vf >= adapter->num_vfs)
1688 		return -EINVAL;
1689 
1690 	adapter->vfinfo[vf].spoofchk_enabled = setting;
1691 
1692 	/* configure MAC spoofing */
1693 	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
1694 
1695 	/* configure VLAN spoofing */
1696 	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
1697 
1698 	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
1699 	 * calling set_ethertype_anti_spoofing for each VF in loop below
1700 	 */
1701 	if (hw->mac.ops.set_ethertype_anti_spoofing) {
1702 		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
1703 				(IXGBE_ETQF_FILTER_EN    |
1704 				 IXGBE_ETQF_TX_ANTISPOOF |
1705 				 ETH_P_LLDP));
1706 
1707 		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
1708 				(IXGBE_ETQF_FILTER_EN |
1709 				 IXGBE_ETQF_TX_ANTISPOOF |
1710 				 ETH_P_PAUSE));
1711 
1712 		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 /**
1719  * ixgbe_set_vf_link_state - Set link state
1720  * @adapter: Pointer to adapter struct
1721  * @vf: VF identifier
1722  * @state: required link state
1723  *
1724  * Force the link state of a single VF on or off, or return it to auto
1725  **/
1726 void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
1727 {
1728 	adapter->vfinfo[vf].link_state = state;
1729 
1730 	switch (state) {
1731 	case IFLA_VF_LINK_STATE_AUTO:
1732 		if (test_bit(__IXGBE_DOWN, &adapter->state))
1733 			adapter->vfinfo[vf].link_enable = false;
1734 		else
1735 			adapter->vfinfo[vf].link_enable = true;
1736 		break;
1737 	case IFLA_VF_LINK_STATE_ENABLE:
1738 		adapter->vfinfo[vf].link_enable = true;
1739 		break;
1740 	case IFLA_VF_LINK_STATE_DISABLE:
1741 		adapter->vfinfo[vf].link_enable = false;
1742 		break;
1743 	}
1744 
1745 	ixgbe_set_vf_rx_tx(adapter, vf);
1746 
1747 	/* restart the VF */
1748 	adapter->vfinfo[vf].clear_to_send = false;
1749 	ixgbe_ping_vf(adapter, vf);
1750 }
1751 
1752 /**
1753  * ixgbe_ndo_set_vf_link_state - Set link state
1754  * @netdev: network interface device structure
1755  * @vf: VF identifier
1756  * @state: required link state
1757  *
1758  * Set the link state of a specified VF, regardless of physical link state
1759  **/
1760 int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
1761 {
1762 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1763 	int ret = 0;
1764 
1765 	if (vf < 0 || vf >= adapter->num_vfs) {
1766 		dev_err(&adapter->pdev->dev,
1767 			"NDO set VF link - invalid VF identifier %d\n", vf);
1768 		return -EINVAL;
1769 	}
1770 
1771 	switch (state) {
1772 	case IFLA_VF_LINK_STATE_ENABLE:
1773 		dev_info(&adapter->pdev->dev,
1774 			 "NDO set VF %d link state %d - not supported\n",
1775 			vf, state);
1776 		break;
1777 	case IFLA_VF_LINK_STATE_DISABLE:
1778 		dev_info(&adapter->pdev->dev,
1779 			 "NDO set VF %d link state disable\n", vf);
1780 		ixgbe_set_vf_link_state(adapter, vf, state);
1781 		break;
1782 	case IFLA_VF_LINK_STATE_AUTO:
1783 		dev_info(&adapter->pdev->dev,
1784 			 "NDO set VF %d link state auto\n", vf);
1785 		ixgbe_set_vf_link_state(adapter, vf, state);
1786 		break;
1787 	default:
1788 		dev_err(&adapter->pdev->dev,
1789 			"NDO set VF %d - invalid link state %d\n", vf, state);
1790 		ret = -EINVAL;
1791 	}
1792 
1793 	return ret;
1794 }
1795 
1796 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
1797 				  bool setting)
1798 {
1799 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1800 
1801 	/* This operation is currently supported only for 82599 and x540
1802 	 * devices.
1803 	 */
1804 	if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
1805 	    adapter->hw.mac.type >= ixgbe_mac_X550)
1806 		return -EOPNOTSUPP;
1807 
1808 	if (vf >= adapter->num_vfs)
1809 		return -EINVAL;
1810 
1811 	adapter->vfinfo[vf].rss_query_enabled = setting;
1812 
1813 	return 0;
1814 }
1815 
1816 int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
1817 {
1818 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1819 
1820 	if (vf >= adapter->num_vfs)
1821 		return -EINVAL;
1822 
1823 	/* nothing to do */
1824 	if (adapter->vfinfo[vf].trusted == setting)
1825 		return 0;
1826 
1827 	adapter->vfinfo[vf].trusted = setting;
1828 
1829 	/* reset VF to reconfigure features */
1830 	adapter->vfinfo[vf].clear_to_send = false;
1831 	ixgbe_ping_vf(adapter, vf);
1832 
1833 	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
1834 
1835 	return 0;
1836 }
1837 
1838 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1839 			    int vf, struct ifla_vf_info *ivi)
1840 {
1841 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1842 	if (vf >= adapter->num_vfs)
1843 		return -EINVAL;
1844 	ivi->vf = vf;
1845 	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1846 	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1847 	ivi->min_tx_rate = 0;
1848 	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1849 	ivi->qos = adapter->vfinfo[vf].pf_qos;
1850 	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1851 	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
1852 	ivi->trusted = adapter->vfinfo[vf].trusted;
1853 	return 0;
1854 }
1855