// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;

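/**
 * netdev_hw_addr_refcnt - adjust refcount of a netdev multicast address
 * @f: the MAC filter tracking the address
 * @netdev: network interface device structure
 * @delta: amount to add to the matching entry's refcount
 *
 * Walks the netdev's multicast list for @f's MAC address and adjusts the
 * matching entry's refcount by @delta, clamping the result to a minimum
 * of 1 so the core never drops an address the driver still tracks.
 **/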
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	netdev_for_each_mc_addr(ha, netdev) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

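/* Usage sketch (illustrative, not part of the upstream driver): the
 * shared code pairs these helpers around the lifetime of a DMA region
 * such as an admin queue ring. Assuming a caller holding a valid hw:
 *
 *	struct i40e_dma_mem ring_mem = {};
 *
 *	if (i40e_allocate_dma_mem_d(hw, &ring_mem, 4096, 64))
 *		return -ENOMEM;
 *	... program ring_mem.pa into hardware, access it via ring_mem.va ...
 *	i40e_free_dma_mem_d(hw, &ring_mem);
 */
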
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

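/* Worked example of the scan above (illustrative): with num_entries = 8,
 * needed = 3 and list = [V, V, 0, 0, V, 0, 0, 0] (V meaning the entry has
 * I40E_PILE_VALID_BIT set), the walk skips i = 0..1, finds only two free
 * slots starting at i = 2 before hitting the allocated entry at i = 4,
 * skips ahead, and finally marks entries 5..7 with id | I40E_PILE_VALID_BIT
 * and returns base index 5.
 */
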
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}

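/* Pairing sketch (illustrative): a caller records the base index from
 * i40e_get_lump() and hands it back with the same owner id on release,
 * e.g. for a VSI's interrupt vectors:
 *
 *	base = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx);
 *	if (base < 0)
 *		return base;
 *	...
 *	i40e_put_lump(pf->irq_pile, base, vsi->idx);
 */
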
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

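/* The recovery above deliberately escalates: the first timeout within a
 * 20 second window requests a PF reset, the second a CORE reset, the
 * third a GLOBAL reset, and anything past that is only logged as
 * unrecoverable. Once 20 seconds pass without another hang the level
 * drops back to 1, so isolated hangs never accumulate into a global
 * reset.
 */
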
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}

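/* The fetch/retry loop above is the standard u64_stats_sync pattern: on
 * 32-bit kernels a 64-bit counter update is not atomic, so the reader
 * re-reads packets/bytes until the ring's sequence count is unchanged
 * across the snapshot, getting a consistent pair without locking the Tx
 * hot path. The same idiom is repeated for the Rx and XDP rings below.
 */
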
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Copies the accumulated device statistics into @stats.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
				  struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start   = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

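/* Rollover example for the 48-bit math above (illustrative): if the
 * saved offset is 0xFFFFFFFFFFF0 and the wrapped counter now reads
 * 0x10, then new_data < *offset, so the stat becomes
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 units counted
 * across the wrap; the final mask keeps the result within 48 bits.
 */
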
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}

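/* Summary of the corrections applied above (cases a-c from the comment):
 *
 *	PVID assigned:       filters with vlan != PVID     -> vlan = PVID
 *	no PVID, VLANs:      filters with vlan == ANY (-1) -> vlan = 0
 *	no PVID, no VLANs:   filters with vlan == 0        -> vlan = ANY (-1)
 *
 * Replaced filters are queued on tmp_del_list and their substitutes on
 * tmp_add_list; the actual hardware update happens later in
 * i40e_sync_vsi_filters.
 */
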
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

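/* Usage sketch (illustrative): since i40e_add_filter() expects the
 * caller to hold mac_filter_hash_lock, a typical call site looks like:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	if (!f)
 *		return -ENOMEM;
 *
 * The filter is only staged here; i40e_sync_vsi_filters() pushes it to
 * the hardware later from the service task.
 */
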
1457 /**
1458  * __i40e_del_filter - Remove a specific filter from the VSI
1459  * @vsi: VSI to remove from
1460  * @f: the filter to remove from the list
1461  *
1462  * This function should be called instead of i40e_del_filter only if you know
1463  * the exact filter you will remove already, such as via i40e_find_filter or
1464  * i40e_find_mac.
1465  *
1466  * NOTE: This function is expected to be called with mac_filter_hash_lock
1467  * being held.
1468  * ANOTHER NOTE: This function MUST be called from within the context of
1469  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1470  * instead of list_for_each_entry().
1471  **/
1472 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1473 {
1474 	if (!f)
1475 		return;
1476 
1477 	/* If the filter was never added to firmware then we can just delete it
1478 	 * directly and we don't want to set the status to remove or else an
1479 	 * admin queue command will unnecessarily fire.
1480 	 */
1481 	if ((f->state == I40E_FILTER_FAILED) ||
1482 	    (f->state == I40E_FILTER_NEW)) {
1483 		hash_del(&f->hlist);
1484 		kfree(f);
1485 	} else {
1486 		f->state = I40E_FILTER_REMOVE;
1487 	}
1488 
1489 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1490 	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1491 }
1492 
1493 /**
1494  * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1495  * @vsi: the VSI to be searched
1496  * @macaddr: the MAC address
1497  * @vlan: the VLAN
1498  *
1499  * NOTE: This function is expected to be called with mac_filter_hash_lock
1500  * being held.
1501  * ANOTHER NOTE: This function MUST be called from within the context of
1502  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1503  * instead of list_for_each_entry().
1504  **/
1505 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1506 {
1507 	struct i40e_mac_filter *f;
1508 
1509 	if (!vsi || !macaddr)
1510 		return;
1511 
1512 	f = i40e_find_filter(vsi, macaddr, vlan);
1513 	__i40e_del_filter(vsi, f);
1514 }
1515 
1516 /**
1517  * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1518  * @vsi: the VSI to be searched
1519  * @macaddr: the mac address to be filtered
1520  *
1521  * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1522  * go through all the macvlan filters and add a macvlan filter for each
1523  * unique vlan that already exists. If a PVID has been assigned, instead only
1524  * add the macaddr to that VLAN.
1525  *
1526  * Returns last filter added on success, else NULL
1527  **/
1528 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1529 					    const u8 *macaddr)
1530 {
1531 	struct i40e_mac_filter *f, *add = NULL;
1532 	struct hlist_node *h;
1533 	int bkt;
1534 
1535 	if (vsi->info.pvid)
1536 		return i40e_add_filter(vsi, macaddr,
1537 				       le16_to_cpu(vsi->info.pvid));
1538 
1539 	if (!i40e_is_vsi_in_vlan(vsi))
1540 		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1541 
1542 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1543 		if (f->state == I40E_FILTER_REMOVE)
1544 			continue;
1545 		add = i40e_add_filter(vsi, macaddr, f->vlan);
1546 		if (!add)
1547 			return NULL;
1548 	}
1549 
1550 	return add;
1551 }
1552 
1553 /**
1554  * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1555  * @vsi: the VSI to be searched
1556  * @macaddr: the mac address to be removed
1557  *
1558  * Removes a given MAC address from a VSI regardless of what VLAN it has been
1559  * associated with.
1560  *
1561  * Returns 0 for success, or error
1562  **/
1563 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1564 {
1565 	struct i40e_mac_filter *f;
1566 	struct hlist_node *h;
1567 	bool found = false;
1568 	int bkt;
1569 
1570 	lockdep_assert_held(&vsi->mac_filter_hash_lock);
1571 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1572 		if (ether_addr_equal(macaddr, f->macaddr)) {
1573 			__i40e_del_filter(vsi, f);
1574 			found = true;
1575 		}
1576 	}
1577 
1578 	if (found)
1579 		return 0;
1580 	else
1581 		return -ENOENT;
1582 }
1583 
1584 /**
1585  * i40e_set_mac - NDO callback to set mac address
1586  * @netdev: network interface device structure
1587  * @p: pointer to an address structure
1588  *
1589  * Returns 0 on success, negative on failure
1590  **/
1591 static int i40e_set_mac(struct net_device *netdev, void *p)
1592 {
1593 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1594 	struct i40e_vsi *vsi = np->vsi;
1595 	struct i40e_pf *pf = vsi->back;
1596 	struct i40e_hw *hw = &pf->hw;
1597 	struct sockaddr *addr = p;
1598 
1599 	if (!is_valid_ether_addr(addr->sa_data))
1600 		return -EADDRNOTAVAIL;
1601 
1602 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1603 		netdev_info(netdev, "already using mac address %pM\n",
1604 			    addr->sa_data);
1605 		return 0;
1606 	}
1607 
1608 	if (test_bit(__I40E_DOWN, pf->state) ||
1609 	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1610 		return -EADDRNOTAVAIL;
1611 
1612 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1613 		netdev_info(netdev, "returning to hw mac address %pM\n",
1614 			    hw->mac.addr);
1615 	else
1616 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1617 
1618 	/* Copy the address first, so that we avoid a possible race with
1619 	 * .set_rx_mode().
1620 	 * - Remove old address from MAC filter
1621 	 * - Copy new address
1622 	 * - Add new address to MAC filter
1623 	 */
1624 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1625 	i40e_del_mac_filter(vsi, netdev->dev_addr);
1626 	eth_hw_addr_set(netdev, addr->sa_data);
1627 	i40e_add_mac_filter(vsi, netdev->dev_addr);
1628 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1629 
1630 	if (vsi->type == I40E_VSI_MAIN) {
1631 		i40e_status ret;
1632 
1633 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1634 						addr->sa_data, NULL);
1635 		if (ret)
1636 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1637 				    i40e_stat_str(hw, ret),
1638 				    i40e_aq_str(hw, hw->aq.asq_last_status));
1639 	}
1640 
1641 	/* schedule our worker thread which will take care of
1642 	 * applying the new filter changes
1643 	 */
1644 	i40e_service_event_schedule(pf);
1645 	return 0;
1646 }
1647 
1648 /**
1649  * i40e_config_rss_aq - Prepare for RSS using AQ commands
1650  * @vsi: vsi structure
1651  * @seed: RSS hash seed
1652  * @lut: pointer to lookup table of lut_size
1653  * @lut_size: size of the lookup table
1654  **/
1655 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1656 			      u8 *lut, u16 lut_size)
1657 {
1658 	struct i40e_pf *pf = vsi->back;
1659 	struct i40e_hw *hw = &pf->hw;
1660 	int ret = 0;
1661 
1662 	if (seed) {
1663 		struct i40e_aqc_get_set_rss_key_data *seed_dw =
1664 			(struct i40e_aqc_get_set_rss_key_data *)seed;
1665 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1666 		if (ret) {
1667 			dev_info(&pf->pdev->dev,
1668 				 "Cannot set RSS key, err %s aq_err %s\n",
1669 				 i40e_stat_str(hw, ret),
1670 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1671 			return ret;
1672 		}
1673 	}
1674 	if (lut) {
1675 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
1676 
1677 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1678 		if (ret) {
1679 			dev_info(&pf->pdev->dev,
1680 				 "Cannot set RSS lut, err %s aq_err %s\n",
1681 				 i40e_stat_str(hw, ret),
1682 				 i40e_aq_str(hw, hw->aq.asq_last_status));
1683 			return ret;
1684 		}
1685 	}
1686 	return ret;
1687 }
1688 
1689 /**
1690  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1691  * @vsi: VSI structure
1692  **/
1693 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1694 {
1695 	struct i40e_pf *pf = vsi->back;
1696 	u8 seed[I40E_HKEY_ARRAY_SIZE];
1697 	u8 *lut;
1698 	int ret;
1699 
1700 	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1701 		return 0;
1702 	if (!vsi->rss_size)
1703 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
1704 				      vsi->num_queue_pairs);
1705 	if (!vsi->rss_size)
1706 		return -EINVAL;
1707 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1708 	if (!lut)
1709 		return -ENOMEM;
1710 
1711 	/* Use the user-configured hash key and lookup table if present,
1712 	 * otherwise use the defaults
1713 	 */
1714 	if (vsi->rss_lut_user)
1715 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1716 	else
1717 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1718 	if (vsi->rss_hkey_user)
1719 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1720 	else
1721 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1722 	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1723 	kfree(lut);
1724 	return ret;
1725 }
1726 
1727 /**
1728  * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1729  * @vsi: the VSI being configured
1730  * @ctxt: VSI context structure
1731  * @enabled_tc: bitmap of enabled traffic classes
1732  *
1733  * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1734  **/
1735 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1736 					   struct i40e_vsi_context *ctxt,
1737 					   u8 enabled_tc)
1738 {
1739 	u16 qcount = 0, max_qcount, qmap, sections = 0;
1740 	int i, override_q, pow, num_qps, ret;
1741 	u8 netdev_tc = 0, offset = 0;
1742 
1743 	if (vsi->type != I40E_VSI_MAIN)
1744 		return -EINVAL;
1745 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1746 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1747 	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1748 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1749 	num_qps = vsi->mqprio_qopt.qopt.count[0];
1750 
1751 	/* find the next higher power-of-2 of num queue pairs */
1752 	pow = ilog2(num_qps);
1753 	if (!is_power_of_2(num_qps))
1754 		pow++;
1755 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1756 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
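	/* Worked example (illustrative): for num_qps = 6, ilog2(6) = 2 and 6
	 * is not a power of two, so pow becomes 3 and the TC is sized for
	 * BIT(3) = 8 queue slots; qmap then packs offset 0 together with the
	 * size exponent 3 into the TC queue-mapping word.
	 */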
1757 
1758 	/* Setup queue offset/count for all TCs for given VSI */
1759 	max_qcount = vsi->mqprio_qopt.qopt.count[0];
1760 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1761 		/* See if the given TC is enabled for the given VSI */
1762 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1763 			offset = vsi->mqprio_qopt.qopt.offset[i];
1764 			qcount = vsi->mqprio_qopt.qopt.count[i];
1765 			if (qcount > max_qcount)
1766 				max_qcount = qcount;
1767 			vsi->tc_config.tc_info[i].qoffset = offset;
1768 			vsi->tc_config.tc_info[i].qcount = qcount;
1769 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1770 		} else {
1771 			/* TC is not enabled so set the offset to
1772 			 * default queue and allocate one queue
1773 			 * for the given TC.
1774 			 */
1775 			vsi->tc_config.tc_info[i].qoffset = 0;
1776 			vsi->tc_config.tc_info[i].qcount = 1;
1777 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1778 		}
1779 	}
1780 
1781 	/* Set actual Tx/Rx queue pairs */
1782 	vsi->num_queue_pairs = offset + qcount;
1783 
1784 	/* Setup queue TC[0].qmap for given VSI context */
1785 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1786 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1787 	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1788 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1789 
1790 	/* Reconfigure RSS for main VSI with max queue count */
1791 	vsi->rss_size = max_qcount;
1792 	ret = i40e_vsi_config_rss(vsi);
1793 	if (ret) {
1794 		dev_info(&vsi->back->pdev->dev,
1795 			 "Failed to reconfig rss for num_queues (%u)\n",
1796 			 max_qcount);
1797 		return ret;
1798 	}
1799 	vsi->reconfig_rss = true;
1800 	dev_dbg(&vsi->back->pdev->dev,
1801 		"Reconfigured rss with num_queues (%u)\n", max_qcount);
1802 
1803 	/* Find the queue count and the starting queue offset available
1804 	 * for channel VSIs
1805 	 */
1806 	override_q = vsi->mqprio_qopt.qopt.count[0];
1807 	if (override_q && override_q < vsi->num_queue_pairs) {
1808 		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1809 		vsi->next_base_queue = override_q;
1810 	}
1811 	return 0;
1812 }
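
/* Configuration sketch (illustrative values): this path is exercised when an
 * mqprio qdisc is offloaded in channel mode, e.g. something like
 *
 *	tc qdisc add dev <ifname> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 * which yields qopt.count[] = {4, 4} and qopt.offset[] = {0, 4}; the four
 * queues of TC0 then drive both the qmap encoding and the RSS size above.
 */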
1813 
1814 /**
1815  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1816  * @vsi: the VSI being setup
1817  * @ctxt: VSI context structure
1818  * @enabled_tc: Enabled TCs bitmap
1819  * @is_add: True if called before Add VSI
1820  *
1821  * Setup VSI queue mapping for enabled traffic classes.
1822  **/
1823 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1824 				     struct i40e_vsi_context *ctxt,
1825 				     u8 enabled_tc,
1826 				     bool is_add)
1827 {
1828 	struct i40e_pf *pf = vsi->back;
1829 	u16 num_tc_qps = 0;
1830 	u16 sections = 0;
1831 	u8 netdev_tc = 0;
1832 	u16 numtc = 1;
1833 	u16 qcount;
1834 	u8 offset;
1835 	u16 qmap;
1836 	int i;
1837 
1838 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1839 	offset = 0;
1840 	/* zero out queue mapping, it will get updated on the end of the function */
1841 	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
1842 
1843 	if (vsi->type == I40E_VSI_MAIN) {
1844 		/* This code helps to add more queues to the VSI if we have
1845 		 * more cores than RSS can support; the extra queues will
1846 		 * be served by ATR or other filters. Furthermore, a
1847 		 * non-zero req_queue_pairs means the user requested a new
1848 		 * queue count via ethtool's set_channels, so use that
1849 		 * value for queue distribution across traffic classes.
1850 		 */
1851 		if (vsi->req_queue_pairs > 0)
1852 			vsi->num_queue_pairs = vsi->req_queue_pairs;
1853 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1854 			vsi->num_queue_pairs = pf->num_lan_msix;
1855 	}
1856 
1857 	/* Number of queues per enabled TC */
1858 	if (vsi->type == I40E_VSI_MAIN ||
1859 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
1860 		num_tc_qps = vsi->num_queue_pairs;
1861 	else
1862 		num_tc_qps = vsi->alloc_queue_pairs;
1863 
1864 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1865 		/* Find numtc from enabled TC bitmap */
1866 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1867 			if (enabled_tc & BIT(i)) /* TC is enabled */
1868 				numtc++;
1869 		}
1870 		if (!numtc) {
1871 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1872 			numtc = 1;
1873 		}
1874 		num_tc_qps = num_tc_qps / numtc;
1875 		num_tc_qps = min_t(int, num_tc_qps,
1876 				   i40e_pf_get_max_q_per_tc(pf));
1877 	}
1878 
1879 	vsi->tc_config.numtc = numtc;
1880 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1881 
1882 	/* Do not allow using more TC queue pairs than there are MSI-X vectors */
1883 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1884 		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1885 
1886 	/* Setup queue offset/count for all TCs for given VSI */
1887 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1888 		/* See if the given TC is enabled for the given VSI */
1889 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1890 			/* TC is enabled */
1891 			int pow, num_qps;
1892 
1893 			switch (vsi->type) {
1894 			case I40E_VSI_MAIN:
1895 				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1896 				    I40E_FLAG_FD_ATR_ENABLED)) ||
1897 				    vsi->tc_config.enabled_tc != 1) {
1898 					qcount = min_t(int, pf->alloc_rss_size,
1899 						       num_tc_qps);
1900 					break;
1901 				}
1902 				fallthrough;
1903 			case I40E_VSI_FDIR:
1904 			case I40E_VSI_SRIOV:
1905 			case I40E_VSI_VMDQ2:
1906 			default:
1907 				qcount = num_tc_qps;
1908 				WARN_ON(i != 0);
1909 				break;
1910 			}
1911 			vsi->tc_config.tc_info[i].qoffset = offset;
1912 			vsi->tc_config.tc_info[i].qcount = qcount;
1913 
1914 			/* find the next higher power-of-2 of num queue pairs */
1915 			num_qps = qcount;
1916 			pow = 0;
1917 			while (num_qps && (BIT_ULL(pow) < qcount)) {
1918 				pow++;
1919 				num_qps >>= 1;
1920 			}
1921 
1922 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1923 			qmap =
1924 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1925 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1926 
1927 			offset += qcount;
1928 		} else {
1929 			/* TC is not enabled so set the offset to
1930 			 * default queue and allocate one queue
1931 			 * for the given TC.
1932 			 */
1933 			vsi->tc_config.tc_info[i].qoffset = 0;
1934 			vsi->tc_config.tc_info[i].qcount = 1;
1935 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1936 
1937 			qmap = 0;
1938 		}
1939 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1940 	}
1941 	/* Do not change previously set num_queue_pairs for PFs and VFs */
1942 	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
1943 	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
1944 	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
1945 		vsi->num_queue_pairs = offset;
1946 
1947 	/* Scheduler section valid can only be set for ADD VSI */
1948 	if (is_add) {
1949 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1950 
1951 		ctxt->info.up_enable_bits = enabled_tc;
1952 	}
1953 	if (vsi->type == I40E_VSI_SRIOV) {
1954 		ctxt->info.mapping_flags |=
1955 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1956 		for (i = 0; i < vsi->num_queue_pairs; i++)
1957 			ctxt->info.queue_mapping[i] =
1958 					       cpu_to_le16(vsi->base_queue + i);
1959 	} else {
1960 		ctxt->info.mapping_flags |=
1961 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1962 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1963 	}
1964 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1965 }
1966 
1967 /**
1968  * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1969  * @netdev: the netdevice
1970  * @addr: address to add
1971  *
1972  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1973  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1974  */
1975 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1976 {
1977 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1978 	struct i40e_vsi *vsi = np->vsi;
1979 
1980 	if (i40e_add_mac_filter(vsi, addr))
1981 		return 0;
1982 	else
1983 		return -ENOMEM;
1984 }
1985 
1986 /**
1987  * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1988  * @netdev: the netdevice
1989  * @addr: address to add
1990  *
1991  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1992  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1993  */
1994 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1995 {
1996 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1997 	struct i40e_vsi *vsi = np->vsi;
1998 
1999 	/* Under some circumstances, we might receive a request to delete
2000 	 * our own device address from our uc list. Because we store the
2001 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2002 	 * such requests and not delete our device address from this list.
2003 	 */
2004 	if (ether_addr_equal(addr, netdev->dev_addr))
2005 		return 0;
2006 
2007 	i40e_del_mac_filter(vsi, addr);
2008 
2009 	return 0;
2010 }
2011 
2012 /**
2013  * i40e_set_rx_mode - NDO callback to set the netdev filters
2014  * @netdev: network interface device structure
2015  **/
2016 static void i40e_set_rx_mode(struct net_device *netdev)
2017 {
2018 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2019 	struct i40e_vsi *vsi = np->vsi;
2020 
2021 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2022 
2023 	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2024 	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2025 
2026 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2027 
2028 	/* check for other flag changes */
2029 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
2030 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2031 		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2032 	}
2033 }
2034 
2035 /**
2036  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2037  * @vsi: Pointer to VSI struct
2038  * @from: Pointer to list which contains MAC filter entries - changes to
2039  *        those entries need to be undone.
2040  *
2041  * MAC filter entries from this list were slated for deletion.
2042  **/
2043 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2044 					 struct hlist_head *from)
2045 {
2046 	struct i40e_mac_filter *f;
2047 	struct hlist_node *h;
2048 
2049 	hlist_for_each_entry_safe(f, h, from, hlist) {
2050 		u64 key = i40e_addr_to_hkey(f->macaddr);
2051 
2052 		/* Move the element back into the MAC filter list */
2053 		hlist_del(&f->hlist);
2054 		hash_add(vsi->mac_filter_hash, &f->hlist, key);
2055 	}
2056 }
2057 
2058 /**
2059  * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2060  * @vsi: Pointer to vsi struct
2061  * @from: Pointer to list which contains MAC filter entries - changes to
2062  *        those entries need to be undone.
2063  *
2064  * MAC filter entries from this list were slated for addition.
2065  **/
2066 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2067 					 struct hlist_head *from)
2068 {
2069 	struct i40e_new_mac_filter *new;
2070 	struct hlist_node *h;
2071 
2072 	hlist_for_each_entry_safe(new, h, from, hlist) {
2073 		/* We can simply free the wrapper structure */
2074 		hlist_del(&new->hlist);
2075 		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2076 		kfree(new);
2077 	}
2078 }
2079 
2080 /**
2081  * i40e_next_filter - Get the next non-broadcast filter from a list
2082  * @next: pointer to filter in list
2083  *
2084  * Returns the next non-broadcast filter in the list. Required so that we
2085  * ignore broadcast filters within the list, since these are not handled via
2086  * the normal firmware update path.
2087  */
2088 static
2089 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2090 {
2091 	hlist_for_each_entry_continue(next, hlist) {
2092 		if (!is_broadcast_ether_addr(next->f->macaddr))
2093 			return next;
2094 	}
2095 
2096 	return NULL;
2097 }
2098 
2099 /**
2100  * i40e_update_filter_state - Update filter state based on return data
2101  * from firmware
2102  * @count: Number of filters added
2103  * @add_list: return data from fw
2104  * @add_head: pointer to first filter in current batch
2105  *
2106  * MAC filter entries from the list were slated to be added to the device. Returns
2107  * number of successful filters. Note that 0 does NOT mean success!
2108  **/
2109 static int
2110 i40e_update_filter_state(int count,
2111 			 struct i40e_aqc_add_macvlan_element_data *add_list,
2112 			 struct i40e_new_mac_filter *add_head)
2113 {
2114 	int retval = 0;
2115 	int i;
2116 
2117 	for (i = 0; i < count; i++) {
2118 		/* Always check status of each filter. We don't need to check
2119 		 * the firmware return status because we pre-set each filter's
2120 		 * match_method to I40E_AQC_MM_ERR_NO_RES when sending the filter
2121 		 * request to the adminq. Thus, if it no longer matches then
2122 		 * we know the filter is active.
2123 		 */
2124 		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2125 			add_head->state = I40E_FILTER_FAILED;
2126 		} else {
2127 			add_head->state = I40E_FILTER_ACTIVE;
2128 			retval++;
2129 		}
2130 
2131 		add_head = i40e_next_filter(add_head);
2132 		if (!add_head)
2133 			break;
2134 	}
2135 
2136 	return retval;
2137 }
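
/* Illustrative walk-through: if three filters are sent and firmware only has
 * resources for the first two, it overwrites match_method on entries 0 and 1
 * but leaves entry 2 at the I40E_AQC_MM_ERR_NO_RES sentinel, so only entry 2
 * is marked I40E_FILTER_FAILED and the function returns 2.
 */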
2138 
2139 /**
2140  * i40e_aqc_del_filters - Request firmware to delete a set of filters
2141  * @vsi: ptr to the VSI
2142  * @vsi_name: name to display in messages
2143  * @list: the list of filters to send to firmware
2144  * @num_del: the number of filters to delete
2145  * @retval: Set to -EIO on failure to delete
2146  *
2147  * Send a request to firmware via AdminQ to delete a set of filters. Uses
2148  * *retval instead of a return value so that success does not force *retval to
2149  * be set to 0. This ensures that a sequence of calls to this function
2150  * preserves the previous value of *retval on successful delete.
2151  */
2152 static
2153 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2154 			  struct i40e_aqc_remove_macvlan_element_data *list,
2155 			  int num_del, int *retval)
2156 {
2157 	struct i40e_hw *hw = &vsi->back->hw;
2158 	enum i40e_admin_queue_err aq_status;
2159 	i40e_status aq_ret;
2160 
2161 	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2162 					   &aq_status);
2163 
2164 	/* Explicitly ignore and do not report when firmware returns ENOENT */
2165 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2166 		*retval = -EIO;
2167 		dev_info(&vsi->back->pdev->dev,
2168 			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2169 			 vsi_name, i40e_stat_str(hw, aq_ret),
2170 			 i40e_aq_str(hw, aq_status));
2171 	}
2172 }
2173 
2174 /**
2175  * i40e_aqc_add_filters - Request firmware to add a set of filters
2176  * @vsi: ptr to the VSI
2177  * @vsi_name: name to display in messages
2178  * @list: the list of filters to send to firmware
2179  * @add_head: Position in the add hlist
2180  * @num_add: the number of filters to add
2181  *
2182  * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2183  * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2184  * space for more filters.
2185  */
2186 static
2187 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2188 			  struct i40e_aqc_add_macvlan_element_data *list,
2189 			  struct i40e_new_mac_filter *add_head,
2190 			  int num_add)
2191 {
2192 	struct i40e_hw *hw = &vsi->back->hw;
2193 	enum i40e_admin_queue_err aq_status;
2194 	int fcnt;
2195 
2196 	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2197 	fcnt = i40e_update_filter_state(num_add, list, add_head);
2198 
2199 	if (fcnt != num_add) {
2200 		if (vsi->type == I40E_VSI_MAIN) {
2201 			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2202 			dev_warn(&vsi->back->pdev->dev,
2203 				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2204 				 i40e_aq_str(hw, aq_status), vsi_name);
2205 		} else if (vsi->type == I40E_VSI_SRIOV ||
2206 			   vsi->type == I40E_VSI_VMDQ1 ||
2207 			   vsi->type == I40E_VSI_VMDQ2) {
2208 			dev_warn(&vsi->back->pdev->dev,
2209 				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2210 				 i40e_aq_str(hw, aq_status), vsi_name,
2211 					     vsi_name);
2212 		} else {
2213 			dev_warn(&vsi->back->pdev->dev,
2214 				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2215 				 i40e_aq_str(hw, aq_status), vsi_name,
2216 					     vsi->type);
2217 		}
2218 	}
2219 }
2220 
2221 /**
2222  * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2223  * @vsi: pointer to the VSI
2224  * @vsi_name: the VSI name
2225  * @f: filter data
2226  *
2227  * This function sets or clears the promiscuous broadcast flags for VLAN
2228  * filters in order to properly receive broadcast frames. Assumes that only
2229  * broadcast filters are passed.
2230  *
2231  * Returns status indicating success or failure.
2232  **/
2233 static i40e_status
2234 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2235 			  struct i40e_mac_filter *f)
2236 {
2237 	bool enable = f->state == I40E_FILTER_NEW;
2238 	struct i40e_hw *hw = &vsi->back->hw;
2239 	i40e_status aq_ret;
2240 
2241 	if (f->vlan == I40E_VLAN_ANY) {
2242 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
2243 						   vsi->seid,
2244 						   enable,
2245 						   NULL);
2246 	} else {
2247 		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2248 							    vsi->seid,
2249 							    enable,
2250 							    f->vlan,
2251 							    NULL);
2252 	}
2253 
2254 	if (aq_ret) {
2255 		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2256 		dev_warn(&vsi->back->pdev->dev,
2257 			 "Error %s, forcing overflow promiscuous on %s\n",
2258 			 i40e_aq_str(hw, hw->aq.asq_last_status),
2259 			 vsi_name);
2260 	}
2261 
2262 	return aq_ret;
2263 }
2264 
2265 /**
2266  * i40e_set_promiscuous - set promiscuous mode
2267  * @pf: board private structure
2268  * @promisc: promisc on or off
2269  *
2270  * There are different ways of setting promiscuous mode on a PF depending on
2271  * what state/environment we're in.  This identifies and sets it appropriately.
2272  * Returns 0 on success.
2273  **/
2274 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2275 {
2276 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2277 	struct i40e_hw *hw = &pf->hw;
2278 	i40e_status aq_ret;
2279 
2280 	if (vsi->type == I40E_VSI_MAIN &&
2281 	    pf->lan_veb != I40E_NO_VEB &&
2282 	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2283 		/* set defport ON for Main VSI instead of true promisc;
2284 		 * this way we will get all unicast/multicast and VLAN
2285 		 * promisc behavior but will not get VF or VMDq traffic
2286 		 * replicated on the Main VSI.
2287 		 */
2288 		if (promisc)
2289 			aq_ret = i40e_aq_set_default_vsi(hw,
2290 							 vsi->seid,
2291 							 NULL);
2292 		else
2293 			aq_ret = i40e_aq_clear_default_vsi(hw,
2294 							   vsi->seid,
2295 							   NULL);
2296 		if (aq_ret) {
2297 			dev_info(&pf->pdev->dev,
2298 				 "Set default VSI failed, err %s, aq_err %s\n",
2299 				 i40e_stat_str(hw, aq_ret),
2300 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2301 		}
2302 	} else {
2303 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2304 						  hw,
2305 						  vsi->seid,
2306 						  promisc, NULL,
2307 						  true);
2308 		if (aq_ret) {
2309 			dev_info(&pf->pdev->dev,
2310 				 "set unicast promisc failed, err %s, aq_err %s\n",
2311 				 i40e_stat_str(hw, aq_ret),
2312 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2313 		}
2314 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2315 						  hw,
2316 						  vsi->seid,
2317 						  promisc, NULL);
2318 		if (aq_ret) {
2319 			dev_info(&pf->pdev->dev,
2320 				 "set multicast promisc failed, err %s, aq_err %s\n",
2321 				 i40e_stat_str(hw, aq_ret),
2322 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2323 		}
2324 	}
2325 
2326 	if (!aq_ret)
2327 		pf->cur_promisc = promisc;
2328 
2329 	return aq_ret;
2330 }
2331 
2332 /**
2333  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2334  * @vsi: ptr to the VSI
2335  *
2336  * Push any outstanding VSI filter changes through the AdminQ.
2337  *
2338  * Returns 0 or error value
2339  **/
2340 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2341 {
2342 	struct hlist_head tmp_add_list, tmp_del_list;
2343 	struct i40e_mac_filter *f;
2344 	struct i40e_new_mac_filter *new, *add_head = NULL;
2345 	struct i40e_hw *hw = &vsi->back->hw;
2346 	bool old_overflow, new_overflow;
2347 	unsigned int failed_filters = 0;
2348 	unsigned int vlan_filters = 0;
2349 	char vsi_name[16] = "PF";
2350 	int filter_list_len = 0;
2351 	i40e_status aq_ret = 0;
2352 	u32 changed_flags = 0;
2353 	struct hlist_node *h;
2354 	struct i40e_pf *pf;
2355 	int num_add = 0;
2356 	int num_del = 0;
2357 	int retval = 0;
2358 	u16 cmd_flags;
2359 	int list_size;
2360 	int bkt;
2361 
2362 	/* empty array-typed pointers, allocated later with kzalloc */
2363 	struct i40e_aqc_add_macvlan_element_data *add_list;
2364 	struct i40e_aqc_remove_macvlan_element_data *del_list;
2365 
2366 	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2367 		usleep_range(1000, 2000);
2368 	pf = vsi->back;
2369 
2370 	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2371 
2372 	if (vsi->netdev) {
2373 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2374 		vsi->current_netdev_flags = vsi->netdev->flags;
2375 	}
2376 
2377 	INIT_HLIST_HEAD(&tmp_add_list);
2378 	INIT_HLIST_HEAD(&tmp_del_list);
2379 
2380 	if (vsi->type == I40E_VSI_SRIOV)
2381 		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2382 	else if (vsi->type != I40E_VSI_MAIN)
2383 		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2384 
2385 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2386 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2387 
2388 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2389 		/* Create a list of filters to delete. */
2390 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2391 			if (f->state == I40E_FILTER_REMOVE) {
2392 				/* Move the element into temporary del_list */
2393 				hash_del(&f->hlist);
2394 				hlist_add_head(&f->hlist, &tmp_del_list);
2395 
2396 				/* Avoid counting removed filters */
2397 				continue;
2398 			}
2399 			if (f->state == I40E_FILTER_NEW) {
2400 				/* Create a temporary i40e_new_mac_filter */
2401 				new = kzalloc(sizeof(*new), GFP_ATOMIC);
2402 				if (!new)
2403 					goto err_no_memory_locked;
2404 
2405 				/* Store pointer to the real filter */
2406 				new->f = f;
2407 				new->state = f->state;
2408 
2409 				/* Add it to the hash list */
2410 				hlist_add_head(&new->hlist, &tmp_add_list);
2411 			}
2412 
2413 			/* Count the number of active (current and new) VLAN
2414 			 * filters we have now. Does not count filters which
2415 			 * are marked for deletion.
2416 			 */
2417 			if (f->vlan > 0)
2418 				vlan_filters++;
2419 		}
2420 
2421 		retval = i40e_correct_mac_vlan_filters(vsi,
2422 						       &tmp_add_list,
2423 						       &tmp_del_list,
2424 						       vlan_filters);
2425 
2426 		hlist_for_each_entry(new, &tmp_add_list, hlist)
2427 			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2428 
2429 		if (retval)
2430 			goto err_no_memory_locked;
2431 
2432 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2433 	}
2434 
2435 	/* Now process 'del_list' outside the lock */
2436 	if (!hlist_empty(&tmp_del_list)) {
2437 		filter_list_len = hw->aq.asq_buf_size /
2438 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
2439 		list_size = filter_list_len *
2440 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
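		/* Sizing sketch (a 4 KiB AdminQ buffer is assumed here purely
		 * for illustration): filter_list_len would then be
		 * 4096 / sizeof(remove element) entries, and that many
		 * deletions are batched per AdminQ call before the buffer is
		 * flushed.
		 */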
2441 		del_list = kzalloc(list_size, GFP_ATOMIC);
2442 		if (!del_list)
2443 			goto err_no_memory;
2444 
2445 		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2446 			cmd_flags = 0;
2447 
2448 			/* handle broadcast filters by updating the broadcast
2449 			 * promiscuous flag and releasing the filter entry.
2450 			 */
2451 			if (is_broadcast_ether_addr(f->macaddr)) {
2452 				i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2453 
2454 				hlist_del(&f->hlist);
2455 				kfree(f);
2456 				continue;
2457 			}
2458 
2459 			/* add to delete list */
2460 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2461 			if (f->vlan == I40E_VLAN_ANY) {
2462 				del_list[num_del].vlan_tag = 0;
2463 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2464 			} else {
2465 				del_list[num_del].vlan_tag =
2466 					cpu_to_le16((u16)(f->vlan));
2467 			}
2468 
2469 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2470 			del_list[num_del].flags = cmd_flags;
2471 			num_del++;
2472 
2473 			/* flush a full buffer */
2474 			if (num_del == filter_list_len) {
2475 				i40e_aqc_del_filters(vsi, vsi_name, del_list,
2476 						     num_del, &retval);
2477 				memset(del_list, 0, list_size);
2478 				num_del = 0;
2479 			}
2480 			/* Release memory for MAC filter entries which were
2481 			 * synced up with HW.
2482 			 */
2483 			hlist_del(&f->hlist);
2484 			kfree(f);
2485 		}
2486 
2487 		if (num_del) {
2488 			i40e_aqc_del_filters(vsi, vsi_name, del_list,
2489 					     num_del, &retval);
2490 		}
2491 
2492 		kfree(del_list);
2493 		del_list = NULL;
2494 	}
2495 
2496 	if (!hlist_empty(&tmp_add_list)) {
2497 		/* Do all the adds now. */
2498 		filter_list_len = hw->aq.asq_buf_size /
2499 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2500 		list_size = filter_list_len *
2501 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
2502 		add_list = kzalloc(list_size, GFP_ATOMIC);
2503 		if (!add_list)
2504 			goto err_no_memory;
2505 
2506 		num_add = 0;
2507 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2508 			/* handle broadcast filters by updating the broadcast
2509 			 * promiscuous flag instead of adding a MAC filter.
2510 			 */
2511 			if (is_broadcast_ether_addr(new->f->macaddr)) {
2512 				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2513 							      new->f))
2514 					new->state = I40E_FILTER_FAILED;
2515 				else
2516 					new->state = I40E_FILTER_ACTIVE;
2517 				continue;
2518 			}
2519 
2520 			/* add to add array */
2521 			if (num_add == 0)
2522 				add_head = new;
2523 			cmd_flags = 0;
2524 			ether_addr_copy(add_list[num_add].mac_addr,
2525 					new->f->macaddr);
2526 			if (new->f->vlan == I40E_VLAN_ANY) {
2527 				add_list[num_add].vlan_tag = 0;
2528 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2529 			} else {
2530 				add_list[num_add].vlan_tag =
2531 					cpu_to_le16((u16)(new->f->vlan));
2532 			}
2533 			add_list[num_add].queue_number = 0;
2534 			/* set invalid match method for later detection */
2535 			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2536 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2537 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
2538 			num_add++;
2539 
2540 			/* flush a full buffer */
2541 			if (num_add == filter_list_len) {
2542 				i40e_aqc_add_filters(vsi, vsi_name, add_list,
2543 						     add_head, num_add);
2544 				memset(add_list, 0, list_size);
2545 				num_add = 0;
2546 			}
2547 		}
2548 		if (num_add) {
2549 			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2550 					     num_add);
2551 		}
2552 		/* Now move all of the filters from the temp add list back to
2553 		 * the VSI's list.
2554 		 */
2555 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2556 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2557 			/* Only update the state if we're still NEW */
2558 			if (new->f->state == I40E_FILTER_NEW)
2559 				new->f->state = new->state;
2560 			hlist_del(&new->hlist);
2561 			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2562 			kfree(new);
2563 		}
2564 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2565 		kfree(add_list);
2566 		add_list = NULL;
2567 	}
2568 
2569 	/* Determine the number of active and failed filters. */
2570 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2571 	vsi->active_filters = 0;
2572 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2573 		if (f->state == I40E_FILTER_ACTIVE)
2574 			vsi->active_filters++;
2575 		else if (f->state == I40E_FILTER_FAILED)
2576 			failed_filters++;
2577 	}
2578 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2579 
2580 	/* Check if we are able to exit overflow promiscuous mode. We can
2581 	 * safely exit if we didn't just enter, we no longer have any failed
2582 	 * filters, and we have reduced filters below the threshold value.
2583 	 */
2584 	if (old_overflow && !failed_filters &&
2585 	    vsi->active_filters < vsi->promisc_threshold) {
2586 		dev_info(&pf->pdev->dev,
2587 			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2588 			 vsi_name);
2589 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2590 		vsi->promisc_threshold = 0;
2591 	}
2592 
2593 	/* if the VF is not trusted do not do promisc */
2594 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2595 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2596 		goto out;
2597 	}
2598 
2599 	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2600 
2601 	/* If we are entering overflow promiscuous, we need to calculate a new
2602 	 * threshold for when we are safe to exit
2603 	 */
2604 	if (!old_overflow && new_overflow)
2605 		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2606 
2607 	/* check for changes in promiscuous modes */
2608 	if (changed_flags & IFF_ALLMULTI) {
2609 		bool cur_multipromisc;
2610 
2611 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2612 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2613 							       vsi->seid,
2614 							       cur_multipromisc,
2615 							       NULL);
2616 		if (aq_ret) {
2617 			retval = i40e_aq_rc_to_posix(aq_ret,
2618 						     hw->aq.asq_last_status);
2619 			dev_info(&pf->pdev->dev,
2620 				 "set multi promisc failed on %s, err %s aq_err %s\n",
2621 				 vsi_name,
2622 				 i40e_stat_str(hw, aq_ret),
2623 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2624 		} else {
2625 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2626 				 cur_multipromisc ? "entering" : "leaving");
2627 		}
2628 	}
2629 
2630 	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2631 		bool cur_promisc;
2632 
2633 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2634 			       new_overflow);
2635 		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2636 		if (aq_ret) {
2637 			retval = i40e_aq_rc_to_posix(aq_ret,
2638 						     hw->aq.asq_last_status);
2639 			dev_info(&pf->pdev->dev,
2640 				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2641 				 cur_promisc ? "on" : "off",
2642 				 vsi_name,
2643 				 i40e_stat_str(hw, aq_ret),
2644 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2645 		}
2646 	}
2647 out:
2648 	/* if something went wrong then set the changed flag so we try again */
2649 	if (retval)
2650 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2651 
2652 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2653 	return retval;
2654 
2655 err_no_memory:
2656 	/* Restore elements on the temporary add and delete lists */
2657 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2658 err_no_memory_locked:
2659 	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2660 	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2661 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2662 
2663 	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2664 	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2665 	return -ENOMEM;
2666 }
2667 
2668 /**
2669  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2670  * @pf: board private structure
2671  **/
2672 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2673 {
2674 	int v;
2675 
2676 	if (!pf)
2677 		return;
2678 	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2679 		return;
2680 	if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2681 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2682 		return;
2683 	}
2684 
2685 	for (v = 0; v < pf->num_alloc_vsi; v++) {
2686 		if (pf->vsi[v] &&
2687 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2688 		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2689 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2690 
2691 			if (ret) {
2692 				/* come back and try again later */
2693 				set_bit(__I40E_MACVLAN_SYNC_PENDING,
2694 					pf->state);
2695 				break;
2696 			}
2697 		}
2698 	}
2699 }
2700 
2701 /**
2702  * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2703  * @vsi: the vsi
2704  **/
2705 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2706 {
2707 	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2708 		return I40E_RXBUFFER_2048;
2709 	else
2710 		return I40E_RXBUFFER_3072;
2711 }
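
/* Rationale sketch (an assumption, not spelled out in this file): an XDP
 * frame must fit in a single Rx buffer, so the limit tracks the Rx buffer
 * size chosen elsewhere: 2048 bytes for large pages or legacy-rx, and the
 * bigger 3072-byte buffer when pages are split for Rx.
 */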
2712 
2713 /**
2714  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2715  * @netdev: network interface device structure
2716  * @new_mtu: new value for maximum frame size
2717  *
2718  * Returns 0 on success, negative on failure
2719  **/
2720 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2721 {
2722 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2723 	struct i40e_vsi *vsi = np->vsi;
2724 	struct i40e_pf *pf = vsi->back;
2725 
2726 	if (i40e_enabled_xdp_vsi(vsi)) {
2727 		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2728 
2729 		if (frame_size > i40e_max_xdp_frame_size(vsi))
2730 			return -EINVAL;
2731 	}
2732 
2733 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
2734 		   netdev->mtu, new_mtu);
2735 	netdev->mtu = new_mtu;
2736 	if (netif_running(netdev))
2737 		i40e_vsi_reinit_locked(vsi);
2738 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2739 	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2740 	return 0;
2741 }
2742 
2743 /**
2744  * i40e_ioctl - Access the hwtstamp interface
2745  * @netdev: network interface device structure
2746  * @ifr: interface request data
2747  * @cmd: ioctl command
2748  **/
2749 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2750 {
2751 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2752 	struct i40e_pf *pf = np->vsi->back;
2753 
2754 	switch (cmd) {
2755 	case SIOCGHWTSTAMP:
2756 		return i40e_ptp_get_ts_config(pf, ifr);
2757 	case SIOCSHWTSTAMP:
2758 		return i40e_ptp_set_ts_config(pf, ifr);
2759 	default:
2760 		return -EOPNOTSUPP;
2761 	}
2762 }
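
/* Userspace sketch (illustrative, values assumed): both commands above are
 * reached through the standard hwtstamp ioctl, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	(fd is any open socket)
 */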
2763 
2764 /**
2765  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2766  * @vsi: the vsi being adjusted
2767  **/
2768 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2769 {
2770 	struct i40e_vsi_context ctxt;
2771 	i40e_status ret;
2772 
2773 	/* Don't modify stripping options if a port VLAN is active */
2774 	if (vsi->info.pvid)
2775 		return;
2776 
2777 	if ((vsi->info.valid_sections &
2778 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2779 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2780 		return;  /* already enabled */
2781 
2782 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2783 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2784 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2785 
2786 	ctxt.seid = vsi->seid;
2787 	ctxt.info = vsi->info;
2788 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2789 	if (ret) {
2790 		dev_info(&vsi->back->pdev->dev,
2791 			 "update vlan stripping failed, err %s aq_err %s\n",
2792 			 i40e_stat_str(&vsi->back->hw, ret),
2793 			 i40e_aq_str(&vsi->back->hw,
2794 				     vsi->back->hw.aq.asq_last_status));
2795 	}
2796 }
2797 
2798 /**
2799  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2800  * @vsi: the vsi being adjusted
2801  **/
2802 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2803 {
2804 	struct i40e_vsi_context ctxt;
2805 	i40e_status ret;
2806 
2807 	/* Don't modify stripping options if a port VLAN is active */
2808 	if (vsi->info.pvid)
2809 		return;
2810 
2811 	if ((vsi->info.valid_sections &
2812 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2813 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2814 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2815 		return;  /* already disabled */
2816 
2817 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2818 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2819 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2820 
2821 	ctxt.seid = vsi->seid;
2822 	ctxt.info = vsi->info;
2823 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2824 	if (ret) {
2825 		dev_info(&vsi->back->pdev->dev,
2826 			 "update vlan stripping failed, err %s aq_err %s\n",
2827 			 i40e_stat_str(&vsi->back->hw, ret),
2828 			 i40e_aq_str(&vsi->back->hw,
2829 				     vsi->back->hw.aq.asq_last_status));
2830 	}
2831 }
2832 
2833 /**
2834  * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2835  * @vsi: the vsi being configured
2836  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2837  *
2838  * This is a helper function for adding a new MAC/VLAN filter with the
2839  * specified VLAN for each existing MAC address already in the hash table.
2840  * This function does *not* perform any accounting to update filters based on
2841  * VLAN mode.
2842  *
2843  * NOTE: this function expects to be called while under the
2844  * mac_filter_hash_lock
2845  **/
2846 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2847 {
2848 	struct i40e_mac_filter *f, *add_f;
2849 	struct hlist_node *h;
2850 	int bkt;
2851 
2852 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2853 		if (f->state == I40E_FILTER_REMOVE)
2854 			continue;
2855 		add_f = i40e_add_filter(vsi, f->macaddr, vid);
2856 		if (!add_f) {
2857 			dev_info(&vsi->back->pdev->dev,
2858 				 "Could not add vlan filter %d for %pM\n",
2859 				 vid, f->macaddr);
2860 			return -ENOMEM;
2861 		}
2862 	}
2863 
2864 	return 0;
2865 }
2866 
2867 /**
2868  * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2869  * @vsi: the VSI being configured
2870  * @vid: VLAN id to be added
2871  **/
2872 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2873 {
2874 	int err;
2875 
2876 	if (vsi->info.pvid)
2877 		return -EINVAL;
2878 
2879 	/* The network stack will attempt to add VID=0, with the intention to
2880 	 * receive priority tagged packets with a VLAN of 0. Our HW receives
2881 	 * these packets by default when configured to receive untagged
2882 	 * packets, so we don't need to add a filter for this case.
2883 	 * Additionally, HW interprets adding a VID=0 filter as meaning to
2884 	 * receive *only* tagged traffic and stops receiving untagged traffic.
2885 	 * Thus, we do not want to actually add a filter for VID=0
2886 	 */
2887 	if (!vid)
2888 		return 0;
2889 
2890 	/* Locked once because all functions invoked below iterate the list */
2891 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2892 	err = i40e_add_vlan_all_mac(vsi, vid);
2893 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2894 	if (err)
2895 		return err;
2896 
2897 	/* schedule our worker thread which will take care of
2898 	 * applying the new filter changes
2899 	 */
2900 	i40e_service_event_schedule(vsi->back);
2901 	return 0;
2902 }
2903 
2904 /**
2905  * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2906  * @vsi: the vsi being configured
2907  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2908  *
2909  * This function should be used to remove all VLAN filters which match the
2910  * given VID. It does not schedule the service event and does not take the
2911  * mac_filter_hash_lock so it may be combined with other operations under
2912  * a single invocation of the mac_filter_hash_lock.
2913  *
2914  * NOTE: this function expects to be called while under the
2915  * mac_filter_hash_lock
2916  */
2917 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2918 {
2919 	struct i40e_mac_filter *f;
2920 	struct hlist_node *h;
2921 	int bkt;
2922 
2923 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2924 		if (f->vlan == vid)
2925 			__i40e_del_filter(vsi, f);
2926 	}
2927 }
2928 
2929 /**
2930  * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2931  * @vsi: the VSI being configured
2932  * @vid: VLAN id to be removed
2933  **/
2934 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2935 {
2936 	if (!vid || vsi->info.pvid)
2937 		return;
2938 
2939 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2940 	i40e_rm_vlan_all_mac(vsi, vid);
2941 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2942 
2943 	/* schedule our worker thread which will take care of
2944 	 * applying the new filter changes
2945 	 */
2946 	i40e_service_event_schedule(vsi->back);
2947 }
2948 
2949 /**
2950  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2951  * @netdev: network interface to be adjusted
2952  * @proto: unused protocol value
2953  * @vid: vlan id to be added
2954  *
2955  * net_device_ops implementation for adding vlan ids
2956  **/
2957 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2958 				__always_unused __be16 proto, u16 vid)
2959 {
2960 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2961 	struct i40e_vsi *vsi = np->vsi;
2962 	int ret = 0;
2963 
2964 	if (vid >= VLAN_N_VID)
2965 		return -EINVAL;
2966 
2967 	ret = i40e_vsi_add_vlan(vsi, vid);
2968 	if (!ret)
2969 		set_bit(vid, vsi->active_vlans);
2970 
2971 	return ret;
2972 }
2973 
2974 /**
2975  * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2976  * @netdev: network interface to be adjusted
2977  * @proto: unused protocol value
2978  * @vid: vlan id to be added
2979  **/
2980 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2981 				    __always_unused __be16 proto, u16 vid)
2982 {
2983 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2984 	struct i40e_vsi *vsi = np->vsi;
2985 
2986 	if (vid >= VLAN_N_VID)
2987 		return;
2988 	set_bit(vid, vsi->active_vlans);
2989 }
2990 
2991 /**
2992  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2993  * @netdev: network interface to be adjusted
2994  * @proto: unused protocol value
2995  * @vid: vlan id to be removed
2996  *
2997  * net_device_ops implementation for removing vlan ids
2998  **/
2999 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3000 				 __always_unused __be16 proto, u16 vid)
3001 {
3002 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3003 	struct i40e_vsi *vsi = np->vsi;
3004 
3005 	/* failures are not reported here as there is nothing a user
3006 	 * can do about a failure to remove and a log message was
3007 	 * already printed from the other function
3008 	 */
3009 	i40e_vsi_kill_vlan(vsi, vid);
3010 
3011 	clear_bit(vid, vsi->active_vlans);
3012 
3013 	return 0;
3014 }
3015 
3016 /**
3017  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3018  * @vsi: the vsi being brought back up
3019  **/
3020 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3021 {
3022 	u16 vid;
3023 
3024 	if (!vsi->netdev)
3025 		return;
3026 
3027 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3028 		i40e_vlan_stripping_enable(vsi);
3029 	else
3030 		i40e_vlan_stripping_disable(vsi);
3031 
3032 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3033 		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3034 					vid);
3035 }
3036 
3037 /**
3038  * i40e_vsi_add_pvid - Add pvid for the VSI
3039  * @vsi: the vsi being adjusted
3040  * @vid: the vlan id to set as a PVID
3041  **/
3042 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3043 {
3044 	struct i40e_vsi_context ctxt;
3045 	i40e_status ret;
3046 
3047 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3048 	vsi->info.pvid = cpu_to_le16(vid);
3049 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3050 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
3051 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
3052 
3053 	ctxt.seid = vsi->seid;
3054 	ctxt.info = vsi->info;
3055 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3056 	if (ret) {
3057 		dev_info(&vsi->back->pdev->dev,
3058 			 "add pvid failed, err %s aq_err %s\n",
3059 			 i40e_stat_str(&vsi->back->hw, ret),
3060 			 i40e_aq_str(&vsi->back->hw,
3061 				     vsi->back->hw.aq.asq_last_status));
3062 		return -ENOENT;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
3068 /**
3069  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3070  * @vsi: the vsi being adjusted
3071  *
3072  * Just disable VLAN stripping to put the VSI back to normal
3073  **/
3074 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3075 {
3076 	vsi->info.pvid = 0;
3077 
3078 	i40e_vlan_stripping_disable(vsi);
3079 }
3080 
3081 /**
3082  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3083  * @vsi: ptr to the VSI
3084  *
3085  * If this function returns with an error, then it's possible one or
3086  * more of the rings is populated (while the rest are not).  It is the
3087  * caller's duty to clean up those orphaned rings.
3088  *
3089  * Return 0 on success, negative on failure
3090  **/
3091 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3092 {
3093 	int i, err = 0;
3094 
3095 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3096 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3097 
3098 	if (!i40e_enabled_xdp_vsi(vsi))
3099 		return err;
3100 
3101 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3102 		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3103 
3104 	return err;
3105 }
3106 
3107 /**
3108  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3109  * @vsi: ptr to the VSI
3110  *
3111  * Free VSI's transmit software resources
3112  **/
3113 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3114 {
3115 	int i;
3116 
3117 	if (vsi->tx_rings) {
3118 		for (i = 0; i < vsi->num_queue_pairs; i++)
3119 			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3120 				i40e_free_tx_resources(vsi->tx_rings[i]);
3121 	}
3122 
3123 	if (vsi->xdp_rings) {
3124 		for (i = 0; i < vsi->num_queue_pairs; i++)
3125 			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3126 				i40e_free_tx_resources(vsi->xdp_rings[i]);
3127 	}
3128 }
3129 
3130 /**
3131  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3132  * @vsi: ptr to the VSI
3133  *
3134  * If this function returns with an error, then it's possible one or
3135  * more of the rings is populated (while the rest are not).  It is the
3136  * caller's duty to clean up those orphaned rings.
3137  *
3138  * Return 0 on success, negative on failure
3139  **/
3140 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3141 {
3142 	int i, err = 0;
3143 
3144 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3145 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3146 	return err;
3147 }
3148 
3149 /**
3150  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3151  * @vsi: ptr to the VSI
3152  *
3153  * Free all receive software resources
3154  **/
3155 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3156 {
3157 	int i;
3158 
3159 	if (!vsi->rx_rings)
3160 		return;
3161 
3162 	for (i = 0; i < vsi->num_queue_pairs; i++)
3163 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3164 			i40e_free_rx_resources(vsi->rx_rings[i]);
3165 }
3166 
3167 /**
3168  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3169  * @ring: The Tx ring to configure
3170  *
3171  * This enables/disables XPS for a given Tx descriptor ring
3172  * based on the TCs enabled for the VSI that ring belongs to.
3173  **/
3174 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3175 {
3176 	int cpu;
3177 
3178 	if (!ring->q_vector || !ring->netdev || ring->ch)
3179 		return;
3180 
3181 	/* We only initialize XPS once, so as not to overwrite user settings */
3182 	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3183 		return;
3184 
3185 	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3186 	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3187 			    ring->queue_index);
3188 }
3189 
3190 /**
3191  * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3192  * @ring: The Tx or Rx ring
3193  *
3194  * Returns the AF_XDP buffer pool or NULL.
3195  **/
3196 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3197 {
3198 	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3199 	int qid = ring->queue_index;
3200 
3201 	if (ring_is_xdp(ring))
3202 		qid -= ring->vsi->alloc_queue_pairs;
3203 
3204 	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3205 		return NULL;
3206 
3207 	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3208 }
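
/* Index example (illustrative): XDP rings are stacked after the regular
 * rings, so on a VSI with alloc_queue_pairs = 4 an XDP ring with
 * queue_index 5 maps back to qid 1 and shares its AF_XDP buffer pool with
 * Rx queue 1.
 */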
3209 
3210 /**
3211  * i40e_configure_tx_ring - Configure a transmit ring context and related settings
3212  * @ring: The Tx ring to configure
3213  *
3214  * Configure the Tx descriptor ring in the HMC context.
3215  **/
3216 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3217 {
3218 	struct i40e_vsi *vsi = ring->vsi;
3219 	u16 pf_q = vsi->base_queue + ring->queue_index;
3220 	struct i40e_hw *hw = &vsi->back->hw;
3221 	struct i40e_hmc_obj_txq tx_ctx;
3222 	i40e_status err = 0;
3223 	u32 qtx_ctl = 0;
3224 
3225 	if (ring_is_xdp(ring))
3226 		ring->xsk_pool = i40e_xsk_pool(ring);
3227 
3228 	/* some ATR related tx ring init */
3229 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3230 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
3231 		ring->atr_count = 0;
3232 	} else {
3233 		ring->atr_sample_rate = 0;
3234 	}
3235 
3236 	/* configure XPS */
3237 	i40e_config_xps_tx_ring(ring);
3238 
3239 	/* clear the context structure first */
3240 	memset(&tx_ctx, 0, sizeof(tx_ctx));
3241 
3242 	tx_ctx.new_context = 1;
3243 	tx_ctx.base = (ring->dma / 128);
3244 	tx_ctx.qlen = ring->count;
3245 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3246 					       I40E_FLAG_FD_ATR_ENABLED));
3247 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3248 	/* FDIR VSI tx ring can still use RS bit and writebacks */
3249 	if (vsi->type != I40E_VSI_FDIR)
3250 		tx_ctx.head_wb_ena = 1;
3251 	tx_ctx.head_wb_addr = ring->dma +
3252 			      (ring->count * sizeof(struct i40e_tx_desc));
3253 
3254 	/* As part of VSI creation/update, FW allocates certain
3255 	 * Tx arbitration queue sets for each TC enabled for
3256 	 * the VSI. The FW returns the handles to these queue
3257 	 * sets as part of the response buffer to Add VSI,
3258 	 * Update VSI, etc. AQ commands. It is expected that
3259 	 * these queue set handles be associated with the Tx
3260 	 * queues by the driver as part of the TX queue context
3261 	 * initialization. This has to be done regardless of
3262 	 * DCB as by default everything is mapped to TC0.
3263 	 */
3264 
3265 	if (ring->ch)
3266 		tx_ctx.rdylist =
3267 			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3268 	else
3269 		tx_ctx.rdylist =
3270 			le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3271 
3272 	tx_ctx.rdylist_act = 0;
3273 
3274 	/* clear the context in the HMC */
3275 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3276 	if (err) {
3277 		dev_info(&vsi->back->pdev->dev,
3278 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3279 			 ring->queue_index, pf_q, err);
3280 		return -ENOMEM;
3281 	}
3282 
3283 	/* set the context in the HMC */
3284 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3285 	if (err) {
3286 		dev_info(&vsi->back->pdev->dev,
3287 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3288 			 ring->queue_index, pf_q, err);
3289 		return -ENOMEM;
3290 	}
3291 
3292 	/* Now associate this queue with this PCI function */
3293 	if (ring->ch) {
3294 		if (ring->ch->type == I40E_VSI_VMDQ2)
3295 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3296 		else
3297 			return -EINVAL;
3298 
3299 		qtx_ctl |= (ring->ch->vsi_number <<
3300 			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3301 			    I40E_QTX_CTL_VFVM_INDX_MASK;
3302 	} else {
3303 		if (vsi->type == I40E_VSI_VMDQ2) {
3304 			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3305 			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3306 				    I40E_QTX_CTL_VFVM_INDX_MASK;
3307 		} else {
3308 			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3309 		}
3310 	}
3311 
3312 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3313 		    I40E_QTX_CTL_PF_INDX_MASK);
3314 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3315 	i40e_flush(hw);
3316 
3317 	/* cache tail off for easier writes later */
3318 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3319 
3320 	return 0;
3321 }
3322 
3323 /**
3324  * i40e_rx_offset - Return expected offset into page to access data
3325  * @rx_ring: Ring we are requesting offset of
3326  *
3327  * Returns the offset value for ring into the data buffer.
3328  */
3329 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3330 {
3331 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3332 }
3333 
3334 /**
3335  * i40e_configure_rx_ring - Configure a receive ring context
3336  * @ring: The Rx ring to configure
3337  *
3338  * Configure the Rx descriptor ring in the HMC context.
3339  **/
3340 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3341 {
3342 	struct i40e_vsi *vsi = ring->vsi;
3343 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3344 	u16 pf_q = vsi->base_queue + ring->queue_index;
3345 	struct i40e_hw *hw = &vsi->back->hw;
3346 	struct i40e_hmc_obj_rxq rx_ctx;
3347 	i40e_status err = 0;
3348 	bool ok;
3349 	int ret;
3350 
3351 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3352 
3353 	/* clear the context structure first */
3354 	memset(&rx_ctx, 0, sizeof(rx_ctx));
3355 
3356 	if (ring->vsi->type == I40E_VSI_MAIN)
3357 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3358 
3359 	kfree(ring->rx_bi);
3360 	ring->xsk_pool = i40e_xsk_pool(ring);
3361 	if (ring->xsk_pool) {
3362 		ret = i40e_alloc_rx_bi_zc(ring);
3363 		if (ret)
3364 			return ret;
3365 		ring->rx_buf_len =
3366 		  xsk_pool_get_rx_frame_size(ring->xsk_pool);
3367 		/* For AF_XDP ZC, we disallow packets from spanning
3368 		 * multiple buffers, which lets us skip that handling
3369 		 * in the fast path.
3370 		 */
3371 		chain_len = 1;
3372 		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3373 						 MEM_TYPE_XSK_BUFF_POOL,
3374 						 NULL);
3375 		if (ret)
3376 			return ret;
3377 		dev_info(&vsi->back->pdev->dev,
3378 			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3379 			 ring->queue_index);
3380 
3381 	} else {
3382 		ret = i40e_alloc_rx_bi(ring);
3383 		if (ret)
3384 			return ret;
3385 		ring->rx_buf_len = vsi->rx_buf_len;
3386 		if (ring->vsi->type == I40E_VSI_MAIN) {
3387 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3388 							 MEM_TYPE_PAGE_SHARED,
3389 							 NULL);
3390 			if (ret)
3391 				return ret;
3392 		}
3393 	}
3394 
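	/* dbuff is the buffer size in units of
	 * BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes (128-byte granularity),
	 * e.g. a 2048-byte buffer becomes dbuff = 16
	 */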
3395 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3396 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3397 
3398 	rx_ctx.base = (ring->dma / 128);
3399 	rx_ctx.qlen = ring->count;
3400 
3401 	/* use 16 byte descriptors */
3402 	rx_ctx.dsize = 0;
3403 
3404 	/* descriptor type is always zero
3405 	 * rx_ctx.dtype = 0;
3406 	 */
3407 	rx_ctx.hsplit_0 = 0;
3408 
3409 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3410 	if (hw->revision_id == 0)
3411 		rx_ctx.lrxqthresh = 0;
3412 	else
3413 		rx_ctx.lrxqthresh = 1;
3414 	rx_ctx.crcstrip = 1;
3415 	rx_ctx.l2tsel = 1;
3416 	/* this controls whether VLAN is stripped from inner headers */
3417 	rx_ctx.showiv = 0;
3418 	/* set the prefena field to 1 because the manual says to */
3419 	rx_ctx.prefena = 1;
3420 
3421 	/* clear the context in the HMC */
3422 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3423 	if (err) {
3424 		dev_info(&vsi->back->pdev->dev,
3425 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3426 			 ring->queue_index, pf_q, err);
3427 		return -ENOMEM;
3428 	}
3429 
3430 	/* set the context in the HMC */
3431 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3432 	if (err) {
3433 		dev_info(&vsi->back->pdev->dev,
3434 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3435 			 ring->queue_index, pf_q, err);
3436 		return -ENOMEM;
3437 	}
3438 
3439 	/* configure Rx buffer alignment */
3440 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3441 		clear_ring_build_skb_enabled(ring);
3442 	else
3443 		set_ring_build_skb_enabled(ring);
3444 
3445 	ring->rx_offset = i40e_rx_offset(ring);
3446 
3447 	/* cache tail for quicker writes, and clear the reg before use */
3448 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3449 	writel(0, ring->tail);
3450 
3451 	if (ring->xsk_pool) {
3452 		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3453 		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3454 	} else {
3455 		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3456 	}
3457 	if (!ok) {
3458 		/* Log this in case the user has forgotten to give the kernel
3459 		 * any buffers; the application may still provide them later.
3460 		 */
3461 		dev_info(&vsi->back->pdev->dev,
3462 			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3463 			 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3464 			 ring->queue_index, pf_q);
3465 	}
3466 
3467 	return 0;
3468 }
3469 
3470 /**
3471  * i40e_vsi_configure_tx - Configure the VSI for Tx
3472  * @vsi: VSI structure describing this set of rings and resources
3473  *
3474  * Configure the Tx VSI for operation.
3475  **/
3476 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3477 {
3478 	int err = 0;
3479 	u16 i;
3480 
3481 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3482 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3483 
3484 	if (err || !i40e_enabled_xdp_vsi(vsi))
3485 		return err;
3486 
3487 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3488 		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3489 
3490 	return err;
3491 }
3492 
3493 /**
3494  * i40e_vsi_configure_rx - Configure the VSI for Rx
3495  * @vsi: the VSI being configured
3496  *
3497  * Configure the Rx VSI for operation.
3498  **/
3499 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3500 {
3501 	int err = 0;
3502 	u16 i;
3503 
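	/* Choose the Rx buffer sizing: legacy Rx (or no netdev) always
	 * uses 2K buffers; with build_skb, an MTU up to ETH_DATA_LEN can
	 * use 1536-byte buffers on sub-8K pages where the padding
	 * arithmetic allows, otherwise 3072-byte buffers are used on
	 * sub-8K pages and 2K buffers on larger ones.
	 */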
3504 	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3505 		vsi->max_frame = I40E_MAX_RXBUFFER;
3506 		vsi->rx_buf_len = I40E_RXBUFFER_2048;
3507 #if (PAGE_SIZE < 8192)
3508 	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3509 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3510 		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3511 		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3512 #endif
3513 	} else {
3514 		vsi->max_frame = I40E_MAX_RXBUFFER;
3515 		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3516 						       I40E_RXBUFFER_2048;
3517 	}
3518 
3519 	/* set up individual rings */
3520 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3521 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3522 
3523 	return err;
3524 }
3525 
3526 /**
3527  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3528  * @vsi: ptr to the VSI
3529  **/
3530 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3531 {
3532 	struct i40e_ring *tx_ring, *rx_ring;
3533 	u16 qoffset, qcount;
3534 	int i, n;
3535 
3536 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3537 		/* Reset the TC information */
3538 		for (i = 0; i < vsi->num_queue_pairs; i++) {
3539 			rx_ring = vsi->rx_rings[i];
3540 			tx_ring = vsi->tx_rings[i];
3541 			rx_ring->dcb_tc = 0;
3542 			tx_ring->dcb_tc = 0;
3543 		}
3544 		return;
3545 	}
3546 
3547 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3548 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3549 			continue;
3550 
3551 		qoffset = vsi->tc_config.tc_info[n].qoffset;
3552 		qcount = vsi->tc_config.tc_info[n].qcount;
3553 		for (i = qoffset; i < (qoffset + qcount); i++) {
3554 			rx_ring = vsi->rx_rings[i];
3555 			tx_ring = vsi->tx_rings[i];
3556 			rx_ring->dcb_tc = n;
3557 			tx_ring->dcb_tc = n;
3558 		}
3559 	}
3560 }
3561 
3562 /**
3563  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3564  * @vsi: ptr to the VSI
3565  **/
3566 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3567 {
3568 	if (vsi->netdev)
3569 		i40e_set_rx_mode(vsi->netdev);
3570 }
3571 
3572 /**
3573  * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3574  * @pf: Pointer to the targeted PF
3575  *
3576  * Set all flow director counters to 0.
3577  */
3578 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3579 {
3580 	pf->fd_tcp4_filter_cnt = 0;
3581 	pf->fd_udp4_filter_cnt = 0;
3582 	pf->fd_sctp4_filter_cnt = 0;
3583 	pf->fd_ip4_filter_cnt = 0;
3584 	pf->fd_tcp6_filter_cnt = 0;
3585 	pf->fd_udp6_filter_cnt = 0;
3586 	pf->fd_sctp6_filter_cnt = 0;
3587 	pf->fd_ip6_filter_cnt = 0;
3588 }
3589 
3590 /**
3591  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3592  * @vsi: Pointer to the targeted VSI
3593  *
3594  * This function replays the hlist on the hw where all the SB Flow Director
3595  * filters were saved.
3596  **/
3597 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3598 {
3599 	struct i40e_fdir_filter *filter;
3600 	struct i40e_pf *pf = vsi->back;
3601 	struct hlist_node *node;
3602 
3603 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3604 		return;
3605 
3606 	/* Reset FDir counters as we're replaying all existing filters */
3607 	i40e_reset_fdir_filter_cnt(pf);
3608 
3609 	hlist_for_each_entry_safe(filter, node,
3610 				  &pf->fdir_filter_list, fdir_node) {
3611 		i40e_add_del_fdir(vsi, filter, true);
3612 	}
3613 }
3614 
3615 /**
3616  * i40e_vsi_configure - Set up the VSI for action
3617  * @vsi: the VSI being configured
3618  **/
3619 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3620 {
3621 	int err;
3622 
3623 	i40e_set_vsi_rx_mode(vsi);
3624 	i40e_restore_vlan(vsi);
3625 	i40e_vsi_config_dcb_rings(vsi);
3626 	err = i40e_vsi_configure_tx(vsi);
3627 	if (!err)
3628 		err = i40e_vsi_configure_rx(vsi);
3629 
3630 	return err;
3631 }
3632 
3633 /**
3634  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3635  * @vsi: the VSI being configured
3636  **/
3637 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3638 {
3639 	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3640 	struct i40e_pf *pf = vsi->back;
3641 	struct i40e_hw *hw = &pf->hw;
3642 	u16 vector;
3643 	int i, q;
3644 	u32 qp;
3645 
3646 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
3647 	 * and PFINT_LNKLSTn registers, e.g.:
3648 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
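	 *   e.g. a VSI with base_vector 1 (the first vector after misc
	 *   MSI-X vector 0) programs register index vector - 1 = 0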
3649 	 */
3650 	qp = vsi->base_queue;
3651 	vector = vsi->base_vector;
3652 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3653 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3654 
3655 		q_vector->rx.next_update = jiffies + 1;
3656 		q_vector->rx.target_itr =
3657 			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3658 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3659 		     q_vector->rx.target_itr >> 1);
3660 		q_vector->rx.current_itr = q_vector->rx.target_itr;
3661 
3662 		q_vector->tx.next_update = jiffies + 1;
3663 		q_vector->tx.target_itr =
3664 			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3665 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3666 		     q_vector->tx.target_itr >> 1);
3667 		q_vector->tx.current_itr = q_vector->tx.target_itr;
3668 
3669 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
3670 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3671 
3672 		/* Linked list for the queuepairs assigned to this vector */
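		/* Each Rx cause chains to its (XDP) Tx cause, and each Tx
		 * cause on to the next queue pair's Rx; the final Tx entry
		 * terminates the list with END_OF_LIST.
		 */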
3673 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3674 		for (q = 0; q < q_vector->num_ringpairs; q++) {
3675 			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3676 			u32 val;
3677 
3678 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3679 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3680 			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3681 			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3682 			      (I40E_QUEUE_TYPE_TX <<
3683 			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3684 
3685 			wr32(hw, I40E_QINT_RQCTL(qp), val);
3686 
3687 			if (has_xdp) {
3688 				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3689 				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3690 				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3691 				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3692 				      (I40E_QUEUE_TYPE_TX <<
3693 				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3694 
3695 				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3696 			}
3697 
3698 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3699 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3700 			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3701 			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3702 			      (I40E_QUEUE_TYPE_RX <<
3703 			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3704 
3705 			/* Terminate the linked list */
3706 			if (q == (q_vector->num_ringpairs - 1))
3707 				val |= (I40E_QUEUE_END_OF_LIST <<
3708 					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3709 
3710 			wr32(hw, I40E_QINT_TQCTL(qp), val);
3711 			qp++;
3712 		}
3713 	}
3714 
3715 	i40e_flush(hw);
3716 }
3717 
3718 /**
3719  * i40e_enable_misc_int_causes - enable the non-queue interrupts
3720  * @pf: pointer to private device data structure
3721  **/
3722 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3723 {
3724 	struct i40e_hw *hw = &pf->hw;
3725 	u32 val;
3726 
3727 	/* clear things first */
3728 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
3729 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
3730 
3731 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
3732 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
3733 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
3734 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3735 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
3736 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
3737 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
3738 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3739 
3740 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3741 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3742 
3743 	if (pf->flags & I40E_FLAG_PTP)
3744 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3745 
3746 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
3747 
3748 	/* SW_ITR_IDX = 0, but don't change INTENA */
3749 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3750 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3751 
3752 	/* OTHER_ITR_IDX = 0 */
3753 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3754 }
3755 
3756 /**
3757  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3758  * @vsi: the VSI being configured
3759  **/
3760 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3761 {
3762 	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3763 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3764 	struct i40e_pf *pf = vsi->back;
3765 	struct i40e_hw *hw = &pf->hw;
3766 	u32 val;
3767 
3768 	/* set the ITR configuration */
3769 	q_vector->rx.next_update = jiffies + 1;
3770 	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3771 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3772 	q_vector->rx.current_itr = q_vector->rx.target_itr;
3773 	q_vector->tx.next_update = jiffies + 1;
3774 	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3775 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3776 	q_vector->tx.current_itr = q_vector->tx.target_itr;
3777 
3778 	i40e_enable_misc_int_causes(pf);
3779 
3780 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3781 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3782 
3783 	/* Associate the queue pair to the vector and enable the queue int */
3784 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
3785 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
3786 	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3787 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3788 
3789 	wr32(hw, I40E_QINT_RQCTL(0), val);
3790 
3791 	if (i40e_enabled_xdp_vsi(vsi)) {
3792 		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		     |
3793 		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3794 		      (I40E_QUEUE_TYPE_TX
3795 		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3796 
3797 		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3798 	}
3799 
3800 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3801 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3802 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3803 
3804 	wr32(hw, I40E_QINT_TQCTL(0), val);
3805 	i40e_flush(hw);
3806 }
3807 
3808 /**
3809  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3810  * @pf: board private structure
3811  **/
3812 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3813 {
3814 	struct i40e_hw *hw = &pf->hw;
3815 
3816 	wr32(hw, I40E_PFINT_DYN_CTL0,
3817 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3818 	i40e_flush(hw);
3819 }
3820 
3821 /**
3822  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3823  * @pf: board private structure
3824  **/
3825 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3826 {
3827 	struct i40e_hw *hw = &pf->hw;
3828 	u32 val;
3829 
3830 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3831 	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3832 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3833 
3834 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3835 	i40e_flush(hw);
3836 }
3837 
3838 /**
3839  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3840  * @irq: interrupt number
3841  * @data: pointer to a q_vector
3842  **/
3843 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3844 {
3845 	struct i40e_q_vector *q_vector = data;
3846 
3847 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3848 		return IRQ_HANDLED;
3849 
3850 	napi_schedule_irqoff(&q_vector->napi);
3851 
3852 	return IRQ_HANDLED;
3853 }
3854 
3855 /**
3856  * i40e_irq_affinity_notify - Callback for affinity changes
3857  * @notify: context as to what irq was changed
3858  * @mask: the new affinity mask
3859  *
3860  * This is a callback function used by the irq_set_affinity_notifier function
3861  * so that we may register to receive changes to the irq affinity masks.
3862  **/
3863 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3864 				     const cpumask_t *mask)
3865 {
3866 	struct i40e_q_vector *q_vector =
3867 		container_of(notify, struct i40e_q_vector, affinity_notify);
3868 
3869 	cpumask_copy(&q_vector->affinity_mask, mask);
3870 }
3871 
3872 /**
3873  * i40e_irq_affinity_release - Callback for affinity notifier release
3874  * @ref: internal core kernel usage
3875  *
3876  * This is a callback function used by the irq_set_affinity_notifier function
3877  * to inform the current notification subscriber that they will no longer
3878  * receive notifications.
3879  **/
3880 static void i40e_irq_affinity_release(struct kref *ref) {}
3881 
3882 /**
3883  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3884  * @vsi: the VSI being configured
3885  * @basename: name for the vector
3886  *
3887  * Allocates MSI-X vectors and requests interrupts from the kernel.
3888  **/
3889 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3890 {
3891 	int q_vectors = vsi->num_q_vectors;
3892 	struct i40e_pf *pf = vsi->back;
3893 	int base = vsi->base_vector;
3894 	int rx_int_idx = 0;
3895 	int tx_int_idx = 0;
3896 	int vector, err;
3897 	int irq_num;
3898 	int cpu;
3899 
3900 	for (vector = 0; vector < q_vectors; vector++) {
3901 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3902 
3903 		irq_num = pf->msix_entries[base + vector].vector;
3904 
3905 		if (q_vector->tx.ring && q_vector->rx.ring) {
3906 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3907 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3908 			tx_int_idx++;
3909 		} else if (q_vector->rx.ring) {
3910 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3911 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3912 		} else if (q_vector->tx.ring) {
3913 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3914 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3915 		} else {
3916 			/* skip this unused q_vector */
3917 			continue;
3918 		}
3919 		err = request_irq(irq_num,
3920 				  vsi->irq_handler,
3921 				  0,
3922 				  q_vector->name,
3923 				  q_vector);
3924 		if (err) {
3925 			dev_info(&pf->pdev->dev,
3926 				 "MSIX request_irq failed, error: %d\n", err);
3927 			goto free_queue_irqs;
3928 		}
3929 
3930 		/* register for affinity change notifications */
3931 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3932 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
3933 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3934 		/* Spread affinity hints out across online CPUs.
3935 		 *
3936 		 * get_cpu_mask returns a static constant mask with
3937 		 * a permanent lifetime so it's ok to pass to
3938 		 * irq_update_affinity_hint without making a copy.
3939 		 */
3940 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
3941 		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
3942 	}
3943 
3944 	vsi->irqs_ready = true;
3945 	return 0;
3946 
3947 free_queue_irqs:
3948 	while (vector) {
3949 		vector--;
3950 		irq_num = pf->msix_entries[base + vector].vector;
3951 		irq_set_affinity_notifier(irq_num, NULL);
3952 		irq_update_affinity_hint(irq_num, NULL);
3953 		free_irq(irq_num, &vsi->q_vectors[vector]);
3954 	}
3955 	return err;
3956 }
3957 
3958 /**
3959  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3960  * @vsi: the VSI being un-configured
3961  **/
3962 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3963 {
3964 	struct i40e_pf *pf = vsi->back;
3965 	struct i40e_hw *hw = &pf->hw;
3966 	int base = vsi->base_vector;
3967 	int i;
3968 
3969 	/* disable interrupt causation from each queue */
3970 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3971 		u32 val;
3972 
3973 		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3974 		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3975 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3976 
3977 		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3978 		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3979 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3980 
3981 		if (!i40e_enabled_xdp_vsi(vsi))
3982 			continue;
3983 		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3984 	}
3985 
3986 	/* disable each interrupt */
3987 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3988 		for (i = vsi->base_vector;
3989 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3990 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3991 
3992 		i40e_flush(hw);
3993 		for (i = 0; i < vsi->num_q_vectors; i++)
3994 			synchronize_irq(pf->msix_entries[i + base].vector);
3995 	} else {
3996 		/* Legacy and MSI mode - this stops all interrupt handling */
3997 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3998 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3999 		i40e_flush(hw);
4000 		synchronize_irq(pf->pdev->irq);
4001 	}
4002 }
4003 
4004 /**
4005  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4006  * @vsi: the VSI being configured
4007  **/
4008 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4009 {
4010 	struct i40e_pf *pf = vsi->back;
4011 	int i;
4012 
4013 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4014 		for (i = 0; i < vsi->num_q_vectors; i++)
4015 			i40e_irq_dynamic_enable(vsi, i);
4016 	} else {
4017 		i40e_irq_dynamic_enable_icr0(pf);
4018 	}
4019 
4020 	i40e_flush(&pf->hw);
4021 	return 0;
4022 }
4023 
4024 /**
4025  * i40e_free_misc_vector - Free the vector that handles non-queue events
4026  * @pf: board private structure
4027  **/
4028 static void i40e_free_misc_vector(struct i40e_pf *pf)
4029 {
4030 	/* Disable ICR 0 */
4031 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4032 	i40e_flush(&pf->hw);
4033 
4034 	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4035 		synchronize_irq(pf->msix_entries[0].vector);
4036 		free_irq(pf->msix_entries[0].vector, pf);
4037 		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4038 	}
4039 }
4040 
4041 /**
4042  * i40e_intr - MSI/Legacy and non-queue interrupt handler
4043  * @irq: interrupt number
4044  * @data: pointer to a q_vector
4045  *
4046  * This is the handler used for all MSI/Legacy interrupts, and deals
4047  * with both queue and non-queue interrupts.  This is also used in
4048  * MSIX mode to handle the non-queue interrupts.
4049  **/
4050 static irqreturn_t i40e_intr(int irq, void *data)
4051 {
4052 	struct i40e_pf *pf = (struct i40e_pf *)data;
4053 	struct i40e_hw *hw = &pf->hw;
4054 	irqreturn_t ret = IRQ_NONE;
4055 	u32 icr0, icr0_remaining;
4056 	u32 val, ena_mask;
4057 
4058 	icr0 = rd32(hw, I40E_PFINT_ICR0);
4059 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4060 
4061 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
4062 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4063 		goto enable_intr;
4064 
4065 	/* if interrupt but no bits showing, must be SWINT */
4066 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4067 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4068 		pf->sw_int_count++;
4069 
4070 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4071 	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4072 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4073 		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4074 		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4075 	}
4076 
4077 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4078 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4079 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4080 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4081 
4082 		/* We do not have a way to disarm queue causes while leaving
4083 		 * the interrupt enabled for all other causes. Ideally the
4084 		 * interrupt would be disabled while we are in NAPI, but
4085 		 * this is not a performance path and napi_schedule()
4086 		 * can deal with rescheduling.
4087 		 */
4088 		if (!test_bit(__I40E_DOWN, pf->state))
4089 			napi_schedule_irqoff(&q_vector->napi);
4090 	}
4091 
4092 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4093 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4094 		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4095 		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4096 	}
4097 
4098 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4099 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4100 		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4101 	}
4102 
4103 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4104 		/* disable any further VFLR event notifications */
4105 		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4106 			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4107 
4108 			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4109 			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4110 		} else {
4111 			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4112 			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4113 		}
4114 	}
4115 
4116 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4117 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4118 			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4119 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4120 		val = rd32(hw, I40E_GLGEN_RSTAT);
4121 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4122 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4123 		if (val == I40E_RESET_CORER) {
4124 			pf->corer_count++;
4125 		} else if (val == I40E_RESET_GLOBR) {
4126 			pf->globr_count++;
4127 		} else if (val == I40E_RESET_EMPR) {
4128 			pf->empr_count++;
4129 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4130 		}
4131 	}
4132 
4133 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4134 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4135 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4136 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4137 			 rd32(hw, I40E_PFHMC_ERRORINFO),
4138 			 rd32(hw, I40E_PFHMC_ERRORDATA));
4139 	}
4140 
4141 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4142 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4143 
4144 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4145 			schedule_work(&pf->ptp_extts0_work);
4146 
4147 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4148 			i40e_ptp_tx_hwtstamp(pf);
4149 
4150 		icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4151 	}
4152 
4153 	/* If a critical error is pending we have no choice but to reset the
4154 	 * device.
4155 	 * Report and mask out any remaining unexpected interrupts.
4156 	 */
4157 	icr0_remaining = icr0 & ena_mask;
4158 	if (icr0_remaining) {
4159 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4160 			 icr0_remaining);
4161 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4162 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4163 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4164 			dev_info(&pf->pdev->dev, "device will be reset\n");
4165 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4166 			i40e_service_event_schedule(pf);
4167 		}
4168 		ena_mask &= ~icr0_remaining;
4169 	}
4170 	ret = IRQ_HANDLED;
4171 
4172 enable_intr:
4173 	/* re-enable interrupt causes */
4174 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4175 	if (!test_bit(__I40E_DOWN, pf->state) ||
4176 	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4177 		i40e_service_event_schedule(pf);
4178 		i40e_irq_dynamic_enable_icr0(pf);
4179 	}
4180 
4181 	return ret;
4182 }
4183 
4184 /**
4185  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4186  * @tx_ring:  tx ring to clean
4187  * @budget:   how many cleans we're allowed
4188  *
4189  * Returns true if there's any budget left (i.e. the clean is finished)
4190  **/
4191 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4192 {
4193 	struct i40e_vsi *vsi = tx_ring->vsi;
4194 	u16 i = tx_ring->next_to_clean;
4195 	struct i40e_tx_buffer *tx_buf;
4196 	struct i40e_tx_desc *tx_desc;
4197 
4198 	tx_buf = &tx_ring->tx_bi[i];
4199 	tx_desc = I40E_TX_DESC(tx_ring, i);
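	/* track the index as a negative offset from the end of the ring
	 * so that wrapping is detected with the cheap !i tests below
	 */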
4200 	i -= tx_ring->count;
4201 
4202 	do {
4203 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4204 
4205 		/* if next_to_watch is not set then there is no work pending */
4206 		if (!eop_desc)
4207 			break;
4208 
4209 		/* prevent any other reads prior to eop_desc */
4210 		smp_rmb();
4211 
4212 		/* if the descriptor isn't done, no work yet to do */
4213 		if (!(eop_desc->cmd_type_offset_bsz &
4214 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4215 			break;
4216 
4217 		/* clear next_to_watch to prevent false hangs */
4218 		tx_buf->next_to_watch = NULL;
4219 
4220 		tx_desc->buffer_addr = 0;
4221 		tx_desc->cmd_type_offset_bsz = 0;
4222 		/* move past filter desc */
4223 		tx_buf++;
4224 		tx_desc++;
4225 		i++;
4226 		if (unlikely(!i)) {
4227 			i -= tx_ring->count;
4228 			tx_buf = tx_ring->tx_bi;
4229 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4230 		}
4231 		/* unmap skb header data */
4232 		dma_unmap_single(tx_ring->dev,
4233 				 dma_unmap_addr(tx_buf, dma),
4234 				 dma_unmap_len(tx_buf, len),
4235 				 DMA_TO_DEVICE);
4236 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4237 			kfree(tx_buf->raw_buf);
4238 
4239 		tx_buf->raw_buf = NULL;
4240 		tx_buf->tx_flags = 0;
4241 		tx_buf->next_to_watch = NULL;
4242 		dma_unmap_len_set(tx_buf, len, 0);
4243 		tx_desc->buffer_addr = 0;
4244 		tx_desc->cmd_type_offset_bsz = 0;
4245 
4246 		/* move us past the eop_desc for start of next FD desc */
4247 		tx_buf++;
4248 		tx_desc++;
4249 		i++;
4250 		if (unlikely(!i)) {
4251 			i -= tx_ring->count;
4252 			tx_buf = tx_ring->tx_bi;
4253 			tx_desc = I40E_TX_DESC(tx_ring, 0);
4254 		}
4255 
4256 		/* update budget accounting */
4257 		budget--;
4258 	} while (likely(budget));
4259 
4260 	i += tx_ring->count;
4261 	tx_ring->next_to_clean = i;
4262 
4263 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4264 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4265 
4266 	return budget > 0;
4267 }
4268 
4269 /**
4270  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4271  * @irq: interrupt number
4272  * @data: pointer to a q_vector
4273  **/
4274 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4275 {
4276 	struct i40e_q_vector *q_vector = data;
4277 	struct i40e_vsi *vsi;
4278 
4279 	if (!q_vector->tx.ring)
4280 		return IRQ_HANDLED;
4281 
4282 	vsi = q_vector->tx.ring->vsi;
4283 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4284 
4285 	return IRQ_HANDLED;
4286 }
4287 
4288 /**
4289  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4290  * @vsi: the VSI being configured
4291  * @v_idx: vector index
4292  * @qp_idx: queue pair index
4293  **/
4294 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4295 {
4296 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4297 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4298 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4299 
4300 	tx_ring->q_vector = q_vector;
4301 	tx_ring->next = q_vector->tx.ring;
4302 	q_vector->tx.ring = tx_ring;
4303 	q_vector->tx.count++;
4304 
4305 	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4306 	if (i40e_enabled_xdp_vsi(vsi)) {
4307 		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4308 
4309 		xdp_ring->q_vector = q_vector;
4310 		xdp_ring->next = q_vector->tx.ring;
4311 		q_vector->tx.ring = xdp_ring;
4312 		q_vector->tx.count++;
4313 	}
4314 
4315 	rx_ring->q_vector = q_vector;
4316 	rx_ring->next = q_vector->rx.ring;
4317 	q_vector->rx.ring = rx_ring;
4318 	q_vector->rx.count++;
4319 }
4320 
4321 /**
4322  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4323  * @vsi: the VSI being configured
4324  *
4325  * This function maps descriptor rings to the queue-specific vectors
4326  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
4327  * one vector per queue pair, but on a constrained vector budget, we
4328  * group the queue pairs as "efficiently" as possible.
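 * For example, ten queue pairs spread over four vectors are grouped
 * 3/3/2/2 by the DIV_ROUND_UP() logic below.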
4329  **/
4330 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4331 {
4332 	int qp_remaining = vsi->num_queue_pairs;
4333 	int q_vectors = vsi->num_q_vectors;
4334 	int num_ringpairs;
4335 	int v_start = 0;
4336 	int qp_idx = 0;
4337 
4338 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4339 	 * group them so there are multiple queues per vector.
4340 	 * It is also important to go through all the vectors available to be
4341 	 * sure that if we don't use all the vectors, that the remaining vectors
4342 	 * are cleared. This is especially important when decreasing the
4343 	 * number of queues in use.
4344 	 */
4345 	for (; v_start < q_vectors; v_start++) {
4346 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4347 
4348 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4349 
4350 		q_vector->num_ringpairs = num_ringpairs;
4351 		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4352 
4353 		q_vector->rx.count = 0;
4354 		q_vector->tx.count = 0;
4355 		q_vector->rx.ring = NULL;
4356 		q_vector->tx.ring = NULL;
4357 
4358 		while (num_ringpairs--) {
4359 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4360 			qp_idx++;
4361 			qp_remaining--;
4362 		}
4363 	}
4364 }
4365 
4366 /**
4367  * i40e_vsi_request_irq - Request IRQ from the OS
4368  * @vsi: the VSI being configured
4369  * @basename: name for the vector
4370  **/
4371 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4372 {
4373 	struct i40e_pf *pf = vsi->back;
4374 	int err;
4375 
4376 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4377 		err = i40e_vsi_request_irq_msix(vsi, basename);
4378 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4379 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
4380 				  pf->int_name, pf);
4381 	else
4382 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4383 				  pf->int_name, pf);
4384 
4385 	if (err)
4386 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4387 
4388 	return err;
4389 }
4390 
4391 #ifdef CONFIG_NET_POLL_CONTROLLER
4392 /**
4393  * i40e_netpoll - A Polling 'interrupt' handler
4394  * @netdev: network interface device structure
4395  *
4396  * This is used by netconsole to send skbs without having to re-enable
4397  * interrupts.  It's not called while the normal interrupt routine is executing.
4398  **/
4399 static void i40e_netpoll(struct net_device *netdev)
4400 {
4401 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4402 	struct i40e_vsi *vsi = np->vsi;
4403 	struct i40e_pf *pf = vsi->back;
4404 	int i;
4405 
4406 	/* if interface is down do nothing */
4407 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
4408 		return;
4409 
4410 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4411 		for (i = 0; i < vsi->num_q_vectors; i++)
4412 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4413 	} else {
4414 		i40e_intr(pf->pdev->irq, pf);
4415 	}
4416 }
4417 #endif
4418 
4419 #define I40E_QTX_ENA_WAIT_COUNT 50
4420 
4421 /**
4422  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4423  * @pf: the PF being configured
4424  * @pf_q: the PF queue
4425  * @enable: enable or disable state of the queue
4426  *
4427  * This routine will wait for the given Tx queue of the PF to reach the
4428  * enabled or disabled state.
4429  * Returns -ETIMEDOUT in case of failing to reach the requested state after
4430  * multiple retries; else will return 0 in case of success.
4431  **/
4432 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4433 {
4434 	int i;
4435 	u32 tx_reg;
4436 
4437 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4438 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4439 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4440 			break;
4441 
4442 		usleep_range(10, 20);
4443 	}
4444 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4445 		return -ETIMEDOUT;
4446 
4447 	return 0;
4448 }
4449 
4450 /**
4451  * i40e_control_tx_q - Start or stop a particular Tx queue
4452  * @pf: the PF structure
4453  * @pf_q: the PF queue to configure
4454  * @enable: start or stop the queue
4455  *
4456  * This function enables or disables a single queue. Note that any delay
4457  * required after the operation is expected to be handled by the caller of
4458  * this function.
4459  **/
4460 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4461 {
4462 	struct i40e_hw *hw = &pf->hw;
4463 	u32 tx_reg;
4464 	int i;
4465 
4466 	/* warn the TX unit of coming changes */
4467 	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4468 	if (!enable)
4469 		usleep_range(10, 20);
4470 
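	/* wait for any previously issued enable/disable request
	 * (QENA_REQ) to be acknowledged by the hardware (QENA_STAT)
	 * before changing it
	 */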
4471 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4472 		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4473 		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4474 		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4475 			break;
4476 		usleep_range(1000, 2000);
4477 	}
4478 
4479 	/* Skip if the queue is already in the requested state */
4480 	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4481 		return;
4482 
4483 	/* turn on/off the queue */
4484 	if (enable) {
4485 		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4486 		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4487 	} else {
4488 		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4489 	}
4490 
4491 	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4492 }
4493 
4494 /**
4495  * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4496  * @seid: VSI SEID
4497  * @pf: the PF structure
4498  * @pf_q: the PF queue to configure
4499  * @is_xdp: true if the queue is used for XDP
4500  * @enable: start or stop the queue
4501  **/
4502 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4503 			   bool is_xdp, bool enable)
4504 {
4505 	int ret;
4506 
4507 	i40e_control_tx_q(pf, pf_q, enable);
4508 
4509 	/* wait for the change to finish */
4510 	ret = i40e_pf_txq_wait(pf, pf_q, enable);
4511 	if (ret) {
4512 		dev_info(&pf->pdev->dev,
4513 			 "VSI seid %d %sTx ring %d %sable timeout\n",
4514 			 seid, (is_xdp ? "XDP " : ""), pf_q,
4515 			 (enable ? "en" : "dis"));
4516 	}
4517 
4518 	return ret;
4519 }
4520 
4521 /**
4522  * i40e_vsi_enable_tx - Start a VSI's rings
4523  * @vsi: the VSI being configured
4524  **/
4525 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4526 {
4527 	struct i40e_pf *pf = vsi->back;
4528 	int i, pf_q, ret = 0;
4529 
4530 	pf_q = vsi->base_queue;
4531 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4532 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4533 					     pf_q,
4534 					     false /*is xdp*/, true);
4535 		if (ret)
4536 			break;
4537 
4538 		if (!i40e_enabled_xdp_vsi(vsi))
4539 			continue;
4540 
4541 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
4542 					     pf_q + vsi->alloc_queue_pairs,
4543 					     true /*is xdp*/, true);
4544 		if (ret)
4545 			break;
4546 	}
4547 	return ret;
4548 }
4549 
4550 /**
4551  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4552  * @pf: the PF being configured
4553  * @pf_q: the PF queue
4554  * @enable: enable or disable state of the queue
4555  *
4556  * This routine will wait for the given Rx queue of the PF to reach the
4557  * enabled or disabled state.
4558  * Returns -ETIMEDOUT in case of failing to reach the requested state after
4559  * multiple retries; else will return 0 in case of success.
4560  **/
4561 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4562 {
4563 	int i;
4564 	u32 rx_reg;
4565 
4566 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4567 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4568 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4569 			break;
4570 
4571 		usleep_range(10, 20);
4572 	}
4573 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4574 		return -ETIMEDOUT;
4575 
4576 	return 0;
4577 }
4578 
4579 /**
4580  * i40e_control_rx_q - Start or stop a particular Rx queue
4581  * @pf: the PF structure
4582  * @pf_q: the PF queue to configure
4583  * @enable: start or stop the queue
4584  *
4585  * This function enables or disables a single queue. Note that
4586  * any delay required after the operation is expected to be
4587  * handled by the caller of this function.
4588  **/
4589 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4590 {
4591 	struct i40e_hw *hw = &pf->hw;
4592 	u32 rx_reg;
4593 	int i;
4594 
4595 	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4596 		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4597 		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4598 		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4599 			break;
4600 		usleep_range(1000, 2000);
4601 	}
4602 
4603 	/* Skip if the queue is already in the requested state */
4604 	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4605 		return;
4606 
4607 	/* turn on/off the queue */
4608 	if (enable)
4609 		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4610 	else
4611 		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4612 
4613 	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4614 }
4615 
4616 /**
4617  * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4618  * @pf: the PF structure
4619  * @pf_q: queue being configured
4620  * @enable: start or stop the rings
4621  *
4622  * This function enables or disables a single queue along with waiting
4623  * for the change to finish. The caller of this function should handle
4624  * the delays needed in the case of disabling queues.
4625  **/
4626 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4627 {
4630 	i40e_control_rx_q(pf, pf_q, enable);
4631 
4632 	/* wait for the change to finish */
4633 	return i40e_pf_rxq_wait(pf, pf_q, enable);
4638 }
4639 
4640 /**
4641  * i40e_vsi_enable_rx - Start a VSI's rings
4642  * @vsi: the VSI being configured
4643  **/
4644 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4645 {
4646 	struct i40e_pf *pf = vsi->back;
4647 	int i, pf_q, ret = 0;
4648 
4649 	pf_q = vsi->base_queue;
4650 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4651 		ret = i40e_control_wait_rx_q(pf, pf_q, true);
4652 		if (ret) {
4653 			dev_info(&pf->pdev->dev,
4654 				 "VSI seid %d Rx ring %d enable timeout\n",
4655 				 vsi->seid, pf_q);
4656 			break;
4657 		}
4658 	}
4659 
4660 	return ret;
4661 }
4662 
4663 /**
4664  * i40e_vsi_start_rings - Start a VSI's rings
4665  * @vsi: the VSI being configured
4666  **/
4667 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4668 {
4669 	int ret = 0;
4670 
4671 	/* do rx first for enable and last for disable */
4672 	ret = i40e_vsi_enable_rx(vsi);
4673 	if (ret)
4674 		return ret;
4675 	ret = i40e_vsi_enable_tx(vsi);
4676 
4677 	return ret;
4678 }
4679 
4680 #define I40E_DISABLE_TX_GAP_MSEC	50
4681 
4682 /**
4683  * i40e_vsi_stop_rings - Stop a VSI's rings
4684  * @vsi: the VSI being configured
4685  **/
4686 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4687 {
4688 	struct i40e_pf *pf = vsi->back;
4689 	int pf_q, err, q_end;
4690 
4691 	/* When port TX is suspended, don't wait */
4692 	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4693 		return i40e_vsi_stop_rings_no_wait(vsi);
4694 
4695 	q_end = vsi->base_queue + vsi->num_queue_pairs;
4696 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4697 		i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4698 
4699 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4700 		err = i40e_control_wait_rx_q(pf, pf_q, false);
4701 		if (err)
4702 			dev_info(&pf->pdev->dev,
4703 				 "VSI seid %d Rx ring %d disable timeout\n",
4704 				 vsi->seid, pf_q);
4705 	}
4706 
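	/* allow the hardware to settle after the disable requests before
	 * the Tx queue enables are cleared below
	 */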
4707 	msleep(I40E_DISABLE_TX_GAP_MSEC);
4709 	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4710 		wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4711 
4712 	i40e_vsi_wait_queues_disabled(vsi);
4713 }
4714 
4715 /**
4716  * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4717  * @vsi: the VSI being shutdown
4718  *
4719  * This function stops all the rings for a VSI but does not delay to verify
4720  * that rings have been disabled. It is expected that the caller is shutting
4721  * down multiple VSIs at once and will delay together for all the VSIs after
4722  * initiating the shutdown. This is particularly useful for shutting down lots
4723  * of VFs together. Otherwise, a large delay can be incurred while
4724  * configuring each VSI serially.
4725  **/
4726 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4727 {
4728 	struct i40e_pf *pf = vsi->back;
4729 	int i, pf_q;
4730 
4731 	pf_q = vsi->base_queue;
4732 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4733 		i40e_control_tx_q(pf, pf_q, false);
4734 		i40e_control_rx_q(pf, pf_q, false);
4735 	}
4736 }
4737 
4738 /**
4739  * i40e_vsi_free_irq - Free the irq association with the OS
4740  * @vsi: the VSI being configured
4741  **/
4742 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4743 {
4744 	struct i40e_pf *pf = vsi->back;
4745 	struct i40e_hw *hw = &pf->hw;
4746 	int base = vsi->base_vector;
4747 	u32 val, qp;
4748 	int i;
4749 
4750 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4751 		if (!vsi->q_vectors)
4752 			return;
4753 
4754 		if (!vsi->irqs_ready)
4755 			return;
4756 
4757 		vsi->irqs_ready = false;
4758 		for (i = 0; i < vsi->num_q_vectors; i++) {
4759 			int irq_num;
4760 			u16 vector;
4761 
4762 			vector = i + base;
4763 			irq_num = pf->msix_entries[vector].vector;
4764 
4765 			/* free only the irqs that were actually requested */
4766 			if (!vsi->q_vectors[i] ||
4767 			    !vsi->q_vectors[i]->num_ringpairs)
4768 				continue;
4769 
4770 			/* clear the affinity notifier in the IRQ descriptor */
4771 			irq_set_affinity_notifier(irq_num, NULL);
4772 			/* remove our suggested affinity mask for this IRQ */
4773 			irq_update_affinity_hint(irq_num, NULL);
4774 			synchronize_irq(irq_num);
4775 			free_irq(irq_num, vsi->q_vectors[i]);
4776 
4777 			/* Tear down the interrupt queue link list
4778 			 *
4779 			 * We know that they come in pairs and always
4780 			 * the Rx first, then the Tx.  To clear the
4781 			 * link list, stick the EOL value into the
4782 			 * next_q field of the registers.
4783 			 */
4784 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4785 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4786 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4787 			val |= I40E_QUEUE_END_OF_LIST
4788 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4789 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4790 
4791 			while (qp != I40E_QUEUE_END_OF_LIST) {
4792 				u32 next;
4793 
4794 				val = rd32(hw, I40E_QINT_RQCTL(qp));
4795 
4796 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4797 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4798 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4799 					 I40E_QINT_RQCTL_INTEVENT_MASK);
4800 
4801 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4802 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4803 
4804 				wr32(hw, I40E_QINT_RQCTL(qp), val);
4805 
4806 				val = rd32(hw, I40E_QINT_TQCTL(qp));
4807 
4808 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4809 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4810 
4811 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4812 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4813 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4814 					 I40E_QINT_TQCTL_INTEVENT_MASK);
4815 
4816 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4817 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4818 
4819 				wr32(hw, I40E_QINT_TQCTL(qp), val);
4820 				qp = next;
4821 			}
4822 		}
4823 	} else {
4824 		free_irq(pf->pdev->irq, pf);
4825 
4826 		val = rd32(hw, I40E_PFINT_LNKLST0);
4827 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4828 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4829 		val |= I40E_QUEUE_END_OF_LIST
4830 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4831 		wr32(hw, I40E_PFINT_LNKLST0, val);
4832 
4833 		val = rd32(hw, I40E_QINT_RQCTL(qp));
4834 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4835 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4836 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4837 			 I40E_QINT_RQCTL_INTEVENT_MASK);
4838 
4839 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4840 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4841 
4842 		wr32(hw, I40E_QINT_RQCTL(qp), val);
4843 
4844 		val = rd32(hw, I40E_QINT_TQCTL(qp));
4845 
4846 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4847 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4848 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4849 			 I40E_QINT_TQCTL_INTEVENT_MASK);
4850 
4851 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4852 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4853 
4854 		wr32(hw, I40E_QINT_TQCTL(qp), val);
4855 	}
4856 }
4857 
4858 /**
4859  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4860  * @vsi: the VSI being configured
4861  * @v_idx: Index of vector to be freed
4862  *
4863  * This function frees the memory allocated to the q_vector.  In addition if
4864  * NAPI is enabled it will delete any references to the NAPI struct prior
4865  * to freeing the q_vector.
4866  **/
4867 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4868 {
4869 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4870 	struct i40e_ring *ring;
4871 
4872 	if (!q_vector)
4873 		return;
4874 
4875 	/* disassociate q_vector from rings */
4876 	i40e_for_each_ring(ring, q_vector->tx)
4877 		ring->q_vector = NULL;
4878 
4879 	i40e_for_each_ring(ring, q_vector->rx)
4880 		ring->q_vector = NULL;
4881 
4882 	/* only VSI w/ an associated netdev is set up w/ NAPI */
4883 	if (vsi->netdev)
4884 		netif_napi_del(&q_vector->napi);
4885 
4886 	vsi->q_vectors[v_idx] = NULL;
4887 
4888 	kfree_rcu(q_vector, rcu);
4889 }
4890 
4891 /**
4892  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4893  * @vsi: the VSI being un-configured
4894  *
4895  * This frees the memory allocated to the q_vectors and
4896  * deletes references to the NAPI struct.
4897  **/
4898 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4899 {
4900 	int v_idx;
4901 
4902 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4903 		i40e_free_q_vector(vsi, v_idx);
4904 }
4905 
4906 /**
4907  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4908  * @pf: board private structure
4909  **/
4910 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4911 {
4912 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4913 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4914 		pci_disable_msix(pf->pdev);
4915 		kfree(pf->msix_entries);
4916 		pf->msix_entries = NULL;
4917 		kfree(pf->irq_pile);
4918 		pf->irq_pile = NULL;
4919 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4920 		pci_disable_msi(pf->pdev);
4921 	}
4922 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4923 }
4924 
4925 /**
4926  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4927  * @pf: board private structure
4928  *
4929  * We go through and clear interrupt specific resources and reset the structure
4930  * to pre-load conditions
4931  **/
4932 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4933 {
4934 	int i;
4935 
4936 	if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4937 		i40e_free_misc_vector(pf);
4938 
4939 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4940 		      I40E_IWARP_IRQ_PILE_ID);
4941 
4942 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4943 	for (i = 0; i < pf->num_alloc_vsi; i++)
4944 		if (pf->vsi[i])
4945 			i40e_vsi_free_q_vectors(pf->vsi[i]);
4946 	i40e_reset_interrupt_capability(pf);
4947 }
4948 
4949 /**
4950  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4951  * @vsi: the VSI being configured
4952  **/
4953 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4954 {
4955 	int q_idx;
4956 
4957 	if (!vsi->netdev)
4958 		return;
4959 
4960 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4961 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4962 
4963 		if (q_vector->rx.ring || q_vector->tx.ring)
4964 			napi_enable(&q_vector->napi);
4965 	}
4966 }
4967 
4968 /**
4969  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4970  * @vsi: the VSI being configured
4971  **/
4972 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4973 {
4974 	int q_idx;
4975 
4976 	if (!vsi->netdev)
4977 		return;
4978 
4979 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4980 		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4981 
4982 		if (q_vector->rx.ring || q_vector->tx.ring)
4983 			napi_disable(&q_vector->napi);
4984 	}
4985 }
4986 
4987 /**
4988  * i40e_vsi_close - Shut down a VSI
4989  * @vsi: the vsi to be quelled
4990  **/
4991 static void i40e_vsi_close(struct i40e_vsi *vsi)
4992 {
4993 	struct i40e_pf *pf = vsi->back;

4994 	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4995 		i40e_down(vsi);
4996 	i40e_vsi_free_irq(vsi);
4997 	i40e_vsi_free_tx_resources(vsi);
4998 	i40e_vsi_free_rx_resources(vsi);
4999 	vsi->current_netdev_flags = 0;
5000 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5001 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5002 		set_bit(__I40E_CLIENT_RESET, pf->state);
5003 }
5004 
5005 /**
5006  * i40e_quiesce_vsi - Pause a given VSI
5007  * @vsi: the VSI being paused
5008  **/
5009 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5010 {
5011 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
5012 		return;
5013 
5014 	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5015 	if (vsi->netdev && netif_running(vsi->netdev))
5016 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5017 	else
5018 		i40e_vsi_close(vsi);
5019 }
5020 
5021 /**
5022  * i40e_unquiesce_vsi - Resume a given VSI
5023  * @vsi: the VSI being resumed
5024  **/
5025 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5026 {
5027 	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5028 		return;
5029 
5030 	if (vsi->netdev && netif_running(vsi->netdev))
5031 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5032 	else
5033 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
5034 }
5035 
5036 /**
5037  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5038  * @pf: the PF
5039  **/
5040 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5041 {
5042 	int v;
5043 
5044 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5045 		if (pf->vsi[v])
5046 			i40e_quiesce_vsi(pf->vsi[v]);
5047 	}
5048 }
5049 
5050 /**
5051  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5052  * @pf: the PF
5053  **/
5054 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5055 {
5056 	int v;
5057 
5058 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5059 		if (pf->vsi[v])
5060 			i40e_unquiesce_vsi(pf->vsi[v]);
5061 	}
5062 }
5063 
5064 /**
5065  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5066  * @vsi: the VSI being configured
5067  *
5068  * Wait until all queues on a given VSI have been disabled.
5069  **/
5070 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5071 {
5072 	struct i40e_pf *pf = vsi->back;
5073 	int i, pf_q, ret;
5074 
5075 	pf_q = vsi->base_queue;
5076 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5077 		/* Check and wait for the Tx queue */
5078 		ret = i40e_pf_txq_wait(pf, pf_q, false);
5079 		if (ret) {
5080 			dev_info(&pf->pdev->dev,
5081 				 "VSI seid %d Tx ring %d disable timeout\n",
5082 				 vsi->seid, pf_q);
5083 			return ret;
5084 		}
5085 
5086 		if (!i40e_enabled_xdp_vsi(vsi))
5087 			goto wait_rx;
5088 
5089 		/* Check and wait for the XDP Tx queue */
5090 		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5091 				       false);
5092 		if (ret) {
5093 			dev_info(&pf->pdev->dev,
5094 				 "VSI seid %d XDP Tx ring %d disable timeout\n",
5095 				 vsi->seid, pf_q);
5096 			return ret;
5097 		}
5098 wait_rx:
5099 		/* Check and wait for the Rx queue */
5100 		ret = i40e_pf_rxq_wait(pf, pf_q, false);
5101 		if (ret) {
5102 			dev_info(&pf->pdev->dev,
5103 				 "VSI seid %d Rx ring %d disable timeout\n",
5104 				 vsi->seid, pf_q);
5105 			return ret;
5106 		}
5107 	}
5108 
5109 	return 0;
5110 }
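
/* Illustrative layout (hypothetical numbers): for a VSI with
 * base_queue = 16 and alloc_queue_pairs = 4, queue pair i uses PF
 * Tx/Rx queue 16 + i and, when XDP is enabled, the paired XDP Tx
 * queue 16 + 4 + i, which is why the wait above polls both pf_q and
 * pf_q + vsi->alloc_queue_pairs:
 *
 *	Tx/Rx rings:  16 17 18 19
 *	XDP Tx rings: 20 21 22 23
 */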
5111 
5112 #ifdef CONFIG_I40E_DCB
5113 /**
5114  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5115  * @pf: the PF
5116  *
5117  * This function waits for the queues to be in disabled state for all the
5118  * VSIs that are managed by this PF.
5119  **/
5120 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5121 {
5122 	int v, ret = 0;
5123 
5124 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5125 		if (pf->vsi[v]) {
5126 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5127 			if (ret)
5128 				break;
5129 		}
5130 	}
5131 
5132 	return ret;
5133 }
5134 
5135 #endif
5136 
5137 /**
5138  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5139  * @pf: pointer to PF
5140  *
5141  * Get the TC map for an iSCSI-enabled PF; it will include the iSCSI TC
5142  * as well as the LAN TC.
5143  **/
5144 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5145 {
5146 	struct i40e_dcb_app_priority_table app;
5147 	struct i40e_hw *hw = &pf->hw;
5148 	u8 enabled_tc = 1; /* TC0 is always enabled */
5149 	u8 tc, i;
5150 	/* Get the iSCSI APP TLV */
5151 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5152 
5153 	for (i = 0; i < dcbcfg->numapps; i++) {
5154 		app = dcbcfg->app[i];
5155 		if (app.selector == I40E_APP_SEL_TCPIP &&
5156 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
5157 			tc = dcbcfg->etscfg.prioritytable[app.priority];
5158 			enabled_tc |= BIT(tc);
5159 			break;
5160 		}
5161 	}
5162 
5163 	return enabled_tc;
5164 }
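
/* Worked example (hypothetical DCB config): if the iSCSI APP TLV carries
 * priority 4 and the ETS priority table maps priority 4 to TC1, then
 *
 *	tc = dcbcfg->etscfg.prioritytable[4];	yields 1
 *	enabled_tc = 0x1 | BIT(1);		yields 0x3 (TC0 + iSCSI TC1)
 */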
5165 
5166 /**
5167  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
5168  * @dcbcfg: the corresponding DCBx configuration structure
5169  *
5170  * Return the number of TCs from given DCBx configuration
5171  **/
5172 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5173 {
5174 	int i, tc_unused = 0;
5175 	u8 num_tc = 0;
5176 	u8 ret = 0;
5177 
5178 	/* Scan the ETS Config Priority Table to find
5179 	 * traffic class enabled for a given priority
5180 	 * and create a bitmask of enabled TCs
5181 	 */
5182 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5183 		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5184 
5185 	/* Now scan the bitmask to check for
5186 	 * contiguous TCs starting with TC0
5187 	 */
5188 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5189 		if (num_tc & BIT(i)) {
5190 			if (!tc_unused) {
5191 				ret++;
5192 			} else {
5193 				pr_err("Non-contiguous TC - Disabling DCB\n");
5194 				return 1;
5195 			}
5196 		} else {
5197 			tc_unused = 1;
5198 		}
5199 	}
5200 
5201 	/* There is always at least TC0 */
5202 	if (!ret)
5203 		ret = 1;
5204 
5205 	return ret;
5206 }
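
/* Worked example (hypothetical priority table): with
 * prioritytable = { 0, 0, 1, 1, 2, 0, 0, 0 } the first loop builds
 * num_tc = BIT(0) | BIT(1) | BIT(2) = 0x7 and the second loop counts
 * three contiguous TCs, so 3 is returned. A table yielding 0x5 (TC0 and
 * TC2 with TC1 unused) would hit the non-contiguous check and return 1.
 */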
5207 
5208 /**
5209  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5210  * @dcbcfg: the corresponding DCBx configuration structure
5211  *
5212  * Query the current DCB configuration and return the number of
5213  * traffic classes enabled from the given DCBX config
5214  **/
5215 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5216 {
5217 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5218 	u8 enabled_tc = 1;
5219 	u8 i;
5220 
5221 	for (i = 0; i < num_tc; i++)
5222 		enabled_tc |= BIT(i);
5223 
5224 	return enabled_tc;
5225 }
5226 
5227 /**
5228  * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5229  * @pf: PF being queried
5230  *
5231  * Query the current MQPRIO configuration and return the number of
5232  * traffic classes enabled.
5233  **/
5234 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5235 {
5236 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5237 	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5238 	u8 enabled_tc = 1, i;
5239 
5240 	for (i = 1; i < num_tc; i++)
5241 		enabled_tc |= BIT(i);
5242 	return enabled_tc;
5243 }
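
/* Both helpers above convert a TC count into a contiguous bitmap starting
 * at TC0; e.g. num_tc = 3 gives enabled_tc = BIT(0) | BIT(1) | BIT(2),
 * i.e. 0x7.
 */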
5244 
5245 /**
5246  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5247  * @pf: PF being queried
5248  *
5249  * Return number of traffic classes enabled for the given PF
5250  **/
5251 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5252 {
5253 	struct i40e_hw *hw = &pf->hw;
5254 	u8 i, enabled_tc = 1;
5255 	u8 num_tc = 0;
5256 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5257 
5258 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5259 		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5260 
5261 	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
5262 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5263 		return 1;
5264 
5265 	/* SFP mode will be enabled for all TCs on port */
5266 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5267 		return i40e_dcb_get_num_tc(dcbcfg);
5268 
5269 	/* In MFP mode, return the count of TCs enabled for this PF */
5270 	if (pf->hw.func_caps.iscsi)
5271 		enabled_tc = i40e_get_iscsi_tc_map(pf);
5272 	else
5273 		return 1; /* Only TC0 */
5274 
5275 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5276 		if (enabled_tc & BIT(i))
5277 			num_tc++;
5278 	}
5279 	return num_tc;
5280 }
5281 
5282 /**
5283  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5284  * @pf: PF being queried
5285  *
5286  * Return a bitmap for enabled traffic classes for this PF.
5287  **/
5288 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5289 {
5290 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5291 		return i40e_mqprio_get_enabled_tc(pf);
5292 
5293 	/* If neither MQPRIO nor DCB is enabled for this PF then just return
5294 	 * default TC
5295 	 */
5296 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5297 		return I40E_DEFAULT_TRAFFIC_CLASS;
5298 
5299 	/* In SFP mode we want the PF to be enabled for all TCs */
5300 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5301 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5302 
5303 	/* MFP enabled and iSCSI PF type */
5304 	if (pf->hw.func_caps.iscsi)
5305 		return i40e_get_iscsi_tc_map(pf);
5306 	else
5307 		return I40E_DEFAULT_TRAFFIC_CLASS;
5308 }
5309 
5310 /**
5311  * i40e_vsi_get_bw_info - Query VSI BW Information
5312  * @vsi: the VSI being queried
5313  *
5314  * Returns 0 on success, negative value on failure
5315  **/
5316 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5317 {
5318 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5319 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5320 	struct i40e_pf *pf = vsi->back;
5321 	struct i40e_hw *hw = &pf->hw;
5322 	i40e_status ret;
5323 	u32 tc_bw_max;
5324 	int i;
5325 
5326 	/* Get the VSI level BW configuration */
5327 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5328 	if (ret) {
5329 		dev_info(&pf->pdev->dev,
5330 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5331 			 i40e_stat_str(&pf->hw, ret),
5332 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5333 		return -EINVAL;
5334 	}
5335 
5336 	/* Get the VSI level BW configuration per TC */
5337 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5338 					       NULL);
5339 	if (ret) {
5340 		dev_info(&pf->pdev->dev,
5341 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5342 			 i40e_stat_str(&pf->hw, ret),
5343 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5344 		return -EINVAL;
5345 	}
5346 
5347 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5348 		dev_info(&pf->pdev->dev,
5349 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5350 			 bw_config.tc_valid_bits,
5351 			 bw_ets_config.tc_valid_bits);
5352 		/* Still continuing despite the mismatch */
5353 	}
5354 
5355 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5356 	vsi->bw_max_quanta = bw_config.max_bw;
5357 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5358 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5359 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5360 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5361 		vsi->bw_ets_limit_credits[i] =
5362 					le16_to_cpu(bw_ets_config.credits[i]);
5363 		/* 3 bits out of 4 for each TC */
5364 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5365 	}
5366 
5367 	return 0;
5368 }
5369 
5370 /**
5371  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5372  * @vsi: the VSI being configured
5373  * @enabled_tc: TC bitmap
5374  * @bw_share: BW shared credits per TC
5375  *
5376  * Returns 0 on success, negative value on failure
5377  **/
5378 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5379 				       u8 *bw_share)
5380 {
5381 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5382 	struct i40e_pf *pf = vsi->back;
5383 	i40e_status ret;
5384 	int i;
5385 
5386 	/* There is no need to reset BW when mqprio mode is on. */
5387 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5388 		return 0;
5389 	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5390 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5391 		if (ret)
5392 			dev_info(&pf->pdev->dev,
5393 				 "Failed to reset tx rate for vsi->seid %u\n",
5394 				 vsi->seid);
5395 		return ret;
5396 	}
5397 	memset(&bw_data, 0, sizeof(bw_data));
5398 	bw_data.tc_valid_bits = enabled_tc;
5399 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5400 		bw_data.tc_bw_credits[i] = bw_share[i];
5401 
5402 	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5403 	if (ret) {
5404 		dev_info(&pf->pdev->dev,
5405 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
5406 			 pf->hw.aq.asq_last_status);
5407 		return -EINVAL;
5408 	}
5409 
5410 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5411 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5412 
5413 	return 0;
5414 }
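
/* Illustrative call (hypothetical values): grant TC0 and TC1 equal
 * relative shares on a VSI:
 *
 *	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = { 1, 1 };
 *
 *	i40e_vsi_configure_bw_alloc(vsi, BIT(0) | BIT(1), bw_share);
 */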
5415 
5416 /**
5417  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5418  * @vsi: the VSI being configured
5419  * @enabled_tc: TC map to be enabled
5420  *
5421  **/
5422 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5423 {
5424 	struct net_device *netdev = vsi->netdev;
5425 	struct i40e_pf *pf = vsi->back;
5426 	struct i40e_hw *hw = &pf->hw;
5427 	u8 netdev_tc = 0;
5428 	int i;
5429 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5430 
5431 	if (!netdev)
5432 		return;
5433 
5434 	if (!enabled_tc) {
5435 		netdev_reset_tc(netdev);
5436 		return;
5437 	}
5438 
5439 	/* Set up actual enabled TCs on the VSI */
5440 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5441 		return;
5442 
5443 	/* set per TC queues for the VSI */
5444 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5445 		/* Only set TC queues for enabled TCs
5446 		 *
5447 		 * e.g. For a VSI that has TC0 and TC3 enabled the
5448 		 * enabled_tc bitmap would be 0b1001 (0x9); the driver
5449 		 * will set numtc for the netdev as 2, and the netdev
5450 		 * layer will reference them as TC 0 and TC 1.
5451 		 */
5452 		if (vsi->tc_config.enabled_tc & BIT(i))
5453 			netdev_set_tc_queue(netdev,
5454 					vsi->tc_config.tc_info[i].netdev_tc,
5455 					vsi->tc_config.tc_info[i].qcount,
5456 					vsi->tc_config.tc_info[i].qoffset);
5457 	}
5458 
5459 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
5460 		return;
5461 
5462 	/* Assign UP2TC map for the VSI */
5463 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5464 		/* Get the actual TC# for the UP */
5465 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5466 		/* Get the mapped netdev TC# for the UP */
5467 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
5468 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
5469 	}
5470 }
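
/* Worked example (hypothetical config): with TC0 and TC3 enabled
 * (numtc = 2) and 4 queues per TC, the function above effectively does:
 *
 *	netdev_set_num_tc(netdev, 2);
 *	netdev_set_tc_queue(netdev, 0, 4, 0);	TC0 -> netdev TC 0
 *	netdev_set_tc_queue(netdev, 1, 4, 4);	TC3 -> netdev TC 1
 */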
5471 
5472 /**
5473  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5474  * @vsi: the VSI being configured
5475  * @ctxt: the ctxt buffer returned from AQ VSI update param command
5476  **/
5477 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5478 				      struct i40e_vsi_context *ctxt)
5479 {
5480 	/* copy just the sections touched not the entire info
5481 	 * since not all sections are valid as returned by
5482 	 * update vsi params
5483 	 */
5484 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
5485 	memcpy(&vsi->info.queue_mapping,
5486 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5487 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5488 	       sizeof(vsi->info.tc_mapping));
5489 }
5490 
5491 /**
5492  * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5493  * @vsi: the VSI being reconfigured
5494  * @vsi_offset: offset from main VF VSI
5495  */
5496 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5497 {
5498 	struct i40e_vsi_context ctxt = {};
5499 	struct i40e_pf *pf;
5500 	struct i40e_hw *hw;
5501 	int ret;
5502 
5503 	if (!vsi)
5504 		return I40E_ERR_PARAM;
5505 	pf = vsi->back;
5506 	hw = &pf->hw;
5507 
5508 	ctxt.seid = vsi->seid;
5509 	ctxt.pf_num = hw->pf_id;
5510 	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5511 	ctxt.uplink_seid = vsi->uplink_seid;
5512 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5513 	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5514 	ctxt.info = vsi->info;
5515 
5516 	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5517 				 false);
5518 	if (vsi->reconfig_rss) {
5519 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
5520 				      vsi->num_queue_pairs);
5521 		ret = i40e_vsi_config_rss(vsi);
5522 		if (ret) {
5523 			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5524 			return ret;
5525 		}
5526 		vsi->reconfig_rss = false;
5527 	}
5528 
5529 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5530 	if (ret) {
5531 		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
5532 			 i40e_stat_str(hw, ret),
5533 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5534 		return ret;
5535 	}
5536 	/* update the local VSI info with updated queue map */
5537 	i40e_vsi_update_queue_map(vsi, &ctxt);
5538 	vsi->info.valid_sections = 0;
5539 
5540 	return ret;
5541 }
5542 
5543 /**
5544  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5545  * @vsi: VSI to be configured
5546  * @enabled_tc: TC bitmap
5547  *
5548  * This configures a particular VSI for TCs that are mapped to the
5549  * given TC bitmap. It uses default bandwidth share for TCs across
5550  * VSIs to configure TC for a particular VSI.
5551  *
5552  * NOTE:
5553  * It is expected that the VSI queues have been quiesced before calling
5554  * this function.
5555  **/
5556 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5557 {
5558 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5559 	struct i40e_pf *pf = vsi->back;
5560 	struct i40e_hw *hw = &pf->hw;
5561 	struct i40e_vsi_context ctxt;
5562 	int ret = 0;
5563 	int i;
5564 
5565 	/* Check if enabled_tc is same as existing or new TCs */
5566 	if (vsi->tc_config.enabled_tc == enabled_tc &&
5567 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5568 		return ret;
5569 
5570 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
5571 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5572 		if (enabled_tc & BIT(i))
5573 			bw_share[i] = 1;
5574 	}
5575 
5576 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5577 	if (ret) {
5578 		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5579 
5580 		dev_info(&pf->pdev->dev,
5581 			 "Failed configuring TC map %d for VSI %d\n",
5582 			 enabled_tc, vsi->seid);
5583 		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5584 						  &bw_config, NULL);
5585 		if (ret) {
5586 			dev_info(&pf->pdev->dev,
5587 				 "Failed querying vsi bw info, err %s aq_err %s\n",
5588 				 i40e_stat_str(hw, ret),
5589 				 i40e_aq_str(hw, hw->aq.asq_last_status));
5590 			goto out;
5591 		}
5592 		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5593 			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5594 
5595 			if (!valid_tc)
5596 				valid_tc = bw_config.tc_valid_bits;
5597 			/* Always enable TC0, no matter what */
5598 			valid_tc |= 1;
5599 			dev_info(&pf->pdev->dev,
5600 				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5601 				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5602 			enabled_tc = valid_tc;
5603 		}
5604 
5605 		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5606 		if (ret) {
5607 			dev_err(&pf->pdev->dev,
5608 				"Unable to  configure TC map %d for VSI %d\n",
5609 				enabled_tc, vsi->seid);
5610 			goto out;
5611 		}
5612 	}
5613 
5614 	/* Update Queue Pairs Mapping for currently enabled UPs */
5615 	ctxt.seid = vsi->seid;
5616 	ctxt.pf_num = vsi->back->hw.pf_id;
5617 	ctxt.vf_num = 0;
5618 	ctxt.uplink_seid = vsi->uplink_seid;
5619 	ctxt.info = vsi->info;
5620 	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5621 		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5622 		if (ret)
5623 			goto out;
5624 	} else {
5625 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5626 	}
5627 
5628 	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5629 	 * queues changed.
5630 	 */
5631 	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5632 		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5633 				      vsi->num_queue_pairs);
5634 		ret = i40e_vsi_config_rss(vsi);
5635 		if (ret) {
5636 			dev_info(&vsi->back->pdev->dev,
5637 				 "Failed to reconfig rss for num_queues\n");
5638 			return ret;
5639 		}
5640 		vsi->reconfig_rss = false;
5641 	}
5642 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5643 		ctxt.info.valid_sections |=
5644 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5645 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5646 	}
5647 
5648 	/* Update the VSI after updating the VSI queue-mapping
5649 	 * information
5650 	 */
5651 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5652 	if (ret) {
5653 		dev_info(&pf->pdev->dev,
5654 			 "Update vsi tc config failed, err %s aq_err %s\n",
5655 			 i40e_stat_str(hw, ret),
5656 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5657 		goto out;
5658 	}
5659 	/* update the local VSI info with updated queue map */
5660 	i40e_vsi_update_queue_map(vsi, &ctxt);
5661 	vsi->info.valid_sections = 0;
5662 
5663 	/* Update current VSI BW information */
5664 	ret = i40e_vsi_get_bw_info(vsi);
5665 	if (ret) {
5666 		dev_info(&pf->pdev->dev,
5667 			 "Failed updating vsi bw info, err %s aq_err %s\n",
5668 			 i40e_stat_str(hw, ret),
5669 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5670 		goto out;
5671 	}
5672 
5673 	/* Update the netdev TC setup */
5674 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5675 out:
5676 	return ret;
5677 }
5678 
5679 /**
5680  * i40e_get_link_speed - Returns link speed for the interface
5681  * @vsi: VSI to be configured
5682  *
5683  **/
5684 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5685 {
5686 	struct i40e_pf *pf = vsi->back;
5687 
5688 	switch (pf->hw.phy.link_info.link_speed) {
5689 	case I40E_LINK_SPEED_40GB:
5690 		return 40000;
5691 	case I40E_LINK_SPEED_25GB:
5692 		return 25000;
5693 	case I40E_LINK_SPEED_20GB:
5694 		return 20000;
5695 	case I40E_LINK_SPEED_10GB:
5696 		return 10000;
5697 	case I40E_LINK_SPEED_1GB:
5698 		return 1000;
5699 	default:
5700 		return -EINVAL;
5701 	}
5702 }
5703 
5704 /**
5705  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5706  * @vsi: VSI to be configured
5707  * @seid: seid of the channel/VSI
5708  * @max_tx_rate: max TX rate to be configured as BW limit
5709  *
5710  * Helper function to set BW limit for a given VSI
5711  **/
5712 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5713 {
5714 	struct i40e_pf *pf = vsi->back;
5715 	u64 credits = 0;
5716 	int speed = 0;
5717 	int ret = 0;
5718 
5719 	speed = i40e_get_link_speed(vsi);
5720 	if (max_tx_rate > speed) {
5721 		dev_err(&pf->pdev->dev,
5722 			"Invalid max tx rate %llu specified for VSI seid %d.",
5723 			max_tx_rate, seid);
5724 		return -EINVAL;
5725 	}
5726 	if (max_tx_rate && max_tx_rate < 50) {
5727 		dev_warn(&pf->pdev->dev,
5728 			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5729 		max_tx_rate = 50;
5730 	}
5731 
5732 	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
5733 	credits = max_tx_rate;
5734 	do_div(credits, I40E_BW_CREDIT_DIVISOR);
5735 	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5736 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5737 	if (ret)
5738 		dev_err(&pf->pdev->dev,
5739 			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5740 			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5741 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5742 	return ret;
5743 }
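
/* Worked example: a request of max_tx_rate = 975 Mbps passes the link
 * speed and 50 Mbps minimum checks, then do_div() with
 * I40E_BW_CREDIT_DIVISOR (50 Mbps per credit) yields credits = 19, so
 * the hardware enforces a 950 Mbps cap; rates are effectively rounded
 * down to a multiple of 50 Mbps.
 */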
5744 
5745 /**
5746  * i40e_remove_queue_channels - Remove queue channels for the TCs
5747  * @vsi: VSI to be configured
5748  *
5749  * Remove queue channels for the TCs
5750  **/
5751 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5752 {
5753 	enum i40e_admin_queue_err last_aq_status;
5754 	struct i40e_cloud_filter *cfilter;
5755 	struct i40e_channel *ch, *ch_tmp;
5756 	struct i40e_pf *pf = vsi->back;
5757 	struct hlist_node *node;
5758 	int ret, i;
5759 
5760 	/* Reset rss size that was stored when reconfiguring rss for
5761 	 * channel VSIs with non-power-of-2 queue count.
5762 	 */
5763 	vsi->current_rss_size = 0;
5764 
5765 	/* perform cleanup for channels if they exist */
5766 	if (list_empty(&vsi->ch_list))
5767 		return;
5768 
5769 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5770 		struct i40e_vsi *p_vsi;
5771 
5772 		list_del(&ch->list);
5773 		p_vsi = ch->parent_vsi;
5774 		if (!p_vsi || !ch->initialized) {
5775 			kfree(ch);
5776 			continue;
5777 		}
5778 		/* Reset queue contexts */
5779 		for (i = 0; i < ch->num_queue_pairs; i++) {
5780 			struct i40e_ring *tx_ring, *rx_ring;
5781 			u16 pf_q;
5782 
5783 			pf_q = ch->base_queue + i;
5784 			tx_ring = vsi->tx_rings[pf_q];
5785 			tx_ring->ch = NULL;
5786 
5787 			rx_ring = vsi->rx_rings[pf_q];
5788 			rx_ring->ch = NULL;
5789 		}
5790 
5791 		/* Reset BW configured for this VSI via mqprio */
5792 		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5793 		if (ret)
5794 			dev_info(&vsi->back->pdev->dev,
5795 				 "Failed to reset tx rate for ch->seid %u\n",
5796 				 ch->seid);
5797 
5798 		/* delete cloud filters associated with this channel */
5799 		hlist_for_each_entry_safe(cfilter, node,
5800 					  &pf->cloud_filter_list, cloud_node) {
5801 			if (cfilter->seid != ch->seid)
5802 				continue;
5803 
5804 			hash_del(&cfilter->cloud_node);
5805 			if (cfilter->dst_port)
5806 				ret = i40e_add_del_cloud_filter_big_buf(vsi,
5807 									cfilter,
5808 									false);
5809 			else
5810 				ret = i40e_add_del_cloud_filter(vsi, cfilter,
5811 								false);
5812 			last_aq_status = pf->hw.aq.asq_last_status;
5813 			if (ret)
5814 				dev_info(&pf->pdev->dev,
5815 					 "Failed to delete cloud filter, err %s aq_err %s\n",
5816 					 i40e_stat_str(&pf->hw, ret),
5817 					 i40e_aq_str(&pf->hw, last_aq_status));
5818 			kfree(cfilter);
5819 		}
5820 
5821 		/* delete VSI from FW */
5822 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5823 					     NULL);
5824 		if (ret)
5825 			dev_err(&vsi->back->pdev->dev,
5826 				"unable to remove channel (%d) for parent VSI(%d)\n",
5827 				ch->seid, p_vsi->seid);
5828 		kfree(ch);
5829 	}
5830 	INIT_LIST_HEAD(&vsi->ch_list);
5831 }
5832 
5833 /**
5834  * i40e_get_max_queues_for_channel
5835  * @vsi: ptr to VSI to which channels are associated with
5836  *
5837  * Helper function which returns max value among the queue counts set on the
5838  * channels/TCs created.
5839  **/
5840 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5841 {
5842 	struct i40e_channel *ch, *ch_tmp;
5843 	int max = 0;
5844 
5845 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5846 		if (!ch->initialized)
5847 			continue;
5848 		if (ch->num_queue_pairs > max)
5849 			max = ch->num_queue_pairs;
5850 	}
5851 
5852 	return max;
5853 }
5854 
5855 /**
5856  * i40e_validate_num_queues - validate num_queues w.r.t channel
5857  * @pf: ptr to PF device
5858  * @num_queues: number of queues
5859  * @vsi: the parent VSI
5860  * @reconfig_rss: indicates whether RSS should be reconfigured
5861  *
5862  * This function validates the number of queues in the context of the new
5863  * channel being established and determines whether RSS for the parent
5864  * VSI should be reconfigured.
5865  **/
5866 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5867 				    struct i40e_vsi *vsi, bool *reconfig_rss)
5868 {
5869 	int max_ch_queues;
5870 
5871 	if (!reconfig_rss)
5872 		return -EINVAL;
5873 
5874 	*reconfig_rss = false;
5875 	if (vsi->current_rss_size) {
5876 		if (num_queues > vsi->current_rss_size) {
5877 			dev_dbg(&pf->pdev->dev,
5878 				"Error: num_queues (%d) > vsi's current_size(%d)\n",
5879 				num_queues, vsi->current_rss_size);
5880 			return -EINVAL;
5881 		} else if ((num_queues < vsi->current_rss_size) &&
5882 			   (!is_power_of_2(num_queues))) {
5883 			dev_dbg(&pf->pdev->dev,
5884 				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5885 				num_queues, vsi->current_rss_size);
5886 			return -EINVAL;
5887 		}
5888 	}
5889 
5890 	if (!is_power_of_2(num_queues)) {
5891 		/* Find the max num_queues configured across existing
5892 		 * channels, if any, and enforce that 'num_queues' is at
5893 		 * least the largest queue count ever configured for a
5894 		 * channel.
5895 		 */
5896 		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5897 		if (num_queues < max_ch_queues) {
5898 			dev_dbg(&pf->pdev->dev,
5899 				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
5900 				num_queues, max_ch_queues);
5901 			return -EINVAL;
5902 		}
5903 		*reconfig_rss = true;
5904 	}
5905 
5906 	return 0;
5907 }
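
/* Worked example (hypothetical sizes): with vsi->current_rss_size = 8,
 * num_queues = 12 is rejected (above the current size), 6 is rejected
 * (below it but not a power of 2) and 4 passes with *reconfig_rss left
 * false. On a VSI with no current_rss_size, num_queues = 6 with
 * max_ch_queues = 4 passes and sets *reconfig_rss = true.
 */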
5908 
5909 /**
5910  * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5911  * @vsi: the VSI being setup
5912  * @rss_size: size of RSS; the LUT gets reprogrammed accordingly
5913  *
5914  * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5915  **/
5916 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5917 {
5918 	struct i40e_pf *pf = vsi->back;
5919 	u8 seed[I40E_HKEY_ARRAY_SIZE];
5920 	struct i40e_hw *hw = &pf->hw;
5921 	int local_rss_size;
5922 	u8 *lut;
5923 	int ret;
5924 
5925 	if (!vsi->rss_size)
5926 		return -EINVAL;
5927 
5928 	if (rss_size > vsi->rss_size)
5929 		return -EINVAL;
5930 
5931 	local_rss_size = min_t(int, vsi->rss_size, rss_size);
5932 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5933 	if (!lut)
5934 		return -ENOMEM;
5935 
5936 	/* Ignore the user-configured LUT if there is one */
5937 	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5938 
5939 	/* Use user configured hash key if there is one, otherwise
5940 	 * use default.
5941 	 */
5942 	if (vsi->rss_hkey_user)
5943 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5944 	else
5945 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5946 
5947 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5948 	if (ret) {
5949 		dev_info(&pf->pdev->dev,
5950 			 "Cannot set RSS lut, err %s aq_err %s\n",
5951 			 i40e_stat_str(hw, ret),
5952 			 i40e_aq_str(hw, hw->aq.asq_last_status));
5953 		kfree(lut);
5954 		return ret;
5955 	}
5956 	kfree(lut);
5957 
5958 	/* Update the stored rss_size values */
5959 	if (!vsi->orig_rss_size)
5960 		vsi->orig_rss_size = vsi->rss_size;
5961 	vsi->current_rss_size = local_rss_size;
5962 
5963 	return ret;
5964 }
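
/* Illustrative result (hypothetical sizes): for rss_table_size = 512 and
 * rss_size = 6, i40e_fill_rss_lut() programs lut[i] = i % 6, spreading
 * flows round-robin over queues 0..5, while the pre-channel size is
 * remembered in vsi->orig_rss_size for a later restore.
 */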
5965 
5966 /**
5967  * i40e_channel_setup_queue_map - Setup a channel queue map
5968  * @pf: ptr to PF device
5969  * @ctxt: VSI context structure
5970  * @ch: ptr to channel structure
5971  *
5972  * Setup queue map for a specific channel
5973  **/
5974 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5975 					 struct i40e_vsi_context *ctxt,
5976 					 struct i40e_channel *ch)
5977 {
5978 	u16 qcount, qmap, sections = 0;
5979 	u8 offset = 0;
5980 	int pow;
5981 
5982 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5983 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5984 
5985 	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5986 	ch->num_queue_pairs = qcount;
5987 
5988 	/* find the exponent of the power-of-2 that covers num queue pairs */
5989 	pow = ilog2(qcount);
5990 	if (!is_power_of_2(qcount))
5991 		pow++;
5992 
5993 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5994 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5995 
5996 	/* Setup queue TC[0].qmap for given VSI context */
5997 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5998 
5999 	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6000 	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6001 	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6002 	ctxt->info.valid_sections |= cpu_to_le16(sections);
6003 }
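
/* Worked example: ch->num_queue_pairs = 5 is not a power of 2, so
 * pow = ilog2(5) + 1 = 3 and TC0's qmap advertises 2^3 = 8 queues at
 * offset 0:
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */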
6004 
6005 /**
6006  * i40e_add_channel - add a channel by adding VSI
6007  * @pf: ptr to PF device
6008  * @uplink_seid: underlying HW switching element (VEB) ID
6009  * @ch: ptr to channel structure
6010  *
6011  * Add a channel (VSI) using add_vsi and queue_map
6012  **/
6013 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6014 			    struct i40e_channel *ch)
6015 {
6016 	struct i40e_hw *hw = &pf->hw;
6017 	struct i40e_vsi_context ctxt;
6018 	u8 enabled_tc = 0x1; /* TC0 enabled */
6019 	int ret;
6020 
6021 	if (ch->type != I40E_VSI_VMDQ2) {
6022 		dev_info(&pf->pdev->dev,
6023 			 "add new vsi failed, ch->type %d\n", ch->type);
6024 		return -EINVAL;
6025 	}
6026 
6027 	memset(&ctxt, 0, sizeof(ctxt));
6028 	ctxt.pf_num = hw->pf_id;
6029 	ctxt.vf_num = 0;
6030 	ctxt.uplink_seid = uplink_seid;
6031 	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6032 	if (ch->type == I40E_VSI_VMDQ2)
6033 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6034 
6035 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
6036 		ctxt.info.valid_sections |=
6037 		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6038 		ctxt.info.switch_id =
6039 		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6040 	}
6041 
6042 	/* Set queue map for a given VSI context */
6043 	i40e_channel_setup_queue_map(pf, &ctxt, ch);
6044 
6045 	/* Now time to create VSI */
6046 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6047 	if (ret) {
6048 		dev_info(&pf->pdev->dev,
6049 			 "add new vsi failed, err %s aq_err %s\n",
6050 			 i40e_stat_str(&pf->hw, ret),
6051 			 i40e_aq_str(&pf->hw,
6052 				     pf->hw.aq.asq_last_status));
6053 		return -ENOENT;
6054 	}
6055 
6056 	/* Success: update the channel; set enabled_tc only if the channel
6057 	 * is not a macvlan
6058 	 */
6059 	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6060 	ch->seid = ctxt.seid;
6061 	ch->vsi_number = ctxt.vsi_number;
6062 	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6063 
6064 	/* copy just the sections touched not the entire info
6065 	 * since not all sections are valid as returned by
6066 	 * update vsi params
6067 	 */
6068 	ch->info.mapping_flags = ctxt.info.mapping_flags;
6069 	memcpy(&ch->info.queue_mapping,
6070 	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6071 	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6072 	       sizeof(ctxt.info.tc_mapping));
6073 
6074 	return 0;
6075 }
6076 
6077 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6078 				  u8 *bw_share)
6079 {
6080 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6081 	i40e_status ret;
6082 	int i;
6083 
6084 	memset(&bw_data, 0, sizeof(bw_data));
6085 	bw_data.tc_valid_bits = ch->enabled_tc;
6086 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6087 		bw_data.tc_bw_credits[i] = bw_share[i];
6088 
6089 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6090 				       &bw_data, NULL);
6091 	if (ret) {
6092 		dev_info(&vsi->back->pdev->dev,
6093 			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6094 			 vsi->back->hw.aq.asq_last_status, ch->seid);
6095 		return -EINVAL;
6096 	}
6097 
6098 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6099 		ch->info.qs_handle[i] = bw_data.qs_handles[i];
6100 
6101 	return 0;
6102 }
6103 
6104 /**
6105  * i40e_channel_config_tx_ring - config TX ring associated with new channel
6106  * @pf: ptr to PF device
6107  * @vsi: the VSI being setup
6108  * @ch: ptr to channel structure
6109  *
6110  * Configure TX rings associated with the channel (VSI), since its queues
6111  * are borrowed from the parent VSI.
6112  **/
6113 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6114 				       struct i40e_vsi *vsi,
6115 				       struct i40e_channel *ch)
6116 {
6117 	i40e_status ret;
6118 	int i;
6119 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6120 
6121 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
6122 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6123 		if (ch->enabled_tc & BIT(i))
6124 			bw_share[i] = 1;
6125 	}
6126 
6127 	/* configure BW for new VSI */
6128 	ret = i40e_channel_config_bw(vsi, ch, bw_share);
6129 	if (ret) {
6130 		dev_info(&vsi->back->pdev->dev,
6131 			 "Failed configuring TC map %d for channel (seid %u)\n",
6132 			 ch->enabled_tc, ch->seid);
6133 		return ret;
6134 	}
6135 
6136 	for (i = 0; i < ch->num_queue_pairs; i++) {
6137 		struct i40e_ring *tx_ring, *rx_ring;
6138 		u16 pf_q;
6139 
6140 		pf_q = ch->base_queue + i;
6141 
6142 		/* Get the TX ring ptr of the main VSI so the TX queue
6143 		 * context can be set up again
6144 		 */
6145 		tx_ring = vsi->tx_rings[pf_q];
6146 		tx_ring->ch = ch;
6147 
6148 		/* Get the RX ring ptr */
6149 		rx_ring = vsi->rx_rings[pf_q];
6150 		rx_ring->ch = ch;
6151 	}
6152 
6153 	return 0;
6154 }
6155 
6156 /**
6157  * i40e_setup_hw_channel - setup new channel
6158  * @pf: ptr to PF device
6159  * @vsi: the VSI being setup
6160  * @ch: ptr to channel structure
6161  * @uplink_seid: underlying HW switching element (VEB) ID
6162  * @type: type of channel to be created (VMDq2/VF)
6163  *
6164  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6165  * and configures TX rings accordingly
6166  **/
6167 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6168 					struct i40e_vsi *vsi,
6169 					struct i40e_channel *ch,
6170 					u16 uplink_seid, u8 type)
6171 {
6172 	int ret;
6173 
6174 	ch->initialized = false;
6175 	ch->base_queue = vsi->next_base_queue;
6176 	ch->type = type;
6177 
6178 	/* Proceed with creation of channel (VMDq2) VSI */
6179 	ret = i40e_add_channel(pf, uplink_seid, ch);
6180 	if (ret) {
6181 		dev_info(&pf->pdev->dev,
6182 			 "failed to add_channel using uplink_seid %u\n",
6183 			 uplink_seid);
6184 		return ret;
6185 	}
6186 
6187 	/* Mark the successful creation of channel */
6188 	ch->initialized = true;
6189 
6190 	/* Reconfigure TX queues using QTX_CTL register */
6191 	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6192 	if (ret) {
6193 		dev_info(&pf->pdev->dev,
6194 			 "failed to configure TX rings for channel %u\n",
6195 			 ch->seid);
6196 		return ret;
6197 	}
6198 
6199 	/* update 'next_base_queue' */
6200 	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6201 	dev_dbg(&pf->pdev->dev,
6202 		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6203 		ch->seid, ch->vsi_number, ch->stat_counter_idx,
6204 		ch->num_queue_pairs,
6205 		vsi->next_base_queue);
6206 	return ret;
6207 }
6208 
6209 /**
6210  * i40e_setup_channel - setup new channel using uplink element
6211  * @pf: ptr to PF device
6212  * @vsi: pointer to the VSI to set up the channel within
6213  * @ch: ptr to channel structure
6214  *
6215  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6216  * and uplink switching element (uplink_seid)
6217  **/
6218 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6219 			       struct i40e_channel *ch)
6220 {
6221 	u8 vsi_type;
6222 	u16 seid;
6223 	int ret;
6224 
6225 	if (vsi->type == I40E_VSI_MAIN) {
6226 		vsi_type = I40E_VSI_VMDQ2;
6227 	} else {
6228 		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6229 			vsi->type);
6230 		return false;
6231 	}
6232 
6233 	/* underlying switching element */
6234 	seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6235 
6236 	/* create channel (VSI), configure TX rings */
6237 	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6238 	if (ret) {
6239 		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6240 		return false;
6241 	}
6242 
6243 	return ch->initialized;
6244 }
6245 
6246 /**
6247  * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6248  * @vsi: ptr to VSI which has PF backing
6249  *
6250  * Sets up switch mode correctly if it needs to be changed and perform
6251  * what are allowed modes.
6252  **/
6253 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6254 {
6255 	u8 mode;
6256 	struct i40e_pf *pf = vsi->back;
6257 	struct i40e_hw *hw = &pf->hw;
6258 	int ret;
6259 
6260 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6261 	if (ret)
6262 		return -EINVAL;
6263 
6264 	if (hw->dev_caps.switch_mode) {
6265 		/* if switch mode is set, support mode2 (non-tunneled for
6266 		 * cloud filter) for now
6267 		 */
6268 		u32 switch_mode = hw->dev_caps.switch_mode &
6269 				  I40E_SWITCH_MODE_MASK;
6270 		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6271 			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6272 				return 0;
6273 			dev_err(&pf->pdev->dev,
6274 				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6275 				hw->dev_caps.switch_mode);
6276 			return -EINVAL;
6277 		}
6278 	}
6279 
6280 	/* Set Bit 7 to be valid */
6281 	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6282 
6283 	/* Set L4type for TCP support */
6284 	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6285 
6286 	/* Set cloud filter mode */
6287 	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6288 
6289 	/* Prep mode field for set_switch_config */
6290 	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6291 					pf->last_sw_conf_valid_flags,
6292 					mode, NULL);
6293 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6294 		dev_err(&pf->pdev->dev,
6295 			"couldn't set switch config bits, err %s aq_err %s\n",
6296 			i40e_stat_str(hw, ret),
6297 			i40e_aq_str(hw,
6298 				    hw->aq.asq_last_status));
6299 
6300 	return ret;
6301 }
6302 
6303 /**
6304  * i40e_create_queue_channel - function to create channel
6305  * @vsi: VSI to be configured
6306  * @ch: ptr to channel (it contains channel specific params)
6307  *
6308  * This function creates a channel (VSI) using the num_queues specified by
6309  * the user and reconfigures RSS if needed.
6310  **/
6311 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6312 			      struct i40e_channel *ch)
6313 {
6314 	struct i40e_pf *pf = vsi->back;
6315 	bool reconfig_rss;
6316 	int err;
6317 
6318 	if (!ch)
6319 		return -EINVAL;
6320 
6321 	if (!ch->num_queue_pairs) {
6322 		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6323 			ch->num_queue_pairs);
6324 		return -EINVAL;
6325 	}
6326 
6327 	/* validate user requested num_queues for channel */
6328 	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6329 				       &reconfig_rss);
6330 	if (err) {
6331 		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6332 			 ch->num_queue_pairs);
6333 		return -EINVAL;
6334 	}
6335 
6336 	/* By default we are in VEPA mode, if this is the first VF/VMDq
6337 	 * VSI to be added switch to VEB mode.
6338 	 */
6339 
6340 	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6341 		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6342 
6343 		if (vsi->type == I40E_VSI_MAIN) {
6344 			if (pf->flags & I40E_FLAG_TC_MQPRIO)
6345 				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6346 			else
6347 				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6348 		}
6349 		/* from now on, for the main VSI the number of queues will be
6350 		 * the value of TC0's queue count
6351 		 */
6352 	}
6353 
6354 	/* By this time, vsi->cnt_q_avail should be non-zero and
6355 	 * at least num_queue_pairs
6356 	 */
6357 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6358 		dev_dbg(&pf->pdev->dev,
6359 			"Error: cnt_q_avail (%u) less than num_queues %d\n",
6360 			vsi->cnt_q_avail, ch->num_queue_pairs);
6361 		return -EINVAL;
6362 	}
6363 
6364 	/* reconfig_rss only if vsi type is MAIN_VSI */
6365 	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6366 		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6367 		if (err) {
6368 			dev_info(&pf->pdev->dev,
6369 				 "Error: unable to reconfig rss for num_queues (%u)\n",
6370 				 ch->num_queue_pairs);
6371 			return -EINVAL;
6372 		}
6373 	}
6374 
6375 	if (!i40e_setup_channel(pf, vsi, ch)) {
6376 		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6377 		return -EINVAL;
6378 	}
6379 
6380 	dev_info(&pf->pdev->dev,
6381 		 "Setup channel (id:%u) utilizing num_queues %d\n",
6382 		 ch->seid, ch->num_queue_pairs);
6383 
6384 	/* configure VSI for BW limit */
6385 	if (ch->max_tx_rate) {
6386 		u64 credits = ch->max_tx_rate;
6387 
6388 		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6389 			return -EINVAL;
6390 
6391 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
6392 		dev_dbg(&pf->pdev->dev,
6393 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6394 			ch->max_tx_rate,
6395 			credits,
6396 			ch->seid);
6397 	}
6398 
6399 	/* in case of VF, this will be the main SR-IOV VSI */
6400 	ch->parent_vsi = vsi;
6401 
6402 	/* and update main_vsi's count for queue_available to use */
6403 	vsi->cnt_q_avail -= ch->num_queue_pairs;
6404 
6405 	return 0;
6406 }
6407 
6408 /**
6409  * i40e_configure_queue_channels - Add queue channel for the given TCs
6410  * @vsi: VSI to be configured
6411  *
6412  * Configures queue channel mapping to the given TCs
6413  **/
6414 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6415 {
6416 	struct i40e_channel *ch;
6417 	u64 max_rate = 0;
6418 	int ret = 0, i;
6419 
6420 	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6421 	vsi->tc_seid_map[0] = vsi->seid;
6422 	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6423 		if (vsi->tc_config.enabled_tc & BIT(i)) {
6424 			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6425 			if (!ch) {
6426 				ret = -ENOMEM;
6427 				goto err_free;
6428 			}
6429 
6430 			INIT_LIST_HEAD(&ch->list);
6431 			ch->num_queue_pairs =
6432 				vsi->tc_config.tc_info[i].qcount;
6433 			ch->base_queue =
6434 				vsi->tc_config.tc_info[i].qoffset;
6435 
6436 			/* Bandwidth limit through tc interface is in bytes/s,
6437 			 * convert it to Mbit/s
6438 			 */
6439 			max_rate = vsi->mqprio_qopt.max_rate[i];
6440 			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6441 			ch->max_tx_rate = max_rate;
6442 
6443 			list_add_tail(&ch->list, &vsi->ch_list);
6444 
6445 			ret = i40e_create_queue_channel(vsi, ch);
6446 			if (ret) {
6447 				dev_err(&vsi->back->pdev->dev,
6448 					"Failed creating queue channel with TC%d: queues %d\n",
6449 					i, ch->num_queue_pairs);
6450 				goto err_free;
6451 			}
6452 			vsi->tc_seid_map[i] = ch->seid;
6453 		}
6454 	}
6455 	return ret;
6456 
6457 err_free:
6458 	i40e_remove_queue_channels(vsi);
6459 	return ret;
6460 }
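
/* Worked conversion: mqprio hands max_rate down in bytes/s, so a user
 * cap of 125,000,000 bytes/s becomes
 * 125000000 / I40E_BW_MBPS_DIVISOR (125000) = 1000 Mbit/s by the time
 * it is stored in ch->max_tx_rate above.
 */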
6461 
6462 /**
6463  * i40e_veb_config_tc - Configure TCs for given VEB
6464  * @veb: given VEB
6465  * @enabled_tc: TC bitmap
6466  *
6467  * Configures given TC bitmap for VEB (switching) element
6468  **/
6469 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6470 {
6471 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6472 	struct i40e_pf *pf = veb->pf;
6473 	int ret = 0;
6474 	int i;
6475 
6476 	/* No TCs or already enabled TCs just return */
6477 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
6478 		return ret;
6479 
6480 	bw_data.tc_valid_bits = enabled_tc;
6481 	/* bw_data.absolute_credits is not set (relative) */
6482 
6483 	/* Enable ETS TCs with equal BW Share for now */
6484 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6485 		if (enabled_tc & BIT(i))
6486 			bw_data.tc_bw_share_credits[i] = 1;
6487 	}
6488 
6489 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6490 						   &bw_data, NULL);
6491 	if (ret) {
6492 		dev_info(&pf->pdev->dev,
6493 			 "VEB bw config failed, err %s aq_err %s\n",
6494 			 i40e_stat_str(&pf->hw, ret),
6495 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6496 		goto out;
6497 	}
6498 
6499 	/* Update the BW information */
6500 	ret = i40e_veb_get_bw_info(veb);
6501 	if (ret) {
6502 		dev_info(&pf->pdev->dev,
6503 			 "Failed getting veb bw config, err %s aq_err %s\n",
6504 			 i40e_stat_str(&pf->hw, ret),
6505 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6506 	}
6507 
6508 out:
6509 	return ret;
6510 }
6511 
6512 #ifdef CONFIG_I40E_DCB
6513 /**
6514  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6515  * @pf: PF struct
6516  *
6517  * Reconfigure VEB/VSIs on a given PF; it is assumed that
6518  * the caller has quiesced all the VSIs before calling
6519  * this function
6520  **/
6521 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6522 {
6523 	u8 tc_map = 0;
6524 	int ret;
6525 	u8 v;
6526 
6527 	/* Enable the TCs available on PF to all VEBs */
6528 	tc_map = i40e_pf_get_tc_map(pf);
6529 	if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6530 		return;
6531 
6532 	for (v = 0; v < I40E_MAX_VEB; v++) {
6533 		if (!pf->veb[v])
6534 			continue;
6535 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6536 		if (ret) {
6537 			dev_info(&pf->pdev->dev,
6538 				 "Failed configuring TC for VEB seid=%d\n",
6539 				 pf->veb[v]->seid);
6540 			/* Will try to configure as many components as possible */
6541 		}
6542 	}
6543 
6544 	/* Update each VSI */
6545 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6546 		if (!pf->vsi[v])
6547 			continue;
6548 
6549 		/* - Enable all TCs for the LAN VSI
6550 		 * - For all others keep them at TC0 for now
6551 		 */
6552 		if (v == pf->lan_vsi)
6553 			tc_map = i40e_pf_get_tc_map(pf);
6554 		else
6555 			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6556 
6557 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6558 		if (ret) {
6559 			dev_info(&pf->pdev->dev,
6560 				 "Failed configuring TC for VSI seid=%d\n",
6561 				 pf->vsi[v]->seid);
6562 			/* Will try to configure as many components as possible */
6563 		} else {
6564 			/* Re-configure VSI vectors based on updated TC map */
6565 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6566 			if (pf->vsi[v]->netdev)
6567 				i40e_dcbnl_set_all(pf->vsi[v]);
6568 		}
6569 	}
6570 }
6571 
6572 /**
6573  * i40e_resume_port_tx - Resume port Tx
6574  * @pf: PF struct
6575  *
6576  * Resume a port's Tx and issue a PF reset in case of failure to
6577  * resume.
6578  **/
6579 static int i40e_resume_port_tx(struct i40e_pf *pf)
6580 {
6581 	struct i40e_hw *hw = &pf->hw;
6582 	int ret;
6583 
6584 	ret = i40e_aq_resume_port_tx(hw, NULL);
6585 	if (ret) {
6586 		dev_info(&pf->pdev->dev,
6587 			 "Resume Port Tx failed, err %s aq_err %s\n",
6588 			  i40e_stat_str(&pf->hw, ret),
6589 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6590 		/* Schedule PF reset to recover */
6591 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6592 		i40e_service_event_schedule(pf);
6593 	}
6594 
6595 	return ret;
6596 }
6597 
6598 /**
6599  * i40e_suspend_port_tx - Suspend port Tx
6600  * @pf: PF struct
6601  *
6602  * Suspend a port's Tx and issue a PF reset in case of failure.
6603  **/
6604 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6605 {
6606 	struct i40e_hw *hw = &pf->hw;
6607 	int ret;
6608 
6609 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6610 	if (ret) {
6611 		dev_info(&pf->pdev->dev,
6612 			 "Suspend Port Tx failed, err %s aq_err %s\n",
6613 			 i40e_stat_str(&pf->hw, ret),
6614 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6615 		/* Schedule PF reset to recover */
6616 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6617 		i40e_service_event_schedule(pf);
6618 	}
6619 
6620 	return ret;
6621 }
6622 
6623 /**
6624  * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6625  * @pf: PF being configured
6626  * @new_cfg: New DCBX configuration
6627  *
6628  * Program DCB settings into HW and reconfigure VEB/VSIs on
6629  * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6630  **/
6631 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6632 				  struct i40e_dcbx_config *new_cfg)
6633 {
6634 	struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6635 	int ret;
6636 
6637 	/* Check if reconfiguration is needed */
6638 	if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6639 		dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6640 		return 0;
6641 	}
6642 
6643 	/* Config change disable all VSIs */
6644 	i40e_pf_quiesce_all_vsi(pf);
6645 
6646 	/* Copy the new config to the current config */
6647 	*old_cfg = *new_cfg;
6648 	old_cfg->etsrec = old_cfg->etscfg;
6649 	ret = i40e_set_dcb_config(&pf->hw);
6650 	if (ret) {
6651 		dev_info(&pf->pdev->dev,
6652 			 "Set DCB Config failed, err %s aq_err %s\n",
6653 			 i40e_stat_str(&pf->hw, ret),
6654 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6655 		goto out;
6656 	}
6657 
6658 	/* Changes in configuration update VEB/VSI */
6659 	i40e_dcb_reconfigure(pf);
6660 out:
6661 	/* In case of reset do not try to resume anything */
6662 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6663 		/* Re-start the VSIs if disabled */
6664 		ret = i40e_resume_port_tx(pf);
6665 		/* In case of error no point in resuming VSIs */
6666 		if (ret)
6667 			goto err;
6668 		i40e_pf_unquiesce_all_vsi(pf);
6669 	}
6670 err:
6671 	return ret;
6672 }
6673 
6674 /**
6675  * i40e_hw_dcb_config - Program new DCBX settings into HW
6676  * @pf: PF being configured
6677  * @new_cfg: New DCBX configuration
6678  *
6679  * Program DCB settings into HW and reconfigure VEB/VSIs on
6680  * given PF
6681  **/
6682 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6683 {
6684 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6685 	u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6686 	u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6687 	struct i40e_dcbx_config *old_cfg;
6688 	u8 mode[I40E_MAX_TRAFFIC_CLASS];
6689 	struct i40e_rx_pb_config pb_cfg;
6690 	struct i40e_hw *hw = &pf->hw;
6691 	u8 num_ports = hw->num_ports;
6692 	bool need_reconfig;
6693 	int ret = -EINVAL;
6694 	u8 lltc_map = 0;
6695 	u8 tc_map = 0;
6696 	u8 new_numtc;
6697 	u8 i;
6698 
6699 	dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6700 	/* Un-pack information to program the ETS HW via the shared API:
6701 	 * numtc, tcmap
6702 	 * LLTC map
6703 	 * ETS/NON-ETS arbiter mode
6704 	 * max exponent (credit refills)
6705 	 * Total number of ports
6706 	 * PFC priority bit-map
6707 	 * Priority Table
6708 	 * BW % per TC
6709 	 * Arbiter mode between UPs sharing same TC
6710 	 * TSA table (ETS or non-ETS)
6711 	 * EEE enabled or not
6712 	 * MFS TC table
6713 	 */
6714 
6715 	new_numtc = i40e_dcb_get_num_tc(new_cfg);
6716 
6717 	memset(&ets_data, 0, sizeof(ets_data));
6718 	for (i = 0; i < new_numtc; i++) {
6719 		tc_map |= BIT(i);
6720 		switch (new_cfg->etscfg.tsatable[i]) {
6721 		case I40E_IEEE_TSA_ETS:
6722 			prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6723 			ets_data.tc_bw_share_credits[i] =
6724 					new_cfg->etscfg.tcbwtable[i];
6725 			break;
6726 		case I40E_IEEE_TSA_STRICT:
6727 			prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6728 			lltc_map |= BIT(i);
6729 			ets_data.tc_bw_share_credits[i] =
6730 					I40E_DCB_STRICT_PRIO_CREDITS;
6731 			break;
6732 		default:
6733 			/* Invalid TSA type */
6734 			need_reconfig = false;
6735 			goto out;
6736 		}
6737 	}
6738 
6739 	old_cfg = &hw->local_dcbx_config;
6740 	/* Check if need reconfiguration */
6741 	need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6742 
6743 	/* If needed, enable/disable frame tagging, disable all VSIs
6744 	 * and suspend port tx
6745 	 */
6746 	if (need_reconfig) {
6747 		/* Enable DCB tagging only when more than one TC */
6748 		if (new_numtc > 1)
6749 			pf->flags |= I40E_FLAG_DCB_ENABLED;
6750 		else
6751 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6752 
6753 		set_bit(__I40E_PORT_SUSPENDED, pf->state);
6754 		/* Reconfiguration needed quiesce all VSIs */
6755 		i40e_pf_quiesce_all_vsi(pf);
6756 		ret = i40e_suspend_port_tx(pf);
6757 		if (ret)
6758 			goto err;
6759 	}
6760 
6761 	/* Configure Port ETS Tx Scheduler */
6762 	ets_data.tc_valid_bits = tc_map;
6763 	ets_data.tc_strict_priority_flags = lltc_map;
6764 	ret = i40e_aq_config_switch_comp_ets
6765 		(hw, pf->mac_seid, &ets_data,
6766 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6767 	if (ret) {
6768 		dev_info(&pf->pdev->dev,
6769 			 "Modify Port ETS failed, err %s aq_err %s\n",
6770 			 i40e_stat_str(&pf->hw, ret),
6771 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6772 		goto out;
6773 	}
6774 
6775 	/* Configure Rx ETS HW */
6776 	memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6777 	i40e_dcb_hw_set_num_tc(hw, new_numtc);
6778 	i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6779 				   I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6780 				   I40E_DCB_DEFAULT_MAX_EXPONENT,
6781 				   lltc_map);
6782 	i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6783 	i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6784 				     prio_type);
6785 	i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6786 			       new_cfg->etscfg.prioritytable);
6787 	i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6788 
6789 	/* Configure Rx Packet Buffers in HW */
6790 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6791 		mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6792 		mfs_tc[i] += I40E_PACKET_HDR_PAD;
6793 	}
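
	/* Sizing note: with a standard 1500-byte MTU each per-TC max frame
	 * size above becomes 1500 + I40E_PACKET_HDR_PAD = 1526 bytes
	 * (Ethernet header, FCS and two VLAN tags), which the pool-size
	 * calculation below uses to carve the Rx packet buffer.
	 */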
6794 
6795 	i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6796 					 false, new_cfg->pfc.pfcenable,
6797 					 mfs_tc, &pb_cfg);
6798 	i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6799 
6800 	/* Update the local Rx Packet buffer config */
6801 	pf->pb_cfg = pb_cfg;
6802 
6803 	/* Inform the FW about changes to DCB configuration */
6804 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6805 	if (ret) {
6806 		dev_info(&pf->pdev->dev,
6807 			 "DCB Updated failed, err %s aq_err %s\n",
6808 			 i40e_stat_str(&pf->hw, ret),
6809 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6810 		goto out;
6811 	}
6812 
6813 	/* Update the port DCBx configuration */
6814 	*old_cfg = *new_cfg;
6815 
6816 	/* Changes in configuration update VEB/VSI */
6817 	i40e_dcb_reconfigure(pf);
6818 out:
6819 	/* Re-start the VSIs if disabled */
6820 	if (need_reconfig) {
6821 		ret = i40e_resume_port_tx(pf);
6822 
6823 		clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6824 		/* In case of error no point in resuming VSIs */
6825 		if (ret)
6826 			goto err;
6827 
6828 		/* Wait for the PF's queues to be disabled */
6829 		ret = i40e_pf_wait_queues_disabled(pf);
6830 		if (ret) {
6831 			/* Schedule PF reset to recover */
6832 			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6833 			i40e_service_event_schedule(pf);
6834 			goto err;
6835 		} else {
6836 			i40e_pf_unquiesce_all_vsi(pf);
6837 			set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6838 			set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
6839 		}
6840 		/* registers are set, lets apply */
6841 		if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6842 			ret = i40e_hw_set_dcb_config(pf, new_cfg);
6843 	}
6844 
6845 err:
6846 	return ret;
6847 }
6848 
6849 /**
6850  * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6851  * @pf: PF being queried
6852  *
6853  * Set default DCB configuration in case DCB is to be done in SW.
6854  **/
6855 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6856 {
6857 	struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6858 	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6859 	struct i40e_hw *hw = &pf->hw;
6860 	int err;
6861 
6862 	if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6863 		/* Update the local cached instance with TC0 ETS */
6864 		memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6865 		pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6866 		pf->tmp_cfg.etscfg.maxtcs = 0;
6867 		pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6868 		pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6869 		pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6870 		pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6871 		/* FW needs one App to configure HW */
6872 		pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6873 		pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6874 		pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6875 		pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6876 
6877 		return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6878 	}
6879 
6880 	memset(&ets_data, 0, sizeof(ets_data));
6881 	ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
6882 	ets_data.tc_strict_priority_flags = 0; /* ETS */
6883 	ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
6884 
6885 	/* Enable ETS on the Physical port */
6886 	err = i40e_aq_config_switch_comp_ets
6887 		(hw, pf->mac_seid, &ets_data,
6888 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6889 	if (err) {
6890 		dev_info(&pf->pdev->dev,
6891 			 "Enable Port ETS failed, err %s aq_err %s\n",
6892 			 i40e_stat_str(&pf->hw, err),
6893 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6894 		err = -ENOENT;
6895 		goto out;
6896 	}
6897 
6898 	/* Update the local cached instance with TC0 ETS */
6899 	dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6900 	dcb_cfg->etscfg.cbs = 0;
6901 	dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6902 	dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6903 
6904 out:
6905 	return err;
6906 }
6907 
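/* A contrast sketch with hypothetical values (not used by the driver):
 * the same AQ structure filled for two classes sharing bandwidth 50/50,
 * versus the single-TC default programmed above, would look like
 *
 *	ets_data.tc_valid_bits = 0x3;		  TC0 and TC1
 *	ets_data.tc_strict_priority_flags = 0;	  both ETS-arbitrated
 *	ets_data.tc_bw_share_credits[0] = 50;	  50% to TC0
 *	ets_data.tc_bw_share_credits[1] = 50;	  50% to TC1
 */
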
6908 /**
6909  * i40e_init_pf_dcb - Initialize DCB configuration
6910  * @pf: PF being configured
6911  *
6912  * Query the current DCB configuration and cache it
6913  * in the hardware structure
6914  **/
6915 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6916 {
6917 	struct i40e_hw *hw = &pf->hw;
6918 	int err;
6919 
6920 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
6921 	 * Also do not enable DCBx if the FW LLDP agent is disabled
6922 	 */
6923 	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6924 		dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6925 		err = I40E_NOT_SUPPORTED;
6926 		goto out;
6927 	}
6928 	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6929 		dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6930 		err = i40e_dcb_sw_default_config(pf);
6931 		if (err) {
6932 			dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6933 			goto out;
6934 		}
6935 		dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6936 		pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6937 			       DCB_CAP_DCBX_VER_IEEE;
6938 		/* at init capable but disabled */
6939 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
6940 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6941 		goto out;
6942 	}
6943 	err = i40e_init_dcb(hw, true);
6944 	if (!err) {
6945 		/* Device/Function is not DCBX capable */
6946 		if ((!hw->func_caps.dcb) ||
6947 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6948 			dev_info(&pf->pdev->dev,
6949 				 "DCBX offload is not supported or is disabled for this PF.\n");
6950 		} else {
6951 			/* When status is not DISABLED, DCBX is in FW */
6952 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6953 				       DCB_CAP_DCBX_VER_IEEE;
6954 
6955 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
6956 			/* Enable DCB tagging only when more than one TC
6957 			 * or explicitly disable if only one TC
6958 			 */
6959 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6960 				pf->flags |= I40E_FLAG_DCB_ENABLED;
6961 			else
6962 				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6963 			dev_dbg(&pf->pdev->dev,
6964 				"DCBX offload is supported for this PF.\n");
6965 		}
6966 	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6967 		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6968 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6969 	} else {
6970 		dev_info(&pf->pdev->dev,
6971 			 "Query for DCB configuration failed, err %s aq_err %s\n",
6972 			 i40e_stat_str(&pf->hw, err),
6973 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6974 	}
6975 
6976 out:
6977 	return err;
6978 }
6979 #endif /* CONFIG_I40E_DCB */
6980 
6981 /**
6982  * i40e_print_link_message - print link up or down
6983  * @vsi: the VSI for which link needs a message
6984  * @isup: true if link is up, false otherwise
6985  */
6986 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6987 {
6988 	enum i40e_aq_link_speed new_speed;
6989 	struct i40e_pf *pf = vsi->back;
6990 	char *speed = "Unknown";
6991 	char *fc = "Unknown";
6992 	char *fec = "";
6993 	char *req_fec = "";
6994 	char *an = "";
6995 
6996 	if (isup)
6997 		new_speed = pf->hw.phy.link_info.link_speed;
6998 	else
6999 		new_speed = I40E_LINK_SPEED_UNKNOWN;
7000 
7001 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7002 		return;
7003 	vsi->current_isup = isup;
7004 	vsi->current_speed = new_speed;
7005 	if (!isup) {
7006 		netdev_info(vsi->netdev, "NIC Link is Down\n");
7007 		return;
7008 	}
7009 
7010 	/* Warn the user if the link speed on an NPAR-enabled partition is
7011 	 * not at least 10Gbps
7012 	 */
7013 	if (pf->hw.func_caps.npar_enable &&
7014 	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7015 	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7016 		netdev_warn(vsi->netdev,
7017 			    "The partition detected link speed that is less than 10Gbps\n");
7018 
7019 	switch (pf->hw.phy.link_info.link_speed) {
7020 	case I40E_LINK_SPEED_40GB:
7021 		speed = "40 G";
7022 		break;
7023 	case I40E_LINK_SPEED_20GB:
7024 		speed = "20 G";
7025 		break;
7026 	case I40E_LINK_SPEED_25GB:
7027 		speed = "25 G";
7028 		break;
7029 	case I40E_LINK_SPEED_10GB:
7030 		speed = "10 G";
7031 		break;
7032 	case I40E_LINK_SPEED_5GB:
7033 		speed = "5 G";
7034 		break;
7035 	case I40E_LINK_SPEED_2_5GB:
7036 		speed = "2.5 G";
7037 		break;
7038 	case I40E_LINK_SPEED_1GB:
7039 		speed = "1000 M";
7040 		break;
7041 	case I40E_LINK_SPEED_100MB:
7042 		speed = "100 M";
7043 		break;
7044 	default:
7045 		break;
7046 	}
7047 
7048 	switch (pf->hw.fc.current_mode) {
7049 	case I40E_FC_FULL:
7050 		fc = "RX/TX";
7051 		break;
7052 	case I40E_FC_TX_PAUSE:
7053 		fc = "TX";
7054 		break;
7055 	case I40E_FC_RX_PAUSE:
7056 		fc = "RX";
7057 		break;
7058 	default:
7059 		fc = "None";
7060 		break;
7061 	}
7062 
7063 	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7064 		req_fec = "None";
7065 		fec = "None";
7066 		an = "False";
7067 
7068 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7069 			an = "True";
7070 
7071 		if (pf->hw.phy.link_info.fec_info &
7072 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7073 			fec = "CL74 FC-FEC/BASE-R";
7074 		else if (pf->hw.phy.link_info.fec_info &
7075 			 I40E_AQ_CONFIG_FEC_RS_ENA)
7076 			fec = "CL108 RS-FEC";
7077 
7078 		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
7079 		 * both RS and FC are requested
7080 		 */
7081 		if (vsi->back->hw.phy.link_info.req_fec_info &
7082 		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7083 			if (vsi->back->hw.phy.link_info.req_fec_info &
7084 			    I40E_AQ_REQUEST_FEC_RS)
7085 				req_fec = "CL108 RS-FEC";
7086 			else
7087 				req_fec = "CL74 FC-FEC/BASE-R";
7088 		}
7089 		netdev_info(vsi->netdev,
7090 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7091 			    speed, req_fec, fec, an, fc);
7092 	} else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7093 		req_fec = "None";
7094 		fec = "None";
7095 		an = "False";
7096 
7097 		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7098 			an = "True";
7099 
7100 		if (pf->hw.phy.link_info.fec_info &
7101 		    I40E_AQ_CONFIG_FEC_KR_ENA)
7102 			fec = "CL74 FC-FEC/BASE-R";
7103 
7104 		if (pf->hw.phy.link_info.req_fec_info &
7105 		    I40E_AQ_REQUEST_FEC_KR)
7106 			req_fec = "CL74 FC-FEC/BASE-R";
7107 
7108 		netdev_info(vsi->netdev,
7109 			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7110 			    speed, req_fec, fec, an, fc);
7111 	} else {
7112 		netdev_info(vsi->netdev,
7113 			    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7114 			    speed, fc);
7115 	}
7116 
7117 }
7118 
7119 /**
7120  * i40e_up_complete - Finish the last steps of bringing up a connection
7121  * @vsi: the VSI being configured
7122  **/
7123 static int i40e_up_complete(struct i40e_vsi *vsi)
7124 {
7125 	struct i40e_pf *pf = vsi->back;
7126 	int err;
7127 
7128 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7129 		i40e_vsi_configure_msix(vsi);
7130 	else
7131 		i40e_configure_msi_and_legacy(vsi);
7132 
7133 	/* start rings */
7134 	err = i40e_vsi_start_rings(vsi);
7135 	if (err)
7136 		return err;
7137 
7138 	clear_bit(__I40E_VSI_DOWN, vsi->state);
7139 	i40e_napi_enable_all(vsi);
7140 	i40e_vsi_enable_irq(vsi);
7141 
7142 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7143 	    (vsi->netdev)) {
7144 		i40e_print_link_message(vsi, true);
7145 		netif_tx_start_all_queues(vsi->netdev);
7146 		netif_carrier_on(vsi->netdev);
7147 	}
7148 
7149 	/* replay FDIR SB filters */
7150 	if (vsi->type == I40E_VSI_FDIR) {
7151 		/* reset fd counters */
7152 		pf->fd_add_err = 0;
7153 		pf->fd_atr_cnt = 0;
7154 		i40e_fdir_filter_restore(vsi);
7155 	}
7156 
7157 	/* On the next run of the service_task, notify any clients of the
7158 	 * newly opened netdev
7159 	 */
7160 	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7161 	i40e_service_event_schedule(pf);
7162 
7163 	return 0;
7164 }
7165 
7166 /**
7167  * i40e_vsi_reinit_locked - Reset the VSI
7168  * @vsi: the VSI being configured
7169  *
7170  * Rebuild the ring structs after some configuration
7171  * has changed, e.g. MTU size.
7172  **/
7173 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7174 {
7175 	struct i40e_pf *pf = vsi->back;
7176 
7177 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7178 		usleep_range(1000, 2000);
7179 	i40e_down(vsi);
7180 
7181 	i40e_up(vsi);
7182 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
7183 }
7184 
7185 /**
7186  * i40e_force_link_state - Force the link status
7187  * @pf: board private structure
7188  * @is_up: whether the link state should be forced up or down
7189  **/
7190 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7191 {
7192 	struct i40e_aq_get_phy_abilities_resp abilities;
7193 	struct i40e_aq_set_phy_config config = {0};
7194 	bool non_zero_phy_type = is_up;
7195 	struct i40e_hw *hw = &pf->hw;
7196 	i40e_status err;
7197 	u64 mask;
7198 	u8 speed;
7199 
7200 	/* Card might've been put in an unstable state by other drivers
7201 	 * and applications, which can cause incorrect speed values to be
7202 	 * set on startup. In order to clear the speed registers, we call
7203 	 * get_phy_capabilities twice: once to get the initial state of
7204 	 * available speeds, and once to get the current PHY config.
7205 	 */
7206 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7207 					   NULL);
7208 	if (err) {
7209 		dev_err(&pf->pdev->dev,
7210 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7211 			i40e_stat_str(hw, err),
7212 			i40e_aq_str(hw, hw->aq.asq_last_status));
7213 		return err;
7214 	}
7215 	speed = abilities.link_speed;
7216 
7217 	/* Get the current phy config */
7218 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7219 					   NULL);
7220 	if (err) {
7221 		dev_err(&pf->pdev->dev,
7222 			"failed to get phy cap., ret =  %s last_status =  %s\n",
7223 			i40e_stat_str(hw, err),
7224 			i40e_aq_str(hw, hw->aq.asq_last_status));
7225 		return err;
7226 	}
7227 
7228 	/* If the link needs to go up, was not forced to go down, and its
7229 	 * speed values are OK, there is no need for a flap; but if
7230 	 * non_zero_phy_type was set, we still need to force the link up.
7231 	 */
7232 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7233 		non_zero_phy_type = true;
7234 	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7235 		return I40E_SUCCESS;
7236 
7237 	/* To force link we need to set bits for all supported PHY types,
7238 	 * but there are now more than 32, so we need to split the bitmap
7239 	 * across two fields.
7240 	 */
7241 	mask = I40E_PHY_TYPES_BITMASK;
7242 	config.phy_type =
7243 		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7244 	config.phy_type_ext =
7245 		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7246 	/* Copy the old settings, except for phy_type */
7247 	config.abilities = abilities.abilities;
7248 	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7249 		if (is_up)
7250 			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7251 		else
7252 			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7253 	}
7254 	if (abilities.link_speed != 0)
7255 		config.link_speed = abilities.link_speed;
7256 	else
7257 		config.link_speed = speed;
7258 	config.eee_capability = abilities.eee_capability;
7259 	config.eeer = abilities.eeer_val;
7260 	config.low_power_ctrl = abilities.d3_lpan;
7261 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7262 			    I40E_AQ_PHY_FEC_CONFIG_MASK;
7263 	err = i40e_aq_set_phy_config(hw, &config, NULL);
7264 
7265 	if (err) {
7266 		dev_err(&pf->pdev->dev,
7267 			"set phy config ret =  %s last_status =  %s\n",
7268 			i40e_stat_str(&pf->hw, err),
7269 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7270 		return err;
7271 	}
7272 
7273 	/* Update the link info */
7274 	err = i40e_update_link_info(hw);
7275 	if (err) {
7276 		/* Wait a little bit (on 40G cards it sometimes takes a really
7277 		 * long time for link to come back from the atomic reset)
7278 		 * and try once more
7279 		 */
7280 		msleep(1000);
7281 		i40e_update_link_info(hw);
7282 	}
7283 
7284 	i40e_aq_set_link_restart_an(hw, is_up, NULL);
7285 
7286 	return I40E_SUCCESS;
7287 }
7288 
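/* A minimal illustrative helper (hypothetical name, not part of the
 * driver) restating the bitmap split performed in i40e_force_link_state()
 * above: the low 32 bits of the 64-bit PHY-type mask are stored
 * little-endian in phy_type, and bits 32..39 in the one-byte
 * phy_type_ext.
 */
static void i40e_example_split_phy_mask(u64 mask, __le32 *phy_type,
					u8 *phy_type_ext)
{
	*phy_type = cpu_to_le32((u32)(mask & 0xffffffff));
	*phy_type_ext = (u8)((mask >> 32) & 0xff);
}
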
7289 /**
7290  * i40e_up - Bring the connection back up after being down
7291  * @vsi: the VSI being configured
7292  **/
7293 int i40e_up(struct i40e_vsi *vsi)
7294 {
7295 	int err;
7296 
7297 	if (vsi->type == I40E_VSI_MAIN &&
7298 	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7299 	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7300 		i40e_force_link_state(vsi->back, true);
7301 
7302 	err = i40e_vsi_configure(vsi);
7303 	if (!err)
7304 		err = i40e_up_complete(vsi);
7305 
7306 	return err;
7307 }
7308 
7309 /**
7310  * i40e_down - Shutdown the connection processing
7311  * @vsi: the VSI being stopped
7312  **/
7313 void i40e_down(struct i40e_vsi *vsi)
7314 {
7315 	int i;
7316 
7317 	/* It is assumed that the caller of this function
7318 	 * sets the vsi->state __I40E_VSI_DOWN bit.
7319 	 */
7320 	if (vsi->netdev) {
7321 		netif_carrier_off(vsi->netdev);
7322 		netif_tx_disable(vsi->netdev);
7323 	}
7324 	i40e_vsi_disable_irq(vsi);
7325 	i40e_vsi_stop_rings(vsi);
7326 	if (vsi->type == I40E_VSI_MAIN &&
7327 	   (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7328 	    vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7329 		i40e_force_link_state(vsi->back, false);
7330 	i40e_napi_disable_all(vsi);
7331 
7332 	for (i = 0; i < vsi->num_queue_pairs; i++) {
7333 		i40e_clean_tx_ring(vsi->tx_rings[i]);
7334 		if (i40e_enabled_xdp_vsi(vsi)) {
7335 			/* Make sure that in-progress ndo_xdp_xmit and
7336 			 * ndo_xsk_wakeup calls are completed.
7337 			 */
7338 			synchronize_rcu();
7339 			i40e_clean_tx_ring(vsi->xdp_rings[i]);
7340 		}
7341 		i40e_clean_rx_ring(vsi->rx_rings[i]);
7342 	}
7343 
7344 }
7345 
7346 /**
7347  * i40e_validate_mqprio_qopt - validate queue mapping info
7348  * @vsi: the VSI being configured
7349  * @mqprio_qopt: queue parameters
7350  **/
7351 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7352 				     struct tc_mqprio_qopt_offload *mqprio_qopt)
7353 {
7354 	u64 sum_max_rate = 0;
7355 	u64 max_rate = 0;
7356 	int i;
7357 
7358 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7359 	    mqprio_qopt->qopt.num_tc < 1 ||
7360 	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7361 		return -EINVAL;
7362 	for (i = 0; ; i++) {
7363 		if (!mqprio_qopt->qopt.count[i])
7364 			return -EINVAL;
7365 		if (mqprio_qopt->min_rate[i]) {
7366 			dev_err(&vsi->back->pdev->dev,
7367 				"Invalid min tx rate (greater than 0) specified\n");
7368 			return -EINVAL;
7369 		}
7370 		max_rate = mqprio_qopt->max_rate[i];
7371 		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7372 		sum_max_rate += max_rate;
7373 
7374 		if (i >= mqprio_qopt->qopt.num_tc - 1)
7375 			break;
7376 		if (mqprio_qopt->qopt.offset[i + 1] !=
7377 		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7378 			return -EINVAL;
7379 	}
7380 	if (vsi->num_queue_pairs <
7381 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7382 		dev_err(&vsi->back->pdev->dev,
7383 			"Failed to create traffic channel, insufficient number of queues.\n");
7384 		return -EINVAL;
7385 	}
7386 	if (sum_max_rate > i40e_get_link_speed(vsi)) {
7387 		dev_err(&vsi->back->pdev->dev,
7388 			"Invalid max tx rate specified\n");
7389 		return -EINVAL;
7390 	}
7391 	return 0;
7392 }
7393 
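/* A minimal illustrative helper (hypothetical, not used by the driver)
 * restating the core rule i40e_validate_mqprio_qopt() enforces above:
 * per-TC queue ranges must be contiguous, each starting where the
 * previous one ended. For example, count = {4, 4, 8} with
 * offset = {0, 4, 8} passes, while offset = {0, 4, 10} leaves a hole
 * after TC1 and is rejected.
 */
static bool i40e_example_tc_ranges_contiguous(const u16 *count,
					      const u16 *offset, u8 num_tc)
{
	u8 i;

	for (i = 0; i + 1 < num_tc; i++)
		if (offset[i + 1] != offset[i] + count[i])
			return false;
	return true;
}
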
7394 /**
7395  * i40e_vsi_set_default_tc_config - set default values for tc configuration
7396  * @vsi: the VSI being configured
7397  **/
7398 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7399 {
7400 	u16 qcount;
7401 	int i;
7402 
7403 	/* Only TC0 is enabled */
7404 	vsi->tc_config.numtc = 1;
7405 	vsi->tc_config.enabled_tc = 1;
7406 	qcount = min_t(int, vsi->alloc_queue_pairs,
7407 		       i40e_pf_get_max_q_per_tc(vsi->back));
7408 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7409 		/* For each TC that is not enabled, set the offset to the
7410 		 * default queue and allocate one queue for that TC.
7411 		 */
7412 		vsi->tc_config.tc_info[i].qoffset = 0;
7413 		if (i == 0)
7414 			vsi->tc_config.tc_info[i].qcount = qcount;
7415 		else
7416 			vsi->tc_config.tc_info[i].qcount = 1;
7417 		vsi->tc_config.tc_info[i].netdev_tc = 0;
7418 	}
7419 }
7420 
7421 /**
7422  * i40e_del_macvlan_filter
7423  * @hw: pointer to the HW structure
7424  * @seid: seid of the channel VSI
7425  * @macaddr: the mac address to apply as a filter
7426  * @aq_err: store the admin Q error
7427  *
7428  * This function deletes a mac filter on the channel VSI which serves as the
7429  * macvlan. Returns 0 on success.
7430  **/
7431 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7432 					   const u8 *macaddr, int *aq_err)
7433 {
7434 	struct i40e_aqc_remove_macvlan_element_data element;
7435 	i40e_status status;
7436 
7437 	memset(&element, 0, sizeof(element));
7438 	ether_addr_copy(element.mac_addr, macaddr);
7439 	element.vlan_tag = 0;
7440 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7441 	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7442 	*aq_err = hw->aq.asq_last_status;
7443 
7444 	return status;
7445 }
7446 
7447 /**
7448  * i40e_add_macvlan_filter
7449  * @hw: pointer to the HW structure
7450  * @seid: seid of the channel VSI
7451  * @macaddr: the mac address to apply as a filter
7452  * @aq_err: store the admin Q error
7453  *
7454  * This function adds a mac filter on the channel VSI which serves as the
7455  * macvlan. Returns 0 on success.
7456  **/
7457 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7458 					   const u8 *macaddr, int *aq_err)
7459 {
7460 	struct i40e_aqc_add_macvlan_element_data element;
7461 	i40e_status status;
7462 	u16 cmd_flags = 0;
7463 
7464 	ether_addr_copy(element.mac_addr, macaddr);
7465 	element.vlan_tag = 0;
7466 	element.queue_number = 0;
7467 	element.match_method = I40E_AQC_MM_ERR_NO_RES;
7468 	cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7469 	element.flags = cpu_to_le16(cmd_flags);
7470 	status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7471 	*aq_err = hw->aq.asq_last_status;
7472 
7473 	return status;
7474 }
7475 
7476 /**
7477  * i40e_reset_ch_rings - Reset the queue contexts in a channel
7478  * @vsi: the VSI we want to access
7479  * @ch: the channel we want to access
7480  */
7481 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7482 {
7483 	struct i40e_ring *tx_ring, *rx_ring;
7484 	u16 pf_q;
7485 	int i;
7486 
7487 	for (i = 0; i < ch->num_queue_pairs; i++) {
7488 		pf_q = ch->base_queue + i;
7489 		tx_ring = vsi->tx_rings[pf_q];
7490 		tx_ring->ch = NULL;
7491 		rx_ring = vsi->rx_rings[pf_q];
7492 		rx_ring->ch = NULL;
7493 	}
7494 }
7495 
7496 /**
7497  * i40e_free_macvlan_channels
7498  * @vsi: the VSI we want to access
7499  *
7500  * This function frees the Qs of the channel VSI from
7501  * the stack and also deletes the channel VSIs which
7502  * serve as macvlans.
7503  */
7504 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7505 {
7506 	struct i40e_channel *ch, *ch_tmp;
7507 	int ret;
7508 
7509 	if (list_empty(&vsi->macvlan_list))
7510 		return;
7511 
7512 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7513 		struct i40e_vsi *parent_vsi;
7514 
7515 		if (i40e_is_channel_macvlan(ch)) {
7516 			i40e_reset_ch_rings(vsi, ch);
7517 			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7518 			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7519 			netdev_set_sb_channel(ch->fwd->netdev, 0);
7520 			kfree(ch->fwd);
7521 			ch->fwd = NULL;
7522 		}
7523 
7524 		list_del(&ch->list);
7525 		parent_vsi = ch->parent_vsi;
7526 		if (!parent_vsi || !ch->initialized) {
7527 			kfree(ch);
7528 			continue;
7529 		}
7530 
7531 		/* remove the VSI */
7532 		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7533 					     NULL);
7534 		if (ret)
7535 			dev_err(&vsi->back->pdev->dev,
7536 				"unable to remove channel (%d) for parent VSI(%d)\n",
7537 				ch->seid, parent_vsi->seid);
7538 		kfree(ch);
7539 	}
7540 	vsi->macvlan_cnt = 0;
7541 }
7542 
7543 /**
7544  * i40e_fwd_ring_up - bring the macvlan device up
7545  * @vsi: the VSI we want to access
7546  * @vdev: macvlan netdevice
7547  * @fwd: the private fwd structure
7548  */
7549 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7550 			    struct i40e_fwd_adapter *fwd)
7551 {
7552 	int ret = 0, num_tc = 1, i, aq_err;
7553 	struct i40e_channel *ch, *ch_tmp;
7554 	struct i40e_pf *pf = vsi->back;
7555 	struct i40e_hw *hw = &pf->hw;
7556 
7557 	if (list_empty(&vsi->macvlan_list))
7558 		return -EINVAL;
7559 
7560 	/* Go through the list and find an available channel */
7561 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7562 		if (!i40e_is_channel_macvlan(ch)) {
7563 			ch->fwd = fwd;
7564 			/* record configuration for macvlan interface in vdev */
7565 			for (i = 0; i < num_tc; i++)
7566 				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7567 							     i,
7568 							     ch->num_queue_pairs,
7569 							     ch->base_queue);
7570 			for (i = 0; i < ch->num_queue_pairs; i++) {
7571 				struct i40e_ring *tx_ring, *rx_ring;
7572 				u16 pf_q;
7573 
7574 				pf_q = ch->base_queue + i;
7575 
7576 				/* Get to TX ring ptr */
7577 				tx_ring = vsi->tx_rings[pf_q];
7578 				tx_ring->ch = ch;
7579 
7580 				/* Get the RX ring ptr */
7581 				rx_ring = vsi->rx_rings[pf_q];
7582 				rx_ring->ch = ch;
7583 			}
7584 			break;
7585 		}
7586 	}
7587 
7588 	/* Guarantee all rings are updated before we update the
7589 	 * MAC address filter.
7590 	 */
7591 	wmb();
7592 
7593 	/* Add a mac filter */
7594 	ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7595 	if (ret) {
7596 		/* if we cannot add the MAC rule then disable the offload */
7597 		macvlan_release_l2fw_offload(vdev);
7598 		for (i = 0; i < ch->num_queue_pairs; i++) {
7599 			struct i40e_ring *rx_ring;
7600 			u16 pf_q;
7601 
7602 			pf_q = ch->base_queue + i;
7603 			rx_ring = vsi->rx_rings[pf_q];
7604 			rx_ring->netdev = NULL;
7605 		}
7606 		dev_info(&pf->pdev->dev,
7607 			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7608 			  i40e_stat_str(hw, ret),
7609 			  i40e_aq_str(hw, aq_err));
7610 		netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7611 	}
7612 
7613 	return ret;
7614 }
7615 
7616 /**
7617  * i40e_setup_macvlans - create the channels which will be macvlans
7618  * @vsi: the VSI we want to access
7619  * @macvlan_cnt: no. of macvlans to be set up
7620  * @qcnt: no. of Qs per macvlan
7621  * @vdev: macvlan netdevice
7622  */
7623 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7624 			       struct net_device *vdev)
7625 {
7626 	struct i40e_pf *pf = vsi->back;
7627 	struct i40e_hw *hw = &pf->hw;
7628 	struct i40e_vsi_context ctxt;
7629 	u16 sections, qmap, num_qps;
7630 	struct i40e_channel *ch;
7631 	int i, pow, ret = 0;
7632 	u8 offset = 0;
7633 
7634 	if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7635 		return -EINVAL;
7636 
7637 	num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7638 
7639 	/* find the next higher power-of-2 of num queue pairs */
7640 	pow = fls(roundup_pow_of_two(num_qps) - 1);
7641 
7642 	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7643 		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7644 
7645 	/* Setup context bits for the main VSI */
7646 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7647 	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7648 	memset(&ctxt, 0, sizeof(ctxt));
7649 	ctxt.seid = vsi->seid;
7650 	ctxt.pf_num = vsi->back->hw.pf_id;
7651 	ctxt.vf_num = 0;
7652 	ctxt.uplink_seid = vsi->uplink_seid;
7653 	ctxt.info = vsi->info;
7654 	ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7655 	ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7656 	ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7657 	ctxt.info.valid_sections |= cpu_to_le16(sections);
7658 
7659 	/* Reconfigure RSS for main VSI with new max queue count */
7660 	vsi->rss_size = max_t(u16, num_qps, qcnt);
7661 	ret = i40e_vsi_config_rss(vsi);
7662 	if (ret) {
7663 		dev_info(&pf->pdev->dev,
7664 			 "Failed to reconfig RSS for num_queues (%u)\n",
7665 			 vsi->rss_size);
7666 		return ret;
7667 	}
7668 	vsi->reconfig_rss = true;
7669 	dev_dbg(&vsi->back->pdev->dev,
7670 		"Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7671 	vsi->next_base_queue = num_qps;
7672 	vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7673 
7674 	/* Update the VSI after updating the VSI queue-mapping
7675 	 * information
7676 	 */
7677 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7678 	if (ret) {
7679 		dev_info(&pf->pdev->dev,
7680 			 "Update vsi tc config failed, err %s aq_err %s\n",
7681 			 i40e_stat_str(hw, ret),
7682 			 i40e_aq_str(hw, hw->aq.asq_last_status));
7683 		return ret;
7684 	}
7685 	/* update the local VSI info with updated queue map */
7686 	i40e_vsi_update_queue_map(vsi, &ctxt);
7687 	vsi->info.valid_sections = 0;
7688 
7689 	/* Create channels for macvlans */
7690 	INIT_LIST_HEAD(&vsi->macvlan_list);
7691 	for (i = 0; i < macvlan_cnt; i++) {
7692 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7693 		if (!ch) {
7694 			ret = -ENOMEM;
7695 			goto err_free;
7696 		}
7697 		INIT_LIST_HEAD(&ch->list);
7698 		ch->num_queue_pairs = qcnt;
7699 		if (!i40e_setup_channel(pf, vsi, ch)) {
7700 			ret = -EINVAL;
7701 			kfree(ch);
7702 			goto err_free;
7703 		}
7704 		ch->parent_vsi = vsi;
7705 		vsi->cnt_q_avail -= ch->num_queue_pairs;
7706 		vsi->macvlan_cnt++;
7707 		list_add_tail(&ch->list, &vsi->macvlan_list);
7708 	}
7709 
7710 	return ret;
7711 
7712 err_free:
7713 	dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7714 	i40e_free_macvlan_channels(vsi);
7715 
7716 	return ret;
7717 }
7718 
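/* A worked example with hypothetical queue counts for the queue-map math
 * in i40e_setup_macvlans() above: with 16 queue pairs on the main VSI,
 * 4 macvlans and 2 queues per macvlan,
 *
 *	num_qps = 16 - (4 * 2) = 8 queue pairs stay with the main VSI,
 *	pow = fls(roundup_pow_of_two(8) - 1) = fls(7) = 3,
 *
 * so qmap encodes queue offset 0 with 2^3 = 8 queues for TC0.
 */
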
7719 /**
7720  * i40e_fwd_add - configure macvlans
7721  * @netdev: net device to configure
7722  * @vdev: macvlan netdevice
7723  **/
7724 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7725 {
7726 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7727 	u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7728 	struct i40e_vsi *vsi = np->vsi;
7729 	struct i40e_pf *pf = vsi->back;
7730 	struct i40e_fwd_adapter *fwd;
7731 	int avail_macvlan, ret;
7732 
7733 	if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7734 		netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7735 		return ERR_PTR(-EINVAL);
7736 	}
7737 	if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7738 		netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7739 		return ERR_PTR(-EINVAL);
7740 	}
7741 	if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7742 		netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7743 		return ERR_PTR(-EINVAL);
7744 	}
7745 
7746 	/* The macvlan device has to be a single Q device so that the
7747 	 * tc_to_txq field can be reused to pick the tx queue.
7748 	 */
7749 	if (netif_is_multiqueue(vdev))
7750 		return ERR_PTR(-ERANGE);
7751 
7752 	if (!vsi->macvlan_cnt) {
7753 		/* reserve bit 0 for the pf device */
7754 		set_bit(0, vsi->fwd_bitmask);
7755 
7756 		/* Try to reserve as many queues as possible for macvlans. First
7757 		 * reserve three-quarters of the max vectors, then half, then a
7758 		 * quarter, calculating Qs per macvlan as we go
7759 		 */
7760 		vectors = pf->num_lan_msix;
7761 		if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7762 			/* allocate 4 Qs per macvlan and 32 Qs to the PF */
7763 			q_per_macvlan = 4;
7764 			macvlan_cnt = (vectors - 32) / 4;
7765 		} else if (vectors <= 64 && vectors > 32) {
7766 			/* allocate 2 Qs per macvlan and 16 Qs to the PF */
7767 			q_per_macvlan = 2;
7768 			macvlan_cnt = (vectors - 16) / 2;
7769 		} else if (vectors <= 32 && vectors > 16) {
7770 			/* allocate 1 Q per macvlan and 16 Qs to the PF */
7771 			q_per_macvlan = 1;
7772 			macvlan_cnt = vectors - 16;
7773 		} else if (vectors <= 16 && vectors > 8) {
7774 			/* allocate 1 Q per macvlan and 8 Qs to the PF */
7775 			q_per_macvlan = 1;
7776 			macvlan_cnt = vectors - 8;
7777 		} else {
7778 			/* allocate 1 Q per macvlan and 1 Q to the PF */
7779 			q_per_macvlan = 1;
7780 			macvlan_cnt = vectors - 1;
7781 		}
7782 
7783 		if (macvlan_cnt == 0)
7784 			return ERR_PTR(-EBUSY);
7785 
7786 		/* Quiesce VSI queues */
7787 		i40e_quiesce_vsi(vsi);
7788 
7789 		/* sets up the macvlans but does not "enable" them */
7790 		ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7791 					  vdev);
7792 		if (ret)
7793 			return ERR_PTR(ret);
7794 
7795 		/* Unquiesce VSI */
7796 		i40e_unquiesce_vsi(vsi);
7797 	}
7798 	avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7799 					    vsi->macvlan_cnt);
7800 	if (avail_macvlan >= I40E_MAX_MACVLANS)
7801 		return ERR_PTR(-EBUSY);
7802 
7803 	/* create the fwd struct */
7804 	fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7805 	if (!fwd)
7806 		return ERR_PTR(-ENOMEM);
7807 
7808 	set_bit(avail_macvlan, vsi->fwd_bitmask);
7809 	fwd->bit_no = avail_macvlan;
7810 	netdev_set_sb_channel(vdev, avail_macvlan);
7811 	fwd->netdev = vdev;
7812 
7813 	if (!netif_running(netdev))
7814 		return fwd;
7815 
7816 	/* Set fwd ring up */
7817 	ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7818 	if (ret) {
7819 		/* unbind the queues and drop the subordinate channel config */
7820 		netdev_unbind_sb_channel(netdev, vdev);
7821 		netdev_set_sb_channel(vdev, 0);
7822 
7823 		kfree(fwd);
7824 		return ERR_PTR(-EINVAL);
7825 	}
7826 
7827 	return fwd;
7828 }
7829 
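/* A worked example with a hypothetical vector count for the tiered
 * reservation in i40e_fwd_add() above: with pf->num_lan_msix = 48, the
 * "vectors <= 64 && vectors > 32" branch is taken, so each macvlan gets
 * q_per_macvlan = 2 queues and macvlan_cnt = (48 - 16) / 2 = 16 channels,
 * leaving 16 queues for the PF itself.
 */
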
7830 /**
7831  * i40e_del_all_macvlans - Delete all the mac filters on the channels
7832  * @vsi: the VSI we want to access
7833  */
7834 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7835 {
7836 	struct i40e_channel *ch, *ch_tmp;
7837 	struct i40e_pf *pf = vsi->back;
7838 	struct i40e_hw *hw = &pf->hw;
7839 	int aq_err, ret = 0;
7840 
7841 	if (list_empty(&vsi->macvlan_list))
7842 		return;
7843 
7844 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7845 		if (i40e_is_channel_macvlan(ch)) {
7846 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7847 						      i40e_channel_mac(ch),
7848 						      &aq_err);
7849 			if (!ret) {
7850 				/* Reset queue contexts */
7851 				i40e_reset_ch_rings(vsi, ch);
7852 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7853 				netdev_unbind_sb_channel(vsi->netdev,
7854 							 ch->fwd->netdev);
7855 				netdev_set_sb_channel(ch->fwd->netdev, 0);
7856 				kfree(ch->fwd);
7857 				ch->fwd = NULL;
7858 			}
7859 		}
7860 	}
7861 }
7862 
7863 /**
7864  * i40e_fwd_del - delete macvlan interfaces
7865  * @netdev: net device to configure
7866  * @vdev: macvlan netdevice
7867  */
7868 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7869 {
7870 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7871 	struct i40e_fwd_adapter *fwd = vdev;
7872 	struct i40e_channel *ch, *ch_tmp;
7873 	struct i40e_vsi *vsi = np->vsi;
7874 	struct i40e_pf *pf = vsi->back;
7875 	struct i40e_hw *hw = &pf->hw;
7876 	int aq_err, ret = 0;
7877 
7878 	/* Find the channel associated with the macvlan and del mac filter */
7879 	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7880 		if (i40e_is_channel_macvlan(ch) &&
7881 		    ether_addr_equal(i40e_channel_mac(ch),
7882 				     fwd->netdev->dev_addr)) {
7883 			ret = i40e_del_macvlan_filter(hw, ch->seid,
7884 						      i40e_channel_mac(ch),
7885 						      &aq_err);
7886 			if (!ret) {
7887 				/* Reset queue contexts */
7888 				i40e_reset_ch_rings(vsi, ch);
7889 				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7890 				netdev_unbind_sb_channel(netdev, fwd->netdev);
7891 				netdev_set_sb_channel(fwd->netdev, 0);
7892 				kfree(ch->fwd);
7893 				ch->fwd = NULL;
7894 			} else {
7895 				dev_info(&pf->pdev->dev,
7896 					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7897 					  i40e_stat_str(hw, ret),
7898 					  i40e_aq_str(hw, aq_err));
7899 			}
7900 			break;
7901 		}
7902 	}
7903 }
7904 
7905 /**
7906  * i40e_setup_tc - configure multiple traffic classes
7907  * @netdev: net device to configure
7908  * @type_data: tc offload data
7909  **/
7910 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7911 {
7912 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7913 	struct i40e_netdev_priv *np = netdev_priv(netdev);
7914 	struct i40e_vsi *vsi = np->vsi;
7915 	struct i40e_pf *pf = vsi->back;
7916 	u8 enabled_tc = 0, num_tc, hw;
7917 	bool need_reset = false;
7918 	int old_queue_pairs;
7919 	int ret = -EINVAL;
7920 	u16 mode;
7921 	int i;
7922 
7923 	old_queue_pairs = vsi->num_queue_pairs;
7924 	num_tc = mqprio_qopt->qopt.num_tc;
7925 	hw = mqprio_qopt->qopt.hw;
7926 	mode = mqprio_qopt->mode;
7927 	if (!hw) {
7928 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7929 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7930 		goto config_tc;
7931 	}
7932 
7933 	/* Check if MFP enabled */
7934 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7935 		netdev_info(netdev,
7936 			    "Configuring TC not supported in MFP mode\n");
7937 		return ret;
7938 	}
7939 	switch (mode) {
7940 	case TC_MQPRIO_MODE_DCB:
7941 		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7942 
7943 		/* Check if DCB enabled to continue */
7944 		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7945 			netdev_info(netdev,
7946 				    "DCB is not enabled for adapter\n");
7947 			return ret;
7948 		}
7949 
7950 		/* Check whether tc count is within enabled limit */
7951 		if (num_tc > i40e_pf_get_num_tc(pf)) {
7952 			netdev_info(netdev,
7953 				    "TC count greater than enabled on link for adapter\n");
7954 			return ret;
7955 		}
7956 		break;
7957 	case TC_MQPRIO_MODE_CHANNEL:
7958 		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7959 			netdev_info(netdev,
7960 				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7961 			return ret;
7962 		}
7963 		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7964 			return ret;
7965 		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7966 		if (ret)
7967 			return ret;
7968 		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7969 		       sizeof(*mqprio_qopt));
7970 		pf->flags |= I40E_FLAG_TC_MQPRIO;
7971 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7972 		break;
7973 	default:
7974 		return -EINVAL;
7975 	}
7976 
7977 config_tc:
7978 	/* Generate TC map for number of tc requested */
7979 	for (i = 0; i < num_tc; i++)
7980 		enabled_tc |= BIT(i);
7981 
7982 	/* Requesting same TC configuration as already enabled */
7983 	if (enabled_tc == vsi->tc_config.enabled_tc &&
7984 	    mode != TC_MQPRIO_MODE_CHANNEL)
7985 		return 0;
7986 
7987 	/* Quiesce VSI queues */
7988 	i40e_quiesce_vsi(vsi);
7989 
7990 	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7991 		i40e_remove_queue_channels(vsi);
7992 
7993 	/* Configure VSI for enabled TCs */
7994 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
7995 	if (ret) {
7996 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7997 			    vsi->seid);
7998 		need_reset = true;
7999 		goto exit;
8000 	} else if (enabled_tc &&
8001 		   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8002 		netdev_info(netdev,
8003 			    "Failed to create channel. Override queues (%u) not power of 2\n",
8004 			    vsi->tc_config.tc_info[0].qcount);
8005 		ret = -EINVAL;
8006 		need_reset = true;
8007 		goto exit;
8008 	}
8009 
8010 	dev_info(&vsi->back->pdev->dev,
8011 		 "Setup channel (id:%u) utilizing num_queues %d\n",
8012 		 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8013 
8014 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
8015 		if (vsi->mqprio_qopt.max_rate[0]) {
8016 			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8017 
8018 			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
8019 			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8020 			if (!ret) {
8021 				u64 credits = max_tx_rate;
8022 
8023 				do_div(credits, I40E_BW_CREDIT_DIVISOR);
8024 				dev_dbg(&vsi->back->pdev->dev,
8025 					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8026 					max_tx_rate,
8027 					credits,
8028 					vsi->seid);
8029 			} else {
8030 				need_reset = true;
8031 				goto exit;
8032 			}
8033 		}
8034 		ret = i40e_configure_queue_channels(vsi);
8035 		if (ret) {
8036 			vsi->num_queue_pairs = old_queue_pairs;
8037 			netdev_info(netdev,
8038 				    "Failed configuring queue channels\n");
8039 			need_reset = true;
8040 			goto exit;
8041 		}
8042 	}
8043 
8044 exit:
8045 	/* Reset the configuration data to defaults, only TC0 is enabled */
8046 	if (need_reset) {
8047 		i40e_vsi_set_default_tc_config(vsi);
8048 		need_reset = false;
8049 	}
8050 
8051 	/* Unquiesce VSI */
8052 	i40e_unquiesce_vsi(vsi);
8053 	return ret;
8054 }
8055 
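/* A worked example with a hypothetical rate for the bandwidth math in
 * i40e_setup_tc() above: the requested max_rate is first reduced to Mbps
 * with do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR) and then expressed in
 * the scheduler's 50 Mbps credit units via I40E_BW_CREDIT_DIVISOR, so a
 * 1000 Mbps cap is programmed as 1000 / 50 = 20 credits, matching the
 * "count of 50Mbps" wording of the debug log.
 */
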
8056 /**
8057  * i40e_set_cld_element - sets cloud filter element data
8058  * @filter: cloud filter rule
8059  * @cld: ptr to cloud filter element data
8060  *
8061  * This is a helper function to copy data into the cloud filter element
8062  **/
8063 static inline void
8064 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8065 		     struct i40e_aqc_cloud_filters_element_data *cld)
8066 {
8067 	u32 ipa;
8068 	int i;
8069 
8070 	memset(cld, 0, sizeof(*cld));
8071 	ether_addr_copy(cld->outer_mac, filter->dst_mac);
8072 	ether_addr_copy(cld->inner_mac, filter->src_mac);
8073 
8074 	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8075 		return;
8076 
8077 	if (filter->n_proto == ETH_P_IPV6) {
8078 #define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
8079 		for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8080 			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8081 
8082 			*(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8083 		}
8084 	} else {
8085 		ipa = be32_to_cpu(filter->dst_ipv4);
8086 
8087 		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8088 	}
8089 
8090 	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8091 
8092 	/* tenant_id is not supported by FW now; once the support is enabled,
8093 	 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
8094 	 */
8095 	if (filter->tenant_id)
8096 		return;
8097 }
8098 
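/* A worked example with a hypothetical address for the IPv6 handling in
 * i40e_set_cld_element() above: the loop writes the four 32-bit words of
 * the destination address in reverse order, each converted from
 * big-endian to little-endian. For 2001:db8::1 the big-endian words are
 * { 0x20010db8, 0, 0, 0x00000001 }, so raw_v6.data receives
 * 0x00000001, 0, 0, 0x20010db8, each stored little-endian.
 */
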
8099 /**
8100  * i40e_add_del_cloud_filter - Add/del cloud filter
8101  * @vsi: pointer to VSI
8102  * @filter: cloud filter rule
8103  * @add: if true, add, if false, delete
8104  *
8105  * Add or delete a cloud filter for a specific flow spec.
8106  * Returns 0 if the filter was successfully added.
8107  **/
8108 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8109 			      struct i40e_cloud_filter *filter, bool add)
8110 {
8111 	struct i40e_aqc_cloud_filters_element_data cld_filter;
8112 	struct i40e_pf *pf = vsi->back;
8113 	int ret;
8114 	static const u16 flag_table[128] = {
8115 		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
8116 			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8117 		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
8118 			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8119 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
8120 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8121 		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8122 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8123 		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8124 			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8125 		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8126 			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8127 		[I40E_CLOUD_FILTER_FLAGS_IIP] =
8128 			I40E_AQC_ADD_CLOUD_FILTER_IIP,
8129 	};
8130 
8131 	if (filter->flags >= ARRAY_SIZE(flag_table))
8132 		return I40E_ERR_CONFIG;
8133 
8134 	memset(&cld_filter, 0, sizeof(cld_filter));
8135 
8136 	/* copy element needed to add cloud filter from filter */
8137 	i40e_set_cld_element(filter, &cld_filter);
8138 
8139 	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8140 		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8141 					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8142 
8143 	if (filter->n_proto == ETH_P_IPV6)
8144 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8145 						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8146 	else
8147 		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8148 						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8149 
8150 	if (add)
8151 		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8152 						&cld_filter, 1);
8153 	else
8154 		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8155 						&cld_filter, 1);
8156 	if (ret)
8157 		dev_dbg(&pf->pdev->dev,
8158 			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8159 			add ? "add" : "delete", filter->dst_port, ret,
8160 			pf->hw.aq.asq_last_status);
8161 	else
8162 		dev_info(&pf->pdev->dev,
8163 			 "%s cloud filter for VSI: %d\n",
8164 			 add ? "Added" : "Deleted", filter->seid);
8165 	return ret;
8166 }
8167 
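/* A minimal illustrative helper (hypothetical, not part of the driver)
 * restating the sparse flag_table lookup above: the filter's flags value
 * indexes the table directly after the bounds check, and any slot that
 * was never initialized reads back as 0, leaving only the tunnel-type
 * and IPV4/IPV6 bits in the command flags.
 */
static u16 i40e_example_cloud_filter_flags(const u16 *table, size_t n,
					   u8 flags)
{
	return flags < n ? table[flags] : 0;
}
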
8168 /**
8169  * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8170  * @vsi: pointer to VSI
8171  * @filter: cloud filter rule
8172  * @add: if true, add, if false, delete
8173  *
8174  * Add or delete a cloud filter for a specific flow spec using big buffer.
8175  * Returns 0 if the filter was successfully added.
8176  **/
8177 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8178 				      struct i40e_cloud_filter *filter,
8179 				      bool add)
8180 {
8181 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
8182 	struct i40e_pf *pf = vsi->back;
8183 	int ret;
8184 
8185 	/* Both (src/dst) valid mac_addr are not supported */
8186 	if ((is_valid_ether_addr(filter->dst_mac) &&
8187 	     is_valid_ether_addr(filter->src_mac)) ||
8188 	    (is_multicast_ether_addr(filter->dst_mac) &&
8189 	     is_multicast_ether_addr(filter->src_mac)))
8190 		return -EOPNOTSUPP;
8191 
8192 	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8193 	 * ports are not supported via big buffer now.
8194 	 */
8195 	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8196 		return -EOPNOTSUPP;
8197 
8198 	/* adding filter using src_port/src_ip is not supported at this stage */
8199 	if (filter->src_port ||
8200 	    (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8201 	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
8202 		return -EOPNOTSUPP;
8203 
8204 	memset(&cld_filter, 0, sizeof(cld_filter));
8205 
8206 	/* copy element needed to add cloud filter from filter */
8207 	i40e_set_cld_element(filter, &cld_filter.element);
8208 
8209 	if (is_valid_ether_addr(filter->dst_mac) ||
8210 	    is_valid_ether_addr(filter->src_mac) ||
8211 	    is_multicast_ether_addr(filter->dst_mac) ||
8212 	    is_multicast_ether_addr(filter->src_mac)) {
8213 		/* MAC + IP : unsupported mode */
8214 		if (filter->dst_ipv4)
8215 			return -EOPNOTSUPP;
8216 
8217 		/* since we validated that the L4 port must be valid before
8218 		 * we get here, start with the respective "flags" value
8219 		 * and update it depending on whether a VLAN is present
8220 		 */
8221 		cld_filter.element.flags =
8222 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8223 
8224 		if (filter->vlan_id) {
8225 			cld_filter.element.flags =
8226 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8227 		}
8228 
8229 	} else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8230 		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8231 		cld_filter.element.flags =
8232 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8233 		if (filter->n_proto == ETH_P_IPV6)
8234 			cld_filter.element.flags |=
8235 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8236 		else
8237 			cld_filter.element.flags |=
8238 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8239 	} else {
8240 		dev_err(&pf->pdev->dev,
8241 			"either mac or ip has to be valid for cloud filter\n");
8242 		return -EINVAL;
8243 	}
8244 
8245 	/* Now copy the L4 port into bytes 6..7 of the general fields */
8246 	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8247 						be16_to_cpu(filter->dst_port);
8248 
8249 	if (add) {
8250 		/* Validate current device switch mode, change if necessary */
8251 		ret = i40e_validate_and_set_switch_mode(vsi);
8252 		if (ret) {
8253 			dev_err(&pf->pdev->dev,
8254 				"failed to set switch mode, ret %d\n",
8255 				ret);
8256 			return ret;
8257 		}
8258 
8259 		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8260 						   &cld_filter, 1);
8261 	} else {
8262 		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8263 						   &cld_filter, 1);
8264 	}
8265 
8266 	if (ret)
8267 		dev_dbg(&pf->pdev->dev,
8268 			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8269 			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8270 	else
8271 		dev_info(&pf->pdev->dev,
8272 			 "%s cloud filter for VSI: %d, L4 port: %d\n",
8273 			 add ? "add" : "delete", filter->seid,
8274 			 ntohs(filter->dst_port));
8275 	return ret;
8276 }
8277 
8278 /**
8279  * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8280  * @vsi: Pointer to VSI
8281  * @f: Pointer to struct flow_cls_offload
8282  * @filter: Pointer to cloud filter structure
8283  *
8284  **/
8285 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8286 				 struct flow_cls_offload *f,
8287 				 struct i40e_cloud_filter *filter)
8288 {
8289 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8290 	struct flow_dissector *dissector = rule->match.dissector;
8291 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8292 	struct i40e_pf *pf = vsi->back;
8293 	u8 field_flags = 0;
8294 
8295 	if (dissector->used_keys &
8296 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8297 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
8298 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8299 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
8300 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8301 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8302 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
8303 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8304 		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8305 			dissector->used_keys);
8306 		return -EOPNOTSUPP;
8307 	}
8308 
8309 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8310 		struct flow_match_enc_keyid match;
8311 
8312 		flow_rule_match_enc_keyid(rule, &match);
8313 		if (match.mask->keyid != 0)
8314 			field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8315 
8316 		filter->tenant_id = be32_to_cpu(match.key->keyid);
8317 	}
8318 
8319 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8320 		struct flow_match_basic match;
8321 
8322 		flow_rule_match_basic(rule, &match);
8323 		n_proto_key = ntohs(match.key->n_proto);
8324 		n_proto_mask = ntohs(match.mask->n_proto);
8325 
8326 		if (n_proto_key == ETH_P_ALL) {
8327 			n_proto_key = 0;
8328 			n_proto_mask = 0;
8329 		}
8330 		filter->n_proto = n_proto_key & n_proto_mask;
8331 		filter->ip_proto = match.key->ip_proto;
8332 	}
8333 
8334 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8335 		struct flow_match_eth_addrs match;
8336 
8337 		flow_rule_match_eth_addrs(rule, &match);
8338 
8339 		/* use is_broadcast and is_zero to check for all 0xff or all 0 */
8340 		if (!is_zero_ether_addr(match.mask->dst)) {
8341 			if (is_broadcast_ether_addr(match.mask->dst)) {
8342 				field_flags |= I40E_CLOUD_FIELD_OMAC;
8343 			} else {
8344 				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8345 					match.mask->dst);
8346 				return I40E_ERR_CONFIG;
8347 			}
8348 		}
8349 
8350 		if (!is_zero_ether_addr(match.mask->src)) {
8351 			if (is_broadcast_ether_addr(match.mask->src)) {
8352 				field_flags |= I40E_CLOUD_FIELD_IMAC;
8353 			} else {
8354 				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8355 					match.mask->src);
8356 				return I40E_ERR_CONFIG;
8357 			}
8358 		}
8359 		ether_addr_copy(filter->dst_mac, match.key->dst);
8360 		ether_addr_copy(filter->src_mac, match.key->src);
8361 	}
8362 
8363 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8364 		struct flow_match_vlan match;
8365 
8366 		flow_rule_match_vlan(rule, &match);
8367 		if (match.mask->vlan_id) {
8368 			if (match.mask->vlan_id == VLAN_VID_MASK) {
8369 				field_flags |= I40E_CLOUD_FIELD_IVLAN;
8370 
8371 			} else {
8372 				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8373 					match.mask->vlan_id);
8374 				return I40E_ERR_CONFIG;
8375 			}
8376 		}
8377 
8378 		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8379 	}
8380 
8381 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8382 		struct flow_match_control match;
8383 
8384 		flow_rule_match_control(rule, &match);
8385 		addr_type = match.key->addr_type;
8386 	}
8387 
8388 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8389 		struct flow_match_ipv4_addrs match;
8390 
8391 		flow_rule_match_ipv4_addrs(rule, &match);
8392 		if (match.mask->dst) {
8393 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8394 				field_flags |= I40E_CLOUD_FIELD_IIP;
8395 			} else {
8396 				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8397 					&match.mask->dst);
8398 				return I40E_ERR_CONFIG;
8399 			}
8400 		}
8401 
8402 		if (match.mask->src) {
8403 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
8404 				field_flags |= I40E_CLOUD_FIELD_IIP;
8405 			} else {
8406 				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8407 					&match.mask->src);
8408 				return I40E_ERR_CONFIG;
8409 			}
8410 		}
8411 
8412 		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8413 			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8414 			return I40E_ERR_CONFIG;
8415 		}
8416 		filter->dst_ipv4 = match.key->dst;
8417 		filter->src_ipv4 = match.key->src;
8418 	}
8419 
8420 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8421 		struct flow_match_ipv6_addrs match;
8422 
8423 		flow_rule_match_ipv6_addrs(rule, &match);
8424 
8425 		/* src and dest IPv6 addresses should not be LOOPBACK
8426 		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
8427 		 */
8428 		if (ipv6_addr_loopback(&match.key->dst) ||
8429 		    ipv6_addr_loopback(&match.key->src)) {
8430 			dev_err(&pf->pdev->dev,
8431 				"Bad ipv6, addr is LOOPBACK\n");
8432 			return I40E_ERR_CONFIG;
8433 		}
8434 		if (!ipv6_addr_any(&match.mask->dst) ||
8435 		    !ipv6_addr_any(&match.mask->src))
8436 			field_flags |= I40E_CLOUD_FIELD_IIP;
8437 
8438 		memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8439 		       sizeof(filter->src_ipv6));
8440 		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8441 		       sizeof(filter->dst_ipv6));
8442 	}
8443 
8444 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8445 		struct flow_match_ports match;
8446 
8447 		flow_rule_match_ports(rule, &match);
8448 		if (match.mask->src) {
8449 			if (match.mask->src == cpu_to_be16(0xffff)) {
8450 				field_flags |= I40E_CLOUD_FIELD_IIP;
8451 			} else {
8452 				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8453 					be16_to_cpu(match.mask->src));
8454 				return I40E_ERR_CONFIG;
8455 			}
8456 		}
8457 
8458 		if (match.mask->dst) {
8459 			if (match.mask->dst == cpu_to_be16(0xffff)) {
8460 				field_flags |= I40E_CLOUD_FIELD_IIP;
8461 			} else {
8462 				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8463 					be16_to_cpu(match.mask->dst));
8464 				return I40E_ERR_CONFIG;
8465 			}
8466 		}
8467 
8468 		filter->dst_port = match.key->dst;
8469 		filter->src_port = match.key->src;
8470 
8471 		switch (filter->ip_proto) {
8472 		case IPPROTO_TCP:
8473 		case IPPROTO_UDP:
8474 			break;
8475 		default:
8476 			dev_err(&pf->pdev->dev,
8477 				"Only UDP and TCP transport are supported\n");
8478 			return -EINVAL;
8479 		}
8480 	}
8481 	filter->flags = field_flags;
8482 	return 0;
8483 }
8484 
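/* A minimal illustrative helper (hypothetical, not used by the driver)
 * restating the screen at the top of i40e_parse_cls_flower(): a rule is
 * parsed only if every dissector key it uses falls inside the supported
 * set, i.e. no bit of used_keys lies outside the allowed mask.
 */
static bool i40e_example_keys_supported(u32 used_keys, u32 supported_keys)
{
	return !(used_keys & ~supported_keys);
}
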
8485 /**
8486  * i40e_handle_tclass: Forward to a traffic class on the device
8487  * i40e_handle_tclass - Forward to a traffic class on the device
8488  * @tc: traffic class index on the device
8489  * @filter: Pointer to cloud filter structure
8490  *
8491  **/
8492 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8493 			      struct i40e_cloud_filter *filter)
8494 {
8495 	struct i40e_channel *ch, *ch_tmp;
8496 
8497 	/* direct to a traffic class on the same device */
8498 	if (tc == 0) {
8499 		filter->seid = vsi->seid;
8500 		return 0;
8501 	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8502 		if (!filter->dst_port) {
8503 			dev_err(&vsi->back->pdev->dev,
8504 				"Specify destination port to direct to traffic class that is not default\n");
8505 			return -EINVAL;
8506 		}
8507 		if (list_empty(&vsi->ch_list))
8508 			return -EINVAL;
8509 		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8510 					 list) {
8511 			if (ch->seid == vsi->tc_seid_map[tc])
8512 				filter->seid = ch->seid;
8513 		}
8514 		return 0;
8515 	}
8516 	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8517 	return -EINVAL;
8518 }
8519 
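/* A worked example with hypothetical seids for the lookup in
 * i40e_handle_tclass() above: for a filter directed at TC2,
 * vsi->tc_seid_map[2] names the channel VSI created for that class; the
 * walk of vsi->ch_list finds the channel whose ch->seid matches, and the
 * cloud filter is programmed against that seid instead of the main
 * VSI's.
 */
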
8520 /**
8521  * i40e_configure_clsflower - Configure tc flower filters
8522  * @vsi: Pointer to VSI
8523  * @cls_flower: Pointer to struct flow_cls_offload
8524  *
8525  **/
8526 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8527 				    struct flow_cls_offload *cls_flower)
8528 {
8529 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8530 	struct i40e_cloud_filter *filter = NULL;
8531 	struct i40e_pf *pf = vsi->back;
8532 	int err = 0;
8533 
8534 	if (tc < 0) {
8535 		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8536 		return -EOPNOTSUPP;
8537 	}
8538 
8539 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8540 	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8541 		return -EBUSY;
8542 
8543 	if (pf->fdir_pf_active_filters ||
8544 	    (!hlist_empty(&pf->fdir_filter_list))) {
8545 		dev_err(&vsi->back->pdev->dev,
8546 			"Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
8547 		return -EINVAL;
8548 	}
8549 
8550 	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8551 		dev_err(&vsi->back->pdev->dev,
8552 			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8553 		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8554 		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8555 	}
8556 
8557 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8558 	if (!filter)
8559 		return -ENOMEM;
8560 
8561 	filter->cookie = cls_flower->cookie;
8562 
8563 	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8564 	if (err < 0)
8565 		goto err;
8566 
8567 	err = i40e_handle_tclass(vsi, tc, filter);
8568 	if (err < 0)
8569 		goto err;
8570 
8571 	/* Add cloud filter */
8572 	if (filter->dst_port)
8573 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8574 	else
8575 		err = i40e_add_del_cloud_filter(vsi, filter, true);
8576 
8577 	if (err) {
8578 		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8579 			err);
8580 		goto err;
8581 	}
8582 
8583 	/* add filter to the ordered list */
8584 	INIT_HLIST_NODE(&filter->cloud_node);
8585 
8586 	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8587 
8588 	pf->num_cloud_filters++;
8589 
8590 	return err;
8591 err:
8592 	kfree(filter);
8593 	return err;
8594 }
8595 
8596 /**
8597  * i40e_find_cloud_filter - Find the cloud filter in the list
8598  * @vsi: Pointer to VSI
8599  * @cookie: filter specific cookie
8600  *
8601  **/
8602 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8603 							unsigned long *cookie)
8604 {
8605 	struct i40e_cloud_filter *filter = NULL;
8606 	struct hlist_node *node2;
8607 
8608 	hlist_for_each_entry_safe(filter, node2,
8609 				  &vsi->back->cloud_filter_list, cloud_node)
8610 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8611 			return filter;
8612 	return NULL;
8613 }
8614 
8615 /**
8616  * i40e_delete_clsflower - Remove tc flower filters
8617  * @vsi: Pointer to VSI
8618  * @cls_flower: Pointer to struct flow_cls_offload
8619  *
8620  **/
8621 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8622 				 struct flow_cls_offload *cls_flower)
8623 {
8624 	struct i40e_cloud_filter *filter = NULL;
8625 	struct i40e_pf *pf = vsi->back;
8626 	int err = 0;
8627 
8628 	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8629 
8630 	if (!filter)
8631 		return -EINVAL;
8632 
8633 	hash_del(&filter->cloud_node);
8634 
8635 	if (filter->dst_port)
8636 		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8637 	else
8638 		err = i40e_add_del_cloud_filter(vsi, filter, false);
8639 
8640 	kfree(filter);
8641 	if (err) {
8642 		dev_err(&pf->pdev->dev,
8643 			"Failed to delete cloud filter, err %s\n",
8644 			i40e_stat_str(&pf->hw, err));
8645 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8646 	}
8647 
8648 	pf->num_cloud_filters--;
8649 	if (!pf->num_cloud_filters)
8650 		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8651 		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8652 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8653 			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8654 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8655 		}
8656 	return 0;
8657 }
8658 
8659 /**
8660  * i40e_setup_tc_cls_flower - flower classifier offloads
8661  * @np: net device to configure
8662  * @cls_flower: offload data
8663  **/
8664 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8665 				    struct flow_cls_offload *cls_flower)
8666 {
8667 	struct i40e_vsi *vsi = np->vsi;
8668 
8669 	switch (cls_flower->command) {
8670 	case FLOW_CLS_REPLACE:
8671 		return i40e_configure_clsflower(vsi, cls_flower);
8672 	case FLOW_CLS_DESTROY:
8673 		return i40e_delete_clsflower(vsi, cls_flower);
8674 	case FLOW_CLS_STATS:
8675 		return -EOPNOTSUPP;
8676 	default:
8677 		return -EOPNOTSUPP;
8678 	}
8679 }
8680 
8681 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8682 				  void *cb_priv)
8683 {
8684 	struct i40e_netdev_priv *np = cb_priv;
8685 
8686 	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8687 		return -EOPNOTSUPP;
8688 
8689 	switch (type) {
8690 	case TC_SETUP_CLSFLOWER:
8691 		return i40e_setup_tc_cls_flower(np, type_data);
8692 
8693 	default:
8694 		return -EOPNOTSUPP;
8695 	}
8696 }
8697 
8698 static LIST_HEAD(i40e_block_cb_list);
8699 
8700 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8701 			   void *type_data)
8702 {
8703 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8704 
8705 	switch (type) {
8706 	case TC_SETUP_QDISC_MQPRIO:
8707 		return i40e_setup_tc(netdev, type_data);
8708 	case TC_SETUP_BLOCK:
8709 		return flow_block_cb_setup_simple(type_data,
8710 						  &i40e_block_cb_list,
8711 						  i40e_setup_tc_block_cb,
8712 						  np, np, true);
8713 	default:
8714 		return -EOPNOTSUPP;
8715 	}
8716 }
8717 
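/* Illustration (editorial, not part of the driver): the two offload entry
 * points above are normally exercised from userspace with the tc tool.
 * A hedged sketch, assuming a PF netdev named eth0 with queue channels
 * already configured:
 *
 *   # mqprio maps to TC_SETUP_QDISC_MQPRIO -> i40e_setup_tc()
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 *
 *   # flower maps to TC_SETUP_BLOCK -> i40e_setup_tc_block_cb(),
 *   #   FLOW_CLS_REPLACE -> i40e_configure_clsflower()
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_ip 192.168.1.1 dst_port 80 \
 *           skip_sw hw_tc 1
 *
 * Deleting that filter issues FLOW_CLS_DESTROY, which lands in
 * i40e_delete_clsflower(); FLOW_CLS_STATS is rejected with -EOPNOTSUPP.
 */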
8718 /**
8719  * i40e_open - Called when a network interface is made active
8720  * @netdev: network interface device structure
8721  *
8722  * The open entry point is called when a network interface is made
8723  * active by the system (IFF_UP).  At this point all resources needed
8724  * for transmit and receive operations are allocated, the interrupt
8725  * handler is registered with the OS, the netdev watchdog subtask is
8726  * enabled, and the stack is notified that the interface is ready.
8727  *
8728  * Returns 0 on success, negative value on failure
8729  **/
8730 int i40e_open(struct net_device *netdev)
8731 {
8732 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8733 	struct i40e_vsi *vsi = np->vsi;
8734 	struct i40e_pf *pf = vsi->back;
8735 	int err;
8736 
8737 	/* disallow open during test or if eeprom is broken */
8738 	if (test_bit(__I40E_TESTING, pf->state) ||
8739 	    test_bit(__I40E_BAD_EEPROM, pf->state))
8740 		return -EBUSY;
8741 
8742 	netif_carrier_off(netdev);
8743 
8744 	if (i40e_force_link_state(pf, true))
8745 		return -EAGAIN;
8746 
8747 	err = i40e_vsi_open(vsi);
8748 	if (err)
8749 		return err;
8750 
8751 	/* configure global TSO hardware offload settings */
8752 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8753 						       TCP_FLAG_FIN) >> 16);
8754 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8755 						       TCP_FLAG_FIN |
8756 						       TCP_FLAG_CWR) >> 16);
8757 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8758 	udp_tunnel_get_rx_info(netdev);
8759 
8760 	return 0;
8761 }
8762 
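/* Worked example (editorial) for the TSOMSK writes above.  The TCP_FLAG_*
 * macros in include/net/tcp.h are __be32 constants:
 *
 *   TCP_FLAG_FIN = cpu_to_be32(0x00010000)
 *   TCP_FLAG_PSH = cpu_to_be32(0x00080000)
 *   TCP_FLAG_CWR = cpu_to_be32(0x00800000)
 *
 * so, independent of host endianness:
 *
 *   be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16                == 0x0009
 *   be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16 == 0x0089
 *   be32_to_cpu(TCP_FLAG_CWR) >> 16                               == 0x0080
 *
 * i.e. the registers are loaded with the flag bits positioned as they sit
 * in the 16-bit TCP flags field.
 */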
8763 /**
8764  * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
8765  * @vsi: vsi structure
8766  *
8767  * This updates netdev's number of tx/rx queues
8768  *
8769  * Returns status of setting tx/rx queues
8770  **/
8771 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
8772 {
8773 	int ret;
8774 
8775 	ret = netif_set_real_num_rx_queues(vsi->netdev,
8776 					   vsi->num_queue_pairs);
8777 	if (ret)
8778 		return ret;
8779 
8780 	return netif_set_real_num_tx_queues(vsi->netdev,
8781 					    vsi->num_queue_pairs);
8782 }
8783 
8784 /**
8785  * i40e_vsi_open - Bring up a VSI
8786  * @vsi: the VSI to open
8787  *
8788  * Finish initialization of the VSI.
8789  *
8790  * Returns 0 on success, negative value on failure
8791  *
8792  * Note: expects to be called while under rtnl_lock()
8793  **/
8794 int i40e_vsi_open(struct i40e_vsi *vsi)
8795 {
8796 	struct i40e_pf *pf = vsi->back;
8797 	char int_name[I40E_INT_NAME_STR_LEN];
8798 	int err;
8799 
8800 	/* allocate descriptors */
8801 	err = i40e_vsi_setup_tx_resources(vsi);
8802 	if (err)
8803 		goto err_setup_tx;
8804 	err = i40e_vsi_setup_rx_resources(vsi);
8805 	if (err)
8806 		goto err_setup_rx;
8807 
8808 	err = i40e_vsi_configure(vsi);
8809 	if (err)
8810 		goto err_setup_rx;
8811 
8812 	if (vsi->netdev) {
8813 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8814 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8815 		err = i40e_vsi_request_irq(vsi, int_name);
8816 		if (err)
8817 			goto err_setup_rx;
8818 
8819 		/* Notify the stack of the actual queue counts. */
8820 		err = i40e_netif_set_realnum_tx_rx_queues(vsi);
8821 		if (err)
8822 			goto err_set_queues;
8823 
8824 	} else if (vsi->type == I40E_VSI_FDIR) {
8825 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8826 			 dev_driver_string(&pf->pdev->dev),
8827 			 dev_name(&pf->pdev->dev));
8828 		err = i40e_vsi_request_irq(vsi, int_name);
8829 		if (err)
8830 			goto err_setup_rx;
8831 
8832 	} else {
8833 		err = -EINVAL;
8834 		goto err_setup_rx;
8835 	}
8836 
8837 	err = i40e_up_complete(vsi);
8838 	if (err)
8839 		goto err_up_complete;
8840 
8841 	return 0;
8842 
8843 err_up_complete:
8844 	i40e_down(vsi);
8845 err_set_queues:
8846 	i40e_vsi_free_irq(vsi);
8847 err_setup_rx:
8848 	i40e_vsi_free_rx_resources(vsi);
8849 err_setup_tx:
8850 	i40e_vsi_free_tx_resources(vsi);
8851 	if (vsi == pf->vsi[pf->lan_vsi])
8852 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8853 
8854 	return err;
8855 }
8856 
8857 /**
8858  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8859  * @pf: Pointer to PF
8860  *
8861  * This function destroys the hlist where all the Flow Director
8862  * filters were saved.
8863  **/
8864 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8865 {
8866 	struct i40e_fdir_filter *filter;
8867 	struct i40e_flex_pit *pit_entry, *tmp;
8868 	struct hlist_node *node2;
8869 
8870 	hlist_for_each_entry_safe(filter, node2,
8871 				  &pf->fdir_filter_list, fdir_node) {
8872 		hlist_del(&filter->fdir_node);
8873 		kfree(filter);
8874 	}
8875 
8876 	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8877 		list_del(&pit_entry->list);
8878 		kfree(pit_entry);
8879 	}
8880 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8881 
8882 	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8883 		list_del(&pit_entry->list);
8884 		kfree(pit_entry);
8885 	}
8886 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8887 
8888 	pf->fdir_pf_active_filters = 0;
8889 	i40e_reset_fdir_filter_cnt(pf);
8890 
8891 	/* Reprogram the default input set for TCP/IPv4 */
8892 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8893 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8894 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8895 
8896 	/* Reprogram the default input set for TCP/IPv6 */
8897 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8898 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8899 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8900 
8901 	/* Reprogram the default input set for UDP/IPv4 */
8902 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8903 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8904 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8905 
8906 	/* Reprogram the default input set for UDP/IPv6 */
8907 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8908 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8909 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8910 
8911 	/* Reprogram the default input set for SCTP/IPv4 */
8912 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8913 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8914 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8915 
8916 	/* Reprogram the default input set for SCTP/IPv6 */
8917 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8918 				I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8919 				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8920 
8921 	/* Reprogram the default input set for Other/IPv4 */
8922 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8923 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8924 
8925 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8926 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8927 
8928 	/* Reprogram the default input set for Other/IPv6 */
8929 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8930 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8931 
8932 	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8933 				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8934 }
8935 
8936 /**
8937  * i40e_cloud_filter_exit - Cleans up the cloud filters
8938  * @pf: Pointer to PF
8939  *
8940  * This function destroys the hlist where all the cloud filters
8941  * were saved.
8942  **/
8943 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8944 {
8945 	struct i40e_cloud_filter *cfilter;
8946 	struct hlist_node *node;
8947 
8948 	hlist_for_each_entry_safe(cfilter, node,
8949 				  &pf->cloud_filter_list, cloud_node) {
8950 		hlist_del(&cfilter->cloud_node);
8951 		kfree(cfilter);
8952 	}
8953 	pf->num_cloud_filters = 0;
8954 
8955 	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8956 	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8957 		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8958 		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8959 		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8960 	}
8961 }
8962 
8963 /**
8964  * i40e_close - Disables a network interface
8965  * @netdev: network interface device structure
8966  *
8967  * The close entry point is called when an interface is de-activated
8968  * by the OS.  The hardware is still under the driver's control, but
8969  * this netdev interface is disabled.
8970  *
8971  * Returns 0, this is not allowed to fail
8972  **/
8973 int i40e_close(struct net_device *netdev)
8974 {
8975 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8976 	struct i40e_vsi *vsi = np->vsi;
8977 
8978 	i40e_vsi_close(vsi);
8979 
8980 	return 0;
8981 }
8982 
8983 /**
8984  * i40e_do_reset - Start a PF or Core Reset sequence
8985  * @pf: board private structure
8986  * @reset_flags: which reset is requested
8987  * @lock_acquired: indicates whether or not the lock has been acquired
8988  * before this function was called.
8989  *
8990  * The essential difference in resets is that the PF Reset
8991  * doesn't clear the packet buffers, doesn't reset the PE
8992  * firmware, and doesn't bother the other PFs on the chip.
8993  **/
8994 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8995 {
8996 	u32 val;
8997 
8998 	/* do the biggest reset indicated */
8999 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9000 
9001 		/* Request a Global Reset
9002 		 *
9003 		 * This will start the chip's countdown to the actual full
9004 		 * chip reset event, and a warning interrupt to be sent
9005 		 * to all PFs, including the requestor.  Our handler
9006 		 * for the warning interrupt will deal with the shutdown
9007 		 * and recovery of the switch setup.
9008 		 */
9009 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9010 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9011 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9012 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9013 
9014 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9015 
9016 		/* Request a Core Reset
9017 		 *
9018 		 * Same as Global Reset, except does *not* include the MAC/PHY
9019 		 */
9020 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9021 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9022 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
9023 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9024 		i40e_flush(&pf->hw);
9025 
9026 	} else if (reset_flags & I40E_PF_RESET_FLAG) {
9027 
9028 		/* Request a PF Reset
9029 		 *
9030 		 * Resets only the PF-specific registers
9031 		 *
9032 		 * This goes directly to the tear-down and rebuild of
9033 		 * the switch, since we need to do all the recovery as
9034 		 * for the Core Reset.
9035 		 */
9036 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
9037 		i40e_handle_reset_warning(pf, lock_acquired);
9038 
9039 	} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9040 		/* Request a PF Reset
9041 		 *
9042 		 * Resets PF and reinitializes PFs VSI.
9043 		 */
9044 		i40e_prep_for_reset(pf);
9045 		i40e_reset_and_rebuild(pf, true, lock_acquired);
9046 		dev_info(&pf->pdev->dev,
9047 			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9048 			 "FW LLDP is disabled\n" :
9049 			 "FW LLDP is enabled\n");
9050 
9051 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9052 		int v;
9053 
9054 		/* Find the VSI(s) that requested a re-init */
9055 		dev_info(&pf->pdev->dev,
9056 			 "VSI reinit requested\n");
9057 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9058 			struct i40e_vsi *vsi = pf->vsi[v];
9059 
9060 			if (vsi != NULL &&
9061 			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9062 					       vsi->state))
9063 				i40e_vsi_reinit_locked(pf->vsi[v]);
9064 		}
9065 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9066 		int v;
9067 
9068 		/* Find the VSI(s) that need to be brought down */
9069 		dev_info(&pf->pdev->dev, "VSI down requested\n");
9070 		for (v = 0; v < pf->num_alloc_vsi; v++) {
9071 			struct i40e_vsi *vsi = pf->vsi[v];
9072 
9073 			if (vsi != NULL &&
9074 			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9075 					       vsi->state)) {
9076 				set_bit(__I40E_VSI_DOWN, vsi->state);
9077 				i40e_down(vsi);
9078 			}
9079 		}
9080 	} else {
9081 		dev_info(&pf->pdev->dev,
9082 			 "bad reset request 0x%08x\n", reset_flags);
9083 	}
9084 }
9085 
9086 #ifdef CONFIG_I40E_DCB
9087 /**
9088  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9089  * @pf: board private structure
9090  * @old_cfg: current DCB config
9091  * @new_cfg: new DCB config
9092  **/
9093 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9094 			    struct i40e_dcbx_config *old_cfg,
9095 			    struct i40e_dcbx_config *new_cfg)
9096 {
9097 	bool need_reconfig = false;
9098 
9099 	/* Check if ETS configuration has changed */
9100 	if (memcmp(&new_cfg->etscfg,
9101 		   &old_cfg->etscfg,
9102 		   sizeof(new_cfg->etscfg))) {
9103 		/* If Priority Table has changed reconfig is needed */
9104 		if (memcmp(&new_cfg->etscfg.prioritytable,
9105 			   &old_cfg->etscfg.prioritytable,
9106 			   sizeof(new_cfg->etscfg.prioritytable))) {
9107 			need_reconfig = true;
9108 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9109 		}
9110 
9111 		if (memcmp(&new_cfg->etscfg.tcbwtable,
9112 			   &old_cfg->etscfg.tcbwtable,
9113 			   sizeof(new_cfg->etscfg.tcbwtable)))
9114 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9115 
9116 		if (memcmp(&new_cfg->etscfg.tsatable,
9117 			   &old_cfg->etscfg.tsatable,
9118 			   sizeof(new_cfg->etscfg.tsatable)))
9119 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9120 	}
9121 
9122 	/* Check if PFC configuration has changed */
9123 	if (memcmp(&new_cfg->pfc,
9124 		   &old_cfg->pfc,
9125 		   sizeof(new_cfg->pfc))) {
9126 		need_reconfig = true;
9127 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9128 	}
9129 
9130 	/* Check if APP Table has changed */
9131 	if (memcmp(&new_cfg->app,
9132 		   &old_cfg->app,
9133 		   sizeof(new_cfg->app))) {
9134 		need_reconfig = true;
9135 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9136 	}
9137 
9138 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9139 	return need_reconfig;
9140 }
9141 
9142 /**
9143  * i40e_handle_lldp_event - Handle LLDP Change MIB event
9144  * @pf: board private structure
9145  * @e: event info posted on ARQ
9146  **/
9147 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9148 				  struct i40e_arq_event_info *e)
9149 {
9150 	struct i40e_aqc_lldp_get_mib *mib =
9151 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9152 	struct i40e_hw *hw = &pf->hw;
9153 	struct i40e_dcbx_config tmp_dcbx_cfg;
9154 	bool need_reconfig = false;
9155 	int ret = 0;
9156 	u8 type;
9157 
9158 	/* X710-T*L 2.5G and 5G speeds don't support DCB */
9159 	if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9160 	    (hw->phy.link_info.link_speed &
9161 	     ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9162 	     !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9163 		/* let firmware decide if the DCB should be disabled */
9164 		pf->flags |= I40E_FLAG_DCB_CAPABLE;
9165 
9166 	/* Not DCB capable or capability disabled */
9167 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9168 		return ret;
9169 
9170 	/* Ignore if event is not for Nearest Bridge */
9171 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9172 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9173 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9174 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9175 		return ret;
9176 
9177 	/* Check MIB Type and return if event for Remote MIB update */
9178 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9179 	dev_dbg(&pf->pdev->dev,
9180 		"LLDP event mib type %s\n", type ? "remote" : "local");
9181 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9182 		/* Update the remote cached instance and return */
9183 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9184 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9185 				&hw->remote_dcbx_config);
9186 		goto exit;
9187 	}
9188 
9189 	/* Store the old configuration */
9190 	tmp_dcbx_cfg = hw->local_dcbx_config;
9191 
9192 	/* Reset the old DCBx configuration data */
9193 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9194 	/* Get updated DCBX data from firmware */
9195 	ret = i40e_get_dcb_config(&pf->hw);
9196 	if (ret) {
9197 		/* X710-T*L 2.5G and 5G speeds don't support DCB */
9198 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9199 		    (hw->phy.link_info.link_speed &
9200 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9201 			dev_warn(&pf->pdev->dev,
9202 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9203 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9204 		} else {
9205 			dev_info(&pf->pdev->dev,
9206 				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9207 				 i40e_stat_str(&pf->hw, ret),
9208 				 i40e_aq_str(&pf->hw,
9209 					     pf->hw.aq.asq_last_status));
9210 		}
9211 		goto exit;
9212 	}
9213 
9214 	/* No change detected in DCBX configs */
9215 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9216 		    sizeof(tmp_dcbx_cfg))) {
9217 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9218 		goto exit;
9219 	}
9220 
9221 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9222 					       &hw->local_dcbx_config);
9223 
9224 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9225 
9226 	if (!need_reconfig)
9227 		goto exit;
9228 
9229 	/* Enable DCB tagging only when more than one TC */
9230 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9231 		pf->flags |= I40E_FLAG_DCB_ENABLED;
9232 	else
9233 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9234 
9235 	set_bit(__I40E_PORT_SUSPENDED, pf->state);
9236 	/* Reconfiguration needed, quiesce all VSIs */
9237 	i40e_pf_quiesce_all_vsi(pf);
9238 
9239 	/* Changes in configuration update VEB/VSI */
9240 	i40e_dcb_reconfigure(pf);
9241 
9242 	ret = i40e_resume_port_tx(pf);
9243 
9244 	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9245 	/* In case of error no point in resuming VSIs */
9246 	if (ret)
9247 		goto exit;
9248 
9249 	/* Wait for the PF's queues to be disabled */
9250 	ret = i40e_pf_wait_queues_disabled(pf);
9251 	if (ret) {
9252 		/* Schedule PF reset to recover */
9253 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9254 		i40e_service_event_schedule(pf);
9255 	} else {
9256 		i40e_pf_unquiesce_all_vsi(pf);
9257 		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9258 		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9259 	}
9260 
9261 exit:
9262 	return ret;
9263 }
9264 #endif /* CONFIG_I40E_DCB */
9265 
9266 /**
9267  * i40e_do_reset_safe - Protected reset path for userland calls.
9268  * @pf: board private structure
9269  * @reset_flags: which reset is requested
9270  *
9271  **/
9272 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9273 {
9274 	rtnl_lock();
9275 	i40e_do_reset(pf, reset_flags, true);
9276 	rtnl_unlock();
9277 }
9278 
9279 /**
9280  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9281  * @pf: board private structure
9282  * @e: event info posted on ARQ
9283  *
9284  * Handler for LAN Queue Overflow Event generated by the firmware for PF
9285  * and VF queues
9286  **/
9287 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9288 					   struct i40e_arq_event_info *e)
9289 {
9290 	struct i40e_aqc_lan_overflow *data =
9291 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9292 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
9293 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9294 	struct i40e_hw *hw = &pf->hw;
9295 	struct i40e_vf *vf;
9296 	u16 vf_id;
9297 
9298 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9299 		queue, qtx_ctl);
9300 
9301 	/* Queue belongs to VF, find the VF and issue VF reset */
9302 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9303 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9304 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9305 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9306 		vf_id -= hw->func_caps.vf_base_id;
9307 		vf = &pf->vf[vf_id];
9308 		i40e_vc_notify_vf_reset(vf);
9309 		/* Allow VF to process pending reset notification */
9310 		msleep(20);
9311 		i40e_reset_vf(vf, false);
9312 	}
9313 }
9314 
9315 /**
9316  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9317  * @pf: board private structure
9318  **/
9319 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9320 {
9321 	u32 val, fcnt_prog;
9322 
9323 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9324 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9325 	return fcnt_prog;
9326 }
9327 
9328 /**
9329  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9330  * @pf: board private structure
9331  **/
9332 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9333 {
9334 	u32 val, fcnt_prog;
9335 
9336 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9337 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9338 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9339 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9340 	return fcnt_prog;
9341 }
9342 
9343 /**
9344  * i40e_get_global_fd_count - Get total FD filters programmed on device
9345  * @pf: board private structure
9346  **/
9347 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9348 {
9349 	u32 val, fcnt_prog;
9350 
9351 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9352 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9353 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9354 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9355 	return fcnt_prog;
9356 }
9357 
9358 /**
9359  * i40e_reenable_fdir_sb - Restore FDir SB capability
9360  * @pf: board private structure
9361  **/
9362 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9363 {
9364 	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9365 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9366 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9367 			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9368 }
9369 
9370 /**
9371  * i40e_reenable_fdir_atr - Restore FDir ATR capability
9372  * @pf: board private structure
9373  **/
9374 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9375 {
9376 	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9377 		/* ATR uses the same filtering logic as SB rules. It only
9378 		 * functions properly if the input set mask is at the default
9379 		 * settings. It is safe to restore the default input set
9380 		 * because there are no active TCPv4 filter rules.
9381 		 */
9382 		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9383 					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9384 					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9385 
9386 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9387 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
9388 			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9389 	}
9390 }
9391 
9392 /**
9393  * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9394  * @pf: board private structure
9395  * @filter: FDir filter to remove
9396  */
9397 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9398 				       struct i40e_fdir_filter *filter)
9399 {
9400 	/* Update counters */
9401 	pf->fdir_pf_active_filters--;
9402 	pf->fd_inv = 0;
9403 
9404 	switch (filter->flow_type) {
9405 	case TCP_V4_FLOW:
9406 		pf->fd_tcp4_filter_cnt--;
9407 		break;
9408 	case UDP_V4_FLOW:
9409 		pf->fd_udp4_filter_cnt--;
9410 		break;
9411 	case SCTP_V4_FLOW:
9412 		pf->fd_sctp4_filter_cnt--;
9413 		break;
9414 	case TCP_V6_FLOW:
9415 		pf->fd_tcp6_filter_cnt--;
9416 		break;
9417 	case UDP_V6_FLOW:
9418 		pf->fd_udp6_filter_cnt--;
9419 		break;
9420 	case SCTP_V6_FLOW:
9421 		pf->fd_sctp6_filter_cnt--;
9422 		break;
9423 	case IP_USER_FLOW:
9424 		switch (filter->ipl4_proto) {
9425 		case IPPROTO_TCP:
9426 			pf->fd_tcp4_filter_cnt--;
9427 			break;
9428 		case IPPROTO_UDP:
9429 			pf->fd_udp4_filter_cnt--;
9430 			break;
9431 		case IPPROTO_SCTP:
9432 			pf->fd_sctp4_filter_cnt--;
9433 			break;
9434 		case IPPROTO_IP:
9435 			pf->fd_ip4_filter_cnt--;
9436 			break;
9437 		}
9438 		break;
9439 	case IPV6_USER_FLOW:
9440 		switch (filter->ipl4_proto) {
9441 		case IPPROTO_TCP:
9442 			pf->fd_tcp6_filter_cnt--;
9443 			break;
9444 		case IPPROTO_UDP:
9445 			pf->fd_udp6_filter_cnt--;
9446 			break;
9447 		case IPPROTO_SCTP:
9448 			pf->fd_sctp6_filter_cnt--;
9449 			break;
9450 		case IPPROTO_IP:
9451 			pf->fd_ip6_filter_cnt--;
9452 			break;
9453 		}
9454 		break;
9455 	}
9456 
9457 	/* Remove the filter from the list and free memory */
9458 	hlist_del(&filter->fdir_node);
9459 	kfree(filter);
9460 }
9461 
9462 /**
9463  * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9464  * @pf: board private structure
9465  **/
9466 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9467 {
9468 	struct i40e_fdir_filter *filter;
9469 	u32 fcnt_prog, fcnt_avail;
9470 	struct hlist_node *node;
9471 
9472 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9473 		return;
9474 
9475 	/* Check if we have enough room to re-enable FDir SB capability. */
9476 	fcnt_prog = i40e_get_global_fd_count(pf);
9477 	fcnt_avail = pf->fdir_pf_filter_count;
9478 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9479 	    (pf->fd_add_err == 0) ||
9480 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9481 		i40e_reenable_fdir_sb(pf);
9482 
9483 	/* We should wait for even more space before re-enabling ATR.
9484 	 * Additionally, we cannot enable ATR as long as we still have TCP SB
9485 	 * rules active.
9486 	 */
9487 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9488 	    pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9489 		i40e_reenable_fdir_atr(pf);
9490 
9491 	/* if hw had a problem adding a filter, delete it */
9492 	if (pf->fd_inv > 0) {
9493 		hlist_for_each_entry_safe(filter, node,
9494 					  &pf->fdir_filter_list, fdir_node)
9495 			if (filter->fd_id == pf->fd_inv)
9496 				i40e_delete_invalid_filter(pf, filter);
9497 	}
9498 }
9499 
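/* Worked example (editorial, hypothetical numbers): with
 * pf->fdir_pf_filter_count == 4096 and a head room of 32 filters,
 * sideband is re-enabled once fewer than 4064 filters are programmed
 * globally.  ATR waits for the larger
 * I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR margin and additionally requires
 * that no TCPv4/TCPv6 sideband rules remain.
 */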
9500 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9501 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9502 /**
9503  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9504  * @pf: board private structure
9505  **/
9506 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9507 {
9508 	unsigned long min_flush_time;
9509 	int flush_wait_retry = 50;
9510 	bool disable_atr = false;
9511 	int fd_room;
9512 	int reg;
9513 
9514 	if (!time_after(jiffies, pf->fd_flush_timestamp +
9515 				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9516 		return;
9517 
9518 	/* If the flush is happening too quickly and we have mostly SB rules,
9519 	 * we should not re-enable ATR for some time.
9520 	 */
9521 	min_flush_time = pf->fd_flush_timestamp +
9522 			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9523 	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9524 
9525 	if (!(time_after(jiffies, min_flush_time)) &&
9526 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9527 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9528 			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9529 		disable_atr = true;
9530 	}
9531 
9532 	pf->fd_flush_timestamp = jiffies;
9533 	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9534 	/* flush all filters */
9535 	wr32(&pf->hw, I40E_PFQF_CTL_1,
9536 	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9537 	i40e_flush(&pf->hw);
9538 	pf->fd_flush_cnt++;
9539 	pf->fd_add_err = 0;
9540 	do {
9541 		/* Check FD flush status every 5-6msec */
9542 		usleep_range(5000, 6000);
9543 		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9544 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9545 			break;
9546 	} while (flush_wait_retry--);
9547 	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9548 		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9549 	} else {
9550 		/* replay sideband filters */
9551 		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9552 		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9553 			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9554 		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9555 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
9556 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9557 	}
9558 }
9559 
9560 /**
9561  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9562  * @pf: board private structure
9563  **/
9564 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9565 {
9566 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9567 }
9568 
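/* Example (editorial, hypothetical counts): if the hardware reports 512
 * programmed filters and 96 of them are explicit sideband rules
 * (pf->fdir_pf_active_filters), the remaining 416 were added
 * automatically by ATR.
 */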
9569 /**
9570  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9571  * @pf: board private structure
9572  **/
9573 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9574 {
9575 
9576 	/* if interface is down do nothing */
9577 	if (test_bit(__I40E_DOWN, pf->state))
9578 		return;
9579 
9580 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9581 		i40e_fdir_flush_and_replay(pf);
9582 
9583 	i40e_fdir_check_and_reenable(pf);
9584 
9585 }
9586 
9587 /**
9588  * i40e_vsi_link_event - notify VSI of a link event
9589  * @vsi: vsi to be notified
9590  * @link_up: link up or down
9591  **/
9592 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9593 {
9594 	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9595 		return;
9596 
9597 	switch (vsi->type) {
9598 	case I40E_VSI_MAIN:
9599 		if (!vsi->netdev || !vsi->netdev_registered)
9600 			break;
9601 
9602 		if (link_up) {
9603 			netif_carrier_on(vsi->netdev);
9604 			netif_tx_wake_all_queues(vsi->netdev);
9605 		} else {
9606 			netif_carrier_off(vsi->netdev);
9607 			netif_tx_stop_all_queues(vsi->netdev);
9608 		}
9609 		break;
9610 
9611 	case I40E_VSI_SRIOV:
9612 	case I40E_VSI_VMDQ2:
9613 	case I40E_VSI_CTRL:
9614 	case I40E_VSI_IWARP:
9615 	case I40E_VSI_MIRROR:
9616 	default:
9617 		/* there is no notification for other VSIs */
9618 		break;
9619 	}
9620 }
9621 
9622 /**
9623  * i40e_veb_link_event - notify elements on the veb of a link event
9624  * @veb: veb to be notified
9625  * @link_up: link up or down
9626  **/
9627 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9628 {
9629 	struct i40e_pf *pf;
9630 	int i;
9631 
9632 	if (!veb || !veb->pf)
9633 		return;
9634 	pf = veb->pf;
9635 
9636 	/* depth first... */
9637 	for (i = 0; i < I40E_MAX_VEB; i++)
9638 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9639 			i40e_veb_link_event(pf->veb[i], link_up);
9640 
9641 	/* ... now the local VSIs */
9642 	for (i = 0; i < pf->num_alloc_vsi; i++)
9643 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9644 			i40e_vsi_link_event(pf->vsi[i], link_up);
9645 }
9646 
9647 /**
9648  * i40e_link_event - Update netif_carrier status
9649  * @pf: board private structure
9650  **/
9651 static void i40e_link_event(struct i40e_pf *pf)
9652 {
9653 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9654 	u8 new_link_speed, old_link_speed;
9655 	i40e_status status;
9656 	bool new_link, old_link;
9657 #ifdef CONFIG_I40E_DCB
9658 	int err;
9659 #endif /* CONFIG_I40E_DCB */
9660 
9661 	/* set this to force the get_link_status call to refresh state */
9662 	pf->hw.phy.get_link_info = true;
9663 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9664 	status = i40e_get_link_status(&pf->hw, &new_link);
9665 
9666 	/* On success, disable temp link polling */
9667 	if (status == I40E_SUCCESS) {
9668 		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9669 	} else {
9670 		/* Enable link polling temporarily until i40e_get_link_status
9671 		 * returns I40E_SUCCESS
9672 		 */
9673 		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9674 		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9675 			status);
9676 		return;
9677 	}
9678 
9679 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
9680 	new_link_speed = pf->hw.phy.link_info.link_speed;
9681 
9682 	if (new_link == old_link &&
9683 	    new_link_speed == old_link_speed &&
9684 	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9685 	     new_link == netif_carrier_ok(vsi->netdev)))
9686 		return;
9687 
9688 	i40e_print_link_message(vsi, new_link);
9689 
9690 	/* Notify the base of the switch tree connected to
9691 	 * the link.  Floating VEBs are not notified.
9692 	 */
9693 	if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9694 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9695 	else
9696 		i40e_vsi_link_event(vsi, new_link);
9697 
9698 	if (pf->vf)
9699 		i40e_vc_notify_link_state(pf);
9700 
9701 	if (pf->flags & I40E_FLAG_PTP)
9702 		i40e_ptp_set_increment(pf);
9703 #ifdef CONFIG_I40E_DCB
9704 	if (new_link == old_link)
9705 		return;
9706 	/* Not SW DCB so firmware will take care of default settings */
9707 	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9708 		return;
9709 
9710 	/* We only handle link down here; after link up, in the SW DCB case,
9711 	 * the SW LLDP agent will take care of setting it up
9712 	 */
9713 	if (!new_link) {
9714 		dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9715 		memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9716 		err = i40e_dcb_sw_default_config(pf);
9717 		if (err) {
9718 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9719 				       I40E_FLAG_DCB_ENABLED);
9720 		} else {
9721 			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9722 				       DCB_CAP_DCBX_VER_IEEE;
9723 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
9724 			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9725 		}
9726 	}
9727 #endif /* CONFIG_I40E_DCB */
9728 }
9729 
9730 /**
9731  * i40e_watchdog_subtask - periodic checks not using event driven response
9732  * @pf: board private structure
9733  **/
9734 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9735 {
9736 	int i;
9737 
9738 	/* if interface is down do nothing */
9739 	if (test_bit(__I40E_DOWN, pf->state) ||
9740 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
9741 		return;
9742 
9743 	/* make sure we don't do these things too often */
9744 	if (time_before(jiffies, (pf->service_timer_previous +
9745 				  pf->service_timer_period)))
9746 		return;
9747 	pf->service_timer_previous = jiffies;
9748 
9749 	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9750 	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9751 		i40e_link_event(pf);
9752 
9753 	/* Update the stats for active netdevs so the network stack
9754 	 * can look at updated numbers whenever it cares to
9755 	 */
9756 	for (i = 0; i < pf->num_alloc_vsi; i++)
9757 		if (pf->vsi[i] && pf->vsi[i]->netdev)
9758 			i40e_update_stats(pf->vsi[i]);
9759 
9760 	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9761 		/* Update the stats for the active switching components */
9762 		for (i = 0; i < I40E_MAX_VEB; i++)
9763 			if (pf->veb[i])
9764 				i40e_update_veb_stats(pf->veb[i]);
9765 	}
9766 
9767 	i40e_ptp_rx_hang(pf);
9768 	i40e_ptp_tx_hang(pf);
9769 }
9770 
9771 /**
9772  * i40e_reset_subtask - Set up for resetting the device and driver
9773  * @pf: board private structure
9774  **/
9775 static void i40e_reset_subtask(struct i40e_pf *pf)
9776 {
9777 	u32 reset_flags = 0;
9778 
9779 	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9780 		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9781 		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9782 	}
9783 	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9784 		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9785 		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9786 	}
9787 	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9788 		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9789 		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9790 	}
9791 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9792 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9793 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9794 	}
9795 	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9796 		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9797 		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9798 	}
9799 
9800 	/* If there's a recovery already waiting, it takes
9801 	 * precedence over starting a new reset sequence.
9802 	 */
9803 	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9804 		i40e_prep_for_reset(pf);
9805 		i40e_reset(pf);
9806 		i40e_rebuild(pf, false, false);
9807 	}
9808 
9809 	/* If we're already down or resetting, just bail */
9810 	if (reset_flags &&
9811 	    !test_bit(__I40E_DOWN, pf->state) &&
9812 	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9813 		i40e_do_reset(pf, reset_flags, false);
9814 	}
9815 }
9816 
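/* The producer side of this consumer appears throughout the driver (see
 * e.g. i40e_handle_lldp_event() above): a context that wants a reset sets
 * the matching request bit and kicks the service task, which eventually
 * runs this subtask:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *	i40e_service_event_schedule(pf);
 */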
9817 /**
9818  * i40e_handle_link_event - Handle link event
9819  * @pf: board private structure
9820  * @e: event info posted on ARQ
9821  **/
9822 static void i40e_handle_link_event(struct i40e_pf *pf,
9823 				   struct i40e_arq_event_info *e)
9824 {
9825 	struct i40e_aqc_get_link_status *status =
9826 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9827 
9828 	/* Do a new status request to re-enable LSE reporting
9829 	 * and load new status information into the hw struct.
9830 	 * This completely ignores any state information
9831 	 * in the ARQ event info, instead choosing to always
9832 	 * issue the AQ update link status command.
9833 	 */
9834 	i40e_link_event(pf);
9835 
9836 	/* Check if module meets thermal requirements */
9837 	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9838 		dev_err(&pf->pdev->dev,
9839 			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9840 		dev_err(&pf->pdev->dev,
9841 			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9842 	} else {
9843 		/* check for an unqualified module; if link is down, suppress
9844 		 * the message when the link was forced down.
9845 		 */
9846 		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9847 		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9848 		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
9849 		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9850 			dev_err(&pf->pdev->dev,
9851 				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9852 			dev_err(&pf->pdev->dev,
9853 				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9854 		}
9855 	}
9856 }
9857 
9858 /**
9859  * i40e_clean_adminq_subtask - Clean the AdminQ rings
9860  * @pf: board private structure
9861  **/
9862 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9863 {
9864 	struct i40e_arq_event_info event;
9865 	struct i40e_hw *hw = &pf->hw;
9866 	u16 pending, i = 0;
9867 	i40e_status ret;
9868 	u16 opcode;
9869 	u32 oldval;
9870 	u32 val;
9871 
9872 	/* Do not run clean AQ when PF reset fails */
9873 	if (test_bit(__I40E_RESET_FAILED, pf->state))
9874 		return;
9875 
9876 	/* check for error indications */
9877 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
9878 	oldval = val;
9879 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9880 		if (hw->debug_mask & I40E_DEBUG_AQ)
9881 			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9882 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9883 	}
9884 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9885 		if (hw->debug_mask & I40E_DEBUG_AQ)
9886 			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9887 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9888 		pf->arq_overflows++;
9889 	}
9890 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9891 		if (hw->debug_mask & I40E_DEBUG_AQ)
9892 			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9893 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9894 	}
9895 	if (oldval != val)
9896 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
9897 
9898 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
9899 	oldval = val;
9900 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9901 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9902 			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9903 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9904 	}
9905 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9906 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9907 			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9908 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9909 	}
9910 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9911 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9912 			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9913 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9914 	}
9915 	if (oldval != val)
9916 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
9917 
9918 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9919 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9920 	if (!event.msg_buf)
9921 		return;
9922 
9923 	do {
9924 		ret = i40e_clean_arq_element(hw, &event, &pending);
9925 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9926 			break;
9927 		else if (ret) {
9928 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9929 			break;
9930 		}
9931 
9932 		opcode = le16_to_cpu(event.desc.opcode);
9933 		switch (opcode) {
9934 
9935 		case i40e_aqc_opc_get_link_status:
9936 			rtnl_lock();
9937 			i40e_handle_link_event(pf, &event);
9938 			rtnl_unlock();
9939 			break;
9940 		case i40e_aqc_opc_send_msg_to_pf:
9941 			ret = i40e_vc_process_vf_msg(pf,
9942 					le16_to_cpu(event.desc.retval),
9943 					le32_to_cpu(event.desc.cookie_high),
9944 					le32_to_cpu(event.desc.cookie_low),
9945 					event.msg_buf,
9946 					event.msg_len);
9947 			break;
9948 		case i40e_aqc_opc_lldp_update_mib:
9949 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9950 #ifdef CONFIG_I40E_DCB
9951 			rtnl_lock();
9952 			i40e_handle_lldp_event(pf, &event);
9953 			rtnl_unlock();
9954 #endif /* CONFIG_I40E_DCB */
9955 			break;
9956 		case i40e_aqc_opc_event_lan_overflow:
9957 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9958 			i40e_handle_lan_overflow_event(pf, &event);
9959 			break;
9960 		case i40e_aqc_opc_send_msg_to_peer:
9961 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9962 			break;
9963 		case i40e_aqc_opc_nvm_erase:
9964 		case i40e_aqc_opc_nvm_update:
9965 		case i40e_aqc_opc_oem_post_update:
9966 			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9967 				   "ARQ NVM operation 0x%04x completed\n",
9968 				   opcode);
9969 			break;
9970 		default:
9971 			dev_info(&pf->pdev->dev,
9972 				 "ARQ: Unknown event 0x%04x ignored\n",
9973 				 opcode);
9974 			break;
9975 		}
9976 	} while (i++ < pf->adminq_work_limit);
9977 
9978 	if (i < pf->adminq_work_limit)
9979 		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9980 
9981 	/* re-enable Admin queue interrupt cause */
9982 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
9983 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9984 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
9985 	i40e_flush(hw);
9986 
9987 	kfree(event.msg_buf);
9988 }
9989 
9990 /**
9991  * i40e_verify_eeprom - make sure eeprom is good to use
9992  * @pf: board private structure
9993  **/
9994 static void i40e_verify_eeprom(struct i40e_pf *pf)
9995 {
9996 	int err;
9997 
9998 	err = i40e_diag_eeprom_test(&pf->hw);
9999 	if (err) {
10000 		/* retry in case of garbage read */
10001 		err = i40e_diag_eeprom_test(&pf->hw);
10002 		if (err) {
10003 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10004 				 err);
10005 			set_bit(__I40E_BAD_EEPROM, pf->state);
10006 		}
10007 	}
10008 
10009 	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10010 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10011 		clear_bit(__I40E_BAD_EEPROM, pf->state);
10012 	}
10013 }
10014 
10015 /**
10016  * i40e_enable_pf_switch_lb
10017  * @pf: pointer to the PF structure
10018  *
10019  * enable switch loop back or die - no point in a return value
10020  **/
10021 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10022 {
10023 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10024 	struct i40e_vsi_context ctxt;
10025 	int ret;
10026 
10027 	ctxt.seid = pf->main_vsi_seid;
10028 	ctxt.pf_num = pf->hw.pf_id;
10029 	ctxt.vf_num = 0;
10030 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10031 	if (ret) {
10032 		dev_info(&pf->pdev->dev,
10033 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10034 			 i40e_stat_str(&pf->hw, ret),
10035 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10036 		return;
10037 	}
10038 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10039 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10040 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10041 
10042 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10043 	if (ret) {
10044 		dev_info(&pf->pdev->dev,
10045 			 "update vsi switch failed, err %s aq_err %s\n",
10046 			 i40e_stat_str(&pf->hw, ret),
10047 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10048 	}
10049 }
10050 
10051 /**
10052  * i40e_disable_pf_switch_lb
10053  * @pf: pointer to the PF structure
10054  *
10055  * disable switch loop back or die - no point in a return value
10056  **/
10057 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10058 {
10059 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10060 	struct i40e_vsi_context ctxt;
10061 	int ret;
10062 
10063 	ctxt.seid = pf->main_vsi_seid;
10064 	ctxt.pf_num = pf->hw.pf_id;
10065 	ctxt.vf_num = 0;
10066 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10067 	if (ret) {
10068 		dev_info(&pf->pdev->dev,
10069 			 "couldn't get PF vsi config, err %s aq_err %s\n",
10070 			 i40e_stat_str(&pf->hw, ret),
10071 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10072 		return;
10073 	}
10074 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10075 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10076 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10077 
10078 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10079 	if (ret) {
10080 		dev_info(&pf->pdev->dev,
10081 			 "update vsi switch failed, err %s aq_err %s\n",
10082 			 i40e_stat_str(&pf->hw, ret),
10083 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10084 	}
10085 }
10086 
10087 /**
10088  * i40e_config_bridge_mode - Configure the HW bridge mode
10089  * @veb: pointer to the bridge instance
10090  *
10091  * Configure the loop back mode for the LAN VSI that is downlink to the
10092  * specified HW bridge instance. It is expected this function is called
10093  * when a new HW bridge is instantiated.
10094  **/
10095 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10096 {
10097 	struct i40e_pf *pf = veb->pf;
10098 
10099 	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10100 		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10101 			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10102 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10103 		i40e_disable_pf_switch_lb(pf);
10104 	else
10105 		i40e_enable_pf_switch_lb(pf);
10106 }
10107 
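/* Illustration (editorial): veb->bridge_mode is normally chosen from
 * userspace through the ndo_bridge_setlink path, e.g. with iproute2
 * (assuming a PF netdev named eth0):
 *
 *   bridge link set dev eth0 hwmode vepa   # reflect via external switch
 *   bridge link set dev eth0 hwmode veb    # local HW loopback/switching
 */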
10108 /**
10109  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10110  * @veb: pointer to the VEB instance
10111  *
10112  * This is a recursive function that first builds the attached VSIs then
10113  * recurses to build the next layer of VEBs.  We track the connections
10114  * through our own index numbers because the SEIDs from the HW could
10115  * change across the reset.
10116  **/
10117 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10118 {
10119 	struct i40e_vsi *ctl_vsi = NULL;
10120 	struct i40e_pf *pf = veb->pf;
10121 	int v, veb_idx;
10122 	int ret;
10123 
10124 	/* build VSI that owns this VEB, temporarily attached to base VEB */
10125 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10126 		if (pf->vsi[v] &&
10127 		    pf->vsi[v]->veb_idx == veb->idx &&
10128 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10129 			ctl_vsi = pf->vsi[v];
10130 			break;
10131 		}
10132 	}
10133 	if (!ctl_vsi) {
10134 		dev_info(&pf->pdev->dev,
10135 			 "missing owner VSI for veb_idx %d\n", veb->idx);
10136 		ret = -ENOENT;
10137 		goto end_reconstitute;
10138 	}
10139 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
10140 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10141 	ret = i40e_add_vsi(ctl_vsi);
10142 	if (ret) {
10143 		dev_info(&pf->pdev->dev,
10144 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
10145 			 veb->idx, ret);
10146 		goto end_reconstitute;
10147 	}
10148 	i40e_vsi_reset_stats(ctl_vsi);
10149 
10150 	/* create the VEB in the switch and move the VSI onto the VEB */
10151 	ret = i40e_add_veb(veb, ctl_vsi);
10152 	if (ret)
10153 		goto end_reconstitute;
10154 
10155 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10156 		veb->bridge_mode = BRIDGE_MODE_VEB;
10157 	else
10158 		veb->bridge_mode = BRIDGE_MODE_VEPA;
10159 	i40e_config_bridge_mode(veb);
10160 
10161 	/* create the remaining VSIs attached to this VEB */
10162 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10163 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10164 			continue;
10165 
10166 		if (pf->vsi[v]->veb_idx == veb->idx) {
10167 			struct i40e_vsi *vsi = pf->vsi[v];
10168 
10169 			vsi->uplink_seid = veb->seid;
10170 			ret = i40e_add_vsi(vsi);
10171 			if (ret) {
10172 				dev_info(&pf->pdev->dev,
10173 					 "rebuild of vsi_idx %d failed: %d\n",
10174 					 v, ret);
10175 				goto end_reconstitute;
10176 			}
10177 			i40e_vsi_reset_stats(vsi);
10178 		}
10179 	}
10180 
10181 	/* create any VEBs attached to this VEB - RECURSION */
10182 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10183 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10184 			pf->veb[veb_idx]->uplink_seid = veb->seid;
10185 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10186 			if (ret)
10187 				break;
10188 		}
10189 	}
10190 
10191 end_reconstitute:
10192 	return ret;
10193 }
10194 
10195 /**
10196  * i40e_get_capabilities - get info about the HW
10197  * @pf: the PF struct
10198  * @list_type: AQ capability to be queried
10199  **/
10200 static int i40e_get_capabilities(struct i40e_pf *pf,
10201 				 enum i40e_admin_queue_opc list_type)
10202 {
10203 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10204 	u16 data_size;
10205 	int buf_len;
10206 	int err;
10207 
10208 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10209 	do {
10210 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
10211 		if (!cap_buf)
10212 			return -ENOMEM;
10213 
10214 		/* this loads the data into the hw struct for us */
10215 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10216 						    &data_size, list_type,
10217 						    NULL);
10218 		/* data loaded, buffer no longer needed */
10219 		kfree(cap_buf);
10220 
10221 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10222 			/* retry with a larger buffer */
10223 			buf_len = data_size;
10224 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10225 			dev_info(&pf->pdev->dev,
10226 				 "capability discovery failed, err %s aq_err %s\n",
10227 				 i40e_stat_str(&pf->hw, err),
10228 				 i40e_aq_str(&pf->hw,
10229 					     pf->hw.aq.asq_last_status));
10230 			return -ENODEV;
10231 		}
10232 	} while (err);
10233 
10234 	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10235 		if (list_type == i40e_aqc_opc_list_func_capabilities) {
10236 			dev_info(&pf->pdev->dev,
10237 				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10238 				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10239 				 pf->hw.func_caps.num_msix_vectors,
10240 				 pf->hw.func_caps.num_msix_vectors_vf,
10241 				 pf->hw.func_caps.fd_filters_guaranteed,
10242 				 pf->hw.func_caps.fd_filters_best_effort,
10243 				 pf->hw.func_caps.num_tx_qp,
10244 				 pf->hw.func_caps.num_vsis);
10245 		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10246 			dev_info(&pf->pdev->dev,
10247 				 "switch_mode=0x%04x, function_valid=0x%08x\n",
10248 				 pf->hw.dev_caps.switch_mode,
10249 				 pf->hw.dev_caps.valid_functions);
10250 			dev_info(&pf->pdev->dev,
10251 				 "SR-IOV=%d, num_vfs for all function=%u\n",
10252 				 pf->hw.dev_caps.sr_iov_1_1,
10253 				 pf->hw.dev_caps.num_vfs);
10254 			dev_info(&pf->pdev->dev,
10255 				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10256 				 pf->hw.dev_caps.num_vsis,
10257 				 pf->hw.dev_caps.num_rx_qp,
10258 				 pf->hw.dev_caps.num_tx_qp);
10259 		}
10260 	}
10261 	if (list_type == i40e_aqc_opc_list_func_capabilities) {
10262 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10263 		       + pf->hw.func_caps.num_vfs)
10264 		if (pf->hw.revision_id == 0 &&
10265 		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10266 			dev_info(&pf->pdev->dev,
10267 				 "got num_vsis %d, setting num_vsis to %d\n",
10268 				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10269 			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10270 		}
10271 	}
10272 	return 0;
10273 }
10274 
10275 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10276 
10277 /**
10278  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10279  * @pf: board private structure
10280  **/
10281 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10282 {
10283 	struct i40e_vsi *vsi;
10284 
10285 	/* quick workaround for an NVM issue that leaves a critical register
10286 	 * uninitialized
10287 	 */
10288 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10289 		static const u32 hkey[] = {
10290 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10291 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10292 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10293 			0x95b3a76d};
10294 		int i;
10295 
10296 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10297 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10298 	}
10299 
10300 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10301 		return;
10302 
10303 	/* find existing VSI and see if it needs configuring */
10304 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10305 
10306 	/* create a new VSI if none exists */
10307 	if (!vsi) {
10308 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10309 				     pf->vsi[pf->lan_vsi]->seid, 0);
10310 		if (!vsi) {
10311 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10312 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10313 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10314 			return;
10315 		}
10316 	}
10317 
10318 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10319 }
10320 
10321 /**
10322  * i40e_fdir_teardown - release the Flow Director resources
10323  * @pf: board private structure
10324  **/
10325 static void i40e_fdir_teardown(struct i40e_pf *pf)
10326 {
10327 	struct i40e_vsi *vsi;
10328 
10329 	i40e_fdir_filter_exit(pf);
10330 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10331 	if (vsi)
10332 		i40e_vsi_release(vsi);
10333 }
10334 
10335 /**
10336  * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10337  * @vsi: PF main vsi
10338  * @seid: seid of main or channel VSIs
10339  *
10340  * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10341  * existed before reset
10342  **/
10343 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10344 {
10345 	struct i40e_cloud_filter *cfilter;
10346 	struct i40e_pf *pf = vsi->back;
10347 	struct hlist_node *node;
10348 	i40e_status ret;
10349 
10350 	/* Add cloud filters back if they exist */
10351 	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10352 				  cloud_node) {
10353 		if (cfilter->seid != seid)
10354 			continue;
10355 
10356 		if (cfilter->dst_port)
10357 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10358 								true);
10359 		else
10360 			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10361 
10362 		if (ret) {
10363 			dev_dbg(&pf->pdev->dev,
10364 				"Failed to rebuild cloud filter, err %s aq_err %s\n",
10365 				i40e_stat_str(&pf->hw, ret),
10366 				i40e_aq_str(&pf->hw,
10367 					    pf->hw.aq.asq_last_status));
10368 			return ret;
10369 		}
10370 	}
10371 	return 0;
10372 }
10373 
10374 /**
10375  * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10376  * @vsi: PF main vsi
10377  *
10378  * Rebuilds channel VSIs if they existed before reset
10379  **/
10380 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10381 {
10382 	struct i40e_channel *ch, *ch_tmp;
10383 	i40e_status ret;
10384 
10385 	if (list_empty(&vsi->ch_list))
10386 		return 0;
10387 
10388 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10389 		if (!ch->initialized)
10390 			break;
10391 		/* Proceed with creation of channel (VMDq2) VSI */
10392 		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10393 		if (ret) {
10394 			dev_info(&vsi->back->pdev->dev,
10395 				 "failed to rebuild channels using uplink_seid %u\n",
10396 				 vsi->uplink_seid);
10397 			return ret;
10398 		}
10399 		/* Reconfigure TX queues using QTX_CTL register */
10400 		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10401 		if (ret) {
10402 			dev_info(&vsi->back->pdev->dev,
10403 				 "failed to configure TX rings for channel %u\n",
10404 				 ch->seid);
10405 			return ret;
10406 		}
10407 		/* update 'next_base_queue' */
10408 		vsi->next_base_queue = vsi->next_base_queue +
10409 							ch->num_queue_pairs;
10410 		if (ch->max_tx_rate) {
10411 			u64 credits = ch->max_tx_rate;
10412 
10413 			if (i40e_set_bw_limit(vsi, ch->seid,
10414 					      ch->max_tx_rate))
10415 				return -EINVAL;
10416 
10417 			do_div(credits, I40E_BW_CREDIT_DIVISOR);
10418 			dev_dbg(&vsi->back->pdev->dev,
10419 				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10420 				ch->max_tx_rate,
10421 				credits,
10422 				ch->seid);
10423 		}
10424 		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10425 		if (ret) {
10426 			dev_dbg(&vsi->back->pdev->dev,
10427 				"Failed to rebuild cloud filters for channel VSI %u\n",
10428 				ch->seid);
10429 			return ret;
10430 		}
10431 	}
10432 	return 0;
10433 }
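
/* Worked example of the credit math in i40e_rebuild_channels(), assuming
 * I40E_BW_CREDIT_DIVISOR is 50 (consistent with the "count of 50Mbps"
 * wording in the dev_dbg message above): for ch->max_tx_rate = 500 Mbps,
 * do_div(credits, 50) leaves credits == 10, so a 500 Mbps cap is
 * programmed as ten 50 Mbps scheduler credits.
 */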
10434 
10435 /**
10436  * i40e_prep_for_reset - prep for the core to reset
10437  * @pf: board private structure
10438  *
10439  * Close up the VFs and other things in prep for PF Reset.
10440  **/
10441 static void i40e_prep_for_reset(struct i40e_pf *pf)
10442 {
10443 	struct i40e_hw *hw = &pf->hw;
10444 	i40e_status ret = 0;
10445 	u32 v;
10446 
10447 	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10448 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10449 		return;
10450 	if (i40e_check_asq_alive(&pf->hw))
10451 		i40e_vc_notify_reset(pf);
10452 
10453 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10454 
10455 	/* quiesce the VSIs and their queues that are not already DOWN */
10456 	i40e_pf_quiesce_all_vsi(pf);
10457 
10458 	for (v = 0; v < pf->num_alloc_vsi; v++) {
10459 		if (pf->vsi[v])
10460 			pf->vsi[v]->seid = 0;
10461 	}
10462 
10463 	i40e_shutdown_adminq(&pf->hw);
10464 
10465 	/* call shutdown HMC */
10466 	if (hw->hmc.hmc_obj) {
10467 		ret = i40e_shutdown_lan_hmc(hw);
10468 		if (ret)
10469 			dev_warn(&pf->pdev->dev,
10470 				 "shutdown_lan_hmc failed: %d\n", ret);
10471 	}
10472 
10473 	/* Save the current PTP time so that we can restore the time after the
10474 	 * reset completes.
10475 	 */
10476 	i40e_ptp_save_hw_time(pf);
10477 }
10478 
10479 /**
10480  * i40e_send_version - update firmware with driver version
10481  * @pf: PF struct
10482  */
10483 static void i40e_send_version(struct i40e_pf *pf)
10484 {
10485 	struct i40e_driver_version dv;
10486 
10487 	dv.major_version = 0xff;
10488 	dv.minor_version = 0xff;
10489 	dv.build_version = 0xff;
10490 	dv.subbuild_version = 0;
10491 	strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10492 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10493 }
10494 
10495 /**
10496  * i40e_get_oem_version - get OEM specific version information
10497  * @hw: pointer to the hardware structure
10498  **/
10499 static void i40e_get_oem_version(struct i40e_hw *hw)
10500 {
10501 	u16 block_offset = 0xffff;
10502 	u16 block_length = 0;
10503 	u16 capabilities = 0;
10504 	u16 gen_snap = 0;
10505 	u16 release = 0;
10506 
10507 #define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
10508 #define I40E_NVM_OEM_LENGTH_OFFSET		0x00
10509 #define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
10510 #define I40E_NVM_OEM_GEN_OFFSET			0x02
10511 #define I40E_NVM_OEM_RELEASE_OFFSET		0x03
10512 #define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
10513 #define I40E_NVM_OEM_LENGTH			3
10514 
10515 	/* Check if pointer to OEM version block is valid. */
10516 	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10517 	if (block_offset == 0xffff)
10518 		return;
10519 
10520 	/* Check if OEM version block has correct length. */
10521 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10522 			   &block_length);
10523 	if (block_length < I40E_NVM_OEM_LENGTH)
10524 		return;
10525 
10526 	/* Check if OEM version format is as expected. */
10527 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10528 			   &capabilities);
10529 	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10530 		return;
10531 
10532 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10533 			   &gen_snap);
10534 	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10535 			   &release);
10536 	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10537 	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
10538 }
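
/* Illustration of the word packing at the end of i40e_get_oem_version(),
 * assuming I40E_OEM_SNAP_SHIFT is 16 so gen_snap fills the upper half-word:
 * with gen_snap = 0x0102 and release = 0x0003 read from the NVM,
 * oem_ver = (0x0102 << 16) | 0x0003 = 0x01020003, from which the
 * generation/snapshot and release fields can later be split back out.
 */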
10539 
10540 /**
10541  * i40e_reset - wait for core reset to finish, reset the PF if CORER was not seen
10542  * @pf: board private structure
10543  **/
10544 static int i40e_reset(struct i40e_pf *pf)
10545 {
10546 	struct i40e_hw *hw = &pf->hw;
10547 	i40e_status ret;
10548 
10549 	ret = i40e_pf_reset(hw);
10550 	if (ret) {
10551 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10552 		set_bit(__I40E_RESET_FAILED, pf->state);
10553 		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10554 	} else {
10555 		pf->pfr_count++;
10556 	}
10557 	return ret;
10558 }
10559 
10560 /**
10561  * i40e_rebuild - rebuild using a saved config
10562  * @pf: board private structure
10563  * @reinit: if the Main VSI needs to be re-initialized.
10564  * @lock_acquired: indicates whether or not the lock has been acquired
10565  * before this function was called.
10566  **/
10567 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10568 {
10569 	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10570 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10571 	struct i40e_hw *hw = &pf->hw;
10572 	i40e_status ret;
10573 	u32 val;
10574 	int v;
10575 
10576 	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10577 	    i40e_check_recovery_mode(pf)) {
10578 		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10579 	}
10580 
10581 	if (test_bit(__I40E_DOWN, pf->state) &&
10582 	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10583 	    !old_recovery_mode_bit)
10584 		goto clear_recovery;
10585 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10586 
10587 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10588 	ret = i40e_init_adminq(&pf->hw);
10589 	if (ret) {
10590 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10591 			 i40e_stat_str(&pf->hw, ret),
10592 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10593 		goto clear_recovery;
10594 	}
10595 	i40e_get_oem_version(&pf->hw);
10596 
10597 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10598 		/* The following delay is necessary for firmware update. */
10599 		mdelay(1000);
10600 
10601 		/* re-verify the eeprom since we just had an EMP reset */
10602 		i40e_verify_eeprom(pf);
10603 	}
10605 
10606 	/* if we are going out of or into recovery mode we have to act
10607 	 * accordingly with regard to resource initialization
10608 	 * and deinitialization
10609 	 */
10610 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10611 	    old_recovery_mode_bit) {
10612 		if (i40e_get_capabilities(pf,
10613 					  i40e_aqc_opc_list_func_capabilities))
10614 			goto end_unlock;
10615 
10616 		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10617 			/* we're staying in recovery mode so we'll reinitialize
10618 			 * misc vector here
10619 			 */
10620 			if (i40e_setup_misc_vector_for_recovery_mode(pf))
10621 				goto end_unlock;
10622 		} else {
10623 			if (!lock_acquired)
10624 				rtnl_lock();
10625 			/* we're going out of recovery mode so we'll free
10626 			 * the IRQ allocated specifically for recovery mode
10627 			 * and restore the interrupt scheme
10628 			 */
10629 			free_irq(pf->pdev->irq, pf);
10630 			i40e_clear_interrupt_scheme(pf);
10631 			if (i40e_restore_interrupt_scheme(pf))
10632 				goto end_unlock;
10633 		}
10634 
10635 		/* tell the firmware that we're starting */
10636 		i40e_send_version(pf);
10637 
10638 		/* bail out in case recovery mode was detected, as there is
10639 		 * no need for further configuration.
10640 		 */
10641 		goto end_unlock;
10642 	}
10643 
10644 	i40e_clear_pxe_mode(hw);
10645 	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10646 	if (ret)
10647 		goto end_core_reset;
10648 
10649 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10650 				hw->func_caps.num_rx_qp, 0, 0);
10651 	if (ret) {
10652 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10653 		goto end_core_reset;
10654 	}
10655 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10656 	if (ret) {
10657 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10658 		goto end_core_reset;
10659 	}
10660 
10661 #ifdef CONFIG_I40E_DCB
10662 	/* Enable FW to write a default DCB config on link-up
10663 	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
10664 	 * is not supported with new link speed
10665 	 */
10666 	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
10667 		i40e_aq_set_dcb_parameters(hw, false, NULL);
10668 	} else {
10669 		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10670 		    (hw->phy.link_info.link_speed &
10671 		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10672 			i40e_aq_set_dcb_parameters(hw, false, NULL);
10673 			dev_warn(&pf->pdev->dev,
10674 				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10675 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10676 		} else {
10677 			i40e_aq_set_dcb_parameters(hw, true, NULL);
10678 			ret = i40e_init_pf_dcb(pf);
10679 			if (ret) {
10680 				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10681 					 ret);
10682 				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10683 				/* Continue without DCB enabled */
10684 			}
10685 		}
10686 	}
10687 
10688 #endif /* CONFIG_I40E_DCB */
10689 	if (!lock_acquired)
10690 		rtnl_lock();
10691 	ret = i40e_setup_pf_switch(pf, reinit, true);
10692 	if (ret)
10693 		goto end_unlock;
10694 
10695 	/* The driver only wants link up/down and module qualification
10696 	 * reports from firmware.  Note the negative logic.
10697 	 */
10698 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
10699 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
10700 					 I40E_AQ_EVENT_MEDIA_NA |
10701 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10702 	if (ret)
10703 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10704 			 i40e_stat_str(&pf->hw, ret),
10705 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10706 
10707 	/* Rebuild the VSIs and VEBs that existed before reset.
10708 	 * They are still in our local switch element arrays, so only
10709 	 * need to rebuild the switch model in the HW.
10710 	 *
10711 	 * If there were VEBs but the reconstitution failed, we'll try
10712 	 * to recover minimal use by getting the basic PF VSI working.
10713 	 */
10714 	if (vsi->uplink_seid != pf->mac_seid) {
10715 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10716 		/* find the one VEB connected to the MAC, and find orphans */
10717 		for (v = 0; v < I40E_MAX_VEB; v++) {
10718 			if (!pf->veb[v])
10719 				continue;
10720 
10721 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10722 			    pf->veb[v]->uplink_seid == 0) {
10723 				ret = i40e_reconstitute_veb(pf->veb[v]);
10724 
10725 				if (!ret)
10726 					continue;
10727 
10728 				/* If Main VEB failed, we're in deep doodoo,
10729 				 * so give up rebuilding the switch and set up
10730 				 * for minimal rebuild of PF VSI.
10731 				 * If orphan failed, we'll report the error
10732 				 * but try to keep going.
10733 				 */
10734 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10735 					dev_info(&pf->pdev->dev,
10736 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10737 						 ret);
10738 					vsi->uplink_seid = pf->mac_seid;
10739 					break;
10740 				} else if (pf->veb[v]->uplink_seid == 0) {
10741 					dev_info(&pf->pdev->dev,
10742 						 "rebuild of orphan VEB failed: %d\n",
10743 						 ret);
10744 				}
10745 			}
10746 		}
10747 	}
10748 
10749 	if (vsi->uplink_seid == pf->mac_seid) {
10750 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10751 		/* no VEB, so rebuild only the Main VSI */
10752 		ret = i40e_add_vsi(vsi);
10753 		if (ret) {
10754 			dev_info(&pf->pdev->dev,
10755 				 "rebuild of Main VSI failed: %d\n", ret);
10756 			goto end_unlock;
10757 		}
10758 	}
10759 
10760 	if (vsi->mqprio_qopt.max_rate[0]) {
10761 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10762 		u64 credits = 0;
10763 
10764 		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10765 		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10766 		if (ret)
10767 			goto end_unlock;
10768 
10769 		credits = max_tx_rate;
10770 		do_div(credits, I40E_BW_CREDIT_DIVISOR);
10771 		dev_dbg(&vsi->back->pdev->dev,
10772 			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10773 			max_tx_rate,
10774 			credits,
10775 			vsi->seid);
10776 	}
10777 
10778 	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10779 	if (ret)
10780 		goto end_unlock;
10781 
10782 	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
10783 	 * for this main VSI if they exist
10784 	 */
10785 	ret = i40e_rebuild_channels(vsi);
10786 	if (ret)
10787 		goto end_unlock;
10788 
10789 	/* Reconfigure hardware for allowing smaller MSS in the case
10790 	 * of TSO, so that we avoid the MDD being fired and causing
10791 	 * a reset in the case of small MSS+TSO.
10792 	 */
10793 #define I40E_REG_MSS          0x000E64DC
10794 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10795 #define I40E_64BYTE_MSS       0x400000
10796 	val = rd32(hw, I40E_REG_MSS);
10797 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10798 		val &= ~I40E_REG_MSS_MIN_MASK;
10799 		val |= I40E_64BYTE_MSS;
10800 		wr32(hw, I40E_REG_MSS, val);
10801 	}
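
	/* Sketch of the register arithmetic above: the minimum-MSS field
	 * occupies bits 16-25 of I40E_REG_MSS (mask 0x3FF0000), so
	 * I40E_64BYTE_MSS (0x400000 == 64 << 16) encodes a 64-byte minimum.
	 * If the field reads, say, 256 (val & mask == 0x1000000), that is
	 * greater than 64 and gets rewritten to 64, keeping small-MSS TSO
	 * from triggering an MDD-induced reset.
	 */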
10802 
10803 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10804 		msleep(75);
10805 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10806 		if (ret)
10807 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10808 				 i40e_stat_str(&pf->hw, ret),
10809 				 i40e_aq_str(&pf->hw,
10810 					     pf->hw.aq.asq_last_status));
10811 	}
10812 	/* reinit the misc interrupt */
10813 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10814 		ret = i40e_setup_misc_vector(pf);
10815 
10816 	/* Add a filter to drop all Flow control frames from any VSI from being
10817 	 * transmitted. By doing so we stop a malicious VF from sending out
10818 	 * PAUSE or PFC frames and potentially controlling traffic for other
10819 	 * PF/VF VSIs.
10820 	 * The FW can still send Flow control frames if enabled.
10821 	 */
10822 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10823 						       pf->main_vsi_seid);
10824 
10825 	/* restart the VSIs that were rebuilt and running before the reset */
10826 	i40e_pf_unquiesce_all_vsi(pf);
10827 
10828 	/* Release the RTNL lock before we start resetting VFs */
10829 	if (!lock_acquired)
10830 		rtnl_unlock();
10831 
10832 	/* Restore promiscuous settings */
10833 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10834 	if (ret)
10835 		dev_warn(&pf->pdev->dev,
10836 			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10837 			 pf->cur_promisc ? "on" : "off",
10838 			 i40e_stat_str(&pf->hw, ret),
10839 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10840 
10841 	i40e_reset_all_vfs(pf, true);
10842 
10843 	/* tell the firmware that we're starting */
10844 	i40e_send_version(pf);
10845 
10846 	/* We've already released the lock, so don't do it again */
10847 	goto end_core_reset;
10848 
10849 end_unlock:
10850 	if (!lock_acquired)
10851 		rtnl_unlock();
10852 end_core_reset:
10853 	clear_bit(__I40E_RESET_FAILED, pf->state);
10854 clear_recovery:
10855 	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10856 	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10857 }
10858 
10859 /**
10860  * i40e_reset_and_rebuild - reset and rebuild using a saved config
10861  * @pf: board private structure
10862  * @reinit: if the Main VSI needs to be re-initialized.
10863  * @lock_acquired: indicates whether or not the lock has been acquired
10864  * before this function was called.
10865  **/
10866 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10867 				   bool lock_acquired)
10868 {
10869 	int ret;
10870 
10871 	if (test_bit(__I40E_IN_REMOVE, pf->state))
10872 		return;
10873 	/* Now we wait for GRST to settle out.
10874 	 * We don't have to delete the VEBs or VSIs from the hw switch
10875 	 * because the reset will make them disappear.
10876 	 */
10877 	ret = i40e_reset(pf);
10878 	if (!ret)
10879 		i40e_rebuild(pf, reinit, lock_acquired);
10880 }
10881 
10882 /**
10883  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10884  * @pf: board private structure
10885  * @lock_acquired: indicates whether or not the lock has been acquired
10886  * before this function was called.
10887  *
10888  * Close up the VFs and other things in prep for a Core Reset,
10889  * then get ready to rebuild the world.
10890  **/
10891 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10892 {
10893 	i40e_prep_for_reset(pf);
10894 	i40e_reset_and_rebuild(pf, false, lock_acquired);
10895 }
10896 
10897 /**
10898  * i40e_handle_mdd_event - handle Malicious Driver Detection events
10899  * @pf: pointer to the PF structure
10900  *
10901  * Called from the MDD irq handler to identify possibly malicious VFs
10902  **/
10903 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10904 {
10905 	struct i40e_hw *hw = &pf->hw;
10906 	bool mdd_detected = false;
10907 	struct i40e_vf *vf;
10908 	u32 reg;
10909 	int i;
10910 
10911 	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10912 		return;
10913 
10914 	/* find what triggered the MDD event */
10915 	reg = rd32(hw, I40E_GL_MDET_TX);
10916 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10917 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10918 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
10919 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10920 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
10921 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10922 				I40E_GL_MDET_TX_EVENT_SHIFT;
10923 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10924 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
10925 				pf->hw.func_caps.base_queue;
10926 		if (netif_msg_tx_err(pf))
10927 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10928 				 event, queue, pf_num, vf_num);
10929 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10930 		mdd_detected = true;
10931 	}
10932 	reg = rd32(hw, I40E_GL_MDET_RX);
10933 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10934 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10935 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
10936 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10937 				I40E_GL_MDET_RX_EVENT_SHIFT;
10938 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10939 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
10940 				pf->hw.func_caps.base_queue;
10941 		if (netif_msg_rx_err(pf))
10942 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10943 				 event, queue, func);
10944 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10945 		mdd_detected = true;
10946 	}
10947 
10948 	if (mdd_detected) {
10949 		reg = rd32(hw, I40E_PF_MDET_TX);
10950 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10951 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10952 			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10953 		}
10954 		reg = rd32(hw, I40E_PF_MDET_RX);
10955 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10956 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10957 			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10958 		}
10959 	}
10960 
10961 	/* see if one of the VFs needs its hand slapped */
10962 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10963 		vf = &(pf->vf[i]);
10964 		reg = rd32(hw, I40E_VP_MDET_TX(i));
10965 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10966 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10967 			vf->num_mdd_events++;
10968 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10969 				 i);
10970 			dev_info(&pf->pdev->dev,
10971 				 "Use PF Control I/F to re-enable the VF\n");
10972 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10973 		}
10974 
10975 		reg = rd32(hw, I40E_VP_MDET_RX(i));
10976 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10977 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10978 			vf->num_mdd_events++;
10979 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10980 				 i);
10981 			dev_info(&pf->pdev->dev,
10982 				 "Use PF Control I/F to re-enable the VF\n");
10983 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10984 		}
10985 	}
10986 
10987 	/* re-enable mdd interrupt cause */
10988 	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10989 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10990 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10991 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10992 	i40e_flush(hw);
10993 }
10994 
10995 /**
10996  * i40e_service_task - Run the driver's async subtasks
10997  * @work: pointer to work_struct containing our data
10998  **/
10999 static void i40e_service_task(struct work_struct *work)
11000 {
11001 	struct i40e_pf *pf = container_of(work,
11002 					  struct i40e_pf,
11003 					  service_task);
11004 	unsigned long start_time = jiffies;
11005 
11006 	/* don't bother with service tasks if a reset is in progress */
11007 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11008 	    test_bit(__I40E_SUSPENDED, pf->state))
11009 		return;
11010 
11011 	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11012 		return;
11013 
11014 	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11015 		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11016 		i40e_sync_filters_subtask(pf);
11017 		i40e_reset_subtask(pf);
11018 		i40e_handle_mdd_event(pf);
11019 		i40e_vc_process_vflr_event(pf);
11020 		i40e_watchdog_subtask(pf);
11021 		i40e_fdir_reinit_subtask(pf);
11022 		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11023 			/* Client subtask will reopen next time through. */
11024 			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11025 							   true);
11026 		} else {
11027 			i40e_client_subtask(pf);
11028 			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11029 					       pf->state))
11030 				i40e_notify_client_of_l2_param_changes(
11031 								pf->vsi[pf->lan_vsi]);
11032 		}
11033 		i40e_sync_filters_subtask(pf);
11034 	} else {
11035 		i40e_reset_subtask(pf);
11036 	}
11037 
11038 	i40e_clean_adminq_subtask(pf);
11039 
11040 	/* flush memory to make sure state is correct before next watchdog */
11041 	smp_mb__before_atomic();
11042 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
11043 
11044 	/* If the tasks have taken longer than one timer cycle or there
11045 	 * is more work to be done, reschedule the service task now
11046 	 * rather than wait for the timer to tick again.
11047 	 */
11048 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11049 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state)		 ||
11050 	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state)		 ||
11051 	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11052 		i40e_service_event_schedule(pf);
11053 }
11054 
11055 /**
11056  * i40e_service_timer - timer callback
11057  * @t: timer list pointer
11058  **/
11059 static void i40e_service_timer(struct timer_list *t)
11060 {
11061 	struct i40e_pf *pf = from_timer(pf, t, service_timer);
11062 
11063 	mod_timer(&pf->service_timer,
11064 		  round_jiffies(jiffies + pf->service_timer_period));
11065 	i40e_service_event_schedule(pf);
11066 }
11067 
11068 /**
11069  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11070  * @vsi: the VSI being configured
11071  **/
11072 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11073 {
11074 	struct i40e_pf *pf = vsi->back;
11075 
11076 	switch (vsi->type) {
11077 	case I40E_VSI_MAIN:
11078 		vsi->alloc_queue_pairs = pf->num_lan_qps;
11079 		if (!vsi->num_tx_desc)
11080 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11081 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11082 		if (!vsi->num_rx_desc)
11083 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11084 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11085 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11086 			vsi->num_q_vectors = pf->num_lan_msix;
11087 		else
11088 			vsi->num_q_vectors = 1;
11089 
11090 		break;
11091 
11092 	case I40E_VSI_FDIR:
11093 		vsi->alloc_queue_pairs = 1;
11094 		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11095 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11096 		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11097 					 I40E_REQ_DESCRIPTOR_MULTIPLE);
11098 		vsi->num_q_vectors = pf->num_fdsb_msix;
11099 		break;
11100 
11101 	case I40E_VSI_VMDQ2:
11102 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11103 		if (!vsi->num_tx_desc)
11104 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11105 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11106 		if (!vsi->num_rx_desc)
11107 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11108 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11109 		vsi->num_q_vectors = pf->num_vmdq_msix;
11110 		break;
11111 
11112 	case I40E_VSI_SRIOV:
11113 		vsi->alloc_queue_pairs = pf->num_vf_qps;
11114 		if (!vsi->num_tx_desc)
11115 			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11116 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11117 		if (!vsi->num_rx_desc)
11118 			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11119 						 I40E_REQ_DESCRIPTOR_MULTIPLE);
11120 		break;
11121 
11122 	default:
11123 		WARN_ON(1);
11124 		return -ENODATA;
11125 	}
11126 
11127 	if (is_kdump_kernel()) {
11128 		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11129 		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11130 	}
11131 
11132 	return 0;
11133 }
11134 
11135 /**
11136  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11137  * @vsi: VSI pointer
11138  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11139  *
11140  * On error: returns error code (negative)
11141  * On success: returns 0
11142  **/
11143 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11144 {
11145 	struct i40e_ring **next_rings;
11146 	int size;
11147 	int ret = 0;
11148 
11149 	/* allocate memory for the Tx, XDP Tx and Rx ring pointers */
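	/* The single allocation below is carved into three (or two, when XDP
	 * is not enabled) consecutive arrays of alloc_queue_pairs pointers:
	 *
	 *	tx_rings -> [ Tx 0..n-1 ][ XDP 0..n-1 ][ Rx 0..n-1 ]
	 *	xdp_rings ------------->^
	 *	rx_rings --------------------------->^
	 *
	 * which is why the error path only needs to kfree(vsi->tx_rings).
	 */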
11150 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11151 	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11152 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11153 	if (!vsi->tx_rings)
11154 		return -ENOMEM;
11155 	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11156 	if (i40e_enabled_xdp_vsi(vsi)) {
11157 		vsi->xdp_rings = next_rings;
11158 		next_rings += vsi->alloc_queue_pairs;
11159 	}
11160 	vsi->rx_rings = next_rings;
11161 
11162 	if (alloc_qvectors) {
11163 		/* allocate memory for q_vector pointers */
11164 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11165 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11166 		if (!vsi->q_vectors) {
11167 			ret = -ENOMEM;
11168 			goto err_vectors;
11169 		}
11170 	}
11171 	return ret;
11172 
11173 err_vectors:
11174 	kfree(vsi->tx_rings);
11175 	return ret;
11176 }
11177 
11178 /**
11179  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11180  * @pf: board private structure
11181  * @type: type of VSI
11182  *
11183  * On error: returns error code (negative)
11184  * On success: returns vsi index in PF (positive)
11185  **/
11186 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11187 {
11188 	int ret = -ENODEV;
11189 	struct i40e_vsi *vsi;
11190 	int vsi_idx;
11191 	int i;
11192 
11193 	/* Need to protect the allocation of the VSIs at the PF level */
11194 	mutex_lock(&pf->switch_mutex);
11195 
11196 	/* VSI list may be fragmented if VSI creation/destruction has
11197 	 * been happening.  We can afford to do a quick scan to look
11198 	 * for any free VSIs in the list.
11199 	 *
11200 	 * find next empty vsi slot, looping back around if necessary
11201 	 */
11202 	i = pf->next_vsi;
11203 	while (i < pf->num_alloc_vsi && pf->vsi[i])
11204 		i++;
11205 	if (i >= pf->num_alloc_vsi) {
11206 		i = 0;
11207 		while (i < pf->next_vsi && pf->vsi[i])
11208 			i++;
11209 	}
11210 
11211 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11212 		vsi_idx = i;             /* Found one! */
11213 	} else {
11214 		ret = -ENODEV;
11215 		goto unlock_pf;  /* out of VSI slots! */
11216 	}
11217 	pf->next_vsi = ++i;
11218 
11219 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11220 	if (!vsi) {
11221 		ret = -ENOMEM;
11222 		goto unlock_pf;
11223 	}
11224 	vsi->type = type;
11225 	vsi->back = pf;
11226 	set_bit(__I40E_VSI_DOWN, vsi->state);
11227 	vsi->flags = 0;
11228 	vsi->idx = vsi_idx;
11229 	vsi->int_rate_limit = 0;
11230 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11231 				pf->rss_table_size : 64;
11232 	vsi->netdev_registered = false;
11233 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11234 	hash_init(vsi->mac_filter_hash);
11235 	vsi->irqs_ready = false;
11236 
11237 	if (type == I40E_VSI_MAIN) {
11238 		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11239 		if (!vsi->af_xdp_zc_qps)
11240 			goto err_rings;
11241 	}
11242 
11243 	ret = i40e_set_num_rings_in_vsi(vsi);
11244 	if (ret)
11245 		goto err_rings;
11246 
11247 	ret = i40e_vsi_alloc_arrays(vsi, true);
11248 	if (ret)
11249 		goto err_rings;
11250 
11251 	/* Setup default MSIX irq handler for VSI */
11252 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11253 
11254 	/* Initialize VSI lock */
11255 	spin_lock_init(&vsi->mac_filter_hash_lock);
11256 	pf->vsi[vsi_idx] = vsi;
11257 	ret = vsi_idx;
11258 	goto unlock_pf;
11259 
11260 err_rings:
11261 	bitmap_free(vsi->af_xdp_zc_qps);
11262 	pf->next_vsi = i - 1;
11263 	kfree(vsi);
11264 unlock_pf:
11265 	mutex_unlock(&pf->switch_mutex);
11266 	return ret;
11267 }
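
/* Example of the slot scan in i40e_vsi_mem_alloc(): with num_alloc_vsi = 8,
 * next_vsi = 5 and slots 5..7 in use, the first loop runs off the end and
 * the second pass restarts at index 0, taking the first free slot below
 * next_vsi; only if both passes find nothing does it return -ENODEV.
 */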
11268 
11269 /**
11270  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11271  * @vsi: VSI pointer
11272  * @free_qvectors: a bool to specify if q_vectors need to be freed.
11273  **/
11277 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11278 {
11279 	/* free the ring and vector containers */
11280 	if (free_qvectors) {
11281 		kfree(vsi->q_vectors);
11282 		vsi->q_vectors = NULL;
11283 	}
11284 	kfree(vsi->tx_rings);
11285 	vsi->tx_rings = NULL;
11286 	vsi->rx_rings = NULL;
11287 	vsi->xdp_rings = NULL;
11288 }
11289 
11290 /**
11291  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11292  * i40e_clear_rss_config_user - clear the user-configured RSS hash keys and lookup table
11294  */
11295 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11296 {
11297 	if (!vsi)
11298 		return;
11299 
11300 	kfree(vsi->rss_hkey_user);
11301 	vsi->rss_hkey_user = NULL;
11302 
11303 	kfree(vsi->rss_lut_user);
11304 	vsi->rss_lut_user = NULL;
11305 }
11306 
11307 /**
11308  * i40e_vsi_clear - Deallocate the VSI provided
11309  * @vsi: the VSI being un-configured
11310  **/
11311 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11312 {
11313 	struct i40e_pf *pf;
11314 
11315 	if (!vsi)
11316 		return 0;
11317 
11318 	if (!vsi->back)
11319 		goto free_vsi;
11320 	pf = vsi->back;
11321 
11322 	mutex_lock(&pf->switch_mutex);
11323 	if (!pf->vsi[vsi->idx]) {
11324 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11325 			vsi->idx, vsi->idx, vsi->type);
11326 		goto unlock_vsi;
11327 	}
11328 
11329 	if (pf->vsi[vsi->idx] != vsi) {
11330 		dev_err(&pf->pdev->dev,
11331 			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11332 			pf->vsi[vsi->idx]->idx,
11333 			pf->vsi[vsi->idx]->type,
11334 			vsi->idx, vsi->type);
11335 		goto unlock_vsi;
11336 	}
11337 
11338 	/* updates the PF for this cleared vsi */
11339 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11340 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11341 
11342 	bitmap_free(vsi->af_xdp_zc_qps);
11343 	i40e_vsi_free_arrays(vsi, true);
11344 	i40e_clear_rss_config_user(vsi);
11345 
11346 	pf->vsi[vsi->idx] = NULL;
11347 	if (vsi->idx < pf->next_vsi)
11348 		pf->next_vsi = vsi->idx;
11349 
11350 unlock_vsi:
11351 	mutex_unlock(&pf->switch_mutex);
11352 free_vsi:
11353 	kfree(vsi);
11354 
11355 	return 0;
11356 }
11357 
11358 /**
11359  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11360  * @vsi: the VSI being cleaned
11361  **/
11362 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11363 {
11364 	int i;
11365 
11366 	if (vsi->tx_rings && vsi->tx_rings[0]) {
11367 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
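			/* tx_rings[i] points at the start of the single
			 * kcalloc() block that also backs rx_rings[i] (and
			 * xdp_rings[i] when present, see i40e_alloc_rings()),
			 * so freeing it releases the whole queue pair's
			 * rings; the other pointers are only NULLed out.
			 */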
11368 			kfree_rcu(vsi->tx_rings[i], rcu);
11369 			WRITE_ONCE(vsi->tx_rings[i], NULL);
11370 			WRITE_ONCE(vsi->rx_rings[i], NULL);
11371 			if (vsi->xdp_rings)
11372 				WRITE_ONCE(vsi->xdp_rings[i], NULL);
11373 		}
11374 	}
11375 }
11376 
11377 /**
11378  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11379  * @vsi: the VSI being configured
11380  **/
11381 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11382 {
11383 	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11384 	struct i40e_pf *pf = vsi->back;
11385 	struct i40e_ring *ring;
11386 
11387 	/* Set basic values in the rings to be used later during open() */
11388 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11389 		/* allocate space for both Tx and Rx in one shot */
11390 		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11391 		if (!ring)
11392 			goto err_out;
11393 
11394 		ring->queue_index = i;
11395 		ring->reg_idx = vsi->base_queue + i;
11396 		ring->ring_active = false;
11397 		ring->vsi = vsi;
11398 		ring->netdev = vsi->netdev;
11399 		ring->dev = &pf->pdev->dev;
11400 		ring->count = vsi->num_tx_desc;
11401 		ring->size = 0;
11402 		ring->dcb_tc = 0;
11403 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11404 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11405 		ring->itr_setting = pf->tx_itr_default;
11406 		WRITE_ONCE(vsi->tx_rings[i], ring++);
11407 
11408 		if (!i40e_enabled_xdp_vsi(vsi))
11409 			goto setup_rx;
11410 
11411 		ring->queue_index = vsi->alloc_queue_pairs + i;
11412 		ring->reg_idx = vsi->base_queue + ring->queue_index;
11413 		ring->ring_active = false;
11414 		ring->vsi = vsi;
11415 		ring->netdev = NULL;
11416 		ring->dev = &pf->pdev->dev;
11417 		ring->count = vsi->num_tx_desc;
11418 		ring->size = 0;
11419 		ring->dcb_tc = 0;
11420 		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11421 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11422 		set_ring_xdp(ring);
11423 		ring->itr_setting = pf->tx_itr_default;
11424 		WRITE_ONCE(vsi->xdp_rings[i], ring++);
11425 
11426 setup_rx:
11427 		ring->queue_index = i;
11428 		ring->reg_idx = vsi->base_queue + i;
11429 		ring->ring_active = false;
11430 		ring->vsi = vsi;
11431 		ring->netdev = vsi->netdev;
11432 		ring->dev = &pf->pdev->dev;
11433 		ring->count = vsi->num_rx_desc;
11434 		ring->size = 0;
11435 		ring->dcb_tc = 0;
11436 		ring->itr_setting = pf->rx_itr_default;
11437 		WRITE_ONCE(vsi->rx_rings[i], ring);
11438 	}
11439 
11440 	return 0;
11441 
11442 err_out:
11443 	i40e_vsi_clear_rings(vsi);
11444 	return -ENOMEM;
11445 }
11446 
11447 /**
11448  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11449  * @pf: board private structure
11450  * @vectors: the number of MSI-X vectors to request
11451  *
11452  * Returns the number of vectors reserved, or error
11453  **/
11454 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11455 {
11456 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11457 					I40E_MIN_MSIX, vectors);
11458 	if (vectors < 0) {
11459 		dev_info(&pf->pdev->dev,
11460 			 "MSI-X vector reservation failed: %d\n", vectors);
11461 		vectors = 0;
11462 	}
11463 
11464 	return vectors;
11465 }
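
/* Note on pci_enable_msix_range() above: it is the PCI core helper that
 * allocates between minvec and maxvec MSI-X vectors, returning the number
 * actually obtained or a negative errno. A return of, say, 40 when 64 were
 * requested is therefore a success with fewer vectors, which the caller
 * (i40e_init_msix() below) handles by redistributing its vector budget.
 */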
11466 
11467 /**
11468  * i40e_init_msix - Setup the MSIX capability
11469  * @pf: board private structure
11470  *
11471  * Work with the OS to set up the MSIX vectors needed.
11472  *
11473  * Returns the number of vectors reserved or negative on failure
11474  **/
11475 static int i40e_init_msix(struct i40e_pf *pf)
11476 {
11477 	struct i40e_hw *hw = &pf->hw;
11478 	int cpus, extra_vectors;
11479 	int vectors_left;
11480 	int v_budget, i;
11481 	int v_actual;
11482 	int iwarp_requested = 0;
11483 
11484 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11485 		return -ENODEV;
11486 
11487 	/* The number of vectors we'll request will be composed of:
11488 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
11489 	 *   - The number of LAN queue pairs
11490 	 *	- Queues being used for RSS.
11491 	 *		We don't need as many as max_rss_size vectors;
11492 	 *		we use rss_size instead in the calculation since that
11493 	 *		is governed by the number of CPUs in the system.
11494 	 *	- assumes symmetric Tx/Rx pairing
11495 	 *   - The number of VMDq pairs
11496 	 *   - The CPU count within the NUMA node if iWARP is enabled
11497 	 * Once we count this up, try the request.
11498 	 *
11499 	 * If we can't get what we want, we'll simplify to nearly nothing
11500 	 * and try again.  If that still fails, we punt.
11501 	 */
11502 	vectors_left = hw->func_caps.num_msix_vectors;
11503 	v_budget = 0;
11504 
11505 	/* reserve one vector for miscellaneous handler */
11506 	if (vectors_left) {
11507 		v_budget++;
11508 		vectors_left--;
11509 	}
11510 
11511 	/* reserve some vectors for the main PF traffic queues. Initially we
11512 	 * only reserve at most 50% of the available vectors, in the case that
11513 	 * the number of online CPUs is large. This ensures that we can enable
11514 	 * extra features as well. Once we've enabled the other features, we
11515 	 * will use any remaining vectors to reach as close as we can to the
11516 	 * number of online CPUs.
11517 	 */
11518 	cpus = num_online_cpus();
11519 	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11520 	vectors_left -= pf->num_lan_msix;
11521 
11522 	/* reserve one vector for sideband flow director */
11523 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11524 		if (vectors_left) {
11525 			pf->num_fdsb_msix = 1;
11526 			v_budget++;
11527 			vectors_left--;
11528 		} else {
11529 			pf->num_fdsb_msix = 0;
11530 		}
11531 	}
11532 
11533 	/* can we reserve enough for iWARP? */
11534 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11535 		iwarp_requested = pf->num_iwarp_msix;
11536 
11537 		if (!vectors_left)
11538 			pf->num_iwarp_msix = 0;
11539 		else if (vectors_left < pf->num_iwarp_msix)
11540 			pf->num_iwarp_msix = 1;
11541 		v_budget += pf->num_iwarp_msix;
11542 		vectors_left -= pf->num_iwarp_msix;
11543 	}
11544 
11545 	/* any vectors left over go for VMDq support */
11546 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11547 		if (!vectors_left) {
11548 			pf->num_vmdq_msix = 0;
11549 			pf->num_vmdq_qps = 0;
11550 		} else {
11551 			int vmdq_vecs_wanted =
11552 				pf->num_vmdq_vsis * pf->num_vmdq_qps;
11553 			int vmdq_vecs =
11554 				min_t(int, vectors_left, vmdq_vecs_wanted);
11555 
11556 			/* if we're short on vectors for what's desired, we limit
11557 			 * the queues per vmdq.  If this is still more than are
11558 			 * available, the user will need to change the number of
11559 			 * queues/vectors used by the PF later with the ethtool
11560 			 * channels command
11561 			 */
11562 			if (vectors_left < vmdq_vecs_wanted) {
11563 				pf->num_vmdq_qps = 1;
11564 				vmdq_vecs_wanted = pf->num_vmdq_vsis;
11565 				vmdq_vecs = min_t(int,
11566 						  vectors_left,
11567 						  vmdq_vecs_wanted);
11568 			}
11569 			pf->num_vmdq_msix = pf->num_vmdq_qps;
11570 
11571 			v_budget += vmdq_vecs;
11572 			vectors_left -= vmdq_vecs;
11573 		}
11574 	}
11575 
11576 	/* On systems with a large number of SMP cores, we previously limited
11577 	 * the number of vectors for num_lan_msix to be at most 50% of the
11578 	 * available vectors, to allow for other features. Now, we add back
11579 	 * the remaining vectors. However, we ensure that the total
11580 	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11581 	 * calculate the number of vectors we can add without going over the
11582 	 * cap of CPUs. For systems with a small number of CPUs this will be
11583 	 * zero.
11584 	 */
11585 	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11586 	pf->num_lan_msix += extra_vectors;
11587 	vectors_left -= extra_vectors;
11588 
11589 	WARN(vectors_left < 0,
11590 	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11591 
11592 	v_budget += pf->num_lan_msix;
11593 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11594 				   GFP_KERNEL);
11595 	if (!pf->msix_entries)
11596 		return -ENOMEM;
11597 
11598 	for (i = 0; i < v_budget; i++)
11599 		pf->msix_entries[i].entry = i;
11600 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11601 
11602 	if (v_actual < I40E_MIN_MSIX) {
11603 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11604 		kfree(pf->msix_entries);
11605 		pf->msix_entries = NULL;
11606 		pci_disable_msix(pf->pdev);
11607 		return -ENODEV;
11608 
11609 	} else if (v_actual == I40E_MIN_MSIX) {
11610 		/* Adjust for minimal MSIX use */
11611 		pf->num_vmdq_vsis = 0;
11612 		pf->num_vmdq_qps = 0;
11613 		pf->num_lan_qps = 1;
11614 		pf->num_lan_msix = 1;
11615 
11616 	} else if (v_actual != v_budget) {
11617 		/* If we have limited resources, we will start with no vectors
11618 		 * for the special features and then allocate vectors to some
11619 		 * of these features based on the policy and at the end disable
11620 		 * the features that did not get any vectors.
11621 		 */
11622 		int vec;
11623 
11624 		dev_info(&pf->pdev->dev,
11625 			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11626 			 v_actual, v_budget);
11627 		/* reserve the misc vector */
11628 		vec = v_actual - 1;
11629 
11630 		/* Scale vector usage down */
11631 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
11632 		pf->num_vmdq_vsis = 1;
11633 		pf->num_vmdq_qps = 1;
11634 
11635 		/* partition out the remaining vectors */
11636 		switch (vec) {
11637 		case 2:
11638 			pf->num_lan_msix = 1;
11639 			break;
11640 		case 3:
11641 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11642 				pf->num_lan_msix = 1;
11643 				pf->num_iwarp_msix = 1;
11644 			} else {
11645 				pf->num_lan_msix = 2;
11646 			}
11647 			break;
11648 		default:
11649 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11650 				pf->num_iwarp_msix = min_t(int, (vec / 3),
11651 						 iwarp_requested);
11652 				pf->num_vmdq_vsis = min_t(int, (vec / 3),
11653 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11654 			} else {
11655 				pf->num_vmdq_vsis = min_t(int, (vec / 2),
11656 						  I40E_DEFAULT_NUM_VMDQ_VSI);
11657 			}
11658 			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11659 				pf->num_fdsb_msix = 1;
11660 				vec--;
11661 			}
11662 			pf->num_lan_msix = min_t(int,
11663 			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11664 							      pf->num_lan_msix);
11665 			pf->num_lan_qps = pf->num_lan_msix;
11666 			break;
11667 		}
11668 	}
11669 
11670 	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11671 	    (pf->num_fdsb_msix == 0)) {
11672 		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11673 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11674 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11675 	}
11676 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11677 	    (pf->num_vmdq_msix == 0)) {
11678 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11679 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11680 	}
11681 
11682 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11683 	    (pf->num_iwarp_msix == 0)) {
11684 		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11685 		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11686 	}
11687 	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11688 		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11689 		   pf->num_lan_msix,
11690 		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
11691 		   pf->num_fdsb_msix,
11692 		   pf->num_iwarp_msix);
11693 
11694 	return v_actual;
11695 }
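
/* A worked example of the budgeting in i40e_init_msix() (the numbers are
 * illustrative, not from any particular part): with 16 online CPUs and
 * hw->func_caps.num_msix_vectors == 64:
 *
 *	misc/"other" vector:	1			(63 left)
 *	LAN:			min(16, 63 / 2) = 16	(47 left)
 *	FD sideband:		1			(46 left)
 *	iWARP + VMDq:		as requested, they fit comfortably here
 *	extra LAN vectors:	min(16 - 16, remaining) = 0, already at CPUs
 *
 * Only when pci_enable_msix_range() returns fewer than v_budget does the
 * redistribution branch scale VMDq down to one vector and re-split the rest.
 */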
11696 
11697 /**
11698  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11699  * @vsi: the VSI being configured
11700  * @v_idx: index of the vector in the vsi struct
11701  *
11702  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
11703  **/
11704 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11705 {
11706 	struct i40e_q_vector *q_vector;
11707 
11708 	/* allocate q_vector */
11709 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11710 	if (!q_vector)
11711 		return -ENOMEM;
11712 
11713 	q_vector->vsi = vsi;
11714 	q_vector->v_idx = v_idx;
11715 	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11716 
11717 	if (vsi->netdev)
11718 		netif_napi_add(vsi->netdev, &q_vector->napi,
11719 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
11720 
11721 	/* tie q_vector and vsi together */
11722 	vsi->q_vectors[v_idx] = q_vector;
11723 
11724 	return 0;
11725 }
11726 
11727 /**
11728  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11729  * @vsi: the VSI being configured
11730  *
11731  * We allocate one q_vector per queue interrupt.  If allocation fails we
11732  * return -ENOMEM.
11733  **/
11734 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11735 {
11736 	struct i40e_pf *pf = vsi->back;
11737 	int err, v_idx, num_q_vectors;
11738 
11739 	/* if not MSIX, give the one vector only to the LAN VSI */
11740 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11741 		num_q_vectors = vsi->num_q_vectors;
11742 	else if (vsi == pf->vsi[pf->lan_vsi])
11743 		num_q_vectors = 1;
11744 	else
11745 		return -EINVAL;
11746 
11747 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11748 		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11749 		if (err)
11750 			goto err_out;
11751 	}
11752 
11753 	return 0;
11754 
11755 err_out:
11756 	while (v_idx--)
11757 		i40e_free_q_vector(vsi, v_idx);
11758 
11759 	return err;
11760 }
11761 
11762 /**
11763  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11764  * @pf: board private structure to initialize
11765  **/
11766 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11767 {
11768 	int vectors = 0;
11769 	ssize_t size;
11770 
11771 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11772 		vectors = i40e_init_msix(pf);
11773 		if (vectors < 0) {
11774 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
11775 				       I40E_FLAG_IWARP_ENABLED	|
11776 				       I40E_FLAG_RSS_ENABLED	|
11777 				       I40E_FLAG_DCB_CAPABLE	|
11778 				       I40E_FLAG_DCB_ENABLED	|
11779 				       I40E_FLAG_SRIOV_ENABLED	|
11780 				       I40E_FLAG_FD_SB_ENABLED	|
11781 				       I40E_FLAG_FD_ATR_ENABLED	|
11782 				       I40E_FLAG_VMDQ_ENABLED);
11783 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11784 
11785 			/* rework the queue expectations without MSIX */
11786 			i40e_determine_queue_usage(pf);
11787 		}
11788 	}
11789 
11790 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11791 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11792 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11793 		vectors = pci_enable_msi(pf->pdev);
11794 		if (vectors < 0) {
11795 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11796 				 vectors);
11797 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11798 		}
11799 		vectors = 1;  /* one MSI or Legacy vector */
11800 	}
11801 
11802 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11803 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11804 
11805 	/* set up vector assignment tracking */
11806 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11807 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
11808 	if (!pf->irq_pile)
11809 		return -ENOMEM;
11810 
11811 	pf->irq_pile->num_entries = vectors;
11812 
11813 	/* track first vector for misc interrupts, ignore return */
11814 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11815 
11816 	return 0;
11817 }
11818 
11819 /**
11820  * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11821  * @pf: private board data structure
11822  *
11823  * Restore the interrupt scheme that was cleared when we suspended the
11824  * device. This should be called during resume to re-allocate the q_vectors
11825  * and reacquire IRQs.
11826  */
11827 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11828 {
11829 	int err, i;
11830 
11831 	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
11832 	 * scheme. We need to re-enable them here in order to attempt to
11833 	 * re-acquire the MSI or MSI-X vectors
11834 	 */
11835 	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11836 
11837 	err = i40e_init_interrupt_scheme(pf);
11838 	if (err)
11839 		return err;
11840 
11841 	/* Now that we've re-acquired IRQs, we need to remap the vectors and
11842 	 * rings together again.
11843 	 */
11844 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11845 		if (pf->vsi[i]) {
11846 			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11847 			if (err)
11848 				goto err_unwind;
11849 			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11850 		}
11851 	}
11852 
11853 	err = i40e_setup_misc_vector(pf);
11854 	if (err)
11855 		goto err_unwind;
11856 
11857 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11858 		i40e_client_update_msix_info(pf);
11859 
11860 	return 0;
11861 
11862 err_unwind:
11863 	while (i--) {
11864 		if (pf->vsi[i])
11865 			i40e_vsi_free_q_vectors(pf->vsi[i]);
11866 	}
11867 
11868 	return err;
11869 }
11870 
11871 /**
11872  * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11873  * non-queue events in recovery mode
11874  * @pf: board private structure
11875  *
11876  * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11877  * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11878  * This is handled differently than in the normal, non-recovery path since
11879  * no Tx/Rx resources are being allocated.
11880  **/
11881 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11882 {
11883 	int err;
11884 
11885 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11886 		err = i40e_setup_misc_vector(pf);
11887 
11888 		if (err) {
11889 			dev_info(&pf->pdev->dev,
11890 				 "MSI-X misc vector request failed, error %d\n",
11891 				 err);
11892 			return err;
11893 		}
11894 	} else {
11895 		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11896 
11897 		err = request_irq(pf->pdev->irq, i40e_intr, flags,
11898 				  pf->int_name, pf);
11899 
11900 		if (err) {
11901 			dev_info(&pf->pdev->dev,
11902 				 "MSI/legacy misc vector request failed, error %d\n",
11903 				 err);
11904 			return err;
11905 		}
11906 		i40e_enable_misc_int_causes(pf);
11907 		i40e_irq_dynamic_enable_icr0(pf);
11908 	}
11909 
11910 	return 0;
11911 }
11912 
11913 /**
11914  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11915  * @pf: board private structure
11916  *
11917  * This sets up the handler for MSIX 0, which is used to manage the
11918  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
11919  * when in MSI or Legacy interrupt mode.
11920  **/
11921 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11922 {
11923 	struct i40e_hw *hw = &pf->hw;
11924 	int err = 0;
11925 
11926 	/* Only request the IRQ once, the first time through. */
11927 	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11928 		err = request_irq(pf->msix_entries[0].vector,
11929 				  i40e_intr, 0, pf->int_name, pf);
11930 		if (err) {
11931 			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11932 			dev_info(&pf->pdev->dev,
11933 				 "request_irq for %s failed: %d\n",
11934 				 pf->int_name, err);
11935 			return -EFAULT;
11936 		}
11937 	}
11938 
11939 	i40e_enable_misc_int_causes(pf);
11940 
11941 	/* associate no queues to the misc vector */
11942 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11943 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11944 
11945 	i40e_flush(hw);
11946 
11947 	i40e_irq_dynamic_enable_icr0(pf);
11948 
11949 	return err;
11950 }
11951 
11952 /**
11953  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11954  * @vsi: Pointer to vsi structure
11955  * @seed: Buffer to store the hash keys
11956  * @lut: Buffer to store the lookup table entries
11957  * @lut_size: Size of buffer to store the lookup table entries
11958  *
11959  * Return 0 on success, negative on failure
11960  */
11961 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11962 			   u8 *lut, u16 lut_size)
11963 {
11964 	struct i40e_pf *pf = vsi->back;
11965 	struct i40e_hw *hw = &pf->hw;
11966 	int ret = 0;
11967 
11968 	if (seed) {
11969 		ret = i40e_aq_get_rss_key(hw, vsi->id,
11970 			(struct i40e_aqc_get_set_rss_key_data *)seed);
11971 		if (ret) {
11972 			dev_info(&pf->pdev->dev,
11973 				 "Cannot get RSS key, err %s aq_err %s\n",
11974 				 i40e_stat_str(&pf->hw, ret),
11975 				 i40e_aq_str(&pf->hw,
11976 					     pf->hw.aq.asq_last_status));
11977 			return ret;
11978 		}
11979 	}
11980 
11981 	if (lut) {
11982 		bool pf_lut = vsi->type == I40E_VSI_MAIN;
11983 
11984 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11985 		if (ret) {
11986 			dev_info(&pf->pdev->dev,
11987 				 "Cannot get RSS lut, err %s aq_err %s\n",
11988 				 i40e_stat_str(&pf->hw, ret),
11989 				 i40e_aq_str(&pf->hw,
11990 					     pf->hw.aq.asq_last_status));
11991 			return ret;
11992 		}
11993 	}
11994 
11995 	return ret;
11996 }
11997 
11998 /**
11999  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12000  * @vsi: Pointer to vsi structure
12001  * @seed: RSS hash seed
12002  * @lut: Lookup table
12003  * @lut_size: Lookup table size
12004  *
12005  * Returns 0 on success, negative on failure
12006  **/
12007 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12008 			       const u8 *lut, u16 lut_size)
12009 {
12010 	struct i40e_pf *pf = vsi->back;
12011 	struct i40e_hw *hw = &pf->hw;
12012 	u16 vf_id = vsi->vf_id;
12013 	u8 i;
12014 
12015 	/* Fill out hash function seed */
12016 	if (seed) {
12017 		u32 *seed_dw = (u32 *)seed;
12018 
12019 		if (vsi->type == I40E_VSI_MAIN) {
12020 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12021 				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12022 		} else if (vsi->type == I40E_VSI_SRIOV) {
12023 			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12024 				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12025 		} else {
12026 			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12027 		}
12028 	}
12029 
12030 	if (lut) {
12031 		u32 *lut_dw = (u32 *)lut;
12032 
12033 		if (vsi->type == I40E_VSI_MAIN) {
12034 			if (lut_size != I40E_HLUT_ARRAY_SIZE)
12035 				return -EINVAL;
12036 			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12037 				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12038 		} else if (vsi->type == I40E_VSI_SRIOV) {
12039 			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12040 				return -EINVAL;
12041 			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12042 				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12043 		} else {
12044 			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12045 		}
12046 	}
12047 	i40e_flush(hw);
12048 
12049 	return 0;
12050 }
12051 
12052 /**
12053  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12054  * @vsi: Pointer to VSI structure
12055  * @seed: Buffer to store the keys
12056  * @lut: Buffer to store the lookup table entries
12057  * @lut_size: Size of buffer to store the lookup table entries
12058  *
12059  * Returns 0 on success, negative on failure
12060  */
12061 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12062 			    u8 *lut, u16 lut_size)
12063 {
12064 	struct i40e_pf *pf = vsi->back;
12065 	struct i40e_hw *hw = &pf->hw;
12066 	u16 i;
12067 
12068 	if (seed) {
12069 		u32 *seed_dw = (u32 *)seed;
12070 
12071 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12072 			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12073 	}
12074 	if (lut) {
12075 		u32 *lut_dw = (u32 *)lut;
12076 
12077 		if (lut_size != I40E_HLUT_ARRAY_SIZE)
12078 			return -EINVAL;
12079 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12080 			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
12081 	}
12082 
12083 	return 0;
12084 }
12085 
12086 /**
12087  * i40e_config_rss - Configure RSS keys and lut
12088  * @vsi: Pointer to VSI structure
12089  * @seed: RSS hash seed
12090  * @lut: Lookup table
12091  * @lut_size: Lookup table size
12092  *
12093  * Returns 0 on success, negative on failure
12094  */
12095 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12096 {
12097 	struct i40e_pf *pf = vsi->back;
12098 
12099 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12100 		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12101 	else
12102 		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12103 }
12104 
12105 /**
12106  * i40e_get_rss - Get RSS keys and lut
12107  * @vsi: Pointer to VSI structure
12108  * @seed: Buffer to store the keys
12109  * @lut: Buffer to store the lookup table entries
12110  * @lut_size: Size of buffer to store the lookup table entries
12111  *
12112  * Returns 0 on success, negative on failure
12113  */
12114 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12115 {
12116 	struct i40e_pf *pf = vsi->back;
12117 
12118 	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12119 		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12120 	else
12121 		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12122 }
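
/* Note: both i40e_config_rss() and i40e_get_rss() accept NULL for either
 * @seed or @lut, so a caller (e.g. the ethtool rxfh handlers) can operate
 * on just one of the two. Illustrative read of only the hash key:
 *
 *	u8 key[I40E_HKEY_ARRAY_SIZE];
 *
 *	err = i40e_get_rss(vsi, key, NULL, 0);
 */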
12123 
12124 /**
12125  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12126  * @pf: Pointer to board private structure
12127  * @lut: Lookup table
12128  * @rss_table_size: Lookup table size
12129  * @rss_size: Number of queues across which to spread the hash
12130  */
12131 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12132 		       u16 rss_table_size, u16 rss_size)
12133 {
12134 	u16 i;
12135 
12136 	for (i = 0; i < rss_table_size; i++)
12137 		lut[i] = i % rss_size;
12138 }
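
/* Illustrative result: with rss_table_size = 8 and rss_size = 3 the LUT
 * above becomes {0, 1, 2, 0, 1, 2, 0, 1}, spreading hash buckets
 * round-robin across the first three queues.
 */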
12139 
12140 /**
12141  * i40e_pf_config_rss - Prepare for RSS if used
12142  * @pf: board private structure
12143  **/
12144 static int i40e_pf_config_rss(struct i40e_pf *pf)
12145 {
12146 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12147 	u8 seed[I40E_HKEY_ARRAY_SIZE];
12148 	u8 *lut;
12149 	struct i40e_hw *hw = &pf->hw;
12150 	u32 reg_val;
12151 	u64 hena;
12152 	int ret;
12153 
12154 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12155 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12156 		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12157 	hena |= i40e_pf_get_default_rss_hena(pf);
12158 
12159 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12160 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12161 
12162 	/* Determine the RSS table size based on the hardware capabilities */
12163 	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12164 	reg_val = (pf->rss_table_size == 512) ?
12165 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12166 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12167 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12168 
12169 	/* Determine the RSS size of the VSI */
12170 	if (!vsi->rss_size) {
12171 		u16 qcount;
12172 		/* If the firmware does something weird during VSI init, we
12173 		 * could end up with zero TCs. Check for that to avoid
12174 		 * divide-by-zero. It probably won't pass traffic, but it also
12175 		 * won't panic.
12176 		 */
12177 		qcount = vsi->num_queue_pairs /
12178 			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12179 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12180 	}
12181 	if (!vsi->rss_size)
12182 		return -EINVAL;
12183 
12184 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12185 	if (!lut)
12186 		return -ENOMEM;
12187 
12188 	/* Use user configured lut if there is one, otherwise use default */
12189 	if (vsi->rss_lut_user)
12190 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12191 	else
12192 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12193 
12194 	/* Use user configured hash key if there is one, otherwise
12195 	 * use default.
12196 	 */
12197 	if (vsi->rss_hkey_user)
12198 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12199 	else
12200 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12201 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12202 	kfree(lut);
12203 
12204 	return ret;
12205 }
12206 
12207 /**
12208  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12209  * @pf: board private structure
12210  * @queue_count: the requested queue count for rss.
12211  *
12212  * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
12213  * count, which may differ from the requested queue count.
12214  * Note: expects to be called while under rtnl_lock()
12215  **/
12216 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12217 {
12218 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12219 	int new_rss_size;
12220 
12221 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12222 		return 0;
12223 
12224 	queue_count = min_t(int, queue_count, num_online_cpus());
12225 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12226 
12227 	if (queue_count != vsi->num_queue_pairs) {
12228 		u16 qcount;
12229 
12230 		vsi->req_queue_pairs = queue_count;
12231 		i40e_prep_for_reset(pf);
12232 		if (test_bit(__I40E_IN_REMOVE, pf->state))
12233 			return pf->alloc_rss_size;
12234 
12235 		pf->alloc_rss_size = new_rss_size;
12236 
12237 		i40e_reset_and_rebuild(pf, true, true);
12238 
12239 		/* Discard the user configured hash keys and lut, if less
12240 		 * queues are enabled.
12241 		 */
12242 		if (queue_count < vsi->rss_size) {
12243 			i40e_clear_rss_config_user(vsi);
12244 			dev_dbg(&pf->pdev->dev,
12245 				"discard user configured hash keys and lut\n");
12246 		}
12247 
12248 		/* Reset vsi->rss_size, as number of enabled queues changed */
12249 		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12250 		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12251 
12252 		i40e_pf_config_rss(pf);
12253 	}
12254 	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
12255 		 vsi->req_queue_pairs, pf->rss_size_max);
12256 	return pf->alloc_rss_size;
12257 }
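
/* Illustrative call path: "ethtool -L <if> combined <n>" reaches
 * i40e_reconfig_rss_queues() via the ethtool set_channels handler, which
 * already holds rtnl_lock() as required above.
 */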
12258 
12259 /**
12260  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12261  * @pf: board private structure
12262  **/
12263 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12264 {
12265 	i40e_status status;
12266 	bool min_valid, max_valid;
12267 	u32 max_bw, min_bw;
12268 
12269 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12270 					   &min_valid, &max_valid);
12271 
12272 	if (!status) {
12273 		if (min_valid)
12274 			pf->min_bw = min_bw;
12275 		if (max_valid)
12276 			pf->max_bw = max_bw;
12277 	}
12278 
12279 	return status;
12280 }
12281 
12282 /**
12283  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12284  * @pf: board private structure
12285  **/
12286 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12287 {
12288 	struct i40e_aqc_configure_partition_bw_data bw_data;
12289 	i40e_status status;
12290 
12291 	memset(&bw_data, 0, sizeof(bw_data));
12292 
12293 	/* Set the valid bit for this PF */
12294 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12295 	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12296 	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12297 
12298 	/* Set the new bandwidths */
12299 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12300 
12301 	return status;
12302 }
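
/* Note: this only updates the temporary (alt RAM) copy of the limits;
 * i40e_commit_partition_bw_setting() below is what persists them to NVM.
 */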
12303 
12304 /**
12305  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12306  * @pf: board private structure
12307  **/
12308 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12309 {
12310 	/* Commit temporary BW setting to permanent NVM image */
12311 	enum i40e_admin_queue_err last_aq_status;
12312 	i40e_status ret;
12313 	u16 nvm_word;
12314 
12315 	if (pf->hw.partition_id != 1) {
12316 		dev_info(&pf->pdev->dev,
12317 			 "Commit BW only works on partition 1! This is partition %d\n",
12318 			 pf->hw.partition_id);
12319 		ret = I40E_NOT_SUPPORTED;
12320 		goto bw_commit_out;
12321 	}
12322 
12323 	/* Acquire NVM for read access */
12324 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12325 	last_aq_status = pf->hw.aq.asq_last_status;
12326 	if (ret) {
12327 		dev_info(&pf->pdev->dev,
12328 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12329 			 i40e_stat_str(&pf->hw, ret),
12330 			 i40e_aq_str(&pf->hw, last_aq_status));
12331 		goto bw_commit_out;
12332 	}
12333 
12334 	/* Read word 0x10 of NVM - SW compatibility word 1 */
12335 	ret = i40e_aq_read_nvm(&pf->hw,
12336 			       I40E_SR_NVM_CONTROL_WORD,
12337 			       0x10, sizeof(nvm_word), &nvm_word,
12338 			       false, NULL);
12339 	/* Save off last admin queue command status before releasing
12340 	 * the NVM
12341 	 */
12342 	last_aq_status = pf->hw.aq.asq_last_status;
12343 	i40e_release_nvm(&pf->hw);
12344 	if (ret) {
12345 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12346 			 i40e_stat_str(&pf->hw, ret),
12347 			 i40e_aq_str(&pf->hw, last_aq_status));
12348 		goto bw_commit_out;
12349 	}
12350 
12351 	/* Wait a bit for NVM release to complete */
12352 	msleep(50);
12353 
12354 	/* Acquire NVM for write access */
12355 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12356 	last_aq_status = pf->hw.aq.asq_last_status;
12357 	if (ret) {
12358 		dev_info(&pf->pdev->dev,
12359 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12360 			 i40e_stat_str(&pf->hw, ret),
12361 			 i40e_aq_str(&pf->hw, last_aq_status));
12362 		goto bw_commit_out;
12363 	}
12364 	/* Write it back out unchanged to initiate update NVM,
12365 	 * which will force a write of the shadow (alt) RAM to
12366 	 * the NVM - thus storing the bandwidth values permanently.
12367 	 */
12368 	ret = i40e_aq_update_nvm(&pf->hw,
12369 				 I40E_SR_NVM_CONTROL_WORD,
12370 				 0x10, sizeof(nvm_word),
12371 				 &nvm_word, true, 0, NULL);
12372 	/* Save off last admin queue command status before releasing
12373 	 * the NVM
12374 	 */
12375 	last_aq_status = pf->hw.aq.asq_last_status;
12376 	i40e_release_nvm(&pf->hw);
12377 	if (ret)
12378 		dev_info(&pf->pdev->dev,
12379 			 "BW settings NOT SAVED, err %s aq_err %s\n",
12380 			 i40e_stat_str(&pf->hw, ret),
12381 			 i40e_aq_str(&pf->hw, last_aq_status));
12382 bw_commit_out:
12383 
12384 	return ret;
12385 }
12386 
12387 /**
12388  * i40e_is_total_port_shutdown_enabled - read NVM and return true if
12389  * the total port shutdown feature is enabled for this PF
12390  * @pf: board private structure
12391  **/
12392 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12393 {
12394 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
12395 #define I40E_FEATURES_ENABLE_PTR		0x2A
12396 #define I40E_CURRENT_SETTING_PTR		0x2B
12397 #define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
12398 #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
12399 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
12400 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
12401 	i40e_status read_status = I40E_SUCCESS;
12402 	u16 sr_emp_sr_settings_ptr = 0;
12403 	u16 features_enable = 0;
12404 	u16 link_behavior = 0;
12405 	bool ret = false;
12406 
12407 	read_status = i40e_read_nvm_word(&pf->hw,
12408 					 I40E_SR_EMP_SR_SETTINGS_PTR,
12409 					 &sr_emp_sr_settings_ptr);
12410 	if (read_status)
12411 		goto err_nvm;
12412 	read_status = i40e_read_nvm_word(&pf->hw,
12413 					 sr_emp_sr_settings_ptr +
12414 					 I40E_FEATURES_ENABLE_PTR,
12415 					 &features_enable);
12416 	if (read_status)
12417 		goto err_nvm;
12418 	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12419 		read_status = i40e_read_nvm_module_data(&pf->hw,
12420 							I40E_SR_EMP_SR_SETTINGS_PTR,
12421 							I40E_CURRENT_SETTING_PTR,
12422 							I40E_LINK_BEHAVIOR_WORD_OFFSET,
12423 							I40E_LINK_BEHAVIOR_WORD_LENGTH,
12424 							&link_behavior);
12425 		if (read_status)
12426 			goto err_nvm;
12427 		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12428 		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12429 	}
12430 	return ret;
12431 
12432 err_nvm:
12433 	dev_warn(&pf->pdev->dev,
12434 		 "total-port-shutdown feature is off due to read nvm error: %s\n",
12435 		 i40e_stat_str(&pf->hw, read_status));
12436 	return ret;
12437 }
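
/* Illustrative bit layout: each port owns I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH
 * (4) bits of the link-behavior word, so port 2 shifts bits 8..11 down and
 * then tests the OS-forced flag in BIT(0) of the resulting nibble.
 */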
12438 
12439 /**
12440  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12441  * @pf: board private structure to initialize
12442  *
12443  * i40e_sw_init initializes the Adapter private data structure.
12444  * Fields are initialized based on PCI device information and
12445  * OS network device settings (MTU size).
12446  **/
12447 static int i40e_sw_init(struct i40e_pf *pf)
12448 {
12449 	int err = 0;
12450 	int size;
12451 	u16 pow;
12452 
12453 	/* Set default capability flags */
12454 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12455 		    I40E_FLAG_MSI_ENABLED     |
12456 		    I40E_FLAG_MSIX_ENABLED;
12457 
12458 	/* Set default ITR */
12459 	pf->rx_itr_default = I40E_ITR_RX_DEF;
12460 	pf->tx_itr_default = I40E_ITR_TX_DEF;
12461 
12462 	/* Depending on PF configurations, it is possible that the RSS
12463 	 * maximum might end up larger than the available queues
12464 	 */
12465 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12466 	pf->alloc_rss_size = 1;
12467 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12468 	pf->rss_size_max = min_t(int, pf->rss_size_max,
12469 				 pf->hw.func_caps.num_tx_qp);
12470 
12471 	/* find the next higher power-of-2 of num cpus */
12472 	pow = roundup_pow_of_two(num_online_cpus());
12473 	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12474 
12475 	if (pf->hw.func_caps.rss) {
12476 		pf->flags |= I40E_FLAG_RSS_ENABLED;
12477 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12478 					   num_online_cpus());
12479 	}
12480 
12481 	/* MFP mode enabled */
12482 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12483 		pf->flags |= I40E_FLAG_MFP_ENABLED;
12484 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12485 		if (i40e_get_partition_bw_setting(pf)) {
12486 			dev_warn(&pf->pdev->dev,
12487 				 "Could not get partition bw settings\n");
12488 		} else {
12489 			dev_info(&pf->pdev->dev,
12490 				 "Partition BW Min = %8.8x, Max = %8.8x\n",
12491 				 pf->min_bw, pf->max_bw);
12492 
12493 			/* nudge the Tx scheduler */
12494 			i40e_set_partition_bw_setting(pf);
12495 		}
12496 	}
12497 
12498 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12499 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12500 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12501 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12502 		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12503 		    pf->hw.num_partitions > 1)
12504 			dev_info(&pf->pdev->dev,
12505 				 "Flow Director Sideband mode Disabled in MFP mode\n");
12506 		else
12507 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12508 		pf->fdir_pf_filter_count =
12509 				 pf->hw.func_caps.fd_filters_guaranteed;
12510 		pf->hw.fdir_shared_filter_count =
12511 				 pf->hw.func_caps.fd_filters_best_effort;
12512 	}
12513 
12514 	if (pf->hw.mac.type == I40E_MAC_X722) {
12515 		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12516 				    I40E_HW_128_QP_RSS_CAPABLE |
12517 				    I40E_HW_ATR_EVICT_CAPABLE |
12518 				    I40E_HW_WB_ON_ITR_CAPABLE |
12519 				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12520 				    I40E_HW_NO_PCI_LINK_CHECK |
12521 				    I40E_HW_USE_SET_LLDP_MIB |
12522 				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12523 				    I40E_HW_PTP_L4_CAPABLE |
12524 				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12525 				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12526 
12527 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12528 		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12529 		    I40E_FDEVICT_PCTYPE_DEFAULT) {
12530 			dev_warn(&pf->pdev->dev,
12531 				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12532 			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12533 		}
12534 	} else if ((pf->hw.aq.api_maj_ver > 1) ||
12535 		   ((pf->hw.aq.api_maj_ver == 1) &&
12536 		    (pf->hw.aq.api_min_ver > 4))) {
12537 		/* Supported in FW API version higher than 1.4 */
12538 		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12539 	}
12540 
12541 	/* Enable HW ATR eviction if possible */
12542 	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12543 		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12544 
12545 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12546 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12547 	    (pf->hw.aq.fw_maj_ver < 4))) {
12548 		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12549 		/* No DCB support for FW < v4.33 */
12550 		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12551 	}
12552 
12553 	/* Disable FW LLDP if FW < v4.3 */
12554 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12555 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12556 	    (pf->hw.aq.fw_maj_ver < 4)))
12557 		pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12558 
12559 	/* Use the FW Set LLDP MIB API if FW > v4.40 */
12560 	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12561 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12562 	    (pf->hw.aq.fw_maj_ver >= 5)))
12563 		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12564 
12565 	/* Enable PTP L4 if FW > v6.0 */
12566 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12567 	    pf->hw.aq.fw_maj_ver >= 6)
12568 		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12569 
12570 	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12571 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12572 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12573 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12574 	}
12575 
12576 	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12577 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
12578 		/* IWARP needs one extra vector for CQP, just like MISC. */
12579 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12580 	}
12581 	/* Stopping FW LLDP engine is supported on XL710 and X722
12582 	 * starting from FW versions determined in i40e_init_adminq.
12583 	 * Stopping the FW LLDP engine is not supported on XL710
12584 	 * if NPAR is functioning, so unset this hw flag in this case.
12585 	 */
12586 	if (pf->hw.mac.type == I40E_MAC_XL710 &&
12587 	    pf->hw.func_caps.npar_enable &&
12588 	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12589 		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12590 
12591 #ifdef CONFIG_PCI_IOV
12592 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12593 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12594 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12595 		pf->num_req_vfs = min_t(int,
12596 					pf->hw.func_caps.num_vfs,
12597 					I40E_MAX_VF_COUNT);
12598 	}
12599 #endif /* CONFIG_PCI_IOV */
12600 	pf->eeprom_version = 0xDEAD;
12601 	pf->lan_veb = I40E_NO_VEB;
12602 	pf->lan_vsi = I40E_NO_VSI;
12603 
12604 	/* By default FW has this off for performance reasons */
12605 	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12606 
12607 	/* set up queue assignment tracking */
12608 	size = sizeof(struct i40e_lump_tracking)
12609 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12610 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
12611 	if (!pf->qp_pile) {
12612 		err = -ENOMEM;
12613 		goto sw_init_done;
12614 	}
12615 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12616 
12617 	pf->tx_timeout_recovery_level = 1;
12618 
12619 	if (pf->hw.mac.type != I40E_MAC_X722 &&
12620 	    i40e_is_total_port_shutdown_enabled(pf)) {
12621 		/* Link down on close must be on when total port shutdown
12622 		 * is enabled for a given port
12623 		 */
12624 		pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12625 			      I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12626 		dev_info(&pf->pdev->dev,
12627 			 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12628 	}
12629 	mutex_init(&pf->switch_mutex);
12630 
12631 sw_init_done:
12632 	return err;
12633 }
12634 
12635 /**
12636  * i40e_set_ntuple - set the ntuple feature flag and take action
12637  * @pf: board private structure to initialize
12638  * @features: the feature set that the stack is suggesting
12639  *
12640  * returns a bool to indicate if reset needs to happen
12641  **/
12642 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12643 {
12644 	bool need_reset = false;
12645 
12646 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
12647 	 * the state changed, we need to reset.
12648 	 */
12649 	if (features & NETIF_F_NTUPLE) {
12650 		/* Enable filters and mark for reset */
12651 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12652 			need_reset = true;
12653 		/* enable FD_SB only if there is an MSI-X vector and no cloud
12654 		 * filters exist
12655 		 */
12656 		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12657 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12658 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12659 		}
12660 	} else {
12661 		/* turn off filters, mark for reset and clear SW filter list */
12662 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12663 			need_reset = true;
12664 			i40e_fdir_filter_exit(pf);
12665 		}
12666 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12667 		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12668 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12669 
12670 		/* reset fd counters */
12671 		pf->fd_add_err = 0;
12672 		pf->fd_atr_cnt = 0;
12673 		/* if ATR was auto disabled it can be re-enabled. */
12674 		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12675 			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12676 			    (I40E_DEBUG_FD & pf->hw.debug_mask))
12677 				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12678 	}
12679 	return need_reset;
12680 }
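
/* Illustrative trigger: "ethtool -K <if> ntuple on|off" toggles
 * NETIF_F_NTUPLE and reaches this helper via i40e_set_features() below; a
 * true return value tells the caller to issue a PF reset.
 */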
12681 
12682 /**
12683  * i40e_clear_rss_lut - clear the rx hash lookup table
12684  * @vsi: the VSI being configured
12685  **/
12686 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12687 {
12688 	struct i40e_pf *pf = vsi->back;
12689 	struct i40e_hw *hw = &pf->hw;
12690 	u16 vf_id = vsi->vf_id;
12691 	u8 i;
12692 
12693 	if (vsi->type == I40E_VSI_MAIN) {
12694 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12695 			wr32(hw, I40E_PFQF_HLUT(i), 0);
12696 	} else if (vsi->type == I40E_VSI_SRIOV) {
12697 		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12698 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12699 	} else {
12700 		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12701 	}
12702 }
12703 
12704 /**
12705  * i40e_set_features - set the netdev feature flags
12706  * @netdev: ptr to the netdev being adjusted
12707  * @features: the feature set that the stack is suggesting
12708  * Note: expects to be called while under rtnl_lock()
12709  **/
12710 static int i40e_set_features(struct net_device *netdev,
12711 			     netdev_features_t features)
12712 {
12713 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12714 	struct i40e_vsi *vsi = np->vsi;
12715 	struct i40e_pf *pf = vsi->back;
12716 	bool need_reset;
12717 
12718 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12719 		i40e_pf_config_rss(pf);
12720 	else if (!(features & NETIF_F_RXHASH) &&
12721 		 netdev->features & NETIF_F_RXHASH)
12722 		i40e_clear_rss_lut(vsi);
12723 
12724 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
12725 		i40e_vlan_stripping_enable(vsi);
12726 	else
12727 		i40e_vlan_stripping_disable(vsi);
12728 
12729 	if (!(features & NETIF_F_HW_TC) &&
12730 	    (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12731 		dev_err(&pf->pdev->dev,
12732 			"Offloaded tc filters active, can't turn hw_tc_offload off");
12733 		return -EINVAL;
12734 	}
12735 
12736 	if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12737 		i40e_del_all_macvlans(vsi);
12738 
12739 	need_reset = i40e_set_ntuple(pf, features);
12740 
12741 	if (need_reset)
12742 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12743 
12744 	return 0;
12745 }
12746 
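/**
 * i40e_udp_tunnel_set_port - Offload a UDP tunnel port to the hardware
 * @netdev: the netdev being configured
 * @table: index of the udp_tunnel_nic table being updated
 * @idx: index of the entry within that table
 * @ti: tunnel info, giving the tunnel type and port number
 *
 * Adds a VXLAN or GENEVE UDP port filter via the admin queue and stores
 * the returned filter index in the entry's hw_priv for later removal.
 */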
12747 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12748 				    unsigned int table, unsigned int idx,
12749 				    struct udp_tunnel_info *ti)
12750 {
12751 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12752 	struct i40e_hw *hw = &np->vsi->back->hw;
12753 	u8 type, filter_index;
12754 	i40e_status ret;
12755 
12756 	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12757 						   I40E_AQC_TUNNEL_TYPE_NGE;
12758 
12759 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12760 				     NULL);
12761 	if (ret) {
12762 		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12763 			    i40e_stat_str(hw, ret),
12764 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12765 		return -EIO;
12766 	}
12767 
12768 	udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12769 	return 0;
12770 }
12771 
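/**
 * i40e_udp_tunnel_unset_port - Remove a UDP tunnel port from the hardware
 * @netdev: the netdev being configured
 * @table: index of the udp_tunnel_nic table being updated
 * @idx: index of the entry within that table
 * @ti: tunnel info, with @ti->hw_priv holding the filter index to delete
 */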
12772 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12773 				      unsigned int table, unsigned int idx,
12774 				      struct udp_tunnel_info *ti)
12775 {
12776 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12777 	struct i40e_hw *hw = &np->vsi->back->hw;
12778 	i40e_status ret;
12779 
12780 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12781 	if (ret) {
12782 		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12783 			    i40e_stat_str(hw, ret),
12784 			    i40e_aq_str(hw, hw->aq.asq_last_status));
12785 		return -EIO;
12786 	}
12787 
12788 	return 0;
12789 }
12790 
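/**
 * i40e_get_phys_port_id - Get the physical port identifier
 * @netdev: the netdev being queried
 * @ppid: buffer in which to return the port ID
 *
 * Reports the permanent port MAC address as the physical port ID, or
 * -EOPNOTSUPP when the hardware did not provide a valid one.
 */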
12791 static int i40e_get_phys_port_id(struct net_device *netdev,
12792 				 struct netdev_phys_item_id *ppid)
12793 {
12794 	struct i40e_netdev_priv *np = netdev_priv(netdev);
12795 	struct i40e_pf *pf = np->vsi->back;
12796 	struct i40e_hw *hw = &pf->hw;
12797 
12798 	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12799 		return -EOPNOTSUPP;
12800 
12801 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12802 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12803 
12804 	return 0;
12805 }
12806 
12807 /**
12808  * i40e_ndo_fdb_add - add an entry to the hardware database
12809  * @ndm: the input from the stack
12810  * @tb: pointer to array of nladdr (unused)
12811  * @dev: the net device pointer
12812  * @addr: the MAC address entry being added
12813  * @vid: VLAN ID
12814  * @flags: instructions from stack about fdb operation
12815  * @extack: netlink extended ack, unused currently
12816  */
12817 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12818 			    struct net_device *dev,
12819 			    const unsigned char *addr, u16 vid,
12820 			    u16 flags,
12821 			    struct netlink_ext_ack *extack)
12822 {
12823 	struct i40e_netdev_priv *np = netdev_priv(dev);
12824 	struct i40e_pf *pf = np->vsi->back;
12825 	int err = 0;
12826 
12827 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12828 		return -EOPNOTSUPP;
12829 
12830 	if (vid) {
12831 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12832 		return -EINVAL;
12833 	}
12834 
12835 	/* Hardware does not support aging addresses, so if an
12836 	 * ndm_state is given, only allow permanent addresses
12837 	 */
12838 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12839 		netdev_info(dev, "FDB only supports static addresses\n");
12840 		return -EINVAL;
12841 	}
12842 
12843 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12844 		err = dev_uc_add_excl(dev, addr);
12845 	else if (is_multicast_ether_addr(addr))
12846 		err = dev_mc_add_excl(dev, addr);
12847 	else
12848 		err = -EINVAL;
12849 
12850 	/* Only return duplicate errors if NLM_F_EXCL is set */
12851 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
12852 		err = 0;
12853 
12854 	return err;
12855 }
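
/* Illustrative trigger: "bridge fdb add <mac> dev <if>" lands here via
 * ndo_fdb_add; only static (NUD_PERMANENT) entries are accepted since the
 * hardware cannot age addresses.
 */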
12856 
12857 /**
12858  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12859  * @dev: the netdev being configured
12860  * @nlh: RTNL message
12861  * @flags: bridge flags
12862  * @extack: netlink extended ack
12863  *
12864  * Inserts a new hardware bridge if not already created and
12865  * enables the bridging mode requested (VEB or VEPA). If the
12866  * hardware bridge has already been inserted and the request
12867  * is to change the mode then that requires a PF reset to
12868  * allow rebuild of the components with required hardware
12869  * bridge mode enabled.
12870  *
12871  * Note: expects to be called while under rtnl_lock()
12872  **/
12873 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12874 				   struct nlmsghdr *nlh,
12875 				   u16 flags,
12876 				   struct netlink_ext_ack *extack)
12877 {
12878 	struct i40e_netdev_priv *np = netdev_priv(dev);
12879 	struct i40e_vsi *vsi = np->vsi;
12880 	struct i40e_pf *pf = vsi->back;
12881 	struct i40e_veb *veb = NULL;
12882 	struct nlattr *attr, *br_spec;
12883 	int i, rem;
12884 
12885 	/* Only for PF VSI for now */
12886 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12887 		return -EOPNOTSUPP;
12888 
12889 	/* Find the HW bridge for PF VSI */
12890 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12891 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12892 			veb = pf->veb[i];
12893 	}
12894 
12895 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12896 
12897 	nla_for_each_nested(attr, br_spec, rem) {
12898 		__u16 mode;
12899 
12900 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
12901 			continue;
12902 
12903 		mode = nla_get_u16(attr);
12904 		if ((mode != BRIDGE_MODE_VEPA) &&
12905 		    (mode != BRIDGE_MODE_VEB))
12906 			return -EINVAL;
12907 
12908 		/* Insert a new HW bridge */
12909 		if (!veb) {
12910 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12911 					     vsi->tc_config.enabled_tc);
12912 			if (veb) {
12913 				veb->bridge_mode = mode;
12914 				i40e_config_bridge_mode(veb);
12915 			} else {
12916 				/* No Bridge HW offload available */
12917 				return -ENOENT;
12918 			}
12919 			break;
12920 		} else if (mode != veb->bridge_mode) {
12921 			/* Existing HW bridge but different mode needs reset */
12922 			veb->bridge_mode = mode;
12923 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12924 			if (mode == BRIDGE_MODE_VEB)
12925 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12926 			else
12927 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12928 			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12929 			break;
12930 		}
12931 	}
12932 
12933 	return 0;
12934 }
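
/* Illustrative trigger: an iproute2 request such as
 * "bridge link set dev <if> hwmode veb" arrives through
 * ndo_bridge_setlink; changing an existing bridge's mode costs a PF reset.
 */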
12935 
12936 /**
12937  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12938  * @skb: skb buff
12939  * @pid: process id
12940  * @seq: RTNL message seq #
12941  * @dev: the netdev being configured
12942  * @filter_mask: unused
12943  * @nlflags: netlink flags passed in
12944  *
12945  * Return the mode in which the hardware bridge is operating,
12946  * i.e. VEB or VEPA.
12947  **/
12948 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12949 				   struct net_device *dev,
12950 				   u32 __always_unused filter_mask,
12951 				   int nlflags)
12952 {
12953 	struct i40e_netdev_priv *np = netdev_priv(dev);
12954 	struct i40e_vsi *vsi = np->vsi;
12955 	struct i40e_pf *pf = vsi->back;
12956 	struct i40e_veb *veb = NULL;
12957 	int i;
12958 
12959 	/* Only for PF VSI for now */
12960 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12961 		return -EOPNOTSUPP;
12962 
12963 	/* Find the HW bridge for the PF VSI */
12964 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12965 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12966 			veb = pf->veb[i];
12967 	}
12968 
12969 	if (!veb)
12970 		return 0;
12971 
12972 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12973 				       0, 0, nlflags, filter_mask, NULL);
12974 }
12975 
12976 /**
12977  * i40e_features_check - Validate encapsulated packet conforms to limits
12978  * @skb: skb buff
12979  * @dev: This physical port's netdev
12980  * @features: Offload features that the stack believes apply
12981  **/
12982 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12983 					     struct net_device *dev,
12984 					     netdev_features_t features)
12985 {
12986 	size_t len;
12987 
12988 	/* No point in doing any of this if neither checksum nor GSO are
12989 	 * being requested for this frame.  We can rule out both by just
12990 	 * checking for CHECKSUM_PARTIAL
12991 	 */
12992 	if (skb->ip_summed != CHECKSUM_PARTIAL)
12993 		return features;
12994 
12995 	/* We cannot support GSO if the MSS is going to be less than
12996 	 * 64 bytes.  If it is then we need to drop support for GSO.
12997 	 */
12998 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12999 		features &= ~NETIF_F_GSO_MASK;
13000 
13001 	/* MACLEN can support at most 63 words */
13002 	len = skb_network_header(skb) - skb->data;
13003 	if (len & ~(63 * 2))
13004 		goto out_err;
13005 
13006 	/* IPLEN and EIPLEN can support at most 127 dwords */
13007 	len = skb_transport_header(skb) - skb_network_header(skb);
13008 	if (len & ~(127 * 4))
13009 		goto out_err;
13010 
13011 	if (skb->encapsulation) {
13012 		/* L4TUNLEN can support 127 words */
13013 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
13014 		if (len & ~(127 * 2))
13015 			goto out_err;
13016 
13017 		/* IPLEN can support at most 127 dwords */
13018 		len = skb_inner_transport_header(skb) -
13019 		      skb_inner_network_header(skb);
13020 		if (len & ~(127 * 4))
13021 			goto out_err;
13022 	}
13023 
13024 	/* No need to validate L4LEN as TCP is the only protocol with a
13025 	 * flexible value and we support all possible values supported
13026 	 * by TCP, which is at most 15 dwords
13027 	 */
13028 
13029 	return features;
13030 out_err:
13031 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13032 }
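
/* Illustrative: the "len & ~(limit * unit)" checks above reject header
 * lengths that are not a multiple of the unit or exceed the field maximum;
 * e.g. a 130 byte L2 header fails MACLEN because 130 & ~(63 * 2) != 0.
 */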
13033 
13034 /**
13035  * i40e_xdp_setup - add/remove an XDP program
13036  * @vsi: VSI to be changed
13037  * @prog: XDP program
13038  * @extack: netlink extended ack
13039  **/
13040 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13041 			  struct netlink_ext_ack *extack)
13042 {
13043 	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
13044 	struct i40e_pf *pf = vsi->back;
13045 	struct bpf_prog *old_prog;
13046 	bool need_reset;
13047 	int i;
13048 
13049 	/* Don't allow frames that span over multiple buffers */
13050 	if (frame_size > vsi->rx_buf_len) {
13051 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
13052 		return -EINVAL;
13053 	}
13054 
13055 	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
13056 	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13057 
13058 	if (need_reset)
13059 		i40e_prep_for_reset(pf);
13060 
13061 	/* VSI shall be deleted in a moment, just return EINVAL */
13062 	if (test_bit(__I40E_IN_REMOVE, pf->state))
13063 		return -EINVAL;
13064 
13065 	old_prog = xchg(&vsi->xdp_prog, prog);
13066 
13067 	if (need_reset) {
13068 		if (!prog)
13069 			/* Wait until ndo_xsk_wakeup completes. */
13070 			synchronize_rcu();
13071 		i40e_reset_and_rebuild(pf, true, true);
13072 	}
13073 
13074 	for (i = 0; i < vsi->num_queue_pairs; i++)
13075 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13076 
13077 	if (old_prog)
13078 		bpf_prog_put(old_prog);
13079 
13080 	/* Kick start the NAPI context if there is an AF_XDP socket open
13081 	 * on that queue id. This is so that receiving will start.
13082 	 */
13083 	if (need_reset && prog)
13084 		for (i = 0; i < vsi->num_queue_pairs; i++)
13085 			if (vsi->xdp_rings[i]->xsk_pool)
13086 				(void)i40e_xsk_wakeup(vsi->netdev, i,
13087 						      XDP_WAKEUP_RX);
13088 
13089 	return 0;
13090 }
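
/* Illustrative trigger: attaching a program in native mode, e.g.
 * "ip link set dev <if> xdpdrv obj xdp_prog.o", reaches this function
 * through ndo_bpf (i40e_xdp() below) with XDP_SETUP_PROG.
 */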
13091 
13092 /**
13093  * i40e_enter_busy_conf - Enters busy config state
13094  * @vsi: vsi
13095  *
13096  * Returns 0 on success, <0 for failure.
13097  **/
13098 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13099 {
13100 	struct i40e_pf *pf = vsi->back;
13101 	int timeout = 50;
13102 
13103 	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13104 		timeout--;
13105 		if (!timeout)
13106 			return -EBUSY;
13107 		usleep_range(1000, 2000);
13108 	}
13109 
13110 	return 0;
13111 }
13112 
13113 /**
13114  * i40e_exit_busy_conf - Exits busy config state
13115  * @vsi: vsi
13116  **/
13117 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13118 {
13119 	struct i40e_pf *pf = vsi->back;
13120 
13121 	clear_bit(__I40E_CONFIG_BUSY, pf->state);
13122 }
13123 
13124 /**
13125  * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13126  * @vsi: vsi
13127  * @queue_pair: queue pair
13128  **/
13129 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13130 {
13131 	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13132 	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13133 	memset(&vsi->tx_rings[queue_pair]->stats, 0,
13134 	       sizeof(vsi->tx_rings[queue_pair]->stats));
13135 	if (i40e_enabled_xdp_vsi(vsi)) {
13136 		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13137 		       sizeof(vsi->xdp_rings[queue_pair]->stats));
13138 	}
13139 }
13140 
13141 /**
13142  * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13143  * @vsi: vsi
13144  * @queue_pair: queue pair
13145  **/
13146 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13147 {
13148 	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13149 	if (i40e_enabled_xdp_vsi(vsi)) {
13150 		/* Make sure that in-progress ndo_xdp_xmit calls are
13151 		 * completed.
13152 		 */
13153 		synchronize_rcu();
13154 		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13155 	}
13156 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13157 }
13158 
13159 /**
13160  * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13161  * @vsi: vsi
13162  * @queue_pair: queue pair
13163  * @enable: true for enable, false for disable
13164  **/
13165 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13166 					bool enable)
13167 {
13168 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13169 	struct i40e_q_vector *q_vector = rxr->q_vector;
13170 
13171 	if (!vsi->netdev)
13172 		return;
13173 
13174 	/* All rings in a qp belong to the same qvector. */
13175 	if (q_vector->rx.ring || q_vector->tx.ring) {
13176 		if (enable)
13177 			napi_enable(&q_vector->napi);
13178 		else
13179 			napi_disable(&q_vector->napi);
13180 	}
13181 }
13182 
13183 /**
13184  * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13185  * @vsi: vsi
13186  * @queue_pair: queue pair
13187  * @enable: true for enable, false for disable
13188  *
13189  * Returns 0 on success, <0 on failure.
13190  **/
13191 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13192 					bool enable)
13193 {
13194 	struct i40e_pf *pf = vsi->back;
13195 	int pf_q, ret = 0;
13196 
13197 	pf_q = vsi->base_queue + queue_pair;
13198 	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13199 				     false /*is xdp*/, enable);
13200 	if (ret) {
13201 		dev_info(&pf->pdev->dev,
13202 			 "VSI seid %d Tx ring %d %sable timeout\n",
13203 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13204 		return ret;
13205 	}
13206 
13207 	i40e_control_rx_q(pf, pf_q, enable);
13208 	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13209 	if (ret) {
13210 		dev_info(&pf->pdev->dev,
13211 			 "VSI seid %d Rx ring %d %sable timeout\n",
13212 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13213 		return ret;
13214 	}
13215 
13216 	/* Due to HW errata, on Rx disable only, the register can
13217 	 * indicate done before it really is. Needs 50ms to be sure
13218 	 */
13219 	if (!enable)
13220 		mdelay(50);
13221 
13222 	if (!i40e_enabled_xdp_vsi(vsi))
13223 		return ret;
13224 
13225 	ret = i40e_control_wait_tx_q(vsi->seid, pf,
13226 				     pf_q + vsi->alloc_queue_pairs,
13227 				     true /*is xdp*/, enable);
13228 	if (ret) {
13229 		dev_info(&pf->pdev->dev,
13230 			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13231 			 vsi->seid, pf_q, (enable ? "en" : "dis"));
13232 	}
13233 
13234 	return ret;
13235 }
13236 
13237 /**
13238  * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13239  * @vsi: vsi
13240  * @queue_pair: queue_pair
13241  **/
13242 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13243 {
13244 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13245 	struct i40e_pf *pf = vsi->back;
13246 	struct i40e_hw *hw = &pf->hw;
13247 
13248 	/* All rings in a qp belong to the same qvector. */
13249 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13250 		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13251 	else
13252 		i40e_irq_dynamic_enable_icr0(pf);
13253 
13254 	i40e_flush(hw);
13255 }
13256 
13257 /**
13258  * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13259  * @vsi: vsi
13260  * @queue_pair: queue_pair
13261  **/
13262 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13263 {
13264 	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13265 	struct i40e_pf *pf = vsi->back;
13266 	struct i40e_hw *hw = &pf->hw;
13267 
13268 	/* For simplicity, instead of removing the qp interrupt causes
13269 	 * from the interrupt linked list, we simply disable the interrupt, and
13270 	 * leave the list intact.
13271 	 *
13272 	 * All rings in a qp belong to the same qvector.
13273 	 */
13274 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13275 		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13276 
13277 		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13278 		i40e_flush(hw);
13279 		synchronize_irq(pf->msix_entries[intpf].vector);
13280 	} else {
13281 		/* Legacy and MSI mode - this stops all interrupt handling */
13282 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13283 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13284 		i40e_flush(hw);
13285 		synchronize_irq(pf->pdev->irq);
13286 	}
13287 }
13288 
13289 /**
13290  * i40e_queue_pair_disable - Disables a queue pair
13291  * @vsi: vsi
13292  * @queue_pair: queue pair
13293  *
13294  * Returns 0 on success, <0 on failure.
13295  **/
13296 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13297 {
13298 	int err;
13299 
13300 	err = i40e_enter_busy_conf(vsi);
13301 	if (err)
13302 		return err;
13303 
13304 	i40e_queue_pair_disable_irq(vsi, queue_pair);
13305 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13306 	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13307 	i40e_queue_pair_clean_rings(vsi, queue_pair);
13308 	i40e_queue_pair_reset_stats(vsi, queue_pair);
13309 
13310 	return err;
13311 }
13312 
13313 /**
13314  * i40e_queue_pair_enable - Enables a queue pair
13315  * @vsi: vsi
13316  * @queue_pair: queue pair
13317  *
13318  * Returns 0 on success, <0 on failure.
13319  **/
13320 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13321 {
13322 	int err;
13323 
13324 	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13325 	if (err)
13326 		return err;
13327 
13328 	if (i40e_enabled_xdp_vsi(vsi)) {
13329 		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13330 		if (err)
13331 			return err;
13332 	}
13333 
13334 	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13335 	if (err)
13336 		return err;
13337 
13338 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13339 	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13340 	i40e_queue_pair_enable_irq(vsi, queue_pair);
13341 
13342 	i40e_exit_busy_conf(vsi);
13343 
13344 	return err;
13345 }
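
/* Note: the queue pair enable/disable helpers above let the AF_XDP
 * zero-copy path (see i40e_xsk_pool_setup()) quiesce and restart a single
 * queue pair without a full VSI rebuild.
 */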
13346 
13347 /**
13348  * i40e_xdp - implements ndo_bpf for i40e
13349  * @dev: netdevice
13350  * @xdp: XDP command
13351  **/
13352 static int i40e_xdp(struct net_device *dev,
13353 		    struct netdev_bpf *xdp)
13354 {
13355 	struct i40e_netdev_priv *np = netdev_priv(dev);
13356 	struct i40e_vsi *vsi = np->vsi;
13357 
13358 	if (vsi->type != I40E_VSI_MAIN)
13359 		return -EINVAL;
13360 
13361 	switch (xdp->command) {
13362 	case XDP_SETUP_PROG:
13363 		return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13364 	case XDP_SETUP_XSK_POOL:
13365 		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13366 					   xdp->xsk.queue_id);
13367 	default:
13368 		return -EINVAL;
13369 	}
13370 }
13371 
13372 static const struct net_device_ops i40e_netdev_ops = {
13373 	.ndo_open		= i40e_open,
13374 	.ndo_stop		= i40e_close,
13375 	.ndo_start_xmit		= i40e_lan_xmit_frame,
13376 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
13377 	.ndo_set_rx_mode	= i40e_set_rx_mode,
13378 	.ndo_validate_addr	= eth_validate_addr,
13379 	.ndo_set_mac_address	= i40e_set_mac,
13380 	.ndo_change_mtu		= i40e_change_mtu,
13381 	.ndo_eth_ioctl		= i40e_ioctl,
13382 	.ndo_tx_timeout		= i40e_tx_timeout,
13383 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
13384 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
13385 #ifdef CONFIG_NET_POLL_CONTROLLER
13386 	.ndo_poll_controller	= i40e_netpoll,
13387 #endif
13388 	.ndo_setup_tc		= __i40e_setup_tc,
13389 	.ndo_select_queue	= i40e_lan_select_queue,
13390 	.ndo_set_features	= i40e_set_features,
13391 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
13392 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
13393 	.ndo_get_vf_stats	= i40e_get_vf_stats,
13394 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
13395 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
13396 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
13397 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
13398 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
13399 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
13400 	.ndo_fdb_add		= i40e_ndo_fdb_add,
13401 	.ndo_features_check	= i40e_features_check,
13402 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
13403 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
13404 	.ndo_bpf		= i40e_xdp,
13405 	.ndo_xdp_xmit		= i40e_xdp_xmit,
13406 	.ndo_xsk_wakeup	        = i40e_xsk_wakeup,
13407 	.ndo_dfwd_add_station	= i40e_fwd_add,
13408 	.ndo_dfwd_del_station	= i40e_fwd_del,
13409 };
13410 
13411 /**
13412  * i40e_config_netdev - Setup the netdev flags
13413  * @vsi: the VSI being configured
13414  *
13415  * Returns 0 on success, negative value on failure
13416  **/
13417 static int i40e_config_netdev(struct i40e_vsi *vsi)
13418 {
13419 	struct i40e_pf *pf = vsi->back;
13420 	struct i40e_hw *hw = &pf->hw;
13421 	struct i40e_netdev_priv *np;
13422 	struct net_device *netdev;
13423 	u8 broadcast[ETH_ALEN];
13424 	u8 mac_addr[ETH_ALEN];
13425 	int etherdev_size;
13426 	netdev_features_t hw_enc_features;
13427 	netdev_features_t hw_features;
13428 
13429 	etherdev_size = sizeof(struct i40e_netdev_priv);
13430 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13431 	if (!netdev)
13432 		return -ENOMEM;
13433 
13434 	vsi->netdev = netdev;
13435 	np = netdev_priv(netdev);
13436 	np->vsi = vsi;
13437 
13438 	hw_enc_features = NETIF_F_SG			|
13439 			  NETIF_F_IP_CSUM		|
13440 			  NETIF_F_IPV6_CSUM		|
13441 			  NETIF_F_HIGHDMA		|
13442 			  NETIF_F_SOFT_FEATURES		|
13443 			  NETIF_F_TSO			|
13444 			  NETIF_F_TSO_ECN		|
13445 			  NETIF_F_TSO6			|
13446 			  NETIF_F_GSO_GRE		|
13447 			  NETIF_F_GSO_GRE_CSUM		|
13448 			  NETIF_F_GSO_PARTIAL		|
13449 			  NETIF_F_GSO_IPXIP4		|
13450 			  NETIF_F_GSO_IPXIP6		|
13451 			  NETIF_F_GSO_UDP_TUNNEL	|
13452 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
13453 			  NETIF_F_GSO_UDP_L4		|
13454 			  NETIF_F_SCTP_CRC		|
13455 			  NETIF_F_RXHASH		|
13456 			  NETIF_F_RXCSUM		|
13457 			  0;
13458 
13459 	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13460 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13461 
13462 	netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13463 
13464 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13465 
13466 	netdev->hw_enc_features |= hw_enc_features;
13467 
13468 	/* record features VLANs can make use of */
13469 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13470 
13471 	/* enable macvlan offloads */
13472 	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13473 
13474 	hw_features = hw_enc_features		|
13475 		      NETIF_F_HW_VLAN_CTAG_TX	|
13476 		      NETIF_F_HW_VLAN_CTAG_RX;
13477 
13478 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13479 		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13480 
13481 	netdev->hw_features |= hw_features;
13482 
13483 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13484 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13485 
13486 	netdev->features &= ~NETIF_F_HW_TC;
13487 
13488 	if (vsi->type == I40E_VSI_MAIN) {
13489 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13490 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
13491 		/* The following steps are necessary for two reasons. First,
13492 		 * some older NVM configurations load a default MAC-VLAN
13493 		 * filter that will accept any tagged packet, and we want to
13494 		 * replace this with a normal filter. Additionally, it is
13495 		 * possible our MAC address was provided by the platform using
13496 		 * Open Firmware or similar.
13497 		 *
13498 		 * Thus, we need to remove the default filter and install one
13499 		 * specific to the MAC address.
13500 		 */
13501 		i40e_rm_default_mac_filter(vsi, mac_addr);
13502 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13503 		i40e_add_mac_filter(vsi, mac_addr);
13504 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13505 	} else {
13506 		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13507 		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13508 		 * the end, which is 4 bytes long, so force truncation of the
13509 		 * original name to IFNAMSIZ - 4
13510 		 */
13511 		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13512 			 IFNAMSIZ - 4,
13513 			 pf->vsi[pf->lan_vsi]->netdev->name);
13514 		eth_random_addr(mac_addr);
13515 
13516 		spin_lock_bh(&vsi->mac_filter_hash_lock);
13517 		i40e_add_mac_filter(vsi, mac_addr);
13518 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
13519 	}
13520 
13521 	/* Add the broadcast filter so that we initially will receive
13522 	 * broadcast packets. Note that when a new VLAN is first added the
13523 	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13524 	 * specific filters as part of transitioning into "vlan" operation.
13525 	 * When more VLANs are added, the driver will copy each existing MAC
13526 	 * filter and add it for the new VLAN.
13527 	 *
13528 	 * Broadcast filters are handled specially by
13529 	 * i40e_sync_filters_subtask, as the driver must set the broadcast
13530 	 * promiscuous bit instead of adding this directly as a MAC/VLAN
13531 	 * filter. The subtask will update the correct broadcast promiscuous
13532 	 * bits as VLANs become active or inactive.
13533 	 */
13534 	eth_broadcast_addr(broadcast);
13535 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13536 	i40e_add_mac_filter(vsi, broadcast);
13537 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13538 
13539 	eth_hw_addr_set(netdev, mac_addr);
13540 	ether_addr_copy(netdev->perm_addr, mac_addr);
13541 
13542 	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13543 	netdev->neigh_priv_len = sizeof(u32) * 4;
13544 
13545 	netdev->priv_flags |= IFF_UNICAST_FLT;
13546 	netdev->priv_flags |= IFF_SUPP_NOFCS;
13547 	/* Setup netdev TC information */
13548 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13549 
13550 	netdev->netdev_ops = &i40e_netdev_ops;
13551 	netdev->watchdog_timeo = 5 * HZ;
13552 	i40e_set_ethtool_ops(netdev);
13553 
13554 	/* MTU range: 68 - 9706 */
13555 	netdev->min_mtu = ETH_MIN_MTU;
13556 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13557 
13558 	return 0;
13559 }
13560 
13561 /**
13562  * i40e_vsi_delete - Delete a VSI from the switch
13563  * @vsi: the VSI being removed
13564  **/
13567 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13568 {
13569 	/* Removing the default VSI is not allowed */
13570 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13571 		return;
13572 
13573 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13574 }
13575 
13576 /**
13577  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13578  * @vsi: the VSI being queried
13579  *
13580  * Returns 1 if the HW bridge mode is VEB, 0 for VEPA (-ENOENT if no VEB)
13581  **/
13582 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13583 {
13584 	struct i40e_veb *veb;
13585 	struct i40e_pf *pf = vsi->back;
13586 
13587 	/* Uplink is not a bridge so default to VEB */
13588 	if (vsi->veb_idx >= I40E_MAX_VEB)
13589 		return 1;
13590 
13591 	veb = pf->veb[vsi->veb_idx];
13592 	if (!veb) {
13593 		dev_info(&pf->pdev->dev,
13594 			 "There is no veb associated with the bridge\n");
13595 		return -ENOENT;
13596 	}
13597 
13598 	/* Uplink is a bridge in VEPA mode */
13599 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
13600 		return 0;
13601 
13602 	/* Uplink is a bridge in VEB mode */
13603 	return 1;
13608 }
13609 
13610 /**
13611  * i40e_add_vsi - Add a VSI to the switch
13612  * @vsi: the VSI being configured
13613  *
13614  * This initializes a VSI context depending on the VSI type to be added and
13615  * passes it down to the add_vsi aq command.
13616  **/
13617 static int i40e_add_vsi(struct i40e_vsi *vsi)
13618 {
13619 	int ret = -ENODEV;
13620 	struct i40e_pf *pf = vsi->back;
13621 	struct i40e_hw *hw = &pf->hw;
13622 	struct i40e_vsi_context ctxt;
13623 	struct i40e_mac_filter *f;
13624 	struct hlist_node *h;
13625 	int bkt;
13626 
13627 	u8 enabled_tc = 0x1; /* TC0 enabled */
13628 	int f_count = 0;
13629 
13630 	memset(&ctxt, 0, sizeof(ctxt));
13631 	switch (vsi->type) {
13632 	case I40E_VSI_MAIN:
13633 		/* The PF's main VSI is already setup as part of the
13634 		 * device initialization, so we'll not bother with
13635 		 * the add_vsi call, but we will retrieve the current
13636 		 * VSI context.
13637 		 */
13638 		ctxt.seid = pf->main_vsi_seid;
13639 		ctxt.pf_num = pf->hw.pf_id;
13640 		ctxt.vf_num = 0;
13641 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13642 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13643 		if (ret) {
13644 			dev_info(&pf->pdev->dev,
13645 				 "couldn't get PF vsi config, err %s aq_err %s\n",
13646 				 i40e_stat_str(&pf->hw, ret),
13647 				 i40e_aq_str(&pf->hw,
13648 					     pf->hw.aq.asq_last_status));
13649 			return -ENOENT;
13650 		}
13651 		vsi->info = ctxt.info;
13652 		vsi->info.valid_sections = 0;
13653 
13654 		vsi->seid = ctxt.seid;
13655 		vsi->id = ctxt.vsi_number;
13656 
13657 		enabled_tc = i40e_pf_get_tc_map(pf);
13658 
13659 		/* Source pruning is enabled by default, so the flag is
13660 		 * negative logic - if it's set, we need to fiddle with
13661 		 * the VSI to disable source pruning.
13662 		 */
13663 		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13664 			memset(&ctxt, 0, sizeof(ctxt));
13665 			ctxt.seid = pf->main_vsi_seid;
13666 			ctxt.pf_num = pf->hw.pf_id;
13667 			ctxt.vf_num = 0;
13668 			ctxt.info.valid_sections |=
13669 				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13670 			ctxt.info.switch_id =
13671 				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13672 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13673 			if (ret) {
13674 				dev_info(&pf->pdev->dev,
13675 					 "update vsi failed, err %s aq_err %s\n",
13676 					 i40e_stat_str(&pf->hw, ret),
13677 					 i40e_aq_str(&pf->hw,
13678 						     pf->hw.aq.asq_last_status));
13679 				ret = -ENOENT;
13680 				goto err;
13681 			}
13682 		}
13683 
		/* In MFP mode, set up the queue map and update the VSI */
13685 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13686 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13687 			memset(&ctxt, 0, sizeof(ctxt));
13688 			ctxt.seid = pf->main_vsi_seid;
13689 			ctxt.pf_num = pf->hw.pf_id;
13690 			ctxt.vf_num = 0;
13691 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13692 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13693 			if (ret) {
13694 				dev_info(&pf->pdev->dev,
13695 					 "update vsi failed, err %s aq_err %s\n",
13696 					 i40e_stat_str(&pf->hw, ret),
13697 					 i40e_aq_str(&pf->hw,
13698 						    pf->hw.aq.asq_last_status));
13699 				ret = -ENOENT;
13700 				goto err;
13701 			}
13702 			/* update the local VSI info queue map */
13703 			i40e_vsi_update_queue_map(vsi, &ctxt);
13704 			vsi->info.valid_sections = 0;
13705 		} else {
			/* The Default/Main VSI is only enabled for TC0;
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * In the MFP case the iSCSI PF would use this
			 * flow to enable the LAN+iSCSI TCs.
13711 			 */
13712 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
13713 			if (ret) {
				/* A single-TC condition is not fatal;
				 * log a message and continue
13716 				 */
13717 				dev_info(&pf->pdev->dev,
13718 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13719 					 enabled_tc,
13720 					 i40e_stat_str(&pf->hw, ret),
13721 					 i40e_aq_str(&pf->hw,
13722 						    pf->hw.aq.asq_last_status));
13723 			}
13724 		}
13725 		break;
13726 
13727 	case I40E_VSI_FDIR:
13728 		ctxt.pf_num = hw->pf_id;
13729 		ctxt.vf_num = 0;
13730 		ctxt.uplink_seid = vsi->uplink_seid;
13731 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13732 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13733 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13734 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
13735 			ctxt.info.valid_sections |=
13736 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13737 			ctxt.info.switch_id =
13738 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13739 		}
13740 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13741 		break;
13742 
13743 	case I40E_VSI_VMDQ2:
13744 		ctxt.pf_num = hw->pf_id;
13745 		ctxt.vf_num = 0;
13746 		ctxt.uplink_seid = vsi->uplink_seid;
13747 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13748 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13749 
		/* switch_id is left at its default of zero; if this
		 * VSI's uplink is in VEB mode, set the ALLOW_LB flag
		 * in switch_id to allow local loopback.
13752 		 */
13753 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13754 			ctxt.info.valid_sections |=
13755 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13756 			ctxt.info.switch_id =
13757 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13758 		}
13759 
13760 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13761 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13762 		break;
13763 
13764 	case I40E_VSI_SRIOV:
13765 		ctxt.pf_num = hw->pf_id;
13766 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13767 		ctxt.uplink_seid = vsi->uplink_seid;
13768 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13769 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13770 
		/* switch_id is left at its default of zero; if this
		 * VSI's uplink is in VEB mode, set the ALLOW_LB flag
		 * in switch_id to allow local loopback.
13773 		 */
13774 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13775 			ctxt.info.valid_sections |=
13776 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13777 			ctxt.info.switch_id =
13778 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13779 		}
13780 
13781 		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13782 			ctxt.info.valid_sections |=
13783 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13784 			ctxt.info.queueing_opt_flags |=
13785 				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13786 				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13787 		}
13788 
13789 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13790 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13791 		if (pf->vf[vsi->vf_id].spoofchk) {
13792 			ctxt.info.valid_sections |=
13793 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13794 			ctxt.info.sec_flags |=
13795 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13796 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13797 		}
13798 		/* Setup the VSI tx/rx queue map for TC0 only for now */
13799 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13800 		break;
13801 
13802 	case I40E_VSI_IWARP:
13803 		/* send down message to iWARP */
13804 		break;
13805 
13806 	default:
13807 		return -ENODEV;
13808 	}
13809 
13810 	if (vsi->type != I40E_VSI_MAIN) {
13811 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13812 		if (ret) {
13813 			dev_info(&vsi->back->pdev->dev,
13814 				 "add vsi failed, err %s aq_err %s\n",
13815 				 i40e_stat_str(&pf->hw, ret),
13816 				 i40e_aq_str(&pf->hw,
13817 					     pf->hw.aq.asq_last_status));
13818 			ret = -ENOENT;
13819 			goto err;
13820 		}
13821 		vsi->info = ctxt.info;
13822 		vsi->info.valid_sections = 0;
13823 		vsi->seid = ctxt.seid;
13824 		vsi->id = ctxt.vsi_number;
13825 	}
13826 
13827 	vsi->active_filters = 0;
13828 	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13829 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13830 	/* If macvlan filters already exist, force them to get loaded */
13831 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13832 		f->state = I40E_FILTER_NEW;
13833 		f_count++;
13834 	}
13835 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13836 
13837 	if (f_count) {
13838 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13839 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13840 	}
13841 
13842 	/* Update VSI BW information */
13843 	ret = i40e_vsi_get_bw_info(vsi);
13844 	if (ret) {
13845 		dev_info(&pf->pdev->dev,
13846 			 "couldn't get vsi bw info, err %s aq_err %s\n",
13847 			 i40e_stat_str(&pf->hw, ret),
13848 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13849 		/* VSI is already added so not tearing that up */
13850 		ret = 0;
13851 	}
13852 
13853 err:
13854 	return ret;
13855 }
13856 
13857 /**
13858  * i40e_vsi_release - Delete a VSI and free its resources
13859  * @vsi: the VSI being removed
13860  *
13861  * Returns 0 on success or < 0 on error
13862  **/
13863 int i40e_vsi_release(struct i40e_vsi *vsi)
13864 {
13865 	struct i40e_mac_filter *f;
13866 	struct hlist_node *h;
13867 	struct i40e_veb *veb = NULL;
13868 	struct i40e_pf *pf;
13869 	u16 uplink_seid;
13870 	int i, n, bkt;
13871 
13872 	pf = vsi->back;
13873 
13874 	/* release of a VEB-owner or last VSI is not allowed */
13875 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13876 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13877 			 vsi->seid, vsi->uplink_seid);
13878 		return -ENODEV;
13879 	}
13880 	if (vsi == pf->vsi[pf->lan_vsi] &&
13881 	    !test_bit(__I40E_DOWN, pf->state)) {
13882 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13883 		return -ENODEV;
13884 	}
13885 	set_bit(__I40E_VSI_RELEASING, vsi->state);
13886 	uplink_seid = vsi->uplink_seid;
13887 	if (vsi->type != I40E_VSI_SRIOV) {
13888 		if (vsi->netdev_registered) {
13889 			vsi->netdev_registered = false;
13890 			if (vsi->netdev) {
13891 				/* results in a call to i40e_close() */
13892 				unregister_netdev(vsi->netdev);
13893 			}
13894 		} else {
13895 			i40e_vsi_close(vsi);
13896 		}
13897 		i40e_vsi_disable_irq(vsi);
13898 	}
13899 
13900 	spin_lock_bh(&vsi->mac_filter_hash_lock);
13901 
13902 	/* clear the sync flag on all filters */
13903 	if (vsi->netdev) {
13904 		__dev_uc_unsync(vsi->netdev, NULL);
13905 		__dev_mc_unsync(vsi->netdev, NULL);
13906 	}
13907 
13908 	/* make sure any remaining filters are marked for deletion */
13909 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13910 		__i40e_del_filter(vsi, f);
13911 
13912 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
13913 
13914 	i40e_sync_vsi_filters(vsi);
13915 
13916 	i40e_vsi_delete(vsi);
13917 	i40e_vsi_free_q_vectors(vsi);
13918 	if (vsi->netdev) {
13919 		free_netdev(vsi->netdev);
13920 		vsi->netdev = NULL;
13921 	}
13922 	i40e_vsi_clear_rings(vsi);
13923 	i40e_vsi_clear(vsi);
13924 
13925 	/* If this was the last thing on the VEB, except for the
13926 	 * controlling VSI, remove the VEB, which puts the controlling
13927 	 * VSI onto the next level down in the switch.
13928 	 *
13929 	 * Well, okay, there's one more exception here: don't remove
13930 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
13931 	 * from up the network stack.
13932 	 */
13933 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13934 		if (pf->vsi[i] &&
13935 		    pf->vsi[i]->uplink_seid == uplink_seid &&
13936 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13937 			n++;      /* count the VSIs */
13938 		}
13939 	}
13940 	for (i = 0; i < I40E_MAX_VEB; i++) {
13941 		if (!pf->veb[i])
13942 			continue;
13943 		if (pf->veb[i]->uplink_seid == uplink_seid)
13944 			n++;     /* count the VEBs */
13945 		if (pf->veb[i]->seid == uplink_seid)
13946 			veb = pf->veb[i];
13947 	}
13948 	if (n == 0 && veb && veb->uplink_seid != 0)
13949 		i40e_veb_release(veb);
13950 
13951 	return 0;
13952 }
13953 
13954 /**
13955  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13956  * @vsi: ptr to the VSI
13957  *
13958  * This should only be called after i40e_vsi_mem_alloc() which allocates the
13959  * corresponding SW VSI structure and initializes num_queue_pairs for the
13960  * newly allocated VSI.
13961  *
13962  * Returns 0 on success or negative on failure
13963  **/
13964 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13965 {
13966 	int ret = -ENOENT;
13967 	struct i40e_pf *pf = vsi->back;
13968 
13969 	if (vsi->q_vectors[0]) {
13970 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13971 			 vsi->seid);
13972 		return -EEXIST;
13973 	}
13974 
13975 	if (vsi->base_vector) {
13976 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13977 			 vsi->seid, vsi->base_vector);
13978 		return -EEXIST;
13979 	}
13980 
13981 	ret = i40e_vsi_alloc_q_vectors(vsi);
13982 	if (ret) {
13983 		dev_info(&pf->pdev->dev,
13984 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13985 			 vsi->num_q_vectors, vsi->seid, ret);
13986 		vsi->num_q_vectors = 0;
13987 		goto vector_setup_out;
13988 	}
13989 
13990 	/* In Legacy mode, we do not have to get any other vector since we
13991 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
13993 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13994 		return ret;
13995 	if (vsi->num_q_vectors)
13996 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13997 						 vsi->num_q_vectors, vsi->idx);
13998 	if (vsi->base_vector < 0) {
13999 		dev_info(&pf->pdev->dev,
14000 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14001 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14002 		i40e_vsi_free_q_vectors(vsi);
14003 		ret = -ENOENT;
14004 		goto vector_setup_out;
14005 	}
14006 
14007 vector_setup_out:
14008 	return ret;
14009 }
14010 
14011 /**
 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
14013  * @vsi: pointer to the vsi.
14014  *
14015  * This re-allocates a vsi's queue resources.
14016  *
14017  * Returns pointer to the successfully allocated and configured VSI sw struct
14018  * on success, otherwise returns NULL on failure.
14019  **/
14020 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14021 {
14022 	u16 alloc_queue_pairs;
14023 	struct i40e_pf *pf;
14024 	u8 enabled_tc;
14025 	int ret;
14026 
14027 	if (!vsi)
14028 		return NULL;
14029 
14030 	pf = vsi->back;
14031 
14032 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14033 	i40e_vsi_clear_rings(vsi);
14034 
14035 	i40e_vsi_free_arrays(vsi, false);
14036 	i40e_set_num_rings_in_vsi(vsi);
14037 	ret = i40e_vsi_alloc_arrays(vsi, false);
14038 	if (ret)
14039 		goto err_vsi;
14040 
14041 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14042 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14043 
14044 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14045 	if (ret < 0) {
14046 		dev_info(&pf->pdev->dev,
14047 			 "failed to get tracking for %d queues for VSI %d err %d\n",
14048 			 alloc_queue_pairs, vsi->seid, ret);
14049 		goto err_vsi;
14050 	}
14051 	vsi->base_queue = ret;
14052 
14053 	/* Update the FW view of the VSI. Force a reset of TC and queue
14054 	 * layout configurations.
14055 	 */
14056 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14057 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14058 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14059 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14060 	if (vsi->type == I40E_VSI_MAIN)
14061 		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14062 
14063 	/* assign it some queues */
14064 	ret = i40e_alloc_rings(vsi);
14065 	if (ret)
14066 		goto err_rings;
14067 
14068 	/* map all of the rings to the q_vectors */
14069 	i40e_vsi_map_rings_to_vectors(vsi);
14070 	return vsi;
14071 
14072 err_rings:
14073 	i40e_vsi_free_q_vectors(vsi);
14074 	if (vsi->netdev_registered) {
14075 		vsi->netdev_registered = false;
14076 		unregister_netdev(vsi->netdev);
14077 		free_netdev(vsi->netdev);
14078 		vsi->netdev = NULL;
14079 	}
14080 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14081 err_vsi:
14082 	i40e_vsi_clear(vsi);
14083 	return NULL;
14084 }
14085 
14086 /**
14087  * i40e_vsi_setup - Set up a VSI by a given type
14088  * @pf: board private structure
14089  * @type: VSI type
14090  * @uplink_seid: the switch element to link to
14091  * @param1: usage depends upon VSI type. For VF types, indicates VF id
14092  *
 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14094  * to the identified VEB.
14095  *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
14097  * success, otherwise returns NULL on failure.
14098  **/
14099 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14100 				u16 uplink_seid, u32 param1)
14101 {
14102 	struct i40e_vsi *vsi = NULL;
14103 	struct i40e_veb *veb = NULL;
14104 	u16 alloc_queue_pairs;
14105 	int ret, i;
14106 	int v_idx;
14107 
14108 	/* The requested uplink_seid must be either
14109 	 *     - the PF's port seid
14110 	 *              no VEB is needed because this is the PF
14111 	 *              or this is a Flow Director special case VSI
14112 	 *     - seid of an existing VEB
14113 	 *     - seid of a VSI that owns an existing VEB
14114 	 *     - seid of a VSI that doesn't own a VEB
14115 	 *              a new VEB is created and the VSI becomes the owner
14116 	 *     - seid of the PF VSI, which is what creates the first VEB
14117 	 *              this is a special case of the previous
14118 	 *
14119 	 * Find which uplink_seid we were given and create a new VEB if needed
14120 	 */
14121 	for (i = 0; i < I40E_MAX_VEB; i++) {
14122 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14123 			veb = pf->veb[i];
14124 			break;
14125 		}
14126 	}
14127 
14128 	if (!veb && uplink_seid != pf->mac_seid) {
14129 
14130 		for (i = 0; i < pf->num_alloc_vsi; i++) {
14131 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14132 				vsi = pf->vsi[i];
14133 				break;
14134 			}
14135 		}
14136 		if (!vsi) {
14137 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14138 				 uplink_seid);
14139 			return NULL;
14140 		}
14141 
14142 		if (vsi->uplink_seid == pf->mac_seid)
14143 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14144 					     vsi->tc_config.enabled_tc);
14145 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14146 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14147 					     vsi->tc_config.enabled_tc);
14148 		if (veb) {
14149 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14150 				dev_info(&vsi->back->pdev->dev,
14151 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14152 				return NULL;
14153 			}
14154 			/* We come up by default in VEPA mode if SRIOV is not
14155 			 * already enabled, in which case we can't force VEPA
14156 			 * mode.
14157 			 */
14158 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14159 				veb->bridge_mode = BRIDGE_MODE_VEPA;
14160 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14161 			}
14162 			i40e_config_bridge_mode(veb);
14163 		}
14164 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14165 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14166 				veb = pf->veb[i];
14167 		}
14168 		if (!veb) {
14169 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14170 			return NULL;
14171 		}
14172 
14173 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14174 		uplink_seid = veb->seid;
14175 	}
14176 
14177 	/* get vsi sw struct */
14178 	v_idx = i40e_vsi_mem_alloc(pf, type);
14179 	if (v_idx < 0)
14180 		goto err_alloc;
14181 	vsi = pf->vsi[v_idx];
14182 	if (!vsi)
14183 		goto err_alloc;
14184 	vsi->type = type;
14185 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14186 
14187 	if (type == I40E_VSI_MAIN)
14188 		pf->lan_vsi = v_idx;
14189 	else if (type == I40E_VSI_SRIOV)
14190 		vsi->vf_id = param1;
14191 	/* assign it some queues */
14192 	alloc_queue_pairs = vsi->alloc_queue_pairs *
14193 			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14194 
14195 	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14196 	if (ret < 0) {
14197 		dev_info(&pf->pdev->dev,
14198 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
14199 			 alloc_queue_pairs, vsi->seid, ret);
14200 		goto err_vsi;
14201 	}
14202 	vsi->base_queue = ret;
14203 
14204 	/* get a VSI from the hardware */
14205 	vsi->uplink_seid = uplink_seid;
14206 	ret = i40e_add_vsi(vsi);
14207 	if (ret)
14208 		goto err_vsi;
14209 
14210 	switch (vsi->type) {
14211 	/* setup the netdev if needed */
14212 	case I40E_VSI_MAIN:
14213 	case I40E_VSI_VMDQ2:
14214 		ret = i40e_config_netdev(vsi);
14215 		if (ret)
14216 			goto err_netdev;
14217 		ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14218 		if (ret)
14219 			goto err_netdev;
14220 		ret = register_netdev(vsi->netdev);
14221 		if (ret)
14222 			goto err_netdev;
14223 		vsi->netdev_registered = true;
14224 		netif_carrier_off(vsi->netdev);
14225 #ifdef CONFIG_I40E_DCB
14226 		/* Setup DCB netlink interface */
14227 		i40e_dcbnl_setup(vsi);
14228 #endif /* CONFIG_I40E_DCB */
14229 		fallthrough;
14230 	case I40E_VSI_FDIR:
14231 		/* set up vectors and rings if needed */
14232 		ret = i40e_vsi_setup_vectors(vsi);
14233 		if (ret)
14234 			goto err_msix;
14235 
14236 		ret = i40e_alloc_rings(vsi);
14237 		if (ret)
14238 			goto err_rings;
14239 
14240 		/* map all of the rings to the q_vectors */
14241 		i40e_vsi_map_rings_to_vectors(vsi);
14242 
14243 		i40e_vsi_reset_stats(vsi);
14244 		break;
14245 	default:
14246 		/* no netdev or rings for the other VSI types */
14247 		break;
14248 	}
14249 
14250 	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14251 	    (vsi->type == I40E_VSI_VMDQ2)) {
14252 		ret = i40e_vsi_config_rss(vsi);
14253 	}
14254 	return vsi;
14255 
14256 err_rings:
14257 	i40e_vsi_free_q_vectors(vsi);
14258 err_msix:
14259 	if (vsi->netdev_registered) {
14260 		vsi->netdev_registered = false;
14261 		unregister_netdev(vsi->netdev);
14262 		free_netdev(vsi->netdev);
14263 		vsi->netdev = NULL;
14264 	}
14265 err_netdev:
14266 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14267 err_vsi:
14268 	i40e_vsi_clear(vsi);
14269 err_alloc:
14270 	return NULL;
14271 }
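
/* Usage sketch (hypothetical caller, for illustration only): create a
 * VMDQ VSI below the LAN VSI's uplink, letting i40e_vsi_setup() create
 * or reuse a VEB as needed:
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		return -ENOMEM;
 */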
14272 
14273 /**
14274  * i40e_veb_get_bw_info - Query VEB BW information
14275  * @veb: the veb to query
14276  *
14277  * Query the Tx scheduler BW configuration data for given VEB
14278  **/
14279 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14280 {
14281 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14282 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14283 	struct i40e_pf *pf = veb->pf;
14284 	struct i40e_hw *hw = &pf->hw;
14285 	u32 tc_bw_max;
14286 	int ret = 0;
14287 	int i;
14288 
14289 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14290 						  &bw_data, NULL);
14291 	if (ret) {
14292 		dev_info(&pf->pdev->dev,
14293 			 "query veb bw config failed, err %s aq_err %s\n",
14294 			 i40e_stat_str(&pf->hw, ret),
14295 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14296 		goto out;
14297 	}
14298 
14299 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14300 						   &ets_data, NULL);
14301 	if (ret) {
14302 		dev_info(&pf->pdev->dev,
14303 			 "query veb bw ets config failed, err %s aq_err %s\n",
14304 			 i40e_stat_str(&pf->hw, ret),
14305 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14306 		goto out;
14307 	}
14308 
14309 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14310 	veb->bw_max_quanta = ets_data.tc_bw_max;
14311 	veb->is_abs_credits = bw_data.absolute_credits_enable;
14312 	veb->enabled_tc = ets_data.tc_valid_bits;
14313 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14314 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
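	/* tc_bw_max packs one 4-bit max-quanta field per TC; the two le16
	 * words are combined into a single u32 and only the low 3 bits of
	 * each field are used below.
	 */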
14315 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14316 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14317 		veb->bw_tc_limit_credits[i] =
14318 					le16_to_cpu(bw_data.tc_bw_limits[i]);
14319 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14320 	}
14321 
14322 out:
14323 	return ret;
14324 }
14325 
14326 /**
14327  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14328  * @pf: board private structure
14329  *
14330  * On error: returns error code (negative)
 * On success: returns veb index in PF (non-negative)
14332  **/
14333 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14334 {
14335 	int ret = -ENOENT;
14336 	struct i40e_veb *veb;
14337 	int i;
14338 
14339 	/* Need to protect the allocation of switch elements at the PF level */
14340 	mutex_lock(&pf->switch_mutex);
14341 
14342 	/* VEB list may be fragmented if VEB creation/destruction has
14343 	 * been happening.  We can afford to do a quick scan to look
14344 	 * for any free slots in the list.
14345 	 *
	 * find the next empty veb slot
14347 	 */
14348 	i = 0;
14349 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14350 		i++;
14351 	if (i >= I40E_MAX_VEB) {
14352 		ret = -ENOMEM;
14353 		goto err_alloc_veb;  /* out of VEB slots! */
14354 	}
14355 
14356 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14357 	if (!veb) {
14358 		ret = -ENOMEM;
14359 		goto err_alloc_veb;
14360 	}
14361 	veb->pf = pf;
14362 	veb->idx = i;
14363 	veb->enabled_tc = 1;
14364 
14365 	pf->veb[i] = veb;
14366 	ret = i;
14367 err_alloc_veb:
14368 	mutex_unlock(&pf->switch_mutex);
14369 	return ret;
14370 }
14371 
14372 /**
14373  * i40e_switch_branch_release - Delete a branch of the switch tree
14374  * @branch: where to start deleting
14375  *
14376  * This uses recursion to find the tips of the branch to be
14377  * removed, deleting until we get back to and can delete this VEB.
14378  **/
14379 static void i40e_switch_branch_release(struct i40e_veb *branch)
14380 {
14381 	struct i40e_pf *pf = branch->pf;
14382 	u16 branch_seid = branch->seid;
14383 	u16 veb_idx = branch->idx;
14384 	int i;
14385 
14386 	/* release any VEBs on this VEB - RECURSION */
14387 	for (i = 0; i < I40E_MAX_VEB; i++) {
14388 		if (!pf->veb[i])
14389 			continue;
14390 		if (pf->veb[i]->uplink_seid == branch->seid)
14391 			i40e_switch_branch_release(pf->veb[i]);
14392 	}
14393 
14394 	/* Release the VSIs on this VEB, but not the owner VSI.
14395 	 *
14396 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14397 	 *       the VEB itself, so don't use (*branch) after this loop.
14398 	 */
14399 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14400 		if (!pf->vsi[i])
14401 			continue;
14402 		if (pf->vsi[i]->uplink_seid == branch_seid &&
14403 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14404 			i40e_vsi_release(pf->vsi[i]);
14405 		}
14406 	}
14407 
14408 	/* There's one corner case where the VEB might not have been
14409 	 * removed, so double check it here and remove it if needed.
14410 	 * This case happens if the veb was created from the debugfs
14411 	 * commands and no VSIs were added to it.
14412 	 */
14413 	if (pf->veb[veb_idx])
14414 		i40e_veb_release(pf->veb[veb_idx]);
14415 }
14416 
14417 /**
14418  * i40e_veb_clear - remove veb struct
14419  * @veb: the veb to remove
14420  **/
14421 static void i40e_veb_clear(struct i40e_veb *veb)
14422 {
14423 	if (!veb)
14424 		return;
14425 
14426 	if (veb->pf) {
14427 		struct i40e_pf *pf = veb->pf;
14428 
14429 		mutex_lock(&pf->switch_mutex);
14430 		if (pf->veb[veb->idx] == veb)
14431 			pf->veb[veb->idx] = NULL;
14432 		mutex_unlock(&pf->switch_mutex);
14433 	}
14434 
14435 	kfree(veb);
14436 }
14437 
14438 /**
14439  * i40e_veb_release - Delete a VEB and free its resources
14440  * @veb: the VEB being removed
14441  **/
14442 void i40e_veb_release(struct i40e_veb *veb)
14443 {
14444 	struct i40e_vsi *vsi = NULL;
14445 	struct i40e_pf *pf;
14446 	int i, n = 0;
14447 
14448 	pf = veb->pf;
14449 
14450 	/* find the remaining VSI and check for extras */
14451 	for (i = 0; i < pf->num_alloc_vsi; i++) {
14452 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14453 			n++;
14454 			vsi = pf->vsi[i];
14455 		}
14456 	}
14457 	if (n != 1) {
14458 		dev_info(&pf->pdev->dev,
14459 			 "can't remove VEB %d with %d VSIs left\n",
14460 			 veb->seid, n);
14461 		return;
14462 	}
14463 
14464 	/* move the remaining VSI to uplink veb */
14465 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14466 	if (veb->uplink_seid) {
14467 		vsi->uplink_seid = veb->uplink_seid;
14468 		if (veb->uplink_seid == pf->mac_seid)
14469 			vsi->veb_idx = I40E_NO_VEB;
14470 		else
14471 			vsi->veb_idx = veb->veb_idx;
14472 	} else {
14473 		/* floating VEB */
14474 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14475 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14476 	}
14477 
14478 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14479 	i40e_veb_clear(veb);
14480 }
14481 
14482 /**
14483  * i40e_add_veb - create the VEB in the switch
14484  * @veb: the VEB to be instantiated
14485  * @vsi: the controlling VSI
14486  **/
14487 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14488 {
14489 	struct i40e_pf *pf = veb->pf;
14490 	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14491 	int ret;
14492 
14493 	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14494 			      veb->enabled_tc, false,
14495 			      &veb->seid, enable_stats, NULL);
14496 
14497 	/* get a VEB from the hardware */
14498 	if (ret) {
14499 		dev_info(&pf->pdev->dev,
14500 			 "couldn't add VEB, err %s aq_err %s\n",
14501 			 i40e_stat_str(&pf->hw, ret),
14502 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14503 		return -EPERM;
14504 	}
14505 
14506 	/* get statistics counter */
14507 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14508 					 &veb->stats_idx, NULL, NULL, NULL);
14509 	if (ret) {
14510 		dev_info(&pf->pdev->dev,
14511 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14512 			 i40e_stat_str(&pf->hw, ret),
14513 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14514 		return -EPERM;
14515 	}
14516 	ret = i40e_veb_get_bw_info(veb);
14517 	if (ret) {
14518 		dev_info(&pf->pdev->dev,
14519 			 "couldn't get VEB bw info, err %s aq_err %s\n",
14520 			 i40e_stat_str(&pf->hw, ret),
14521 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14522 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14523 		return -ENOENT;
14524 	}
14525 
14526 	vsi->uplink_seid = veb->seid;
14527 	vsi->veb_idx = veb->idx;
14528 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14529 
14530 	return 0;
14531 }
14532 
14533 /**
14534  * i40e_veb_setup - Set up a VEB
14535  * @pf: board private structure
14536  * @flags: VEB setup flags
14537  * @uplink_seid: the switch element to link to
14538  * @vsi_seid: the initial VSI seid
14539  * @enabled_tc: Enabled TC bit-map
14540  *
14541  * This allocates the sw VEB structure and links it into the switch
14542  * It is possible and legal for this to be a duplicate of an already
14543  * existing VEB.  It is also possible for both uplink and vsi seids
14544  * to be zero, in order to create a floating VEB.
14545  *
14546  * Returns pointer to the successfully allocated VEB sw struct on
14547  * success, otherwise returns NULL on failure.
14548  **/
14549 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14550 				u16 uplink_seid, u16 vsi_seid,
14551 				u8 enabled_tc)
14552 {
14553 	struct i40e_veb *veb, *uplink_veb = NULL;
14554 	int vsi_idx, veb_idx;
14555 	int ret;
14556 
14557 	/* if one seid is 0, the other must be 0 to create a floating relay */
14558 	if ((uplink_seid == 0 || vsi_seid == 0) &&
14559 	    (uplink_seid + vsi_seid != 0)) {
14560 		dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
14562 			 uplink_seid, vsi_seid);
14563 		return NULL;
14564 	}
14565 
14566 	/* make sure there is such a vsi and uplink */
14567 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14568 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14569 			break;
14570 	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14571 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14572 			 vsi_seid);
14573 		return NULL;
14574 	}
14575 
14576 	if (uplink_seid && uplink_seid != pf->mac_seid) {
14577 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14578 			if (pf->veb[veb_idx] &&
14579 			    pf->veb[veb_idx]->seid == uplink_seid) {
14580 				uplink_veb = pf->veb[veb_idx];
14581 				break;
14582 			}
14583 		}
14584 		if (!uplink_veb) {
14585 			dev_info(&pf->pdev->dev,
14586 				 "uplink seid %d not found\n", uplink_seid);
14587 			return NULL;
14588 		}
14589 	}
14590 
14591 	/* get veb sw struct */
14592 	veb_idx = i40e_veb_mem_alloc(pf);
14593 	if (veb_idx < 0)
14594 		goto err_alloc;
14595 	veb = pf->veb[veb_idx];
14596 	veb->flags = flags;
14597 	veb->uplink_seid = uplink_seid;
14598 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14599 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14600 
14601 	/* create the VEB in the switch */
14602 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14603 	if (ret)
14604 		goto err_veb;
14605 	if (vsi_idx == pf->lan_vsi)
14606 		pf->lan_veb = veb->idx;
14607 
14608 	return veb;
14609 
14610 err_veb:
14611 	i40e_veb_clear(veb);
14612 err_alloc:
14613 	return NULL;
14614 }
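
/* Usage sketch, mirroring the call sites in i40e_vsi_setup() above: hang
 * a new VEB off an existing VSI's uplink, with that VSI as the owner:
 *
 *	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *	if (!veb)
 *		return -ENOENT;
 */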
14615 
14616 /**
14617  * i40e_setup_pf_switch_element - set PF vars based on switch type
14618  * @pf: board private structure
14619  * @ele: element we are building info from
14620  * @num_reported: total number of elements
14621  * @printconfig: should we print the contents
14622  *
 * Helper function to extract a few useful SEID values.
14624  **/
14625 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14626 				struct i40e_aqc_switch_config_element_resp *ele,
14627 				u16 num_reported, bool printconfig)
14628 {
14629 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14630 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14631 	u8 element_type = ele->element_type;
14632 	u16 seid = le16_to_cpu(ele->seid);
14633 
14634 	if (printconfig)
14635 		dev_info(&pf->pdev->dev,
14636 			 "type=%d seid=%d uplink=%d downlink=%d\n",
14637 			 element_type, seid, uplink_seid, downlink_seid);
14638 
14639 	switch (element_type) {
14640 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
14641 		pf->mac_seid = seid;
14642 		break;
14643 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
14644 		/* Main VEB? */
14645 		if (uplink_seid != pf->mac_seid)
14646 			break;
14647 		if (pf->lan_veb >= I40E_MAX_VEB) {
14648 			int v;
14649 
14650 			/* find existing or else empty VEB */
14651 			for (v = 0; v < I40E_MAX_VEB; v++) {
14652 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14653 					pf->lan_veb = v;
14654 					break;
14655 				}
14656 			}
14657 			if (pf->lan_veb >= I40E_MAX_VEB) {
14658 				v = i40e_veb_mem_alloc(pf);
14659 				if (v < 0)
14660 					break;
14661 				pf->lan_veb = v;
14662 			}
14663 		}
14664 		if (pf->lan_veb >= I40E_MAX_VEB)
14665 			break;
14666 
14667 		pf->veb[pf->lan_veb]->seid = seid;
14668 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14669 		pf->veb[pf->lan_veb]->pf = pf;
14670 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14671 		break;
14672 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
14673 		if (num_reported != 1)
14674 			break;
14675 		/* This is immediately after a reset so we can assume this is
14676 		 * the PF's VSI
14677 		 */
14678 		pf->mac_seid = uplink_seid;
14679 		pf->pf_seid = downlink_seid;
14680 		pf->main_vsi_seid = seid;
14681 		if (printconfig)
14682 			dev_info(&pf->pdev->dev,
14683 				 "pf_seid=%d main_vsi_seid=%d\n",
14684 				 pf->pf_seid, pf->main_vsi_seid);
14685 		break;
14686 	case I40E_SWITCH_ELEMENT_TYPE_PF:
14687 	case I40E_SWITCH_ELEMENT_TYPE_VF:
14688 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
14689 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
14690 	case I40E_SWITCH_ELEMENT_TYPE_PE:
14691 	case I40E_SWITCH_ELEMENT_TYPE_PA:
14692 		/* ignore these for now */
14693 		break;
14694 	default:
14695 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14696 			 element_type, seid);
14697 		break;
14698 	}
14699 }
14700 
14701 /**
14702  * i40e_fetch_switch_configuration - Get switch config from firmware
14703  * @pf: board private structure
14704  * @printconfig: should we print the contents
14705  *
14706  * Get the current switch configuration from the device and
14707  * extract a few useful SEID values.
14708  **/
14709 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14710 {
14711 	struct i40e_aqc_get_switch_config_resp *sw_config;
14712 	u16 next_seid = 0;
14713 	int ret = 0;
14714 	u8 *aq_buf;
14715 	int i;
14716 
14717 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14718 	if (!aq_buf)
14719 		return -ENOMEM;
14720 
14721 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
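
	/* The FW may return the switch configuration in multiple chunks;
	 * next_seid acts as a continuation cookie, so we loop until the
	 * FW reports no more elements (next_seid == 0).
	 */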
14722 	do {
14723 		u16 num_reported, num_total;
14724 
14725 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14726 						I40E_AQ_LARGE_BUF,
14727 						&next_seid, NULL);
14728 		if (ret) {
14729 			dev_info(&pf->pdev->dev,
14730 				 "get switch config failed err %s aq_err %s\n",
14731 				 i40e_stat_str(&pf->hw, ret),
14732 				 i40e_aq_str(&pf->hw,
14733 					     pf->hw.aq.asq_last_status));
14734 			kfree(aq_buf);
14735 			return -ENOENT;
14736 		}
14737 
14738 		num_reported = le16_to_cpu(sw_config->header.num_reported);
14739 		num_total = le16_to_cpu(sw_config->header.num_total);
14740 
14741 		if (printconfig)
14742 			dev_info(&pf->pdev->dev,
14743 				 "header: %d reported %d total\n",
14744 				 num_reported, num_total);
14745 
14746 		for (i = 0; i < num_reported; i++) {
14747 			struct i40e_aqc_switch_config_element_resp *ele =
14748 				&sw_config->element[i];
14749 
14750 			i40e_setup_pf_switch_element(pf, ele, num_reported,
14751 						     printconfig);
14752 		}
14753 	} while (next_seid != 0);
14754 
14755 	kfree(aq_buf);
14756 	return ret;
14757 }
14758 
14759 /**
14760  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14761  * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
14763  * @lock_acquired: indicates whether or not the lock has been acquired
14764  *
14765  * Returns 0 on success, negative value on failure
14766  **/
14767 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14768 {
14769 	u16 flags = 0;
14770 	int ret;
14771 
14772 	/* find out what's out there already */
14773 	ret = i40e_fetch_switch_configuration(pf, false);
14774 	if (ret) {
14775 		dev_info(&pf->pdev->dev,
14776 			 "couldn't fetch switch config, err %s aq_err %s\n",
14777 			 i40e_stat_str(&pf->hw, ret),
14778 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14779 		return ret;
14780 	}
14781 	i40e_pf_reset_stats(pf);
14782 
	/* Set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when the user requests promisc. The default is limited
	 * promisc.
	 */
14788 
14789 	if ((pf->hw.pf_id == 0) &&
14790 	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14791 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14792 		pf->last_sw_conf_flags = flags;
14793 	}
14794 
14795 	if (pf->hw.pf_id == 0) {
14796 		u16 valid_flags;
14797 
14798 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14799 		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14800 						NULL);
14801 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14802 			dev_info(&pf->pdev->dev,
14803 				 "couldn't set switch config bits, err %s aq_err %s\n",
14804 				 i40e_stat_str(&pf->hw, ret),
14805 				 i40e_aq_str(&pf->hw,
14806 					     pf->hw.aq.asq_last_status));
14807 			/* not a fatal problem, just keep going */
14808 		}
14809 		pf->last_sw_conf_valid_flags = valid_flags;
14810 	}
14811 
14812 	/* first time setup */
14813 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14814 		struct i40e_vsi *vsi = NULL;
14815 		u16 uplink_seid;
14816 
14817 		/* Set up the PF VSI associated with the PF's main VSI
14818 		 * that is already in the HW switch
14819 		 */
14820 		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14821 			uplink_seid = pf->veb[pf->lan_veb]->seid;
14822 		else
14823 			uplink_seid = pf->mac_seid;
14824 		if (pf->lan_vsi == I40E_NO_VSI)
14825 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14826 		else if (reinit)
14827 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14828 		if (!vsi) {
14829 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14830 			i40e_cloud_filter_exit(pf);
14831 			i40e_fdir_teardown(pf);
14832 			return -EAGAIN;
14833 		}
14834 	} else {
14835 		/* force a reset of TC and queue layout configurations */
14836 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14837 
14838 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14839 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14840 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14841 	}
14842 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14843 
14844 	i40e_fdir_sb_setup(pf);
14845 
14846 	/* Setup static PF queue filter control settings */
14847 	ret = i40e_setup_pf_filter_control(pf);
14848 	if (ret) {
14849 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14850 			 ret);
		/* Failure here should not block the remaining setup steps */
14852 	}
14853 
14854 	/* enable RSS in the HW, even for only one queue, as the stack can use
14855 	 * the hash
14856 	 */
14857 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14858 		i40e_pf_config_rss(pf);
14859 
14860 	/* fill in link information and enable LSE reporting */
14861 	i40e_link_event(pf);
14862 
14863 	/* Initialize user-specific link properties */
14864 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14865 				  I40E_AQ_AN_COMPLETED) ? true : false);
14866 
14867 	i40e_ptp_init(pf);
14868 
14869 	if (!lock_acquired)
14870 		rtnl_lock();
14871 
14872 	/* repopulate tunnel port filters */
14873 	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14874 
14875 	if (!lock_acquired)
14876 		rtnl_unlock();
14877 
14878 	return ret;
14879 }
14880 
14881 /**
14882  * i40e_determine_queue_usage - Work out queue distribution
14883  * @pf: board private structure
14884  **/
14885 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14886 {
14887 	int queues_left;
14888 	int q_max;
14889 
14890 	pf->num_lan_qps = 0;
14891 
14892 	/* Find the max queues to be put into basic use.  We'll always be
14893 	 * using TC0, whether or not DCB is running, and TC0 will get the
14894 	 * big RSS set.
14895 	 */
14896 	queues_left = pf->hw.func_caps.num_tx_qp;
14897 
14898 	if ((queues_left == 1) ||
14899 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14900 		/* one qp for PF, no queues for anything else */
14901 		queues_left = 0;
14902 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14903 
14904 		/* make sure all the fancies are disabled */
14905 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14906 			       I40E_FLAG_IWARP_ENABLED	|
14907 			       I40E_FLAG_FD_SB_ENABLED	|
14908 			       I40E_FLAG_FD_ATR_ENABLED	|
14909 			       I40E_FLAG_DCB_CAPABLE	|
14910 			       I40E_FLAG_DCB_ENABLED	|
14911 			       I40E_FLAG_SRIOV_ENABLED	|
14912 			       I40E_FLAG_VMDQ_ENABLED);
14913 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14914 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14915 				  I40E_FLAG_FD_SB_ENABLED |
14916 				  I40E_FLAG_FD_ATR_ENABLED |
14917 				  I40E_FLAG_DCB_CAPABLE))) {
14918 		/* one qp for PF */
14919 		pf->alloc_rss_size = pf->num_lan_qps = 1;
14920 		queues_left -= pf->num_lan_qps;
14921 
14922 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
14923 			       I40E_FLAG_IWARP_ENABLED	|
14924 			       I40E_FLAG_FD_SB_ENABLED	|
14925 			       I40E_FLAG_FD_ATR_ENABLED	|
14926 			       I40E_FLAG_DCB_ENABLED	|
14927 			       I40E_FLAG_VMDQ_ENABLED);
14928 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14929 	} else {
14930 		/* Not enough queues for all TCs */
14931 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14932 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14933 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14934 					I40E_FLAG_DCB_ENABLED);
14935 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14936 		}
14937 
14938 		/* limit lan qps to the smaller of qps, cpus or msix */
14939 		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14940 		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14941 		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14942 		pf->num_lan_qps = q_max;
14943 
14944 		queues_left -= pf->num_lan_qps;
14945 	}
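
	/* Worked example with hypothetical capabilities: num_tx_qp = 128,
	 * rss_size_max = 64, 16 online CPUs and 129 MSI-X vectors give
	 * q_max = max(64, 16) = 64, then min(64, 128) = 64, then
	 * min(64, 129) = 64, so num_lan_qps = 64 and 64 queues remain
	 * for FD, VFs and VMDq below.
	 */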
14946 
14947 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14948 		if (queues_left > 1) {
14949 			queues_left -= 1; /* save 1 queue for FD */
14950 		} else {
14951 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14952 			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14953 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14954 		}
14955 	}
14956 
14957 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14958 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14959 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14960 					(queues_left / pf->num_vf_qps));
14961 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14962 	}
14963 
14964 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14965 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14966 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14967 					  (queues_left / pf->num_vmdq_qps));
14968 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14969 	}
14970 
14971 	pf->queues_left = queues_left;
14972 	dev_dbg(&pf->pdev->dev,
14973 		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14974 		pf->hw.func_caps.num_tx_qp,
14975 		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14976 		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14977 		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14978 		queues_left);
14979 }
14980 
14981 /**
14982  * i40e_setup_pf_filter_control - Setup PF static filter control
14983  * @pf: PF to be setup
14984  *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the PF.
14989  *
14990  * Returns 0 on success, negative on failure
14991  **/
14992 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14993 {
14994 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
14995 
14996 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14997 
14998 	/* Flow Director is enabled */
14999 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
15000 		settings->enable_fdir = true;
15001 
15002 	/* Ethtype and MACVLAN filters enabled for PF */
15003 	settings->enable_ethtype = true;
15004 	settings->enable_macvlan = true;
15005 
15006 	if (i40e_set_filter_control(&pf->hw, settings))
15007 		return -ENOENT;
15008 
15009 	return 0;
15010 }
15011 
15012 #define INFO_STRING_LEN 255
15013 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15014 static void i40e_print_features(struct i40e_pf *pf)
15015 {
15016 	struct i40e_hw *hw = &pf->hw;
15017 	char *buf;
15018 	int i;
15019 
15020 	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15021 	if (!buf)
15022 		return;
15023 
15024 	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15025 #ifdef CONFIG_PCI_IOV
15026 	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15027 #endif
15028 	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15029 		      pf->hw.func_caps.num_vsis,
15030 		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
15031 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
15032 		i += scnprintf(&buf[i], REMAIN(i), " RSS");
15033 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
15034 		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15035 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
15036 		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15037 		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15038 	}
15039 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
15040 		i += scnprintf(&buf[i], REMAIN(i), " DCB");
15041 	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15042 	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15043 	if (pf->flags & I40E_FLAG_PTP)
15044 		i += scnprintf(&buf[i], REMAIN(i), " PTP");
15045 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
15046 		i += scnprintf(&buf[i], REMAIN(i), " VEB");
15047 	else
15048 		i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15049 
15050 	dev_info(&pf->pdev->dev, "%s\n", buf);
15051 	kfree(buf);
15052 	WARN_ON(i > INFO_STRING_LEN);
15053 }
15054 
15055 /**
15056  * i40e_get_platform_mac_addr - get platform-specific MAC address
15057  * @pdev: PCI device information struct
15058  * @pf: board private structure
15059  *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware or an
 * arch-specific fallback. Otherwise, we'll default to the value stored
 * in firmware.
15064  **/
15065 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15066 {
15067 	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15068 		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15069 }
15070 
15071 /**
15072  * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15073  * @fec_cfg: FEC option to set in flags
15074  * @flags: ptr to flags in which we set FEC option
15075  **/
15076 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
15077 {
15078 	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
15079 		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
15080 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15081 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15082 		*flags |= I40E_FLAG_RS_FEC;
15083 		*flags &= ~I40E_FLAG_BASE_R_FEC;
15084 	}
15085 	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15086 	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15087 		*flags |= I40E_FLAG_BASE_R_FEC;
15088 		*flags &= ~I40E_FLAG_RS_FEC;
15089 	}
15090 	if (fec_cfg == 0)
15091 		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
15092 }
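
/* For example (a sketch of the mapping above): I40E_AQ_SET_FEC_AUTO sets
 * both I40E_FLAG_RS_FEC and I40E_FLAG_BASE_R_FEC; a REQUEST_RS/ABILITY_RS
 * config selects RS only; a REQUEST_KR/ABILITY_KR config selects BASE-R
 * only; fec_cfg == 0 clears both flags.
 */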
15093 
15094 /**
15095  * i40e_check_recovery_mode - check if we are running transition firmware
15096  * @pf: board private structure
15097  *
15098  * Check registers indicating the firmware runs in recovery mode. Sets the
15099  * appropriate driver state.
15100  *
15101  * Returns true if the recovery mode was detected, false otherwise
15102  **/
15103 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15104 {
15105 	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15106 
15107 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15108 		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15109 		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15110 		set_bit(__I40E_RECOVERY_MODE, pf->state);
15111 
15112 		return true;
15113 	}
15114 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15115 		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
15116 
15117 	return false;
15118 }
15119 
15120 /**
15121  * i40e_pf_loop_reset - perform reset in a loop.
15122  * @pf: board private structure
15123  *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted the NIC's
 * firmware enters recovery mode.
 * Right after a POR it takes about 7 minutes for the firmware to enter
 * recovery mode. Until that time the NIC is in some kind of intermediate
 * state. After that time period the NIC almost surely enters
 * recovery mode. The only way for a driver to detect the intermediate
 * state is to issue a series of PF resets and check the return value.
 * If a PF reset returns success the firmware could still be in recovery
 * mode, so the caller needs to check for recovery mode after this
 * function returns success. There is a small chance that the
 * firmware will hang in the intermediate state forever.
 * Since waiting 7 minutes is quite a lot of time, this function waits
 * 10 seconds and then gives up by returning an error.
15138  *
15139  * Return 0 on success, negative on failure.
15140  **/
15141 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15142 {
15143 	/* wait max 10 seconds for PF reset to succeed */
15144 	const unsigned long time_end = jiffies + 10 * HZ;
15145 
15146 	struct i40e_hw *hw = &pf->hw;
15147 	i40e_status ret;
15148 
15149 	ret = i40e_pf_reset(hw);
15150 	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15151 		usleep_range(10000, 20000);
15152 		ret = i40e_pf_reset(hw);
15153 	}
15154 
15155 	if (ret == I40E_SUCCESS)
15156 		pf->pfr_count++;
15157 	else
15158 		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15159 
15160 	return ret;
15161 }
15162 
15163 /**
15164  * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15165  * @pf: board private structure
15166  *
 * Check FW registers to determine if the FW issued an unexpected EMP Reset.
 * Every time an unexpected EMP Reset occurs the FW increments
 * a counter of unexpected EMP Resets. When the counter reaches 10
 * the FW should enter Recovery mode.
15171  *
15172  * Returns true if FW issued unexpected EMP Reset
15173  **/
15174 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15175 {
15176 	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15177 			   I40E_GL_FWSTS_FWS1B_MASK;
15178 	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15179 	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15180 }
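
/* Reading the check above: a FWS1B value in (EMPR_0, EMPR_10] indicates
 * the FW has counted between 1 and 10 unexpected EMP Resets.
 */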
15181 
15182 /**
15183  * i40e_handle_resets - handle EMP resets and PF resets
15184  * @pf: board private structure
15185  *
15186  * Handle both EMP resets and PF resets and conclude whether there are
15187  * any issues regarding these resets. If there are any issues then
 * generate a log entry.
15189  *
15190  * Return 0 if NIC is healthy or negative value when there are issues
15191  * with resets
15192  **/
15193 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15194 {
15195 	const i40e_status pfr = i40e_pf_loop_reset(pf);
15196 	const bool is_empr = i40e_check_fw_empr(pf);
15197 
15198 	if (is_empr || pfr != I40E_SUCCESS)
15199 		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15200 
15201 	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15202 }
15203 
15204 /**
15205  * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15206  * @pf: board private structure
15207  * @hw: ptr to the hardware info
15208  *
15209  * This function does a minimal setup of all subsystems needed for running
15210  * recovery mode.
15211  *
15212  * Returns 0 on success, negative on failure
15213  **/
15214 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15215 {
15216 	struct i40e_vsi *vsi;
15217 	int err;
15218 	int v_idx;
15219 
15220 	pci_save_state(pf->pdev);
15221 
15222 	/* set up periodic task facility */
15223 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15224 	pf->service_timer_period = HZ;
15225 
15226 	INIT_WORK(&pf->service_task, i40e_service_task);
15227 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15228 
15229 	err = i40e_init_interrupt_scheme(pf);
15230 	if (err)
15231 		goto err_switch_setup;
15232 
15233 	/* The number of VSIs reported by the FW is the minimum guaranteed
15234 	 * to us; HW supports far more and we share the remaining pool with
15235 	 * the other PFs. We allocate space for more than the guarantee with
15236 	 * the understanding that we might not get them all later.
15237 	 */
15238 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15239 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15240 	else
15241 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15242 
15243 	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15244 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15245 			  GFP_KERNEL);
15246 	if (!pf->vsi) {
15247 		err = -ENOMEM;
15248 		goto err_switch_setup;
15249 	}
15250 
	/* We allocate one VSI, which is needed as the absolute minimum
	 * in order to register the netdev
	 */
15254 	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15255 	if (v_idx < 0) {
15256 		err = v_idx;
15257 		goto err_switch_setup;
15258 	}
15259 	pf->lan_vsi = v_idx;
15260 	vsi = pf->vsi[v_idx];
15261 	if (!vsi) {
15262 		err = -EFAULT;
15263 		goto err_switch_setup;
15264 	}
15265 	vsi->alloc_queue_pairs = 1;
15266 	err = i40e_config_netdev(vsi);
15267 	if (err)
15268 		goto err_switch_setup;
15269 	err = register_netdev(vsi->netdev);
15270 	if (err)
15271 		goto err_switch_setup;
15272 	vsi->netdev_registered = true;
15273 	i40e_dbg_pf_init(pf);
15274 
15275 	err = i40e_setup_misc_vector_for_recovery_mode(pf);
15276 	if (err)
15277 		goto err_switch_setup;
15278 
15279 	/* tell the firmware that we're starting */
15280 	i40e_send_version(pf);
15281 
15282 	/* since everything's happy, start the service_task timer */
15283 	mod_timer(&pf->service_timer,
15284 		  round_jiffies(jiffies + pf->service_timer_period));
15285 
15286 	return 0;
15287 
15288 err_switch_setup:
15289 	i40e_reset_interrupt_capability(pf);
15290 	del_timer_sync(&pf->service_timer);
15291 	i40e_shutdown_adminq(hw);
15292 	iounmap(hw->hw_addr);
15293 	pci_disable_pcie_error_reporting(pf->pdev);
15294 	pci_release_mem_regions(pf->pdev);
15295 	pci_disable_device(pf->pdev);
15296 	kfree(pf);
15297 
15298 	return err;
15299 }
15300 
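/* Illustrative sketch, not driver code: the VSI sizing rule used above
 * (and again in i40e_probe()) is just a lower bound, equivalent to the
 * kernel's max_t() helper; the function name here is made up.
 */
static inline u32 i40e_example_num_alloc_vsi(u32 fw_num_vsis)
{
	/* never size the VSI array below the driver minimum */
	return max_t(u32, fw_num_vsis, I40E_MIN_VSI_ALLOC);
}
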
15301 /**
15302  * i40e_set_subsystem_device_id - set subsystem device id
15303  * @hw: pointer to the hardware info
15304  *
15305  * Set the PCI subsystem device id from the pci_dev structure or,
15306  * when that field reads as zero, from a dedicated FW register.
15307  **/
15308 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15309 {
15310 	struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
15311 
15312 	hw->subsystem_device_id = pdev->subsystem_device ?
15313 		pdev->subsystem_device :
15314 		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15315 }
15316 
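/* Illustrative sketch, not driver code: the fallback above in isolation --
 * prefer the config-space value, and only when it reads as zero keep the
 * low 16 bits of the FW-provided register; the helper name is made up.
 */
static inline u16 i40e_example_subsys_id(u16 cfg_space_id, u32 fw_reg)
{
	/* a non-zero config-space ID wins; otherwise truncate the FW word */
	return cfg_space_id ? cfg_space_id : (u16)(fw_reg & USHRT_MAX);
}
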
15317 /**
15318  * i40e_probe - Device initialization routine
15319  * @pdev: PCI device information struct
15320  * @ent: entry in i40e_pci_tbl
15321  *
15322  * i40e_probe initializes a PF identified by a pci_dev structure.
15323  * The OS initialization, configuring of the PF private structure,
15324  * and a hardware reset occur.
15325  *
15326  * Returns 0 on success, negative on failure
15327  **/
15328 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15329 {
15330 	struct i40e_aq_get_phy_abilities_resp abilities;
15331 #ifdef CONFIG_I40E_DCB
15332 	enum i40e_get_fw_lldp_status_resp lldp_status;
15333 	i40e_status status;
15334 #endif /* CONFIG_I40E_DCB */
15335 	struct i40e_pf *pf;
15336 	struct i40e_hw *hw;
15337 	static u16 pfs_found;
15338 	u16 wol_nvm_bits;
15339 	u16 link_status;
15340 	int err;
15341 	u32 val;
15342 	u32 i;
15343 
15344 	err = pci_enable_device_mem(pdev);
15345 	if (err)
15346 		return err;
15347 
15348 	/* set up for high or low dma */
15349 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15350 	if (err) {
15351 		dev_err(&pdev->dev,
15352 			"DMA configuration failed: 0x%x\n", err);
15353 		goto err_dma;
15354 	}
15355 
15356 	/* set up pci connections */
15357 	err = pci_request_mem_regions(pdev, i40e_driver_name);
15358 	if (err) {
15359 		dev_info(&pdev->dev,
15360 			 "pci_request_mem_regions failed %d\n", err);
15361 		goto err_pci_reg;
15362 	}
15363 
15364 	pci_enable_pcie_error_reporting(pdev);
15365 	pci_set_master(pdev);
15366 
15367 	/* Now that we have a PCI connection, we need to do the
15368 	 * low level device setup.  This is primarily setting up
15369 	 * the Admin Queue structures and then querying for the
15370 	 * device's current profile information.
15371 	 */
15372 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15373 	if (!pf) {
15374 		err = -ENOMEM;
15375 		goto err_pf_alloc;
15376 	}
15377 	pf->next_vsi = 0;
15378 	pf->pdev = pdev;
15379 	set_bit(__I40E_DOWN, pf->state);
15380 
15381 	hw = &pf->hw;
15382 	hw->back = pf;
15383 
15384 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15385 				I40E_MAX_CSR_SPACE);
15386 	/* The highest register we expect to read is
15387 	 * I40E_GLGEN_STAT_CLEAR, so make sure the BAR is at least
15388 	 * that large before mapping it; a smaller BAR would lead to
15389 	 * reads past the end of the mapping and a kernel panic.
15390 	 */
15391 	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15392 		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15393 			pf->ioremap_len);
15394 		err = -ENOMEM;
15395 		goto err_ioremap;
15396 	}
15397 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15398 	if (!hw->hw_addr) {
15399 		err = -EIO;
15400 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15401 			 (unsigned int)pci_resource_start(pdev, 0),
15402 			 pf->ioremap_len, err);
15403 		goto err_ioremap;
15404 	}
15405 	hw->vendor_id = pdev->vendor;
15406 	hw->device_id = pdev->device;
15407 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15408 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
15409 	i40e_set_subsystem_device_id(hw);
15410 	hw->bus.device = PCI_SLOT(pdev->devfn);
15411 	hw->bus.func = PCI_FUNC(pdev->devfn);
15412 	hw->bus.bus_id = pdev->bus->number;
15413 	pf->instance = pfs_found;
15414 
15415 	/* Select something other than the 802.1ad ethertype for the
15416 	 * switch to use internally and drop on ingress.
15417 	 */
15418 	hw->switch_tag = 0xffff;
15419 	hw->first_tag = ETH_P_8021AD;
15420 	hw->second_tag = ETH_P_8021Q;
15421 
15422 	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15423 	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15424 	INIT_LIST_HEAD(&pf->ddp_old_prof);
15425 
15426 	/* set up the locks for the AQ, do this only once in probe
15427 	 * and destroy them only once in remove
15428 	 */
15429 	mutex_init(&hw->aq.asq_mutex);
15430 	mutex_init(&hw->aq.arq_mutex);
15431 
15432 	pf->msg_enable = netif_msg_init(debug,
15433 					NETIF_MSG_DRV |
15434 					NETIF_MSG_PROBE |
15435 					NETIF_MSG_LINK);
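	/* a debug value with the high bit set (0x8XXXXXXX, which reads as
	 * < -1 when interpreted as a signed int) is passed through as a
	 * raw HW/AQ debug mask rather than a message level
	 */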
15436 	if (debug < -1)
15437 		pf->hw.debug_mask = debug;
15438 
15439 	/* do a special CORER for clearing PXE mode once at init */
15440 	if (hw->revision_id == 0 &&
15441 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15442 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15443 		i40e_flush(hw);
15444 		msleep(200);
15445 		pf->corer_count++;
15446 
15447 		i40e_clear_pxe_mode(hw);
15448 	}
15449 
15450 	/* Reset here to make sure all is clean and to define PF 'n' */
15451 	i40e_clear_hw(hw);
15452 
15453 	err = i40e_set_mac_type(hw);
15454 	if (err) {
15455 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15456 			 err);
15457 		goto err_pf_reset;
15458 	}
15459 
15460 	err = i40e_handle_resets(pf);
15461 	if (err)
15462 		goto err_pf_reset;
15463 
15464 	i40e_check_recovery_mode(pf);
15465 
15466 	if (is_kdump_kernel()) {
15467 		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15468 		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15469 	} else {
15470 		hw->aq.num_arq_entries = I40E_AQ_LEN;
15471 		hw->aq.num_asq_entries = I40E_AQ_LEN;
15472 	}
15473 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15474 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15475 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15476 
15477 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15478 		 "%s-%s:misc",
15479 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15480 
15481 	err = i40e_init_shared_code(hw);
15482 	if (err) {
15483 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15484 			 err);
15485 		goto err_pf_reset;
15486 	}
15487 
15488 	/* set up a default setting for link flow control */
15489 	/* set a default mode for link flow control */
15490 
15491 	err = i40e_init_adminq(hw);
15492 	if (err) {
15493 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
15494 			dev_info(&pdev->dev,
15495 				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15496 				 hw->aq.api_maj_ver,
15497 				 hw->aq.api_min_ver,
15498 				 I40E_FW_API_VERSION_MAJOR,
15499 				 I40E_FW_MINOR_VERSION(hw));
15500 		else
15501 			dev_info(&pdev->dev,
15502 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15503 
15504 		goto err_pf_reset;
15505 	}
15506 	i40e_get_oem_version(hw);
15507 
15508 	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15509 	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15510 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15511 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15512 		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15513 		 hw->subsystem_vendor_id, hw->subsystem_device_id);
15514 
15515 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15516 	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15517 		dev_dbg(&pdev->dev,
15518 			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15519 			hw->aq.api_maj_ver,
15520 			hw->aq.api_min_ver,
15521 			I40E_FW_API_VERSION_MAJOR,
15522 			I40E_FW_MINOR_VERSION(hw));
15523 	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15524 		dev_info(&pdev->dev,
15525 			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15526 			 hw->aq.api_maj_ver,
15527 			 hw->aq.api_min_ver,
15528 			 I40E_FW_API_VERSION_MAJOR,
15529 			 I40E_FW_MINOR_VERSION(hw));
15530 
15531 	i40e_verify_eeprom(pf);
15532 
15533 	/* Rev 0 hardware was never productized */
15534 	if (hw->revision_id < 1)
15535 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15536 
15537 	i40e_clear_pxe_mode(hw);
15538 
15539 	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15540 	if (err)
15541 		goto err_adminq_setup;
15542 
15543 	err = i40e_sw_init(pf);
15544 	if (err) {
15545 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15546 		goto err_sw_init;
15547 	}
15548 
15549 	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15550 		return i40e_init_recovery_mode(pf, hw);
15551 
15552 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15553 				hw->func_caps.num_rx_qp, 0, 0);
15554 	if (err) {
15555 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15556 		goto err_init_lan_hmc;
15557 	}
15558 
15559 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15560 	if (err) {
15561 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15562 		err = -ENOENT;
15563 		goto err_configure_lan_hmc;
15564 	}
15565 
15566 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
15567 	 * Ignore the error return code because the call fails if LLDP
15568 	 * was already disabled via hardware settings.
15569 	 */
15570 	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15571 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15572 		i40e_aq_stop_lldp(hw, true, false, NULL);
15573 	}
15574 
15575 	/* allow a platform config to override the HW addr */
15576 	i40e_get_platform_mac_addr(pdev, pf);
15577 
15578 	if (!is_valid_ether_addr(hw->mac.addr)) {
15579 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15580 		err = -EIO;
15581 		goto err_mac_addr;
15582 	}
15583 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15584 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15585 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15586 	if (is_valid_ether_addr(hw->mac.port_addr))
15587 		pf->hw_features |= I40E_HW_PORT_ID_VALID;
15588 
15589 	i40e_ptp_alloc_pins(pf);
15590 	pci_set_drvdata(pdev, pf);
15591 	pci_save_state(pdev);
15592 
15593 #ifdef CONFIG_I40E_DCB
15594 	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15595 	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
15596 		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
15597 	else
15598 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
15599 	dev_info(&pdev->dev,
15600 		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15601 			"FW LLDP is disabled\n" :
15602 			"FW LLDP is enabled\n");
15603 
15604 	/* Enable FW to write default DCB config on link-up */
15605 	i40e_aq_set_dcb_parameters(hw, true, NULL);
15606 
15607 	err = i40e_init_pf_dcb(pf);
15608 	if (err) {
15609 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15610 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15611 		/* Continue without DCB enabled */
15612 	}
15613 #endif /* CONFIG_I40E_DCB */
15614 
15615 	/* set up periodic task facility */
15616 	timer_setup(&pf->service_timer, i40e_service_timer, 0);
15617 	pf->service_timer_period = HZ;
15618 
15619 	INIT_WORK(&pf->service_task, i40e_service_task);
15620 	clear_bit(__I40E_SERVICE_SCHED, pf->state);
15621 
15622 	/* NVM bit on means WoL disabled for the port */
15623 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15624 	if ((BIT(hw->port) & wol_nvm_bits) || hw->partition_id != 1)
15625 		pf->wol_en = false;
15626 	else
15627 		pf->wol_en = true;
15628 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15629 
15630 	/* set up the main switch operations */
15631 	i40e_determine_queue_usage(pf);
15632 	err = i40e_init_interrupt_scheme(pf);
15633 	if (err)
15634 		goto err_switch_setup;
15635 
15636 	/* Reduce the Tx and Rx queue pairs for kdump.
15637 	 * With MSI-X enabled, no more TC queue pairs may be used than
15638 	 * MSI-X vectors (pf->num_lan_msix) exist, so
15639 	 * vsi->num_queue_pairs will equal pf->num_lan_msix, i.e. 1.
15640 	 */
15641 	if (is_kdump_kernel())
15642 		pf->num_lan_msix = 1;
15643 
15644 	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15645 	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15646 	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15647 	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15648 	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15649 	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15650 						    UDP_TUNNEL_TYPE_GENEVE;
15651 
15652 	/* The number of VSIs reported by the FW is the minimum guaranteed
15653 	 * to us; HW supports far more and we share the remaining pool with
15654 	 * the other PFs. We allocate space for more than the guarantee with
15655 	 * the understanding that we might not get them all later.
15656 	 */
15657 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15658 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15659 	else
15660 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15661 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15662 		dev_warn(&pf->pdev->dev,
15663 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15664 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15665 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15666 	}
15667 
15668 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15669 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15670 			  GFP_KERNEL);
15671 	if (!pf->vsi) {
15672 		err = -ENOMEM;
15673 		goto err_switch_setup;
15674 	}
15675 
15676 #ifdef CONFIG_PCI_IOV
15677 	/* prep for VF support */
15678 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15679 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15680 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15681 		if (pci_num_vf(pdev))
15682 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15683 	}
15684 #endif
15685 	err = i40e_setup_pf_switch(pf, false, false);
15686 	if (err) {
15687 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15688 		goto err_vsis;
15689 	}
15690 	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15691 
15692 	/* if FDIR VSI was set up, start it now */
15693 	for (i = 0; i < pf->num_alloc_vsi; i++) {
15694 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15695 			i40e_vsi_open(pf->vsi[i]);
15696 			break;
15697 		}
15698 	}
15699 
15700 	/* The driver only wants link up/down and module qualification
15701 	 * reports from firmware.  Note the negative logic.
15702 	 */
15703 	err = i40e_aq_set_phy_int_mask(&pf->hw,
15704 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
15705 					 I40E_AQ_EVENT_MEDIA_NA |
15706 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15707 	if (err)
15708 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15709 			 i40e_stat_str(&pf->hw, err),
15710 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15711 
15712 	/* Reconfigure hardware for allowing smaller MSS in the case
15713 	 * of TSO, so that we avoid the MDD being fired and causing
15714 	 * a reset in the case of small MSS+TSO.
15715 	 */
15716 	val = rd32(hw, I40E_REG_MSS);
15717 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15718 		val &= ~I40E_REG_MSS_MIN_MASK;
15719 		val |= I40E_64BYTE_MSS;
15720 		wr32(hw, I40E_REG_MSS, val);
15721 	}
15722 
15723 	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15724 		msleep(75);
15725 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15726 		if (err)
15727 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15728 				 i40e_stat_str(&pf->hw, err),
15729 				 i40e_aq_str(&pf->hw,
15730 					     pf->hw.aq.asq_last_status));
15731 	}
15732 	/* The main driver is (mostly) up and happy. We need to set this state
15733 	 * before setting up the misc vector or we get a race and the vector
15734 	 * ends up disabled forever.
15735 	 */
15736 	clear_bit(__I40E_DOWN, pf->state);
15737 
15738 	/* In case of MSIX we are going to setup the misc vector right here
15739 	 * to handle admin queue events etc. In case of legacy and MSI
15740 	 * the misc functionality and queue processing is combined in
15741 	 * the same vector and that gets setup at open.
15742 	 */
15743 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15744 		err = i40e_setup_misc_vector(pf);
15745 		if (err) {
15746 			dev_info(&pdev->dev,
15747 				 "setup of misc vector failed: %d\n", err);
15748 			i40e_cloud_filter_exit(pf);
15749 			i40e_fdir_teardown(pf);
15750 			goto err_vsis;
15751 		}
15752 	}
15753 
15754 #ifdef CONFIG_PCI_IOV
15755 	/* prep for VF support */
15756 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15757 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15758 	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15759 		/* disable link interrupts for VFs */
15760 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15761 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15762 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15763 		i40e_flush(hw);
15764 
15765 		if (pci_num_vf(pdev)) {
15766 			dev_info(&pdev->dev,
15767 				 "Active VFs found, allocating resources.\n");
15768 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15769 			if (err)
15770 				dev_info(&pdev->dev,
15771 					 "Error %d allocating resources for existing VFs\n",
15772 					 err);
15773 		}
15774 	}
15775 #endif /* CONFIG_PCI_IOV */
15776 
15777 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15778 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15779 						      pf->num_iwarp_msix,
15780 						      I40E_IWARP_IRQ_PILE_ID);
15781 		if (pf->iwarp_base_vector < 0) {
15782 			dev_info(&pdev->dev,
15783 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
15784 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
15785 			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15786 		}
15787 	}
15788 
15789 	i40e_dbg_pf_init(pf);
15790 
15791 	/* tell the firmware that we're starting */
15792 	i40e_send_version(pf);
15793 
15794 	/* since everything's happy, start the service_task timer */
15795 	mod_timer(&pf->service_timer,
15796 		  round_jiffies(jiffies + pf->service_timer_period));
15797 
15798 	/* add this PF to client device list and launch a client service task */
15799 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15800 		err = i40e_lan_add_device(pf);
15801 		if (err)
15802 			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15803 				 err);
15804 	}
15805 
15806 #define PCI_SPEED_SIZE 8
15807 #define PCI_WIDTH_SIZE 8
15808 	/* Devices on the IOSF bus do not have this information
15809 	 * and will report PCI Gen 1 x 1 by default so don't bother
15810 	 * checking them.
15811 	 */
15812 	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15813 		char speed[PCI_SPEED_SIZE] = "Unknown";
15814 		char width[PCI_WIDTH_SIZE] = "Unknown";
15815 
15816 		/* Get the negotiated link width and speed from PCI config
15817 		 * space
15818 		 */
15819 		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15820 					  &link_status);
15821 
15822 		i40e_set_pci_config_data(hw, link_status);
15823 
15824 		switch (hw->bus.speed) {
15825 		case i40e_bus_speed_8000:
15826 			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15827 		case i40e_bus_speed_5000:
15828 			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15829 		case i40e_bus_speed_2500:
15830 			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15831 		default:
15832 			break;
15833 		}
15834 		switch (hw->bus.width) {
15835 		case i40e_bus_width_pcie_x8:
15836 			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15837 		case i40e_bus_width_pcie_x4:
15838 			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15839 		case i40e_bus_width_pcie_x2:
15840 			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15841 		case i40e_bus_width_pcie_x1:
15842 			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15843 		default:
15844 			break;
15845 		}
15846 
15847 		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15848 			 speed, width);
15849 
15850 		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15851 		    hw->bus.speed < i40e_bus_speed_8000) {
15852 			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15853 			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15854 		}
15855 	}
15856 
15857 	/* get the requested speeds from the fw */
15858 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15859 	if (err)
15860 		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15861 			i40e_stat_str(&pf->hw, err),
15862 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15863 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15864 
15865 	/* set the FEC config due to the board capabilities */
15866 	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15867 
15868 	/* get the supported phy types from the fw */
15869 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15870 	if (err)
15871 		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15872 			i40e_stat_str(&pf->hw, err),
15873 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15874 
15875 	/* make sure the MFS hasn't been set lower than the default */
15876 #define MAX_FRAME_SIZE_DEFAULT 0x2600
15877 	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15878 	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15879 	if (val < MAX_FRAME_SIZE_DEFAULT)
15880 		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
15881 			 pf->hw.port, val);
15882 
15883 	/* Add a filter to drop all Flow control frames from any VSI from being
15884 	 * transmitted. By doing so we stop a malicious VF from sending out
15885 	 * PAUSE or PFC frames and potentially controlling traffic for other
15886 	 * PF/VF VSIs.
15887 	 * The FW can still send Flow control frames if enabled.
15888 	 */
15889 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15890 						       pf->main_vsi_seid);
15891 
15892 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15893 		(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15894 		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15895 	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15896 		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15897 	/* print a string summarizing features */
15898 	i40e_print_features(pf);
15899 
15900 	return 0;
15901 
15902 	/* Unwind what we've done if something failed in the setup */
15903 err_vsis:
15904 	set_bit(__I40E_DOWN, pf->state);
15905 	i40e_clear_interrupt_scheme(pf);
15906 	kfree(pf->vsi);
15907 err_switch_setup:
15908 	i40e_reset_interrupt_capability(pf);
15909 	del_timer_sync(&pf->service_timer);
15910 err_mac_addr:
15911 err_configure_lan_hmc:
15912 	(void)i40e_shutdown_lan_hmc(hw);
15913 err_init_lan_hmc:
15914 	kfree(pf->qp_pile);
15915 err_sw_init:
15916 err_adminq_setup:
15917 err_pf_reset:
15918 	iounmap(hw->hw_addr);
15919 err_ioremap:
15920 	kfree(pf);
15921 err_pf_alloc:
15922 	pci_disable_pcie_error_reporting(pdev);
15923 	pci_release_mem_regions(pdev);
15924 err_pci_reg:
15925 err_dma:
15926 	pci_disable_device(pdev);
15927 	return err;
15928 }
15929 
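/* Illustrative sketch, not driver code: the PCIe link check near the end
 * of i40e_probe() hands the raw PCI_EXP_LNKSTA word to
 * i40e_set_pci_config_data() for decoding; the same two fields can be
 * extracted directly with the generic masks from <uapi/linux/pci_regs.h>.
 * The helper name is made up.
 */
static inline void i40e_example_decode_lnksta(u16 lnksta, u16 *speed,
					      u16 *width)
{
	/* current link speed code: 1 = 2.5GT/s, 2 = 5GT/s, 3 = 8GT/s */
	*speed = lnksta & PCI_EXP_LNKSTA_CLS;
	/* negotiated link width in lanes (x1, x2, x4, x8, ...) */
	*width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}
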
15930 /**
15931  * i40e_remove - Device removal routine
15932  * @pdev: PCI device information struct
15933  *
15934  * i40e_remove is called by the PCI subsystem to alert the driver
15935  * that it should release a PCI device.  This could be caused by a
15936  * Hot-Plug event, or because the driver is going to be removed from
15937  * memory.
15938  **/
15939 static void i40e_remove(struct pci_dev *pdev)
15940 {
15941 	struct i40e_pf *pf = pci_get_drvdata(pdev);
15942 	struct i40e_hw *hw = &pf->hw;
15943 	i40e_status ret_code;
15944 	int i;
15945 
15946 	i40e_dbg_pf_exit(pf);
15947 
15948 	i40e_ptp_stop(pf);
15949 
15950 	/* Disable RSS in hw */
15951 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15952 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15953 
15954 	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
15955 	 * flags, once they are set, i40e_rebuild should not be called as
15956 	 * i40e_prep_for_reset always returns early.
15957 	 */
15958 	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
15959 		usleep_range(1000, 2000);
15960 	set_bit(__I40E_IN_REMOVE, pf->state);
15961 
15962 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15963 		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
15964 		i40e_free_vfs(pf);
15965 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15966 	}
15967 	/* no more scheduling of any task */
15968 	set_bit(__I40E_SUSPENDED, pf->state);
15969 	set_bit(__I40E_DOWN, pf->state);
15970 	if (pf->service_timer.function)
15971 		del_timer_sync(&pf->service_timer);
15972 	if (pf->service_task.func)
15973 		cancel_work_sync(&pf->service_task);
15974 
15975 	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15976 		struct i40e_vsi *vsi = pf->vsi[0];
15977 
15978 		/* In recovery mode we allocated only one VSI for this PF,
15979 		 * solely to register a netdev, so that the interface is
15980 		 * visible in the 'ifconfig' output.
15981 		 */
15982 		unregister_netdev(vsi->netdev);
15983 		free_netdev(vsi->netdev);
15984 
15985 		goto unmap;
15986 	}
15987 
15988 	/* Client close must be called explicitly here because the timer
15989 	 * has been stopped.
15990 	 */
15991 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15992 
15993 	i40e_fdir_teardown(pf);
15994 
15995 	/* If there is a switch structure or any orphans, remove them.
15996 	 * This leaves only the PF's VSI.
15997 	 */
15998 	for (i = 0; i < I40E_MAX_VEB; i++) {
15999 		if (!pf->veb[i])
16000 			continue;
16001 
16002 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16003 		    pf->veb[i]->uplink_seid == 0)
16004 			i40e_switch_branch_release(pf->veb[i]);
16005 	}
16006 
16007 	/* Now we can shutdown the PF's VSI, just before we kill
16008 	 * adminq and hmc.
16009 	 */
16010 	if (pf->vsi[pf->lan_vsi])
16011 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
16012 
16013 	i40e_cloud_filter_exit(pf);
16014 
16015 	/* remove attached clients */
16016 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
16017 		ret_code = i40e_lan_del_device(pf);
16018 		if (ret_code)
16019 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16020 				 ret_code);
16021 	}
16022 
16023 	/* shutdown and destroy the HMC */
16024 	if (hw->hmc.hmc_obj) {
16025 		ret_code = i40e_shutdown_lan_hmc(hw);
16026 		if (ret_code)
16027 			dev_warn(&pdev->dev,
16028 				 "Failed to destroy the HMC resources: %d\n",
16029 				 ret_code);
16030 	}
16031 
16032 unmap:
16033 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16034 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16035 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16036 		free_irq(pf->pdev->irq, pf);
16037 
16038 	/* shutdown the adminq */
16039 	i40e_shutdown_adminq(hw);
16040 
16041 	/* destroy the locks only once, here */
16042 	mutex_destroy(&hw->aq.arq_mutex);
16043 	mutex_destroy(&hw->aq.asq_mutex);
16044 
16045 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16046 	rtnl_lock();
16047 	i40e_clear_interrupt_scheme(pf);
16048 	for (i = 0; i < pf->num_alloc_vsi; i++) {
16049 		if (pf->vsi[i]) {
16050 			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16051 				i40e_vsi_clear_rings(pf->vsi[i]);
16052 			i40e_vsi_clear(pf->vsi[i]);
16053 			pf->vsi[i] = NULL;
16054 		}
16055 	}
16056 	rtnl_unlock();
16057 
16058 	for (i = 0; i < I40E_MAX_VEB; i++) {
16059 		kfree(pf->veb[i]);
16060 		pf->veb[i] = NULL;
16061 	}
16062 
16063 	kfree(pf->qp_pile);
16064 	kfree(pf->vsi);
16065 
16066 	iounmap(hw->hw_addr);
16067 	kfree(pf);
16068 	pci_release_mem_regions(pdev);
16069 
16070 	pci_disable_pcie_error_reporting(pdev);
16071 	pci_disable_device(pdev);
16072 }
16073 
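/* Illustrative sketch, not driver code: i40e_remove() above serializes
 * with an in-flight reset by spinning until it wins
 * __I40E_RESET_RECOVERY_PENDING; the generic shape of that
 * "claim a state bit or wait" pattern is below (helper name made up).
 */
static inline void i40e_example_claim_bit(int bit, unsigned long *state)
{
	while (test_and_set_bit(bit, state))	/* returns the old value */
		usleep_range(1000, 2000);	/* back off while owner runs */
}
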
16074 /**
16075  * i40e_pci_error_detected - warning that something funky happened in PCI land
16076  * @pdev: PCI device information struct
16077  * @error: the type of PCI error
16078  *
16079  * Called to warn that something happened and the error handling steps
16080  * are in progress.  Allows the driver to quiesce things, be ready for
16081  * are in progress.  Allows the driver to quiesce things and be ready for
16082  **/
16083 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16084 						pci_channel_state_t error)
16085 {
16086 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16087 
16088 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16089 
16090 	if (!pf) {
16091 		dev_info(&pdev->dev,
16092 			 "Cannot recover - error happened during device probe\n");
16093 		return PCI_ERS_RESULT_DISCONNECT;
16094 	}
16095 
16096 	/* shutdown all operations */
16097 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16098 		i40e_prep_for_reset(pf);
16099 
16100 	/* Request a slot reset */
16101 	return PCI_ERS_RESULT_NEED_RESET;
16102 }
16103 
16104 /**
16105  * i40e_pci_error_slot_reset - a PCI slot reset just happened
16106  * @pdev: PCI device information struct
16107  *
16108  * Called to find if the driver can work with the device now that
16109  * the pci slot has been reset.  If a basic connection seems good
16110  * (registers are readable and have sane content) then return a
16111  * happy little PCI_ERS_RESULT_xxx.
16112  **/
16113 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16114 {
16115 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16116 	pci_ers_result_t result;
16117 	u32 reg;
16118 
16119 	dev_dbg(&pdev->dev, "%s\n", __func__);
16120 	if (pci_enable_device_mem(pdev)) {
16121 		dev_info(&pdev->dev,
16122 			 "Cannot re-enable PCI device after reset.\n");
16123 		result = PCI_ERS_RESULT_DISCONNECT;
16124 	} else {
16125 		pci_set_master(pdev);
16126 		pci_restore_state(pdev);
16127 		pci_save_state(pdev);
16128 		pci_wake_from_d3(pdev, false);
16129 
16130 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16131 		if (reg == 0)
16132 			result = PCI_ERS_RESULT_RECOVERED;
16133 		else
16134 			result = PCI_ERS_RESULT_DISCONNECT;
16135 	}
16136 
16137 	return result;
16138 }
16139 
16140 /**
16141  * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16142  * @pdev: PCI device information struct
16143  */
16144 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16145 {
16146 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16147 
16148 	i40e_prep_for_reset(pf);
16149 }
16150 
16151 /**
16152  * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16153  * @pdev: PCI device information struct
16154  */
16155 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16156 {
16157 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16158 
16159 	if (test_bit(__I40E_IN_REMOVE, pf->state))
16160 		return;
16161 
16162 	i40e_reset_and_rebuild(pf, false, false);
16163 }
16164 
16165 /**
16166  * i40e_pci_error_resume - restart operations after PCI error recovery
16167  * @pdev: PCI device information struct
16168  *
16169  * Called to allow the driver to bring things back up after PCI error
16170  * and/or reset recovery has finished.
16171  **/
16172 static void i40e_pci_error_resume(struct pci_dev *pdev)
16173 {
16174 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16175 
16176 	dev_dbg(&pdev->dev, "%s\n", __func__);
16177 	if (test_bit(__I40E_SUSPENDED, pf->state))
16178 		return;
16179 
16180 	i40e_handle_reset_warning(pf, false);
16181 }
16182 
16183 /**
16184  * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16185  * using the mac_address_write admin q function
16186  * @pf: pointer to i40e_pf struct
16187  **/
16188 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16189 {
16190 	struct i40e_hw *hw = &pf->hw;
16191 	i40e_status ret;
16192 	u8 mac_addr[ETH_ALEN];
16193 	u16 flags = 0;
16194 
16195 	/* Get current MAC address in case it's an LAA */
16196 	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16197 		ether_addr_copy(mac_addr,
16198 				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16199 	} else {
16200 		dev_err(&pf->pdev->dev,
16201 			"Failed to retrieve MAC address; using default\n");
16202 		ether_addr_copy(mac_addr, hw->mac.addr);
16203 	}
16204 
16205 	/* The FW expects the mac address write cmd to first be called with
16206 	 * one of these flags before calling it again with the multicast
16207 	 * enable flags.
16208 	 */
16209 	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16210 
16211 	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16212 		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16213 
16214 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16215 	if (ret) {
16216 		dev_err(&pf->pdev->dev,
16217 			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
16218 		return;
16219 	}
16220 
16221 	flags = I40E_AQC_MC_MAG_EN
16222 			| I40E_AQC_WOL_PRESERVE_ON_PFR
16223 			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16224 	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16225 	if (ret)
16226 		dev_err(&pf->pdev->dev,
16227 			"Failed to enable Multicast Magic Packet wake up\n");
16228 }
16229 
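/* Illustrative sketch, not driver code: the ordering contract enforced
 * above, reduced to its two admin-queue calls -- a LAA-type write must
 * precede the write that sets the multicast-magic enable flags. The
 * wrapper name is made up; both AQ calls are the ones used above.
 */
static inline i40e_status i40e_example_mc_magic_seq(struct i40e_hw *hw,
						    u8 *mac_addr)
{
	i40e_status ret;

	/* step 1: a LAA-type write arms the function for step 2 */
	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
					mac_addr, NULL);
	if (ret)
		return ret;

	/* step 2: only now are the multicast magic-packet flags accepted */
	return i40e_aq_mac_address_write(hw, I40E_AQC_MC_MAG_EN |
					 I40E_AQC_WOL_PRESERVE_ON_PFR |
					 I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG,
					 mac_addr, NULL);
}
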
16230 /**
16231  * i40e_shutdown - PCI callback for shutting down
16232  * @pdev: PCI device information struct
16233  **/
16234 static void i40e_shutdown(struct pci_dev *pdev)
16235 {
16236 	struct i40e_pf *pf = pci_get_drvdata(pdev);
16237 	struct i40e_hw *hw = &pf->hw;
16238 
16239 	set_bit(__I40E_SUSPENDED, pf->state);
16240 	set_bit(__I40E_DOWN, pf->state);
16241 
16242 	del_timer_sync(&pf->service_timer);
16243 	cancel_work_sync(&pf->service_task);
16244 	i40e_cloud_filter_exit(pf);
16245 	i40e_fdir_teardown(pf);
16246 
16247 	/* Client close must be called explicitly here because the timer
16248 	 * has been stopped.
16249 	 */
16250 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16251 
16252 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16253 		i40e_enable_mc_magic_wake(pf);
16254 
16255 	i40e_prep_for_reset(pf);
16256 
16257 	wr32(hw, I40E_PFPM_APM,
16258 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16259 	wr32(hw, I40E_PFPM_WUFC,
16260 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16261 
16262 	/* Free MSI/legacy interrupt 0 when in recovery mode. */
16263 	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16264 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16265 		free_irq(pf->pdev->irq, pf);
16266 
16267 	/* Since we're going to destroy queues during the
16268 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16269 	 * whole section
16270 	 */
16271 	rtnl_lock();
16272 	i40e_clear_interrupt_scheme(pf);
16273 	rtnl_unlock();
16274 
16275 	if (system_state == SYSTEM_POWER_OFF) {
16276 		pci_wake_from_d3(pdev, pf->wol_en);
16277 		pci_set_power_state(pdev, PCI_D3hot);
16278 	}
16279 }
16280 
16281 /**
16282  * i40e_suspend - PM callback for moving to D3
16283  * @dev: generic device information structure
16284  **/
16285 static int __maybe_unused i40e_suspend(struct device *dev)
16286 {
16287 	struct i40e_pf *pf = dev_get_drvdata(dev);
16288 	struct i40e_hw *hw = &pf->hw;
16289 
16290 	/* If we're already suspended, then there is nothing to do */
16291 	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16292 		return 0;
16293 
16294 	set_bit(__I40E_DOWN, pf->state);
16295 
16296 	/* Ensure service task will not be running */
16297 	del_timer_sync(&pf->service_timer);
16298 	cancel_work_sync(&pf->service_task);
16299 
16300 	/* Client close must be called explicitly here because the timer
16301 	 * has been stopped.
16302 	 */
16303 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16304 
16305 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16306 		i40e_enable_mc_magic_wake(pf);
16307 
16308 	/* Since we're going to destroy queues during the
16309 	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16310 	 * whole section
16311 	 */
16312 	rtnl_lock();
16313 
16314 	i40e_prep_for_reset(pf);
16315 
16316 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16317 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16318 
16319 	/* Clear the interrupt scheme and release our IRQs so that the system
16320 	 * can safely hibernate even when there are a large number of CPUs.
16321 	 * Otherwise hibernation might fail when mapping all the vectors back
16322 	 * to CPU0.
16323 	 */
16324 	i40e_clear_interrupt_scheme(pf);
16325 
16326 	rtnl_unlock();
16327 
16328 	return 0;
16329 }
16330 
16331 /**
16332  * i40e_resume - PM callback for waking up from D3
16333  * @dev: generic device information structure
16334  **/
16335 static int __maybe_unused i40e_resume(struct device *dev)
16336 {
16337 	struct i40e_pf *pf = dev_get_drvdata(dev);
16338 	int err;
16339 
16340 	/* If we're not suspended, then there is nothing to do */
16341 	if (!test_bit(__I40E_SUSPENDED, pf->state))
16342 		return 0;
16343 
16344 	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
16345 	 * since we're going to be restoring queues
16346 	 */
16347 	rtnl_lock();
16348 
16349 	/* We cleared the interrupt scheme when we suspended, so we need to
16350 	 * restore it now to resume device functionality.
16351 	 */
16352 	err = i40e_restore_interrupt_scheme(pf);
16353 	if (err)
16354 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", err);
16357 
16358 	clear_bit(__I40E_DOWN, pf->state);
16359 	i40e_reset_and_rebuild(pf, false, true);
16360 
16361 	rtnl_unlock();
16362 
16363 	/* Clear suspended state last after everything is recovered */
16364 	clear_bit(__I40E_SUSPENDED, pf->state);
16365 
16366 	/* Restart the service task */
16367 	mod_timer(&pf->service_timer,
16368 		  round_jiffies(jiffies + pf->service_timer_period));
16369 
16370 	return 0;
16371 }
16372 
16373 static const struct pci_error_handlers i40e_err_handler = {
16374 	.error_detected = i40e_pci_error_detected,
16375 	.slot_reset = i40e_pci_error_slot_reset,
16376 	.reset_prepare = i40e_pci_error_reset_prepare,
16377 	.reset_done = i40e_pci_error_reset_done,
16378 	.resume = i40e_pci_error_resume,
16379 };
16380 
16381 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16382 
16383 static struct pci_driver i40e_driver = {
16384 	.name     = i40e_driver_name,
16385 	.id_table = i40e_pci_tbl,
16386 	.probe    = i40e_probe,
16387 	.remove   = i40e_remove,
16388 	.driver   = {
16389 		.pm = &i40e_pm_ops,
16390 	},
16391 	.shutdown = i40e_shutdown,
16392 	.err_handler = &i40e_err_handler,
16393 	.sriov_configure = i40e_pci_sriov_configure,
16394 };
16395 
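/* Note: registration happens manually in i40e_init_module() below rather
 * than via the module_pci_driver() helper because init must also create
 * the WQ_MEM_RECLAIM workqueue and set up debugfs before registering
 * with the PCI core.
 */
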
16396 /**
16397  * i40e_init_module - Driver registration routine
16398  *
16399  * i40e_init_module is the first routine called when the driver is
16400  * loaded. All it does is register with the PCI subsystem.
16401  **/
16402 static int __init i40e_init_module(void)
16403 {
16404 	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16405 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16406 
16407 	/* There is no need to throttle the number of active tasks because
16408 	 * each device limits its own task using a state bit for scheduling
16409 	 * the service task, and the device tasks do not interfere with each
16410 	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16411 	 * since we need to be able to guarantee forward progress even under
16412 	 * memory pressure.
16413 	 */
16414 	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16415 	if (!i40e_wq) {
16416 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16417 		return -ENOMEM;
16418 	}
16419 
16420 	i40e_dbg_init();
16421 	return pci_register_driver(&i40e_driver);
16422 }
16423 module_init(i40e_init_module);
16424 
16425 /**
16426  * i40e_exit_module - Driver exit cleanup routine
16427  *
16428  * i40e_exit_module is called just before the driver is removed
16429  * from memory.
16430  **/
16431 static void __exit i40e_exit_module(void)
16432 {
16433 	pci_unregister_driver(&i40e_driver);
16434 	destroy_workqueue(i40e_wq);
16435 	ida_destroy(&i40e_client_ida);
16436 	i40e_dbg_exit();
16437 }
16438 module_exit(i40e_exit_module);
16439