xref: /linux/drivers/net/ethernet/intel/i40e/i40e_main.c (revision 161db5d165123a72792e2687ecfd8de146dbae1a)
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2016 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30 
31 /* Local includes */
32 #include "i40e.h"
33 #include "i40e_diag.h"
34 #include <net/udp_tunnel.h>
35 
36 const char i40e_driver_name[] = "i40e";
37 static const char i40e_driver_string[] =
38 			"Intel(R) Ethernet Connection XL710 Network Driver";
39 
40 #define DRV_KERN "-k"
41 
42 #define DRV_VERSION_MAJOR 1
43 #define DRV_VERSION_MINOR 6
44 #define DRV_VERSION_BUILD 11
45 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
46 	     __stringify(DRV_VERSION_MINOR) "." \
47 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
48 const char i40e_driver_version_str[] = DRV_VERSION;
49 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
50 
51 /* forward declarations */
52 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
53 static void i40e_handle_reset_warning(struct i40e_pf *pf);
54 static int i40e_add_vsi(struct i40e_vsi *vsi);
55 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
56 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
57 static int i40e_setup_misc_vector(struct i40e_pf *pf);
58 static void i40e_determine_queue_usage(struct i40e_pf *pf);
59 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
60 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
61 			      u16 rss_table_size, u16 rss_size);
62 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
63 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
64 
65 /* i40e_pci_tbl - PCI Device ID Table
66  *
67  * Last entry must be all 0s
68  *
69  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
70  *   Class, Class Mask, private data (not used) }
71  */
72 static const struct pci_device_id i40e_pci_tbl[] = {
73 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
74 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
75 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
76 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
77 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
78 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
79 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
80 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
81 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
82 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
83 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
84 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
85 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
86 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
87 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
88 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
90 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
91 	/* required last entry */
92 	{0, }
93 };
94 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
95 
96 #define I40E_MAX_VF_COUNT 128
97 static int debug = -1;
98 module_param(debug, int, 0);
99 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
100 
101 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
102 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
103 MODULE_LICENSE("GPL");
104 MODULE_VERSION(DRV_VERSION);
105 
106 static struct workqueue_struct *i40e_wq;
107 
108 /**
109  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
110  * @hw:   pointer to the HW structure
111  * @mem:  ptr to mem struct to fill out
112  * @size: size of memory requested
113  * @alignment: what to align the allocation to
114  **/
115 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
116 			    u64 size, u32 alignment)
117 {
118 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
119 
120 	mem->size = ALIGN(size, alignment);
121 	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
122 				      &mem->pa, GFP_KERNEL);
123 	if (!mem->va)
124 		return -ENOMEM;
125 
126 	return 0;
127 }
128 
129 /**
130  * i40e_free_dma_mem_d - OS specific memory free for shared code
131  * @hw:   pointer to the HW structure
132  * @mem:  ptr to mem struct to free
133  **/
134 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
135 {
136 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137 
138 	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
139 	mem->va = NULL;
140 	mem->pa = 0;
141 	mem->size = 0;
142 
143 	return 0;
144 }
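/* Usage sketch (illustrative only, not part of the driver): the shared
 * admin queue code calls these OS-specific helpers in alloc/free pairs;
 * num_desc and desc_size below are hypothetical sizes:
 *
 *	struct i40e_dma_mem ring;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &ring, num_desc * desc_size, 4096))
 *		return -ENOMEM;
 *
 * ring.va (CPU address) and ring.pa (bus address) stay valid until the
 * matching i40e_free_dma_mem_d(hw, &ring) call.
 */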
145 
146 /**
147  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
148  * @hw:   pointer to the HW structure
149  * @mem:  ptr to mem struct to fill out
150  * @size: size of memory requested
151  **/
152 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
153 			     u32 size)
154 {
155 	mem->size = size;
156 	mem->va = kzalloc(size, GFP_KERNEL);
157 
158 	if (!mem->va)
159 		return -ENOMEM;
160 
161 	return 0;
162 }
163 
164 /**
165  * i40e_free_virt_mem_d - OS specific memory free for shared code
166  * @hw:   pointer to the HW structure
167  * @mem:  ptr to mem struct to free
168  **/
169 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
170 {
171 	/* it's ok to kfree a NULL pointer */
172 	kfree(mem->va);
173 	mem->va = NULL;
174 	mem->size = 0;
175 
176 	return 0;
177 }
178 
179 /**
180  * i40e_get_lump - find a lump of free generic resource
181  * @pf: board private structure
182  * @pile: the pile of resource to search
183  * @needed: the number of items needed
184  * @id: an owner id to stick on the items assigned
185  *
186  * Returns the base item index of the lump, or negative for error
187  *
188  * The search_hint trick and lack of advanced fit-finding only work
189  * because we're highly likely to have all the same size lump requests.
190  * Linear search time and any fragmentation should be minimal.
191  **/
192 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
193 			 u16 needed, u16 id)
194 {
195 	int ret = -ENOMEM;
196 	int i, j;
197 
198 	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
199 		dev_info(&pf->pdev->dev,
200 			 "param err: pile=%p needed=%d id=0x%04x\n",
201 			 pile, needed, id);
202 		return -EINVAL;
203 	}
204 
205 	/* start the linear search with an imperfect hint */
206 	i = pile->search_hint;
207 	while (i < pile->num_entries) {
208 		/* skip already allocated entries */
209 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
210 			i++;
211 			continue;
212 		}
213 
214 		/* do we have enough in this lump? */
215 		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
216 			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
217 				break;
218 		}
219 
220 		if (j == needed) {
221 			/* there was enough, so assign it to the requestor */
222 			for (j = 0; j < needed; j++)
223 				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
224 			ret = i;
225 			pile->search_hint = i + j;
226 			break;
227 		}
228 
229 		/* not enough, so skip over it and continue looking */
230 		i += j;
231 	}
232 
233 	return ret;
234 }
235 
236 /**
237  * i40e_put_lump - return a lump of generic resource
238  * @pile: the pile of resource to search
239  * @index: the base item index
240  * @id: the owner id of the items assigned
241  *
242  * Returns the count of items in the lump
243  **/
244 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
245 {
246 	int valid_id = (id | I40E_PILE_VALID_BIT);
247 	int count = 0;
248 	int i;
249 
250 	if (!pile || index >= pile->num_entries)
251 		return -EINVAL;
252 
253 	for (i = index;
254 	     i < pile->num_entries && pile->list[i] == valid_id;
255 	     i++) {
256 		pile->list[i] = 0;
257 		count++;
258 	}
259 
260 	if (count && index < pile->search_hint)
261 		pile->search_hint = index;
262 
263 	return count;
264 }
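/* Usage sketch (illustrative, following how this driver uses its
 * pf->qp_pile elsewhere): reserve a contiguous lump of queue pairs for a
 * VSI, then hand it back on teardown:
 *
 *	int base = i40e_get_lump(pf, pf->qp_pile,
 *				 vsi->alloc_queue_pairs, vsi->idx);
 *	if (base < 0)
 *		return base;
 *	vsi->base_queue = base;
 *
 * and later:
 *
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 */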
265 
266 /**
267  * i40e_find_vsi_from_id - searches for the vsi with the given id
268  * @pf: the PF structure to search for the VSI
269  * @id: id of the VSI being searched for
270  **/
271 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
272 {
273 	int i;
274 
275 	for (i = 0; i < pf->num_alloc_vsi; i++)
276 		if (pf->vsi[i] && (pf->vsi[i]->id == id))
277 			return pf->vsi[i];
278 
279 	return NULL;
280 }
281 
282 /**
283  * i40e_service_event_schedule - Schedule the service task to wake up
284  * @pf: board private structure
285  *
286  * If not already scheduled, this puts the task into the work queue
287  **/
288 void i40e_service_event_schedule(struct i40e_pf *pf)
289 {
290 	if (!test_bit(__I40E_DOWN, &pf->state) &&
291 	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
292 	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
293 		queue_work(i40e_wq, &pf->service_task);
294 }
295 
296 /**
297  * i40e_tx_timeout - Respond to a Tx Hang
298  * @netdev: network interface device structure
299  *
300  * If any port has noticed a Tx timeout, it is likely that the whole
301  * device is munged, not just the one netdev port, so go for the full
302  * reset.
303  **/
304 #ifdef I40E_FCOE
305 void i40e_tx_timeout(struct net_device *netdev)
306 #else
307 static void i40e_tx_timeout(struct net_device *netdev)
308 #endif
309 {
310 	struct i40e_netdev_priv *np = netdev_priv(netdev);
311 	struct i40e_vsi *vsi = np->vsi;
312 	struct i40e_pf *pf = vsi->back;
313 	struct i40e_ring *tx_ring = NULL;
314 	unsigned int i, hung_queue = 0;
315 	u32 head, val;
316 
317 	pf->tx_timeout_count++;
318 
319 	/* find the stopped queue the same way the stack does */
320 	for (i = 0; i < netdev->num_tx_queues; i++) {
321 		struct netdev_queue *q;
322 		unsigned long trans_start;
323 
324 		q = netdev_get_tx_queue(netdev, i);
325 		trans_start = q->trans_start;
326 		if (netif_xmit_stopped(q) &&
327 		    time_after(jiffies,
328 			       (trans_start + netdev->watchdog_timeo))) {
329 			hung_queue = i;
330 			break;
331 		}
332 	}
333 
334 	if (i == netdev->num_tx_queues) {
335 		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
336 	} else {
337 		/* now that we have an index, find the tx_ring struct */
338 		for (i = 0; i < vsi->num_queue_pairs; i++) {
339 			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
340 				if (hung_queue ==
341 				    vsi->tx_rings[i]->queue_index) {
342 					tx_ring = vsi->tx_rings[i];
343 					break;
344 				}
345 			}
346 		}
347 	}
348 
349 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
350 		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
351 	else if (time_before(jiffies,
352 		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
353 		return;   /* don't do any new action before the next timeout */
354 
355 	if (tx_ring) {
356 		head = i40e_get_head(tx_ring);
357 		/* Read interrupt register */
358 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
359 			val = rd32(&pf->hw,
360 			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
361 						tx_ring->vsi->base_vector - 1));
362 		else
363 			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
364 
365 		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
366 			    vsi->seid, hung_queue, tx_ring->next_to_clean,
367 			    head, tx_ring->next_to_use,
368 			    readl(tx_ring->tail), val);
369 	}
370 
371 	pf->tx_timeout_last_recovery = jiffies;
372 	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
373 		    pf->tx_timeout_recovery_level, hung_queue);
374 
375 	switch (pf->tx_timeout_recovery_level) {
376 	case 1:
377 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
378 		break;
379 	case 2:
380 		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
381 		break;
382 	case 3:
383 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
384 		break;
385 	default:
386 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
387 		break;
388 	}
389 
390 	i40e_service_event_schedule(pf);
391 	pf->tx_timeout_recovery_level++;
392 }
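/* Note on the escalation above: each unresolved timeout bumps
 * tx_timeout_recovery_level, requesting progressively bigger hammers
 * (PF reset, then core reset, then global reset).  The level drops back
 * to 1 once the device has been healthy for 20 seconds, per the
 * time_after() check at the top of i40e_tx_timeout().
 */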
393 
394 /**
395  * i40e_get_vsi_stats_struct - Get System Network Statistics
396  * @vsi: the VSI we care about
397  *
398  * Returns the address of the device statistics structure.
399  * The statistics are actually updated from the service task.
400  **/
401 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
402 {
403 	return &vsi->net_stats;
404 }
405 
406 /**
407  * i40e_get_netdev_stats_struct - Get statistics for netdev interface
408  * @netdev: network interface device structure
409  *
410  * Returns the address of the device statistics structure.
411  * The statistics are actually updated from the service task.
412  **/
413 #ifdef I40E_FCOE
414 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
415 					     struct net_device *netdev,
416 					     struct rtnl_link_stats64 *stats)
417 #else
418 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
419 					     struct net_device *netdev,
420 					     struct rtnl_link_stats64 *stats)
421 #endif
422 {
423 	struct i40e_netdev_priv *np = netdev_priv(netdev);
424 	struct i40e_ring *tx_ring, *rx_ring;
425 	struct i40e_vsi *vsi = np->vsi;
426 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
427 	int i;
428 
429 	if (test_bit(__I40E_DOWN, &vsi->state))
430 		return stats;
431 
432 	if (!vsi->tx_rings)
433 		return stats;
434 
435 	rcu_read_lock();
436 	for (i = 0; i < vsi->num_queue_pairs; i++) {
437 		u64 bytes, packets;
438 		unsigned int start;
439 
440 		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
441 		if (!tx_ring)
442 			continue;
443 
444 		do {
445 			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
446 			packets = tx_ring->stats.packets;
447 			bytes   = tx_ring->stats.bytes;
448 		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
449 
450 		stats->tx_packets += packets;
451 		stats->tx_bytes   += bytes;
452 		rx_ring = &tx_ring[1];	/* Rx ring follows its Tx pair in memory */
453 
454 		do {
455 			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
456 			packets = rx_ring->stats.packets;
457 			bytes   = rx_ring->stats.bytes;
458 		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
459 
460 		stats->rx_packets += packets;
461 		stats->rx_bytes   += bytes;
462 	}
463 	rcu_read_unlock();
464 
465 	/* following stats updated by i40e_watchdog_subtask() */
466 	stats->multicast	= vsi_stats->multicast;
467 	stats->tx_errors	= vsi_stats->tx_errors;
468 	stats->tx_dropped	= vsi_stats->tx_dropped;
469 	stats->rx_errors	= vsi_stats->rx_errors;
470 	stats->rx_dropped	= vsi_stats->rx_dropped;
471 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
472 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
473 
474 	return stats;
475 }
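/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops above
 * are the standard seqcount pattern for sampling 64-bit counters that a
 * writer may be updating concurrently (notably on 32-bit kernels, where a
 * 64-bit load is not atomic): if the writer touched the stats while we
 * were reading, the retry check fails and we simply read again.
 */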
476 
477 /**
478  * i40e_vsi_reset_stats - Resets all stats of the given vsi
479  * @vsi: the VSI to have its stats reset
480  **/
481 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
482 {
483 	struct rtnl_link_stats64 *ns;
484 	int i;
485 
486 	if (!vsi)
487 		return;
488 
489 	ns = i40e_get_vsi_stats_struct(vsi);
490 	memset(ns, 0, sizeof(*ns));
491 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
492 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
493 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
494 	if (vsi->rx_rings && vsi->rx_rings[0]) {
495 		for (i = 0; i < vsi->num_queue_pairs; i++) {
496 			memset(&vsi->rx_rings[i]->stats, 0,
497 			       sizeof(vsi->rx_rings[i]->stats));
498 			memset(&vsi->rx_rings[i]->rx_stats, 0,
499 			       sizeof(vsi->rx_rings[i]->rx_stats));
500 			memset(&vsi->tx_rings[i]->stats, 0,
501 			       sizeof(vsi->tx_rings[i]->stats));
502 			memset(&vsi->tx_rings[i]->tx_stats, 0,
503 			       sizeof(vsi->tx_rings[i]->tx_stats));
504 		}
505 	}
506 	vsi->stat_offsets_loaded = false;
507 }
508 
509 /**
510  * i40e_pf_reset_stats - Reset all of the stats for the given PF
511  * @pf: the PF to be reset
512  **/
513 void i40e_pf_reset_stats(struct i40e_pf *pf)
514 {
515 	int i;
516 
517 	memset(&pf->stats, 0, sizeof(pf->stats));
518 	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
519 	pf->stat_offsets_loaded = false;
520 
521 	for (i = 0; i < I40E_MAX_VEB; i++) {
522 		if (pf->veb[i]) {
523 			memset(&pf->veb[i]->stats, 0,
524 			       sizeof(pf->veb[i]->stats));
525 			memset(&pf->veb[i]->stats_offsets, 0,
526 			       sizeof(pf->veb[i]->stats_offsets));
527 			pf->veb[i]->stat_offsets_loaded = false;
528 		}
529 	}
530 }
531 
532 /**
533  * i40e_stat_update48 - read and update a 48 bit stat from the chip
534  * @hw: ptr to the hardware info
535  * @hireg: the high 32 bit reg to read
536  * @loreg: the low 32 bit reg to read
537  * @offset_loaded: has the initial offset been loaded yet
538  * @offset: ptr to current offset value
539  * @stat: ptr to the stat
540  *
541  * Since the device stats are not reset at PFReset, they likely will not
542  * be zeroed when the driver starts.  We'll save the first values read
543  * and use them as offsets to be subtracted from the raw values in order
544  * to report stats that count from zero.  In the process, we also manage
545  * the potential roll-over.
546  **/
547 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
548 			       bool offset_loaded, u64 *offset, u64 *stat)
549 {
550 	u64 new_data;
551 
552 	if (hw->device_id == I40E_DEV_ID_QEMU) {
553 		new_data = rd32(hw, loreg);
554 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
555 	} else {
556 		new_data = rd64(hw, loreg);
557 	}
558 	if (!offset_loaded)
559 		*offset = new_data;
560 	if (likely(new_data >= *offset))
561 		*stat = new_data - *offset;
562 	else
563 		*stat = (new_data + BIT_ULL(48)) - *offset;
564 	*stat &= 0xFFFFFFFFFFFFULL;
565 }
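/* Worked example of the roll-over handling above (values are made up):
 * with a saved *offset of 0xFFFFFFFFF000 and a fresh read of
 * new_data = 0x100, the 48-bit counter has wrapped, so the reported
 * stat is (0x100 + BIT_ULL(48)) - 0xFFFFFFFFF000 = 0x1100, which the
 * final mask keeps within 48 bits.
 */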
566 
567 /**
568  * i40e_stat_update32 - read and update a 32 bit stat from the chip
569  * @hw: ptr to the hardware info
570  * @reg: the hw reg to read
571  * @offset_loaded: has the initial offset been loaded yet
572  * @offset: ptr to current offset value
573  * @stat: ptr to the stat
574  **/
575 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
576 			       bool offset_loaded, u64 *offset, u64 *stat)
577 {
578 	u32 new_data;
579 
580 	new_data = rd32(hw, reg);
581 	if (!offset_loaded)
582 		*offset = new_data;
583 	if (likely(new_data >= *offset))
584 		*stat = (u32)(new_data - *offset);
585 	else
586 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
587 }
588 
589 /**
590  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
591  * @vsi: the VSI to be updated
592  **/
593 void i40e_update_eth_stats(struct i40e_vsi *vsi)
594 {
595 	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
596 	struct i40e_pf *pf = vsi->back;
597 	struct i40e_hw *hw = &pf->hw;
598 	struct i40e_eth_stats *oes;
599 	struct i40e_eth_stats *es;     /* device's eth stats */
600 
601 	es = &vsi->eth_stats;
602 	oes = &vsi->eth_stats_offsets;
603 
604 	/* Gather up the stats that the hw collects */
605 	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
606 			   vsi->stat_offsets_loaded,
607 			   &oes->tx_errors, &es->tx_errors);
608 	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
609 			   vsi->stat_offsets_loaded,
610 			   &oes->rx_discards, &es->rx_discards);
611 	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
612 			   vsi->stat_offsets_loaded,
613 			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
617 
618 	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
619 			   I40E_GLV_GORCL(stat_idx),
620 			   vsi->stat_offsets_loaded,
621 			   &oes->rx_bytes, &es->rx_bytes);
622 	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
623 			   I40E_GLV_UPRCL(stat_idx),
624 			   vsi->stat_offsets_loaded,
625 			   &oes->rx_unicast, &es->rx_unicast);
626 	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
627 			   I40E_GLV_MPRCL(stat_idx),
628 			   vsi->stat_offsets_loaded,
629 			   &oes->rx_multicast, &es->rx_multicast);
630 	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
631 			   I40E_GLV_BPRCL(stat_idx),
632 			   vsi->stat_offsets_loaded,
633 			   &oes->rx_broadcast, &es->rx_broadcast);
634 
635 	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
636 			   I40E_GLV_GOTCL(stat_idx),
637 			   vsi->stat_offsets_loaded,
638 			   &oes->tx_bytes, &es->tx_bytes);
639 	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
640 			   I40E_GLV_UPTCL(stat_idx),
641 			   vsi->stat_offsets_loaded,
642 			   &oes->tx_unicast, &es->tx_unicast);
643 	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
644 			   I40E_GLV_MPTCL(stat_idx),
645 			   vsi->stat_offsets_loaded,
646 			   &oes->tx_multicast, &es->tx_multicast);
647 	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
648 			   I40E_GLV_BPTCL(stat_idx),
649 			   vsi->stat_offsets_loaded,
650 			   &oes->tx_broadcast, &es->tx_broadcast);
651 	vsi->stat_offsets_loaded = true;
652 }
653 
654 /**
655  * i40e_update_veb_stats - Update Switch component statistics
656  * @veb: the VEB being updated
657  **/
658 static void i40e_update_veb_stats(struct i40e_veb *veb)
659 {
660 	struct i40e_pf *pf = veb->pf;
661 	struct i40e_hw *hw = &pf->hw;
662 	struct i40e_eth_stats *oes;
663 	struct i40e_eth_stats *es;     /* device's eth stats */
664 	struct i40e_veb_tc_stats *veb_oes;
665 	struct i40e_veb_tc_stats *veb_es;
666 	int i, idx = 0;
667 
668 	idx = veb->stats_idx;
669 	es = &veb->stats;
670 	oes = &veb->stats_offsets;
671 	veb_es = &veb->tc_stats;
672 	veb_oes = &veb->tc_stats_offsets;
673 
674 	/* Gather up the stats that the hw collects */
675 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
676 			   veb->stat_offsets_loaded,
677 			   &oes->tx_discards, &es->tx_discards);
678 	if (hw->revision_id > 0)
679 		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
680 				   veb->stat_offsets_loaded,
681 				   &oes->rx_unknown_protocol,
682 				   &es->rx_unknown_protocol);
683 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
684 			   veb->stat_offsets_loaded,
685 			   &oes->rx_bytes, &es->rx_bytes);
686 	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
687 			   veb->stat_offsets_loaded,
688 			   &oes->rx_unicast, &es->rx_unicast);
689 	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
690 			   veb->stat_offsets_loaded,
691 			   &oes->rx_multicast, &es->rx_multicast);
692 	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
693 			   veb->stat_offsets_loaded,
694 			   &oes->rx_broadcast, &es->rx_broadcast);
695 
696 	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
697 			   veb->stat_offsets_loaded,
698 			   &oes->tx_bytes, &es->tx_bytes);
699 	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
700 			   veb->stat_offsets_loaded,
701 			   &oes->tx_unicast, &es->tx_unicast);
702 	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
703 			   veb->stat_offsets_loaded,
704 			   &oes->tx_multicast, &es->tx_multicast);
705 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
706 			   veb->stat_offsets_loaded,
707 			   &oes->tx_broadcast, &es->tx_broadcast);
708 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
709 		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
710 				   I40E_GLVEBTC_RPCL(i, idx),
711 				   veb->stat_offsets_loaded,
712 				   &veb_oes->tc_rx_packets[i],
713 				   &veb_es->tc_rx_packets[i]);
714 		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
715 				   I40E_GLVEBTC_RBCL(i, idx),
716 				   veb->stat_offsets_loaded,
717 				   &veb_oes->tc_rx_bytes[i],
718 				   &veb_es->tc_rx_bytes[i]);
719 		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
720 				   I40E_GLVEBTC_TPCL(i, idx),
721 				   veb->stat_offsets_loaded,
722 				   &veb_oes->tc_tx_packets[i],
723 				   &veb_es->tc_tx_packets[i]);
724 		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
725 				   I40E_GLVEBTC_TBCL(i, idx),
726 				   veb->stat_offsets_loaded,
727 				   &veb_oes->tc_tx_bytes[i],
728 				   &veb_es->tc_tx_bytes[i]);
729 	}
730 	veb->stat_offsets_loaded = true;
731 }
732 
733 #ifdef I40E_FCOE
734 /**
735  * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
736  * @vsi: the VSI that is capable of doing FCoE
737  **/
738 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
739 {
740 	struct i40e_pf *pf = vsi->back;
741 	struct i40e_hw *hw = &pf->hw;
742 	struct i40e_fcoe_stats *ofs;
742 	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
744 	int idx;
745 
746 	if (vsi->type != I40E_VSI_FCOE)
747 		return;
748 
749 	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
750 	fs = &vsi->fcoe_stats;
751 	ofs = &vsi->fcoe_stats_offsets;
752 
753 	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
754 			   vsi->fcoe_stat_offsets_loaded,
755 			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
756 	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
757 			   vsi->fcoe_stat_offsets_loaded,
758 			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
759 	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
760 			   vsi->fcoe_stat_offsets_loaded,
761 			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
762 	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
763 			   vsi->fcoe_stat_offsets_loaded,
764 			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
765 	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
766 			   vsi->fcoe_stat_offsets_loaded,
767 			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
768 	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
769 			   vsi->fcoe_stat_offsets_loaded,
770 			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
771 	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
772 			   vsi->fcoe_stat_offsets_loaded,
773 			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
774 	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
775 			   vsi->fcoe_stat_offsets_loaded,
776 			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
777 
778 	vsi->fcoe_stat_offsets_loaded = true;
779 }
780 
781 #endif
782 /**
783  * i40e_update_vsi_stats - Update the vsi statistics counters.
784  * @vsi: the VSI to be updated
785  *
786  * There are a few instances where we store the same stat in a
787  * couple of different structs.  This is partly because we have
788  * the netdev stats that need to be filled out, which is slightly
789  * different from the "eth_stats" defined by the chip and used in
790  * VF communications.  We sort it out here.
791  **/
792 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
793 {
794 	struct i40e_pf *pf = vsi->back;
795 	struct rtnl_link_stats64 *ons;
796 	struct rtnl_link_stats64 *ns;   /* netdev stats */
797 	struct i40e_eth_stats *oes;
798 	struct i40e_eth_stats *es;     /* device's eth stats */
799 	u32 tx_restart, tx_busy;
800 	u64 tx_lost_interrupt;
801 	struct i40e_ring *p;
802 	u32 rx_page, rx_buf;
803 	u64 bytes, packets;
804 	unsigned int start;
805 	u64 tx_linearize;
806 	u64 tx_force_wb;
807 	u64 rx_p, rx_b;
808 	u64 tx_p, tx_b;
809 	u16 q;
810 
811 	if (test_bit(__I40E_DOWN, &vsi->state) ||
812 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
813 		return;
814 
815 	ns = i40e_get_vsi_stats_struct(vsi);
816 	ons = &vsi->net_stats_offsets;
817 	es = &vsi->eth_stats;
818 	oes = &vsi->eth_stats_offsets;
819 
820 	/* Gather up the netdev and vsi stats that the driver collects
821 	 * on the fly during packet processing
822 	 */
823 	rx_b = rx_p = 0;
824 	tx_b = tx_p = 0;
825 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
826 	tx_lost_interrupt = 0;
827 	rx_page = 0;
828 	rx_buf = 0;
829 	rcu_read_lock();
830 	for (q = 0; q < vsi->num_queue_pairs; q++) {
831 		/* locate Tx ring */
832 		p = ACCESS_ONCE(vsi->tx_rings[q]);
833 
834 		do {
835 			start = u64_stats_fetch_begin_irq(&p->syncp);
836 			packets = p->stats.packets;
837 			bytes = p->stats.bytes;
838 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
839 		tx_b += bytes;
840 		tx_p += packets;
841 		tx_restart += p->tx_stats.restart_queue;
842 		tx_busy += p->tx_stats.tx_busy;
843 		tx_linearize += p->tx_stats.tx_linearize;
844 		tx_force_wb += p->tx_stats.tx_force_wb;
845 		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
846 
847 		/* Rx queue is part of the same block as Tx queue */
848 		p = &p[1];
849 		do {
850 			start = u64_stats_fetch_begin_irq(&p->syncp);
851 			packets = p->stats.packets;
852 			bytes = p->stats.bytes;
853 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
854 		rx_b += bytes;
855 		rx_p += packets;
856 		rx_buf += p->rx_stats.alloc_buff_failed;
857 		rx_page += p->rx_stats.alloc_page_failed;
858 	}
859 	rcu_read_unlock();
860 	vsi->tx_restart = tx_restart;
861 	vsi->tx_busy = tx_busy;
862 	vsi->tx_linearize = tx_linearize;
863 	vsi->tx_force_wb = tx_force_wb;
864 	vsi->tx_lost_interrupt = tx_lost_interrupt;
865 	vsi->rx_page_failed = rx_page;
866 	vsi->rx_buf_failed = rx_buf;
867 
868 	ns->rx_packets = rx_p;
869 	ns->rx_bytes = rx_b;
870 	ns->tx_packets = tx_p;
871 	ns->tx_bytes = tx_b;
872 
873 	/* update netdev stats from eth stats */
874 	i40e_update_eth_stats(vsi);
875 	ons->tx_errors = oes->tx_errors;
876 	ns->tx_errors = es->tx_errors;
877 	ons->multicast = oes->rx_multicast;
878 	ns->multicast = es->rx_multicast;
879 	ons->rx_dropped = oes->rx_discards;
880 	ns->rx_dropped = es->rx_discards;
881 	ons->tx_dropped = oes->tx_discards;
882 	ns->tx_dropped = es->tx_discards;
883 
884 	/* pull in a couple PF stats if this is the main vsi */
885 	if (vsi == pf->vsi[pf->lan_vsi]) {
886 		ns->rx_crc_errors = pf->stats.crc_errors;
887 		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
888 		ns->rx_length_errors = pf->stats.rx_length_errors;
889 	}
890 }
891 
892 /**
893  * i40e_update_pf_stats - Update the PF statistics counters.
894  * @pf: the PF to be updated
895  **/
896 static void i40e_update_pf_stats(struct i40e_pf *pf)
897 {
898 	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
899 	struct i40e_hw_port_stats *nsd = &pf->stats;
900 	struct i40e_hw *hw = &pf->hw;
901 	u32 val;
902 	int i;
903 
904 	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
905 			   I40E_GLPRT_GORCL(hw->port),
906 			   pf->stat_offsets_loaded,
907 			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
908 	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
909 			   I40E_GLPRT_GOTCL(hw->port),
910 			   pf->stat_offsets_loaded,
911 			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
912 	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
913 			   pf->stat_offsets_loaded,
914 			   &osd->eth.rx_discards,
915 			   &nsd->eth.rx_discards);
916 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
917 			   I40E_GLPRT_UPRCL(hw->port),
918 			   pf->stat_offsets_loaded,
919 			   &osd->eth.rx_unicast,
920 			   &nsd->eth.rx_unicast);
921 	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
922 			   I40E_GLPRT_MPRCL(hw->port),
923 			   pf->stat_offsets_loaded,
924 			   &osd->eth.rx_multicast,
925 			   &nsd->eth.rx_multicast);
926 	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
927 			   I40E_GLPRT_BPRCL(hw->port),
928 			   pf->stat_offsets_loaded,
929 			   &osd->eth.rx_broadcast,
930 			   &nsd->eth.rx_broadcast);
931 	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
932 			   I40E_GLPRT_UPTCL(hw->port),
933 			   pf->stat_offsets_loaded,
934 			   &osd->eth.tx_unicast,
935 			   &nsd->eth.tx_unicast);
936 	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
937 			   I40E_GLPRT_MPTCL(hw->port),
938 			   pf->stat_offsets_loaded,
939 			   &osd->eth.tx_multicast,
940 			   &nsd->eth.tx_multicast);
941 	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
942 			   I40E_GLPRT_BPTCL(hw->port),
943 			   pf->stat_offsets_loaded,
944 			   &osd->eth.tx_broadcast,
945 			   &nsd->eth.tx_broadcast);
946 
947 	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
948 			   pf->stat_offsets_loaded,
949 			   &osd->tx_dropped_link_down,
950 			   &nsd->tx_dropped_link_down);
951 
952 	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
953 			   pf->stat_offsets_loaded,
954 			   &osd->crc_errors, &nsd->crc_errors);
955 
956 	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
957 			   pf->stat_offsets_loaded,
958 			   &osd->illegal_bytes, &nsd->illegal_bytes);
959 
960 	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
961 			   pf->stat_offsets_loaded,
962 			   &osd->mac_local_faults,
963 			   &nsd->mac_local_faults);
964 	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
965 			   pf->stat_offsets_loaded,
966 			   &osd->mac_remote_faults,
967 			   &nsd->mac_remote_faults);
968 
969 	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
970 			   pf->stat_offsets_loaded,
971 			   &osd->rx_length_errors,
972 			   &nsd->rx_length_errors);
973 
974 	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
975 			   pf->stat_offsets_loaded,
976 			   &osd->link_xon_rx, &nsd->link_xon_rx);
977 	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
978 			   pf->stat_offsets_loaded,
979 			   &osd->link_xon_tx, &nsd->link_xon_tx);
980 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
981 			   pf->stat_offsets_loaded,
982 			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
983 	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
984 			   pf->stat_offsets_loaded,
985 			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
986 
987 	for (i = 0; i < 8; i++) {
988 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
989 				   pf->stat_offsets_loaded,
990 				   &osd->priority_xoff_rx[i],
991 				   &nsd->priority_xoff_rx[i]);
992 		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
993 				   pf->stat_offsets_loaded,
994 				   &osd->priority_xon_rx[i],
995 				   &nsd->priority_xon_rx[i]);
996 		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
997 				   pf->stat_offsets_loaded,
998 				   &osd->priority_xon_tx[i],
999 				   &nsd->priority_xon_tx[i]);
1000 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1001 				   pf->stat_offsets_loaded,
1002 				   &osd->priority_xoff_tx[i],
1003 				   &nsd->priority_xoff_tx[i]);
1004 		i40e_stat_update32(hw,
1005 				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1006 				   pf->stat_offsets_loaded,
1007 				   &osd->priority_xon_2_xoff[i],
1008 				   &nsd->priority_xon_2_xoff[i]);
1009 	}
1010 
1011 	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1012 			   I40E_GLPRT_PRC64L(hw->port),
1013 			   pf->stat_offsets_loaded,
1014 			   &osd->rx_size_64, &nsd->rx_size_64);
1015 	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1016 			   I40E_GLPRT_PRC127L(hw->port),
1017 			   pf->stat_offsets_loaded,
1018 			   &osd->rx_size_127, &nsd->rx_size_127);
1019 	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1020 			   I40E_GLPRT_PRC255L(hw->port),
1021 			   pf->stat_offsets_loaded,
1022 			   &osd->rx_size_255, &nsd->rx_size_255);
1023 	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1024 			   I40E_GLPRT_PRC511L(hw->port),
1025 			   pf->stat_offsets_loaded,
1026 			   &osd->rx_size_511, &nsd->rx_size_511);
1027 	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1028 			   I40E_GLPRT_PRC1023L(hw->port),
1029 			   pf->stat_offsets_loaded,
1030 			   &osd->rx_size_1023, &nsd->rx_size_1023);
1031 	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1032 			   I40E_GLPRT_PRC1522L(hw->port),
1033 			   pf->stat_offsets_loaded,
1034 			   &osd->rx_size_1522, &nsd->rx_size_1522);
1035 	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1036 			   I40E_GLPRT_PRC9522L(hw->port),
1037 			   pf->stat_offsets_loaded,
1038 			   &osd->rx_size_big, &nsd->rx_size_big);
1039 
1040 	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1041 			   I40E_GLPRT_PTC64L(hw->port),
1042 			   pf->stat_offsets_loaded,
1043 			   &osd->tx_size_64, &nsd->tx_size_64);
1044 	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1045 			   I40E_GLPRT_PTC127L(hw->port),
1046 			   pf->stat_offsets_loaded,
1047 			   &osd->tx_size_127, &nsd->tx_size_127);
1048 	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1049 			   I40E_GLPRT_PTC255L(hw->port),
1050 			   pf->stat_offsets_loaded,
1051 			   &osd->tx_size_255, &nsd->tx_size_255);
1052 	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1053 			   I40E_GLPRT_PTC511L(hw->port),
1054 			   pf->stat_offsets_loaded,
1055 			   &osd->tx_size_511, &nsd->tx_size_511);
1056 	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1057 			   I40E_GLPRT_PTC1023L(hw->port),
1058 			   pf->stat_offsets_loaded,
1059 			   &osd->tx_size_1023, &nsd->tx_size_1023);
1060 	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1061 			   I40E_GLPRT_PTC1522L(hw->port),
1062 			   pf->stat_offsets_loaded,
1063 			   &osd->tx_size_1522, &nsd->tx_size_1522);
1064 	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1065 			   I40E_GLPRT_PTC9522L(hw->port),
1066 			   pf->stat_offsets_loaded,
1067 			   &osd->tx_size_big, &nsd->tx_size_big);
1068 
1069 	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1070 			   pf->stat_offsets_loaded,
1071 			   &osd->rx_undersize, &nsd->rx_undersize);
1072 	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1073 			   pf->stat_offsets_loaded,
1074 			   &osd->rx_fragments, &nsd->rx_fragments);
1075 	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1076 			   pf->stat_offsets_loaded,
1077 			   &osd->rx_oversize, &nsd->rx_oversize);
1078 	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1079 			   pf->stat_offsets_loaded,
1080 			   &osd->rx_jabber, &nsd->rx_jabber);
1081 
1082 	/* FDIR stats */
1083 	i40e_stat_update32(hw,
1084 			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1085 			   pf->stat_offsets_loaded,
1086 			   &osd->fd_atr_match, &nsd->fd_atr_match);
1087 	i40e_stat_update32(hw,
1088 			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1089 			   pf->stat_offsets_loaded,
1090 			   &osd->fd_sb_match, &nsd->fd_sb_match);
1091 	i40e_stat_update32(hw,
1092 		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1093 		      pf->stat_offsets_loaded,
1094 		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1095 
1096 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
1097 	nsd->tx_lpi_status =
1098 		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1099 			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1100 	nsd->rx_lpi_status =
1101 		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1102 			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1103 	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1104 			   pf->stat_offsets_loaded,
1105 			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
1106 	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1107 			   pf->stat_offsets_loaded,
1108 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
1109 
1110 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1111 	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1112 		nsd->fd_sb_status = true;
1113 	else
1114 		nsd->fd_sb_status = false;
1115 
1116 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1117 	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1118 		nsd->fd_atr_status = true;
1119 	else
1120 		nsd->fd_atr_status = false;
1121 
1122 	pf->stat_offsets_loaded = true;
1123 }
1124 
1125 /**
1126  * i40e_update_stats - Update the various statistics counters.
1127  * @vsi: the VSI to be updated
1128  *
1129  * Update the various stats for this VSI and its related entities.
1130  **/
1131 void i40e_update_stats(struct i40e_vsi *vsi)
1132 {
1133 	struct i40e_pf *pf = vsi->back;
1134 
1135 	if (vsi == pf->vsi[pf->lan_vsi])
1136 		i40e_update_pf_stats(pf);
1137 
1138 	i40e_update_vsi_stats(vsi);
1139 #ifdef I40E_FCOE
1140 	i40e_update_fcoe_stats(vsi);
1141 #endif
1142 }
1143 
1144 /**
1145  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1146  * @vsi: the VSI to be searched
1147  * @macaddr: the MAC address
1148  * @vlan: the vlan
1149  * @is_vf: make sure it's a VF filter, else doesn't matter
1150  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1151  *
1152  * Returns ptr to the filter object or NULL
1153  **/
1154 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1155 						u8 *macaddr, s16 vlan,
1156 						bool is_vf, bool is_netdev)
1157 {
1158 	struct i40e_mac_filter *f;
1159 
1160 	if (!vsi || !macaddr)
1161 		return NULL;
1162 
1163 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1164 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1165 		    (vlan == f->vlan)    &&
1166 		    (!is_vf || f->is_vf) &&
1167 		    (!is_netdev || f->is_netdev))
1168 			return f;
1169 	}
1170 	return NULL;
1171 }
1172 
1173 /**
1174  * i40e_find_mac - Find a mac addr in the macvlan filters list
1175  * @vsi: the VSI to be searched
1176  * @macaddr: the MAC address we are searching for
1177  * @is_vf: make sure it's a VF filter, else doesn't matter
1178  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1179  *
1180  * Returns the first filter with the provided MAC address or NULL if
1181  * MAC address was not found
1182  **/
1183 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1184 				      bool is_vf, bool is_netdev)
1185 {
1186 	struct i40e_mac_filter *f;
1187 
1188 	if (!vsi || !macaddr)
1189 		return NULL;
1190 
1191 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1192 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1193 		    (!is_vf || f->is_vf) &&
1194 		    (!is_netdev || f->is_netdev))
1195 			return f;
1196 	}
1197 	return NULL;
1198 }
1199 
1200 /**
1201  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1202  * @vsi: the VSI to be searched
1203  *
1204  * Returns true if VSI is in vlan mode or false otherwise
1205  **/
1206 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1207 {
1208 	struct i40e_mac_filter *f;
1209 
1210 	/* A vlan of -1 on every filter denotes "not in VLAN mode"; any
1211 	 * filter with vlan >= 0 (or a port VLAN) means we are, so walk
1212 	 * the whole list to be sure before answering.
1213 	 */
1213 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1214 		if (f->vlan >= 0 || vsi->info.pvid)
1215 			return true;
1216 	}
1217 
1218 	return false;
1219 }
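/* Example (mirrors the pattern used by i40e_set_rx_mode() below):
 * callers typically branch on VLAN mode before installing a new MAC,
 * where macaddr is a hypothetical u8[ETH_ALEN] buffer:
 *
 *	if (i40e_is_vsi_in_vlan(vsi))
 *		i40e_put_mac_in_vlan(vsi, macaddr, false, true);
 *	else
 *		i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY, false, true);
 */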
1220 
1221 /**
1222  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1223  * @vsi: the VSI to be searched
1224  * @macaddr: the mac address to be filtered
1225  * @is_vf: true if it is a VF
1226  * @is_netdev: true if it is a netdev
1227  *
1228  * Goes through all the macvlan filters and adds a
1229  * macvlan filter for each unique vlan that already exists
1230  *
1231  * Returns first filter found on success, else NULL
1232  **/
1233 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1234 					     bool is_vf, bool is_netdev)
1235 {
1236 	struct i40e_mac_filter *f;
1237 
1238 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1239 		if (vsi->info.pvid)
1240 			f->vlan = le16_to_cpu(vsi->info.pvid);
1241 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
1242 				      is_vf, is_netdev)) {
1243 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
1244 					     is_vf, is_netdev))
1245 				return NULL;
1246 		}
1247 	}
1248 
1249 	return list_first_entry_or_null(&vsi->mac_filter_list,
1250 					struct i40e_mac_filter, list);
1251 }
1252 
1253 /**
1254  * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1255  * @vsi: the VSI to be searched
1256  * @macaddr: the mac address to be removed
1257  * @is_vf: true if it is a VF
1258  * @is_netdev: true if it is a netdev
1259  *
1260  * Removes a given MAC address from a VSI, regardless of VLAN
1261  *
1262  * Returns 0 for success, or error
1263  **/
1264 int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1265 			  bool is_vf, bool is_netdev)
1266 {
1267 	struct i40e_mac_filter *f = NULL;
1268 	int changed = 0;
1269 
1270 	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1271 	     "Missing mac_filter_list_lock\n");
1272 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
1273 		if ((ether_addr_equal(macaddr, f->macaddr)) &&
1274 		    (is_vf == f->is_vf) &&
1275 		    (is_netdev == f->is_netdev)) {
1276 			f->counter--;
1277 			changed = 1;
1278 			if (f->counter == 0)
1279 				f->state = I40E_FILTER_REMOVE;
1280 		}
1281 	}
1282 	if (changed) {
1283 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1284 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1285 		return 0;
1286 	}
1287 	return -ENOENT;
1288 }
1289 
1290 /**
1291  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1292  * @vsi: the PF Main VSI - inappropriate for any other VSI
1293  * @macaddr: the MAC address
1294  *
1295  * Remove whatever filter the firmware set up so the driver can manage
1296  * its own filtering intelligently.
1297  **/
1298 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1299 {
1300 	struct i40e_aqc_remove_macvlan_element_data element;
1301 	struct i40e_pf *pf = vsi->back;
1302 
1303 	/* Only appropriate for the PF main VSI */
1304 	if (vsi->type != I40E_VSI_MAIN)
1305 		return;
1306 
1307 	memset(&element, 0, sizeof(element));
1308 	ether_addr_copy(element.mac_addr, macaddr);
1309 	element.vlan_tag = 0;
1310 	/* Ignore error returns, some firmware does it this way... */
1311 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1312 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1313 
1314 	memset(&element, 0, sizeof(element));
1315 	ether_addr_copy(element.mac_addr, macaddr);
1316 	element.vlan_tag = 0;
1317 	/* ...and some firmware does it this way. */
1318 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1319 			I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1320 	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1321 }
1322 
1323 /**
1324  * i40e_add_filter - Add a mac/vlan filter to the VSI
1325  * @vsi: the VSI to be searched
1326  * @macaddr: the MAC address
1327  * @vlan: the vlan
1328  * @is_vf: make sure it's a VF filter, else doesn't matter
1329  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1330  *
1331  * Returns ptr to the filter object or NULL when no memory available.
1332  *
1333  * NOTE: This function is expected to be called with mac_filter_list_lock
1334  * being held.
1335  **/
1336 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1337 					u8 *macaddr, s16 vlan,
1338 					bool is_vf, bool is_netdev)
1339 {
1340 	struct i40e_mac_filter *f;
1341 	int changed = false;
1342 
1343 	if (!vsi || !macaddr)
1344 		return NULL;
1345 
1346 	/* Do not allow a broadcast filter to be added here: one is already
1347 	 * added as part of the add-VSI flow for every newly created VSI
1348 	 * except the FDIR VSI.
1349 	 */
1350 	if (is_broadcast_ether_addr(macaddr))
1351 		return NULL;
1352 
1353 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1354 	if (!f) {
1355 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
1356 		if (!f)
1357 			goto add_filter_out;
1358 
1359 		ether_addr_copy(f->macaddr, macaddr);
1360 		f->vlan = vlan;
1361 		/* If we're in overflow promisc mode, set the state directly
1362 		 * to failed, so we don't bother to try sending the filter
1363 		 * to the hardware.
1364 		 */
1365 		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
1366 			f->state = I40E_FILTER_FAILED;
1367 		else
1368 			f->state = I40E_FILTER_NEW;
1369 		changed = true;
1370 		INIT_LIST_HEAD(&f->list);
1371 		list_add_tail(&f->list, &vsi->mac_filter_list);
1372 	}
1373 
1374 	/* increment counter and add a new flag if needed */
1375 	if (is_vf) {
1376 		if (!f->is_vf) {
1377 			f->is_vf = true;
1378 			f->counter++;
1379 		}
1380 	} else if (is_netdev) {
1381 		if (!f->is_netdev) {
1382 			f->is_netdev = true;
1383 			f->counter++;
1384 		}
1385 	} else {
1386 		f->counter++;
1387 	}
1388 
1389 	if (changed) {
1390 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1391 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1392 	}
1393 
1394 add_filter_out:
1395 	return f;
1396 }
1397 
1398 /**
1399  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1400  * @vsi: the VSI to be searched
1401  * @macaddr: the MAC address
1402  * @vlan: the vlan
1403  * @is_vf: make sure it's a VF filter, else doesn't matter
1404  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1405  *
1406  * NOTE: This function is expected to be called with mac_filter_list_lock
1407  * being held.
1408  * ANOTHER NOTE: This function MUST be called from within the context of
1409  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1410  * instead of list_for_each_entry().
1411  **/
1412 void i40e_del_filter(struct i40e_vsi *vsi,
1413 		     u8 *macaddr, s16 vlan,
1414 		     bool is_vf, bool is_netdev)
1415 {
1416 	struct i40e_mac_filter *f;
1417 
1418 	if (!vsi || !macaddr)
1419 		return;
1420 
1421 	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1422 	if (!f || f->counter == 0)
1423 		return;
1424 
1425 	if (is_vf) {
1426 		if (f->is_vf) {
1427 			f->is_vf = false;
1428 			f->counter--;
1429 		}
1430 	} else if (is_netdev) {
1431 		if (f->is_netdev) {
1432 			f->is_netdev = false;
1433 			f->counter--;
1434 		}
1435 	} else {
1436 		/* make sure we don't remove a filter in use by VF or netdev */
1437 		int min_f = 0;
1438 
1439 		min_f += (f->is_vf ? 1 : 0);
1440 		min_f += (f->is_netdev ? 1 : 0);
1441 
1442 		if (f->counter > min_f)
1443 			f->counter--;
1444 	}
1445 
1446 	/* counter == 0 tells sync_filters_subtask to
1447 	 * remove the filter from the firmware's list
1448 	 */
1449 	if (f->counter == 0) {
1450 		if ((f->state == I40E_FILTER_FAILED) ||
1451 		    (f->state == I40E_FILTER_NEW)) {
1452 			/* this one never got added by the FW. Just remove it,
1453 			 * no need to sync anything.
1454 			 */
1455 			list_del(&f->list);
1456 			kfree(f);
1457 		} else {
1458 			f->state = I40E_FILTER_REMOVE;
1459 			vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1460 			vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1461 		}
1462 	}
1463 }
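/* Usage sketch (this mirrors i40e_set_mac() below): filter changes are
 * made under mac_filter_list_lock and only staged in software; the
 * service task pushes them to the hardware later.  old_addr and new_addr
 * are hypothetical u8[ETH_ALEN] buffers:
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	i40e_del_mac_all_vlan(vsi, old_addr, false, true);
 *	i40e_put_mac_in_vlan(vsi, new_addr, false, true);
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	i40e_service_event_schedule(vsi->back);
 */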
1464 
1465 /**
1466  * i40e_set_mac - NDO callback to set mac address
1467  * @netdev: network interface device structure
1468  * @p: pointer to an address structure
1469  *
1470  * Returns 0 on success, negative on failure
1471  **/
1472 #ifdef I40E_FCOE
1473 int i40e_set_mac(struct net_device *netdev, void *p)
1474 #else
1475 static int i40e_set_mac(struct net_device *netdev, void *p)
1476 #endif
1477 {
1478 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1479 	struct i40e_vsi *vsi = np->vsi;
1480 	struct i40e_pf *pf = vsi->back;
1481 	struct i40e_hw *hw = &pf->hw;
1482 	struct sockaddr *addr = p;
1483 
1484 	if (!is_valid_ether_addr(addr->sa_data))
1485 		return -EADDRNOTAVAIL;
1486 
1487 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1488 		netdev_info(netdev, "already using mac address %pM\n",
1489 			    addr->sa_data);
1490 		return 0;
1491 	}
1492 
1493 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1494 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1495 		return -EADDRNOTAVAIL;
1496 
1497 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1498 		netdev_info(netdev, "returning to hw mac address %pM\n",
1499 			    hw->mac.addr);
1500 	else
1501 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1502 
1503 	spin_lock_bh(&vsi->mac_filter_list_lock);
1504 	i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
1505 	i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
1506 	spin_unlock_bh(&vsi->mac_filter_list_lock);
1507 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
1508 	if (vsi->type == I40E_VSI_MAIN) {
1509 		i40e_status ret;
1510 
1511 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
1512 						I40E_AQC_WRITE_TYPE_LAA_WOL,
1513 						addr->sa_data, NULL);
1514 		if (ret)
1515 			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1516 				    i40e_stat_str(hw, ret),
1517 				    i40e_aq_str(hw, hw->aq.asq_last_status));
1518 	}
1519 
1520 	/* schedule our worker thread which will take care of
1521 	 * applying the new filter changes
1522 	 */
1523 	i40e_service_event_schedule(vsi->back);
1524 	return 0;
1525 }
1526 
1527 /**
1528  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1529  * @vsi: the VSI being setup
1530  * @ctxt: VSI context structure
1531  * @enabled_tc: Enabled TCs bitmap
1532  * @is_add: True if called before Add VSI
1533  *
1534  * Setup VSI queue mapping for enabled traffic classes.
1535  **/
1536 #ifdef I40E_FCOE
1537 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1538 			      struct i40e_vsi_context *ctxt,
1539 			      u8 enabled_tc,
1540 			      bool is_add)
1541 #else
1542 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1543 				     struct i40e_vsi_context *ctxt,
1544 				     u8 enabled_tc,
1545 				     bool is_add)
1546 #endif
1547 {
1548 	struct i40e_pf *pf = vsi->back;
1549 	u16 sections = 0;
1550 	u8 netdev_tc = 0;
1551 	u16 numtc = 0;
1552 	u16 qcount;
1553 	u8 offset;
1554 	u16 qmap;
1555 	int i;
1556 	u16 num_tc_qps = 0;
1557 
1558 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1559 	offset = 0;
1560 
1561 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1562 		/* Find numtc from enabled TC bitmap */
1563 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1564 			if (enabled_tc & BIT(i)) /* TC is enabled */
1565 				numtc++;
1566 		}
1567 		if (!numtc) {
1568 			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1569 			numtc = 1;
1570 		}
1571 	} else {
1572 		/* At least TC0 is enabled in case of non-DCB case */
1573 		numtc = 1;
1574 	}
1575 
1576 	vsi->tc_config.numtc = numtc;
1577 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1578 	/* Number of queues per enabled TC */
1579 	qcount = vsi->alloc_queue_pairs;
1580 
1581 	num_tc_qps = qcount / numtc;
1582 	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1583 
1584 	/* Setup queue offset/count for all TCs for given VSI */
1585 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1586 		/* See if the given TC is enabled for the given VSI */
1587 		if (vsi->tc_config.enabled_tc & BIT(i)) {
1588 			/* TC is enabled */
1589 			int pow, num_qps;
1590 
1591 			switch (vsi->type) {
1592 			case I40E_VSI_MAIN:
1593 				qcount = min_t(int, pf->alloc_rss_size,
1594 					       num_tc_qps);
1595 				break;
1596 #ifdef I40E_FCOE
1597 			case I40E_VSI_FCOE:
1598 				qcount = num_tc_qps;
1599 				break;
1600 #endif
1601 			case I40E_VSI_FDIR:
1602 			case I40E_VSI_SRIOV:
1603 			case I40E_VSI_VMDQ2:
1604 			default:
1605 				qcount = num_tc_qps;
1606 				WARN_ON(i != 0);
1607 				break;
1608 			}
1609 			vsi->tc_config.tc_info[i].qoffset = offset;
1610 			vsi->tc_config.tc_info[i].qcount = qcount;
1611 
1612 			/* find the next higher power-of-2 of num queue pairs */
1613 			num_qps = qcount;
1614 			pow = 0;
1615 			while (num_qps && (BIT_ULL(pow) < qcount)) {
1616 				pow++;
1617 				num_qps >>= 1;
1618 			}
1619 
1620 			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1621 			qmap =
1622 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1623 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1624 
1625 			offset += qcount;
1626 		} else {
1627 			/* TC is not enabled so set the offset to
1628 			 * default queue and allocate one queue
1629 			 * for the given TC.
1630 			 */
1631 			vsi->tc_config.tc_info[i].qoffset = 0;
1632 			vsi->tc_config.tc_info[i].qcount = 1;
1633 			vsi->tc_config.tc_info[i].netdev_tc = 0;
1634 
1635 			qmap = 0;
1636 		}
1637 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1638 	}
1639 
1640 	/* Set actual Tx/Rx queue pairs */
1641 	vsi->num_queue_pairs = offset;
1642 	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1643 		if (vsi->req_queue_pairs > 0)
1644 			vsi->num_queue_pairs = vsi->req_queue_pairs;
1645 		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1646 			vsi->num_queue_pairs = pf->num_lan_msix;
1647 	}
1648 
1649 	/* Scheduler section valid can only be set for ADD VSI */
1650 	if (is_add) {
1651 		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1652 
1653 		ctxt->info.up_enable_bits = enabled_tc;
1654 	}
1655 	if (vsi->type == I40E_VSI_SRIOV) {
1656 		ctxt->info.mapping_flags |=
1657 				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1658 		for (i = 0; i < vsi->num_queue_pairs; i++)
1659 			ctxt->info.queue_mapping[i] =
1660 					       cpu_to_le16(vsi->base_queue + i);
1661 	} else {
1662 		ctxt->info.mapping_flags |=
1663 					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1664 		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1665 	}
1666 	ctxt->info.valid_sections |= cpu_to_le16(sections);
1667 }
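
/* A minimal user-space sketch of the per-TC qmap encoding built above,
 * assuming the AQ layout places the queue offset at bit 0
 * (I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) and the queue-count exponent at bit 9
 * (I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); illustrative, not authoritative.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t example_tc_qmap(uint16_t offset, uint16_t qcount)
{
	uint16_t pow = 0;

	/* round qcount up to a power of two, expressed as its log2 */
	while ((1u << pow) < qcount)
		pow++;

	return (uint16_t)((offset << 0) | (pow << 9));
}

int main(void)
{
	/* a TC starting at queue 8 with 6 queues rounds up to 8 (pow = 3) */
	printf("qmap = 0x%04x\n", example_tc_qmap(8, 6));
	return 0;
}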
1668 
1669 /**
1670  * i40e_set_rx_mode - NDO callback to set the netdev filters
1671  * @netdev: network interface device structure
1672  **/
1673 #ifdef I40E_FCOE
1674 void i40e_set_rx_mode(struct net_device *netdev)
1675 #else
1676 static void i40e_set_rx_mode(struct net_device *netdev)
1677 #endif
1678 {
1679 	struct i40e_netdev_priv *np = netdev_priv(netdev);
1680 	struct i40e_mac_filter *f, *ftmp;
1681 	struct i40e_vsi *vsi = np->vsi;
1682 	struct netdev_hw_addr *uca;
1683 	struct netdev_hw_addr *mca;
1684 	struct netdev_hw_addr *ha;
1685 
1686 	spin_lock_bh(&vsi->mac_filter_list_lock);
1687 
1688 	/* add addr if not already in the filter list */
1689 	netdev_for_each_uc_addr(uca, netdev) {
1690 		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1691 			if (i40e_is_vsi_in_vlan(vsi))
1692 				i40e_put_mac_in_vlan(vsi, uca->addr,
1693 						     false, true);
1694 			else
1695 				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1696 						false, true);
1697 		}
1698 	}
1699 
1700 	netdev_for_each_mc_addr(mca, netdev) {
1701 		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1702 			if (i40e_is_vsi_in_vlan(vsi))
1703 				i40e_put_mac_in_vlan(vsi, mca->addr,
1704 						     false, true);
1705 			else
1706 				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1707 						false, true);
1708 		}
1709 	}
1710 
1711 	/* remove filter if not in netdev list */
1712 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1713 
1714 		if (!f->is_netdev)
1715 			continue;
1716 
1717 		netdev_for_each_mc_addr(mca, netdev)
1718 			if (ether_addr_equal(mca->addr, f->macaddr))
1719 				goto bottom_of_search_loop;
1720 
1721 		netdev_for_each_uc_addr(uca, netdev)
1722 			if (ether_addr_equal(uca->addr, f->macaddr))
1723 				goto bottom_of_search_loop;
1724 
1725 		for_each_dev_addr(netdev, ha)
1726 			if (ether_addr_equal(ha->addr, f->macaddr))
1727 				goto bottom_of_search_loop;
1728 
1729 		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1730 		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1731 
1732 bottom_of_search_loop:
1733 		continue;
1734 	}
1735 	spin_unlock_bh(&vsi->mac_filter_list_lock);
1736 
1737 	/* check for other flag changes */
1738 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
1739 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1740 		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1741 	}
1742 
1743 	/* schedule our worker thread which will take care of
1744 	 * applying the new filter changes
1745 	 */
1746 	i40e_service_event_schedule(vsi->back);
1747 }
1748 
1749 /**
1750  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1751  * @vsi: pointer to vsi struct
1752  * @from: Pointer to list which contains MAC filter entries - changes to
1753  *        those entries needs to be undone.
1754  *
1755  * MAC filter entries from list were slated to be removed from device.
1756  **/
1757 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1758 					 struct list_head *from)
1759 {
1760 	struct i40e_mac_filter *f, *ftmp;
1761 
1762 	list_for_each_entry_safe(f, ftmp, from, list) {
1763 		/* Move the element back into MAC filter list*/
1764 		list_move_tail(&f->list, &vsi->mac_filter_list);
1765 	}
1766 }
1767 
1768 /**
1769  * i40e_update_filter_state - Update filter state based on return data
1770  * from firmware
1771  * @count: Number of filters added
1772  * @add_list: return data from fw
1773  * @add_head: pointer to first filter in current batch
1774  * @aq_err: status from fw
1775  *
1776  * MAC filter entries from list were slated to be added to device. Returns
1777  * number of successful filters. Note that 0 does NOT mean success!
1778  **/
1779 static int
1780 i40e_update_filter_state(int count,
1781 			 struct i40e_aqc_add_macvlan_element_data *add_list,
1782 			 struct i40e_mac_filter *add_head, int aq_err)
1783 {
1784 	int retval = 0;
1785 	int i;
1786 
1788 	if (!aq_err) {
1789 		retval = count;
1790 		/* Everything's good, mark all filters active. */
1791 		for (i = 0; i < count ; i++) {
1792 			add_head->state = I40E_FILTER_ACTIVE;
1793 			add_head = list_next_entry(add_head, list);
1794 		}
1795 	} else if (aq_err == I40E_AQ_RC_ENOSPC) {
1796 		/* Device ran out of filter space. Check the return value
1797 		 * for each filter to see which ones are active.
1798 		 */
1799 		for (i = 0; i < count ; i++) {
1800 			if (add_list[i].match_method ==
1801 			    I40E_AQC_MM_ERR_NO_RES) {
1802 				add_head->state = I40E_FILTER_FAILED;
1803 			} else {
1804 				add_head->state = I40E_FILTER_ACTIVE;
1805 				retval++;
1806 			}
1807 			add_head = list_next_entry(add_head, list);
1808 		}
1809 	} else {
1810 		/* Some other horrible thing happened, fail all filters */
1811 		retval = 0;
1812 		for (i = 0; i < count ; i++) {
1813 			add_head->state = I40E_FILTER_FAILED;
1814 			add_head = list_next_entry(add_head, list);
1815 		}
1816 	}
1817 	return retval;
1818 }
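
/* An illustrative user-space analogue of the lockstep walk above: one
 * array of per-entry firmware results, one batch of driver filters,
 * advanced together.  All names here are hypothetical.
 */
#include <stdio.h>

enum ex_state { EX_ACTIVE, EX_FAILED };

static int ex_update_state(int count, const int *fw_no_res,
			   enum ex_state *state)
{
	int i, ok = 0;

	for (i = 0; i < count; i++) {
		state[i] = fw_no_res[i] ? EX_FAILED : EX_ACTIVE;
		if (!fw_no_res[i])
			ok++;
	}
	return ok;	/* how many filters the hardware accepted */
}

int main(void)
{
	int no_res[4] = { 0, 0, 1, 1 };	/* last two ran out of resources */
	enum ex_state state[4];

	printf("accepted %d of 4\n", ex_update_state(4, no_res, state));
	return 0;
}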
1819 
1820 /**
1821  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1822  * @vsi: ptr to the VSI
1823  *
1824  * Push any outstanding VSI filter changes through the AdminQ.
1825  *
1826  * Returns 0 or error value
1827  **/
1828 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1829 {
1830 	struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
1831 	struct list_head tmp_add_list, tmp_del_list;
1832 	struct i40e_hw *hw = &vsi->back->hw;
1833 	bool promisc_changed = false;
1834 	char vsi_name[16] = "PF";
1835 	int filter_list_len = 0;
1836 	u32 changed_flags = 0;
1837 	i40e_status aq_ret = 0;
1838 	int retval = 0;
1839 	struct i40e_pf *pf;
1840 	int num_add = 0;
1841 	int num_del = 0;
1842 	int aq_err = 0;
1843 	u16 cmd_flags;
1844 	int list_size;
1845 	int fcnt;
1846 
1847 	/* empty array typed pointers, allocated later with kzalloc */
1848 	struct i40e_aqc_add_macvlan_element_data *add_list;
1849 	struct i40e_aqc_remove_macvlan_element_data *del_list;
1850 
1851 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1852 		usleep_range(1000, 2000);
1853 	pf = vsi->back;
1854 
1855 	if (vsi->netdev) {
1856 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1857 		vsi->current_netdev_flags = vsi->netdev->flags;
1858 	}
1859 
1860 	INIT_LIST_HEAD(&tmp_add_list);
1861 	INIT_LIST_HEAD(&tmp_del_list);
1862 
1863 	if (vsi->type == I40E_VSI_SRIOV)
1864 		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
1865 	else if (vsi->type != I40E_VSI_MAIN)
1866 		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
1867 
1868 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1869 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1870 
1871 		spin_lock_bh(&vsi->mac_filter_list_lock);
1872 		/* Create a list of filters to delete. */
1873 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1874 			if (f->state == I40E_FILTER_REMOVE) {
1875 				WARN_ON(f->counter != 0);
1876 				/* Move the element into temporary del_list */
1877 				list_move_tail(&f->list, &tmp_del_list);
1878 				vsi->active_filters--;
1879 			}
1880 			if (f->state == I40E_FILTER_NEW) {
1881 				WARN_ON(f->counter == 0);
1882 				/* Move the element into temporary add_list */
1883 				list_move_tail(&f->list, &tmp_add_list);
1884 			}
1885 		}
1886 		spin_unlock_bh(&vsi->mac_filter_list_lock);
1887 	}
1888 
1889 	/* Now process 'del_list' outside the lock */
1890 	if (!list_empty(&tmp_del_list)) {
1891 		filter_list_len = hw->aq.asq_buf_size /
1892 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1893 		list_size = filter_list_len *
1894 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1895 		del_list = kzalloc(list_size, GFP_ATOMIC);
1896 		if (!del_list) {
1897 			/* Undo VSI's MAC filter entry element updates */
1898 			spin_lock_bh(&vsi->mac_filter_list_lock);
1899 			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1900 			spin_unlock_bh(&vsi->mac_filter_list_lock);
1901 			retval = -ENOMEM;
1902 			goto out;
1903 		}
1904 
1905 		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
1906 			cmd_flags = 0;
1907 
1908 			/* add to delete list */
1909 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1910 			if (f->vlan == I40E_VLAN_ANY) {
1911 				del_list[num_del].vlan_tag = 0;
1912 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1913 			} else {
1914 				del_list[num_del].vlan_tag =
1915 					cpu_to_le16((u16)(f->vlan));
1916 			}
1917 
1918 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1919 			del_list[num_del].flags = cmd_flags;
1920 			num_del++;
1921 
1922 			/* flush a full buffer */
1923 			if (num_del == filter_list_len) {
1924 				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
1925 								del_list,
1926 								num_del, NULL);
1927 				aq_err = hw->aq.asq_last_status;
1928 				num_del = 0;
1929 				memset(del_list, 0, list_size);
1930 
1931 				/* Explicitly ignore and do not report when
1932 				 * firmware returns ENOENT.
1933 				 */
1934 				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1935 					retval = -EIO;
1936 					dev_info(&pf->pdev->dev,
1937 						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
1938 						 vsi_name,
1939 						 i40e_stat_str(hw, aq_ret),
1940 						 i40e_aq_str(hw, aq_err));
1941 				}
1942 			}
1943 			/* Release memory for MAC filter entries which were
1944 			 * synced up with HW.
1945 			 */
1946 			list_del(&f->list);
1947 			kfree(f);
1948 		}
1949 
1950 		if (num_del) {
1951 			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
1952 							num_del, NULL);
1953 			aq_err = hw->aq.asq_last_status;
1954 			num_del = 0;
1955 
1956 			/* Explicitly ignore and do not report when firmware
1957 			 * returns ENOENT.
1958 			 */
1959 			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1960 				retval = -EIO;
1961 				dev_info(&pf->pdev->dev,
1962 					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
1963 					 vsi_name,
1964 					 i40e_stat_str(hw, aq_ret),
1965 					 i40e_aq_str(hw, aq_err));
1966 			}
1967 		}
1968 
1969 		kfree(del_list);
1970 		del_list = NULL;
1971 	}
1972 
1973 	if (!list_empty(&tmp_add_list)) {
1974 		/* Do all the adds now. */
1975 		filter_list_len = hw->aq.asq_buf_size /
1976 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
1977 		list_size = filter_list_len *
1978 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
1979 		add_list = kzalloc(list_size, GFP_ATOMIC);
1980 		if (!add_list) {
1981 			retval = -ENOMEM;
1982 			goto out;
1983 		}
1984 		num_add = 0;
1985 		list_for_each_entry(f, &tmp_add_list, list) {
1986 			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1987 				     &vsi->state)) {
1988 				f->state = I40E_FILTER_FAILED;
1989 				continue;
1990 			}
1991 			/* add to add array */
1992 			if (num_add == 0)
1993 				add_head = f;
1994 			cmd_flags = 0;
1995 			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1996 			if (f->vlan == I40E_VLAN_ANY) {
1997 				add_list[num_add].vlan_tag = 0;
1998 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1999 			} else {
2000 				add_list[num_add].vlan_tag =
2001 					cpu_to_le16((u16)(f->vlan));
2002 			}
2003 			add_list[num_add].queue_number = 0;
2004 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2005 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
2006 			num_add++;
2007 
2008 			/* flush a full buffer */
2009 			if (num_add == filter_list_len) {
2010 				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2011 							     add_list, num_add,
2012 							     NULL);
2013 				aq_err = hw->aq.asq_last_status;
2014 				fcnt = i40e_update_filter_state(num_add,
2015 								add_list,
2016 								add_head,
2017 								aq_err);
2018 				vsi->active_filters += fcnt;
2019 
2020 				if (fcnt != num_add) {
2021 					promisc_changed = true;
2022 					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2023 						&vsi->state);
2024 					vsi->promisc_threshold =
2025 						(vsi->active_filters * 3) / 4;
2026 					dev_warn(&pf->pdev->dev,
2027 						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2028 						 i40e_aq_str(hw, aq_err),
2029 						 vsi_name);
2030 				}
2031 				memset(add_list, 0, list_size);
2032 				num_add = 0;
2033 			}
2034 		}
2035 		if (num_add) {
2036 			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2037 						     add_list, num_add, NULL);
2038 			aq_err = hw->aq.asq_last_status;
2039 			fcnt = i40e_update_filter_state(num_add, add_list,
2040 							add_head, aq_err);
2041 			vsi->active_filters += fcnt;
2042 			if (fcnt != num_add) {
2043 				promisc_changed = true;
2044 				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2045 					&vsi->state);
2046 				vsi->promisc_threshold =
2047 						(vsi->active_filters * 3) / 4;
2048 				dev_warn(&pf->pdev->dev,
2049 					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2050 					 i40e_aq_str(hw, aq_err), vsi_name);
2051 			}
2052 		}
2053 		/* Now move all of the filters from the temp add list back to
2054 		 * the VSI's list.
2055 		 */
2056 		spin_lock_bh(&vsi->mac_filter_list_lock);
2057 		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2058 			list_move_tail(&f->list, &vsi->mac_filter_list);
2059 		}
2060 		spin_unlock_bh(&vsi->mac_filter_list_lock);
2061 		kfree(add_list);
2062 		add_list = NULL;
2063 	}
2064 
2065 	/* Check to see if we can drop out of overflow promiscuous mode. */
2066 	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
2067 	    (vsi->active_filters < vsi->promisc_threshold)) {
2068 		int failed_count = 0;
2069 		/* See if we have any failed filters. We can't drop out of
2070 		 * promiscuous until these have all been deleted.
2071 		 */
2072 		spin_lock_bh(&vsi->mac_filter_list_lock);
2073 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2074 			if (f->state == I40E_FILTER_FAILED)
2075 				failed_count++;
2076 		}
2077 		spin_unlock_bh(&vsi->mac_filter_list_lock);
2078 		if (!failed_count) {
2079 			dev_info(&pf->pdev->dev,
2080 				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2081 				 vsi_name);
2082 			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2083 			promisc_changed = true;
2084 			vsi->promisc_threshold = 0;
2085 		}
2086 	}
2087 
2088 	/* if the VF is not trusted do not do promisc */
2089 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2090 		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2091 		goto out;
2092 	}
2093 
2094 	/* check for changes in promiscuous modes */
2095 	if (changed_flags & IFF_ALLMULTI) {
2096 		bool cur_multipromisc;
2097 
2098 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2099 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2100 							       vsi->seid,
2101 							       cur_multipromisc,
2102 							       NULL);
2103 		if (aq_ret) {
2104 			retval = i40e_aq_rc_to_posix(aq_ret,
2105 						     hw->aq.asq_last_status);
2106 			dev_info(&pf->pdev->dev,
2107 				 "set multi promisc failed on %s, err %s aq_err %s\n",
2108 				 vsi_name,
2109 				 i40e_stat_str(hw, aq_ret),
2110 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2111 		}
2112 	}
2113 	if ((changed_flags & IFF_PROMISC) ||
2114 	    (promisc_changed &&
2115 	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
2116 		bool cur_promisc;
2117 
2118 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2119 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2120 					&vsi->state));
2121 		if ((vsi->type == I40E_VSI_MAIN) &&
2122 		    (pf->lan_veb != I40E_NO_VEB) &&
2123 		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2124 			/* set defport ON for Main VSI instead of true promisc
2125 			 * this way we will get all unicast/multicast and VLAN
2126 			 * promisc behavior but will not get VF or VMDq traffic
2127 			 * replicated on the Main VSI.
2128 			 */
2129 			if (pf->cur_promisc != cur_promisc) {
2130 				pf->cur_promisc = cur_promisc;
2131 				if (cur_promisc)
2132 					aq_ret =
2133 					      i40e_aq_set_default_vsi(hw,
2134 								      vsi->seid,
2135 								      NULL);
2136 				else
2137 					aq_ret =
2138 					    i40e_aq_clear_default_vsi(hw,
2139 								      vsi->seid,
2140 								      NULL);
2141 				if (aq_ret) {
2142 					retval = i40e_aq_rc_to_posix(aq_ret,
2143 							hw->aq.asq_last_status);
2144 					dev_info(&pf->pdev->dev,
2145 						 "Set default VSI failed on %s, err %s, aq_err %s\n",
2146 						 vsi_name,
2147 						 i40e_stat_str(hw, aq_ret),
2148 						 i40e_aq_str(hw,
2149 						     hw->aq.asq_last_status));
2150 				}
2151 			}
2152 		} else {
2153 			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2154 							  hw,
2155 							  vsi->seid,
2156 							  cur_promisc, NULL,
2157 							  true);
2158 			if (aq_ret) {
2159 				retval =
2160 				i40e_aq_rc_to_posix(aq_ret,
2161 						    hw->aq.asq_last_status);
2162 				dev_info(&pf->pdev->dev,
2163 					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2164 					 vsi_name,
2165 					 i40e_stat_str(hw, aq_ret),
2166 					 i40e_aq_str(hw,
2167 						     hw->aq.asq_last_status));
2168 			}
2169 			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2170 							  hw,
2171 							  vsi->seid,
2172 							  cur_promisc, NULL);
2173 			if (aq_ret) {
2174 				retval =
2175 				i40e_aq_rc_to_posix(aq_ret,
2176 						    hw->aq.asq_last_status);
2177 				dev_info(&pf->pdev->dev,
2178 					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2179 					 vsi_name,
2180 					 i40e_stat_str(hw, aq_ret),
2181 					 i40e_aq_str(hw,
2182 						     hw->aq.asq_last_status));
2183 			}
2184 		}
2185 		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2186 						   vsi->seid,
2187 						   cur_promisc, NULL);
2188 		if (aq_ret) {
2189 			retval = i40e_aq_rc_to_posix(aq_ret,
2190 						     pf->hw.aq.asq_last_status);
2191 			dev_info(&pf->pdev->dev,
2192 				 "set brdcast promisc failed, err %s, aq_err %s\n",
2193 					 i40e_stat_str(hw, aq_ret),
2194 					 i40e_aq_str(hw,
2195 						     hw->aq.asq_last_status));
2196 		}
2197 	}
2198 out:
2199 	/* if something went wrong then set the changed flag so we try again */
2200 	if (retval)
2201 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2202 
2203 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2204 	return retval;
2205 }
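
/* The overflow-promiscuous hysteresis above, reduced to its arithmetic:
 * when an add batch does not fully fit, the threshold is pinned at 3/4 of
 * the filters that did fit, and overflow mode is left only once the active
 * count drops below that threshold with no failed filters remaining.  A
 * sketch with hypothetical names:
 */
#include <stdbool.h>
#include <stdio.h>

struct ex_vsi {
	int active_filters;
	int promisc_threshold;
	bool overflow_promisc;
};

static void ex_on_add_overflow(struct ex_vsi *v)
{
	v->overflow_promisc = true;
	v->promisc_threshold = (v->active_filters * 3) / 4;
}

static void ex_try_leave_overflow(struct ex_vsi *v, int failed_count)
{
	if (v->overflow_promisc && !failed_count &&
	    v->active_filters < v->promisc_threshold) {
		v->overflow_promisc = false;
		v->promisc_threshold = 0;
	}
}

int main(void)
{
	struct ex_vsi v = { .active_filters = 100 };

	ex_on_add_overflow(&v);		/* threshold becomes 75 */
	v.active_filters = 60;		/* filters deleted over time */
	ex_try_leave_overflow(&v, 0);
	printf("overflow promisc now %d\n", v.overflow_promisc);
	return 0;
}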
2206 
2207 /**
2208  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2209  * @pf: board private structure
2210  **/
2211 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2212 {
2213 	int v;
2214 
2215 	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2216 		return;
2217 	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2218 
2219 	for (v = 0; v < pf->num_alloc_vsi; v++) {
2220 		if (pf->vsi[v] &&
2221 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2222 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2223 
2224 			if (ret) {
2225 				/* come back and try again later */
2226 				pf->flags |= I40E_FLAG_FILTER_SYNC;
2227 				break;
2228 			}
2229 		}
2230 	}
2231 }
2232 
2233 /**
2234  * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2235  * @netdev: network interface device structure
2236  * @new_mtu: new value for maximum frame size
2237  *
2238  * Returns 0 on success, negative on failure
2239  **/
2240 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2241 {
2242 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2243 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2244 	struct i40e_vsi *vsi = np->vsi;
2245 
2246 	/* MTU < 68 is an error and causes problems on some kernels */
2247 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2248 		return -EINVAL;
2249 
2250 	netdev_info(netdev, "changing MTU from %d to %d\n",
2251 		    netdev->mtu, new_mtu);
2252 	netdev->mtu = new_mtu;
2253 	if (netif_running(netdev))
2254 		i40e_vsi_reinit_locked(vsi);
2255 	i40e_notify_client_of_l2_param_changes(vsi);
2256 	return 0;
2257 }
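
/* The bounds check above as plain arithmetic: a 1500-byte MTU implies a
 * 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 1522-byte frame,
 * which must fit the largest Rx buffer.  The constants below stand in for
 * the driver's macros; 9728 is an assumed value for I40E_MAX_RXBUFFER.
 */
#include <stdio.h>

#define EX_ETH_HLEN	14
#define EX_ETH_FCS_LEN	4
#define EX_VLAN_HLEN	4
#define EX_MAX_RXBUFFER	9728	/* assumed */

static int ex_mtu_valid(int new_mtu)
{
	int max_frame = new_mtu + EX_ETH_HLEN + EX_ETH_FCS_LEN + EX_VLAN_HLEN;

	return new_mtu >= 68 && max_frame <= EX_MAX_RXBUFFER;
}

int main(void)
{
	printf("MTU 1500 ok? %d\n", ex_mtu_valid(1500));	/* 1 */
	printf("MTU 9710 ok? %d\n", ex_mtu_valid(9710));	/* 0: 9732 > 9728 */
	return 0;
}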
2258 
2259 /**
2260  * i40e_ioctl - Access the hwtstamp interface
2261  * @netdev: network interface device structure
2262  * @ifr: interface request data
2263  * @cmd: ioctl command
2264  **/
2265 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2266 {
2267 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2268 	struct i40e_pf *pf = np->vsi->back;
2269 
2270 	switch (cmd) {
2271 	case SIOCGHWTSTAMP:
2272 		return i40e_ptp_get_ts_config(pf, ifr);
2273 	case SIOCSHWTSTAMP:
2274 		return i40e_ptp_set_ts_config(pf, ifr);
2275 	default:
2276 		return -EOPNOTSUPP;
2277 	}
2278 }
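
/* How user space typically reaches this handler: the standard
 * SIOCSHWTSTAMP ioctl with a struct hwtstamp_config from
 * linux/net_tstamp.h.  A hedged sketch; the interface name and filter
 * choice are assumptions, and error handling is minimal.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}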
2279 
2280 /**
2281  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2282  * @vsi: the vsi being adjusted
2283  **/
2284 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2285 {
2286 	struct i40e_vsi_context ctxt;
2287 	i40e_status ret;
2288 
2289 	if ((vsi->info.valid_sections &
2290 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2291 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2292 		return;  /* already enabled */
2293 
2294 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2295 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2296 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2297 
2298 	ctxt.seid = vsi->seid;
2299 	ctxt.info = vsi->info;
2300 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2301 	if (ret) {
2302 		dev_info(&vsi->back->pdev->dev,
2303 			 "update vlan stripping failed, err %s aq_err %s\n",
2304 			 i40e_stat_str(&vsi->back->hw, ret),
2305 			 i40e_aq_str(&vsi->back->hw,
2306 				     vsi->back->hw.aq.asq_last_status));
2307 	}
2308 }
2309 
2310 /**
2311  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2312  * @vsi: the vsi being adjusted
2313  **/
2314 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2315 {
2316 	struct i40e_vsi_context ctxt;
2317 	i40e_status ret;
2318 
2319 	if ((vsi->info.valid_sections &
2320 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2321 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2322 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2323 		return;  /* already disabled */
2324 
2325 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2326 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2327 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2328 
2329 	ctxt.seid = vsi->seid;
2330 	ctxt.info = vsi->info;
2331 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2332 	if (ret) {
2333 		dev_info(&vsi->back->pdev->dev,
2334 			 "update vlan stripping failed, err %s aq_err %s\n",
2335 			 i40e_stat_str(&vsi->back->hw, ret),
2336 			 i40e_aq_str(&vsi->back->hw,
2337 				     vsi->back->hw.aq.asq_last_status));
2338 	}
2339 }
2340 
2341 /**
2342  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2343  * @netdev: network interface to be adjusted
2344  * @features: netdev features to test if VLAN offload is enabled or not
2345  **/
2346 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2347 {
2348 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2349 	struct i40e_vsi *vsi = np->vsi;
2350 
2351 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2352 		i40e_vlan_stripping_enable(vsi);
2353 	else
2354 		i40e_vlan_stripping_disable(vsi);
2355 }
2356 
2357 /**
2358  * i40e_vsi_add_vlan - Add vsi membership for given vlan
2359  * @vsi: the vsi being configured
2360  * @vid: vlan id to be added (0 = untagged only , -1 = any)
2361  **/
2362 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2363 {
2364 	struct i40e_mac_filter *f, *ftmp, *add_f;
2365 	bool is_netdev, is_vf;
2366 
2367 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2368 	is_netdev = !!(vsi->netdev);
2369 
2370 	/* Locked once because all functions invoked below iterate the list */
2371 	spin_lock_bh(&vsi->mac_filter_list_lock);
2372 
2373 	if (is_netdev) {
2374 		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2375 					is_vf, is_netdev);
2376 		if (!add_f) {
2377 			dev_info(&vsi->back->pdev->dev,
2378 				 "Could not add vlan filter %d for %pM\n",
2379 				 vid, vsi->netdev->dev_addr);
2380 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2381 			return -ENOMEM;
2382 		}
2383 	}
2384 
2385 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2386 		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2387 		if (!add_f) {
2388 			dev_info(&vsi->back->pdev->dev,
2389 				 "Could not add vlan filter %d for %pM\n",
2390 				 vid, f->macaddr);
2391 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2392 			return -ENOMEM;
2393 		}
2394 	}
2395 
2396 	/* Now if we add a vlan tag, make sure to check if it is the first
2397 	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2398 	 * with 0, so we now accept untagged and specified tagged traffic
2399 	 * (and not all tags along with untagged)
2400 	 */
2401 	if (vid > 0) {
2402 		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2403 						  I40E_VLAN_ANY,
2404 						  is_vf, is_netdev)) {
2405 			i40e_del_filter(vsi, vsi->netdev->dev_addr,
2406 					I40E_VLAN_ANY, is_vf, is_netdev);
2407 			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2408 						is_vf, is_netdev);
2409 			if (!add_f) {
2410 				dev_info(&vsi->back->pdev->dev,
2411 					 "Could not add filter 0 for %pM\n",
2412 					 vsi->netdev->dev_addr);
2413 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2414 				return -ENOMEM;
2415 			}
2416 		}
2417 	}
2418 
2419 	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2420 	if (vid > 0 && !vsi->info.pvid) {
2421 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2422 			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2423 					      is_vf, is_netdev))
2424 				continue;
2425 			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2426 					is_vf, is_netdev);
2427 			add_f = i40e_add_filter(vsi, f->macaddr,
2428 						0, is_vf, is_netdev);
2429 			if (!add_f) {
2430 				dev_info(&vsi->back->pdev->dev,
2431 					 "Could not add filter 0 for %pM\n",
2432 					f->macaddr);
2433 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2434 				return -ENOMEM;
2435 			}
2436 		}
2437 	}
2438 
2439 	spin_unlock_bh(&vsi->mac_filter_list_lock);
2440 
2441 	/* schedule our worker thread which will take care of
2442 	 * applying the new filter changes
2443 	 */
2444 	i40e_service_event_schedule(vsi->back);
2445 	return 0;
2446 }
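
/* The VLAN filter convention used above, reduced to its decision rule:
 * vid == -1 (I40E_VLAN_ANY) matches any tag or none, vid == 0 matches
 * untagged/priority-tagged frames only, so adding the first real tag
 * demotes existing -1 filters to 0.  Illustrative only.
 */
#include <stdio.h>

#define EX_VLAN_ANY	(-1)

static int ex_vid_after_add(int existing_vid, int new_vid)
{
	/* first specific tag added: "-1 matches everything" becomes 0 */
	if (new_vid > 0 && existing_vid == EX_VLAN_ANY)
		return 0;
	return existing_vid;
}

int main(void)
{
	printf("after adding vid 100, the catch-all filter becomes vid %d\n",
	       ex_vid_after_add(EX_VLAN_ANY, 100));
	return 0;
}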
2447 
2448 /**
2449  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2450  * @vsi: the vsi being configured
2451  * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2452  *
2453  * Return: 0 on success or negative otherwise
2454  **/
2455 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2456 {
2457 	struct net_device *netdev = vsi->netdev;
2458 	struct i40e_mac_filter *f, *ftmp, *add_f;
2459 	bool is_vf, is_netdev;
2460 	int filter_count = 0;
2461 
2462 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2463 	is_netdev = !!(netdev);
2464 
2465 	/* Locked once because all functions invoked below iterate the list */
2466 	spin_lock_bh(&vsi->mac_filter_list_lock);
2467 
2468 	if (is_netdev)
2469 		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2470 
2471 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
2472 		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2473 
2474 	/* go through all the filters for this VSI and if there is only
2475 	 * vid == 0 it means there are no other filters, so vid 0 must
2476 	 * be replaced with -1. This signifies that we should from now
2477 	 * on accept any traffic (with any tag present, or untagged)
2478 	 */
2479 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2480 		if (is_netdev) {
2481 			if (f->vlan &&
2482 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2483 				filter_count++;
2484 		}
2485 
2486 		if (f->vlan)
2487 			filter_count++;
2488 	}
2489 
2490 	if (!filter_count && is_netdev) {
2491 		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2492 		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2493 				    is_vf, is_netdev);
2494 		if (!f) {
2495 			dev_info(&vsi->back->pdev->dev,
2496 				 "Could not add filter %d for %pM\n",
2497 				 I40E_VLAN_ANY, netdev->dev_addr);
2498 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2499 			return -ENOMEM;
2500 		}
2501 	}
2502 
2503 	if (!filter_count) {
2504 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2505 			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2506 			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2507 						is_vf, is_netdev);
2508 			if (!add_f) {
2509 				dev_info(&vsi->back->pdev->dev,
2510 					 "Could not add filter %d for %pM\n",
2511 					 I40E_VLAN_ANY, f->macaddr);
2512 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2513 				return -ENOMEM;
2514 			}
2515 		}
2516 	}
2517 
2518 	spin_unlock_bh(&vsi->mac_filter_list_lock);
2519 
2520 	/* schedule our worker thread which will take care of
2521 	 * applying the new filter changes
2522 	 */
2523 	i40e_service_event_schedule(vsi->back);
2524 	return 0;
2525 }
2526 
2527 /**
2528  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2529  * @netdev: network interface to be adjusted
2530  * @vid: vlan id to be added
2531  *
2532  * net_device_ops implementation for adding vlan ids
2533  **/
2534 #ifdef I40E_FCOE
2535 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2536 			 __always_unused __be16 proto, u16 vid)
2537 #else
2538 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2539 				__always_unused __be16 proto, u16 vid)
2540 #endif
2541 {
2542 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2543 	struct i40e_vsi *vsi = np->vsi;
2544 	int ret = 0;
2545 
2546 	if (vid > 4095)
2547 		return -EINVAL;
2548 
2549 	/* If the network stack called us with vid = 0 then
2550 	 * it is asking to receive priority tagged packets with
2551 	 * vlan id 0.  Our HW receives them by default when configured
2552 	 * to receive untagged packets so there is no need to add an
2553 	 * extra filter for vlan 0 tagged packets.
2554 	 */
2555 	if (vid)
2556 		ret = i40e_vsi_add_vlan(vsi, vid);
2557 
2558 	if (!ret && (vid < VLAN_N_VID))
2559 		set_bit(vid, vsi->active_vlans);
2560 
2561 	return ret;
2562 }
2563 
2564 /**
2565  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2566  * @netdev: network interface to be adjusted
2567  * @vid: vlan id to be removed
2568  *
2569  * net_device_ops implementation for removing vlan ids
2570  **/
2571 #ifdef I40E_FCOE
2572 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2573 			  __always_unused __be16 proto, u16 vid)
2574 #else
2575 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2576 				 __always_unused __be16 proto, u16 vid)
2577 #endif
2578 {
2579 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2580 	struct i40e_vsi *vsi = np->vsi;
2581 
2582 	/* return code is ignored as there is nothing a user
2583 	 * can do about failure to remove and a log message was
2584 	 * already printed by the other function
2585 	 */
2586 	i40e_vsi_kill_vlan(vsi, vid);
2587 
2588 	clear_bit(vid, vsi->active_vlans);
2589 
2590 	return 0;
2591 }
2592 
2593 /**
2594  * i40e_macaddr_init - explicitly write the mac address filters
2595  *
2596  * @vsi: pointer to the vsi
2597  * @macaddr: the MAC address
2598  *
2599  * This is needed when the macaddr has been obtained by other
2600  * means than the default, e.g., from Open Firmware or IDPROM.
2601  * Returns 0 on success, negative on failure
2602  **/
2603 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2604 {
2605 	int ret;
2606 	struct i40e_aqc_add_macvlan_element_data element;
2607 
2608 	ret = i40e_aq_mac_address_write(&vsi->back->hw,
2609 					I40E_AQC_WRITE_TYPE_LAA_WOL,
2610 					macaddr, NULL);
2611 	if (ret) {
2612 		dev_info(&vsi->back->pdev->dev,
2613 			 "Addr change for VSI failed: %d\n", ret);
2614 		return -EADDRNOTAVAIL;
2615 	}
2616 
2617 	memset(&element, 0, sizeof(element));
2618 	ether_addr_copy(element.mac_addr, macaddr);
2619 	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2620 	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2621 	if (ret) {
2622 		dev_info(&vsi->back->pdev->dev,
2623 			 "add filter failed err %s aq_err %s\n",
2624 			 i40e_stat_str(&vsi->back->hw, ret),
2625 			 i40e_aq_str(&vsi->back->hw,
2626 				     vsi->back->hw.aq.asq_last_status));
2627 	}
2628 	return ret;
2629 }
2630 
2631 /**
2632  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2633  * @vsi: the vsi being brought back up
2634  **/
2635 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2636 {
2637 	u16 vid;
2638 
2639 	if (!vsi->netdev)
2640 		return;
2641 
2642 	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2643 
2644 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2645 		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2646 				     vid);
2647 }
2648 
2649 /**
2650  * i40e_vsi_add_pvid - Add pvid for the VSI
2651  * @vsi: the vsi being adjusted
2652  * @vid: the vlan id to set as a PVID
2653  **/
2654 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2655 {
2656 	struct i40e_vsi_context ctxt;
2657 	i40e_status ret;
2658 
2659 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2660 	vsi->info.pvid = cpu_to_le16(vid);
2661 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2662 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
2663 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
2664 
2665 	ctxt.seid = vsi->seid;
2666 	ctxt.info = vsi->info;
2667 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2668 	if (ret) {
2669 		dev_info(&vsi->back->pdev->dev,
2670 			 "add pvid failed, err %s aq_err %s\n",
2671 			 i40e_stat_str(&vsi->back->hw, ret),
2672 			 i40e_aq_str(&vsi->back->hw,
2673 				     vsi->back->hw.aq.asq_last_status));
2674 		return -ENOENT;
2675 	}
2676 
2677 	return 0;
2678 }
2679 
2680 /**
2681  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2682  * @vsi: the vsi being adjusted
2683  *
2684  * Just disable VLAN stripping to put the VSI back to normal
2685  **/
2686 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2687 {
2688 	i40e_vlan_stripping_disable(vsi);
2689 
2690 	vsi->info.pvid = 0;
2691 }
2692 
2693 /**
2694  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2695  * @vsi: ptr to the VSI
2696  *
2697  * If this function returns with an error, then it's possible one or
2698  * more of the rings is populated (while the rest are not).  It is the
2699  * caller's duty to clean those orphaned rings.
2700  *
2701  * Return 0 on success, negative on failure
2702  **/
2703 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2704 {
2705 	int i, err = 0;
2706 
2707 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2708 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2709 
2710 	return err;
2711 }
2712 
2713 /**
2714  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2715  * @vsi: ptr to the VSI
2716  *
2717  * Free VSI's transmit software resources
2718  **/
2719 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2720 {
2721 	int i;
2722 
2723 	if (!vsi->tx_rings)
2724 		return;
2725 
2726 	for (i = 0; i < vsi->num_queue_pairs; i++)
2727 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2728 			i40e_free_tx_resources(vsi->tx_rings[i]);
2729 }
2730 
2731 /**
2732  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2733  * @vsi: ptr to the VSI
2734  *
2735  * If this function returns with an error, then it's possible one or
2736  * more of the rings is populated (while the rest are not).  It is the
2737  * caller's duty to clean those orphaned rings.
2738  *
2739  * Return 0 on success, negative on failure
2740  **/
2741 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2742 {
2743 	int i, err = 0;
2744 
2745 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2746 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2747 #ifdef I40E_FCOE
2748 	i40e_fcoe_setup_ddp_resources(vsi);
2749 #endif
2750 	return err;
2751 }
2752 
2753 /**
2754  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2755  * @vsi: ptr to the VSI
2756  *
2757  * Free all receive software resources
2758  **/
2759 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2760 {
2761 	int i;
2762 
2763 	if (!vsi->rx_rings)
2764 		return;
2765 
2766 	for (i = 0; i < vsi->num_queue_pairs; i++)
2767 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2768 			i40e_free_rx_resources(vsi->rx_rings[i]);
2769 #ifdef I40E_FCOE
2770 	i40e_fcoe_free_ddp_resources(vsi);
2771 #endif
2772 }
2773 
2774 /**
2775  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2776  * @ring: The Tx ring to configure
2777  *
2778  * This enables/disables XPS for a given Tx descriptor ring
2779  * based on the TCs enabled for the VSI that ring belongs to.
2780  **/
2781 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2782 {
2783 	struct i40e_vsi *vsi = ring->vsi;
2784 	cpumask_var_t mask;
2785 
2786 	if (!ring->q_vector || !ring->netdev)
2787 		return;
2788 
2789 	/* In single TC mode, enable XPS */
2790 	if (vsi->tc_config.numtc <= 1) {
2791 		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2792 			netif_set_xps_queue(ring->netdev,
2793 					    &ring->q_vector->affinity_mask,
2794 					    ring->queue_index);
2795 	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2796 		/* Disable XPS to allow selection based on TC */
2797 		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2798 		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2799 		free_cpumask_var(mask);
2800 	}
2806 }
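
/* The administrator-visible side of XPS: what netif_set_xps_queue()
 * programs here can also be inspected or overridden through sysfs.  A
 * sketch that pins tx-0 of a hypothetical "eth0" to CPU 0; the path
 * follows the standard queues/tx-<n>/xps_cpus layout.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/eth0/queues/tx-0/xps_cpus";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* hex cpumask: CPU 0 only */
	fclose(f);
	return 0;
}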
2807 
2808 /**
2809  * i40e_configure_tx_ring - Configure a transmit ring context
2810  * @ring: The Tx ring to configure
2811  *
2812  * Configure the Tx descriptor ring in the HMC context.
2813  **/
2814 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2815 {
2816 	struct i40e_vsi *vsi = ring->vsi;
2817 	u16 pf_q = vsi->base_queue + ring->queue_index;
2818 	struct i40e_hw *hw = &vsi->back->hw;
2819 	struct i40e_hmc_obj_txq tx_ctx;
2820 	i40e_status err = 0;
2821 	u32 qtx_ctl = 0;
2822 
2823 	/* some ATR related tx ring init */
2824 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2825 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
2826 		ring->atr_count = 0;
2827 	} else {
2828 		ring->atr_sample_rate = 0;
2829 	}
2830 
2831 	/* configure XPS */
2832 	i40e_config_xps_tx_ring(ring);
2833 
2834 	/* clear the context structure first */
2835 	memset(&tx_ctx, 0, sizeof(tx_ctx));
2836 
2837 	tx_ctx.new_context = 1;
2838 	tx_ctx.base = (ring->dma / 128);
2839 	tx_ctx.qlen = ring->count;
2840 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2841 					       I40E_FLAG_FD_ATR_ENABLED));
2842 #ifdef I40E_FCOE
2843 	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2844 #endif
2845 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2846 	/* FDIR VSI tx ring can still use RS bit and writebacks */
2847 	if (vsi->type != I40E_VSI_FDIR)
2848 		tx_ctx.head_wb_ena = 1;
2849 	tx_ctx.head_wb_addr = ring->dma +
2850 			      (ring->count * sizeof(struct i40e_tx_desc));
2851 
2852 	/* As part of VSI creation/update, FW allocates certain
2853 	 * Tx arbitration queue sets for each TC enabled for
2854 	 * the VSI. The FW returns the handles to these queue
2855 	 * sets as part of the response buffer to Add VSI,
2856 	 * Update VSI, etc. AQ commands. It is expected that
2857 	 * these queue set handles be associated with the Tx
2858 	 * queues by the driver as part of the TX queue context
2859 	 * initialization. This has to be done regardless of
2860 	 * DCB as by default everything is mapped to TC0.
2861 	 */
2862 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2863 	tx_ctx.rdylist_act = 0;
2864 
2865 	/* clear the context in the HMC */
2866 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2867 	if (err) {
2868 		dev_info(&vsi->back->pdev->dev,
2869 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2870 			 ring->queue_index, pf_q, err);
2871 		return -ENOMEM;
2872 	}
2873 
2874 	/* set the context in the HMC */
2875 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2876 	if (err) {
2877 		dev_info(&vsi->back->pdev->dev,
2878 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2879 			 ring->queue_index, pf_q, err);
2880 		return -ENOMEM;
2881 	}
2882 
2883 	/* Now associate this queue with this PCI function */
2884 	if (vsi->type == I40E_VSI_VMDQ2) {
2885 		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2886 		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2887 			   I40E_QTX_CTL_VFVM_INDX_MASK;
2888 	} else {
2889 		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2890 	}
2891 
2892 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2893 		    I40E_QTX_CTL_PF_INDX_MASK);
2894 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2895 	i40e_flush(hw);
2896 
2897 	/* cache tail off for easier writes later */
2898 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2899 
2900 	return 0;
2901 }
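
/* The two address computations above in isolation: the ring base is
 * programmed in 128-byte units, and the head writeback slot sits right
 * after the last descriptor.  The 16-byte descriptor size is assumed to
 * match sizeof(struct i40e_tx_desc); the sample values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x100000;	/* hypothetical ring DMA address */
	uint16_t count = 512;		/* descriptors in the ring */
	uint64_t base = dma / 128;
	uint64_t head_wb = dma + (uint64_t)count * 16;

	printf("ctx base = %llu, head_wb addr = 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)head_wb);
	return 0;
}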
2902 
2903 /**
2904  * i40e_configure_rx_ring - Configure a receive ring context
2905  * @ring: The Rx ring to configure
2906  *
2907  * Configure the Rx descriptor ring in the HMC context.
2908  **/
2909 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2910 {
2911 	struct i40e_vsi *vsi = ring->vsi;
2912 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2913 	u16 pf_q = vsi->base_queue + ring->queue_index;
2914 	struct i40e_hw *hw = &vsi->back->hw;
2915 	struct i40e_hmc_obj_rxq rx_ctx;
2916 	i40e_status err = 0;
2917 
2918 	ring->state = 0;
2919 
2920 	/* clear the context structure first */
2921 	memset(&rx_ctx, 0, sizeof(rx_ctx));
2922 
2923 	ring->rx_buf_len = vsi->rx_buf_len;
2924 
2925 	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2926 
2927 	rx_ctx.base = (ring->dma / 128);
2928 	rx_ctx.qlen = ring->count;
2929 
2930 	/* use 32 byte descriptors */
2931 	rx_ctx.dsize = 1;
2932 
2933 	/* descriptor type is always zero
2934 	 * rx_ctx.dtype = 0;
2935 	 */
2936 	rx_ctx.hsplit_0 = 0;
2937 
2938 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
2939 	if (hw->revision_id == 0)
2940 		rx_ctx.lrxqthresh = 0;
2941 	else
2942 		rx_ctx.lrxqthresh = 2;
2943 	rx_ctx.crcstrip = 1;
2944 	rx_ctx.l2tsel = 1;
2945 	/* this controls whether VLAN is stripped from inner headers */
2946 	rx_ctx.showiv = 0;
2947 #ifdef I40E_FCOE
2948 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2949 #endif
2950 	/* set the prefena field to 1 because the manual says to */
2951 	rx_ctx.prefena = 1;
2952 
2953 	/* clear the context in the HMC */
2954 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2955 	if (err) {
2956 		dev_info(&vsi->back->pdev->dev,
2957 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2958 			 ring->queue_index, pf_q, err);
2959 		return -ENOMEM;
2960 	}
2961 
2962 	/* set the context in the HMC */
2963 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2964 	if (err) {
2965 		dev_info(&vsi->back->pdev->dev,
2966 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2967 			 ring->queue_index, pf_q, err);
2968 		return -ENOMEM;
2969 	}
2970 
2971 	/* cache tail for quicker writes, and clear the reg before use */
2972 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2973 	writel(0, ring->tail);
2974 
2975 	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2976 
2977 	return 0;
2978 }
2979 
2980 /**
2981  * i40e_vsi_configure_tx - Configure the VSI for Tx
2982  * @vsi: VSI structure describing this set of rings and resources
2983  *
2984  * Configure the Tx VSI for operation.
2985  **/
2986 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2987 {
2988 	int err = 0;
2989 	u16 i;
2990 
2991 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2992 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2993 
2994 	return err;
2995 }
2996 
2997 /**
2998  * i40e_vsi_configure_rx - Configure the VSI for Rx
2999  * @vsi: the VSI being configured
3000  *
3001  * Configure the Rx VSI for operation.
3002  **/
3003 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3004 {
3005 	int err = 0;
3006 	u16 i;
3007 
3008 	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
3009 		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
3010 			       + ETH_FCS_LEN + VLAN_HLEN;
3011 	else
3012 		vsi->max_frame = I40E_RXBUFFER_2048;
3013 
3014 	vsi->rx_buf_len = I40E_RXBUFFER_2048;
3015 
3016 #ifdef I40E_FCOE
3017 	/* setup rx buffer for FCoE */
3018 	if ((vsi->type == I40E_VSI_FCOE) &&
3019 	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
3020 		vsi->rx_buf_len = I40E_RXBUFFER_3072;
3021 		vsi->max_frame = I40E_RXBUFFER_3072;
3022 	}
3023 
3024 #endif /* I40E_FCOE */
3025 	/* round up for the chip's needs */
3026 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
3027 				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3028 
3029 	/* set up individual rings */
3030 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3031 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3032 
3033 	return err;
3034 }
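
/* The Rx buffer sizing math above: the context field dbuff is expressed
 * in 128-byte units (assuming I40E_RXQ_CTX_DBUFF_SHIFT is 7), so the
 * buffer length is first rounded up to that granularity.  A sketch:
 */
#include <stdint.h>
#include <stdio.h>

#define EX_DBUFF_SHIFT	7	/* assumed I40E_RXQ_CTX_DBUFF_SHIFT */

static uint16_t ex_align_buf_len(uint16_t len)
{
	uint16_t gran = 1u << EX_DBUFF_SHIFT;

	return (uint16_t)((len + gran - 1) & ~(gran - 1));
}

int main(void)
{
	uint16_t len = ex_align_buf_len(2048);

	printf("rx_buf_len = %u, dbuff = %u\n", len, len >> EX_DBUFF_SHIFT);
	return 0;
}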
3035 
3036 /**
3037  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3038  * @vsi: ptr to the VSI
3039  **/
3040 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3041 {
3042 	struct i40e_ring *tx_ring, *rx_ring;
3043 	u16 qoffset, qcount;
3044 	int i, n;
3045 
3046 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3047 		/* Reset the TC information */
3048 		for (i = 0; i < vsi->num_queue_pairs; i++) {
3049 			rx_ring = vsi->rx_rings[i];
3050 			tx_ring = vsi->tx_rings[i];
3051 			rx_ring->dcb_tc = 0;
3052 			tx_ring->dcb_tc = 0;
3053 		}
3054 	}
3055 
3056 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3057 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3058 			continue;
3059 
3060 		qoffset = vsi->tc_config.tc_info[n].qoffset;
3061 		qcount = vsi->tc_config.tc_info[n].qcount;
3062 		for (i = qoffset; i < (qoffset + qcount); i++) {
3063 			rx_ring = vsi->rx_rings[i];
3064 			tx_ring = vsi->tx_rings[i];
3065 			rx_ring->dcb_tc = n;
3066 			tx_ring->dcb_tc = n;
3067 		}
3068 	}
3069 }
3070 
3071 /**
3072  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3073  * @vsi: ptr to the VSI
3074  **/
3075 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3076 {
3077 	struct i40e_pf *pf = vsi->back;
3078 	int err;
3079 
3080 	if (vsi->netdev)
3081 		i40e_set_rx_mode(vsi->netdev);
3082 
3083 	if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
3084 		err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
3085 		if (err) {
3086 			dev_warn(&pf->pdev->dev,
3087 				 "could not set up macaddr; err %d\n", err);
3088 		}
3089 	}
3090 }
3091 
3092 /**
3093  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3094  * @vsi: Pointer to the targeted VSI
3095  *
3096  * This function replays the hlist on the hw where all the SB Flow Director
3097  * filters were saved.
3098  **/
3099 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3100 {
3101 	struct i40e_fdir_filter *filter;
3102 	struct i40e_pf *pf = vsi->back;
3103 	struct hlist_node *node;
3104 
3105 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3106 		return;
3107 
3108 	hlist_for_each_entry_safe(filter, node,
3109 				  &pf->fdir_filter_list, fdir_node) {
3110 		i40e_add_del_fdir(vsi, filter, true);
3111 	}
3112 }
3113 
3114 /**
3115  * i40e_vsi_configure - Set up the VSI for action
3116  * @vsi: the VSI being configured
3117  **/
3118 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3119 {
3120 	int err;
3121 
3122 	i40e_set_vsi_rx_mode(vsi);
3123 	i40e_restore_vlan(vsi);
3124 	i40e_vsi_config_dcb_rings(vsi);
3125 	err = i40e_vsi_configure_tx(vsi);
3126 	if (!err)
3127 		err = i40e_vsi_configure_rx(vsi);
3128 
3129 	return err;
3130 }
3131 
3132 /**
3133  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3134  * @vsi: the VSI being configured
3135  **/
3136 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3137 {
3138 	struct i40e_pf *pf = vsi->back;
3139 	struct i40e_hw *hw = &pf->hw;
3140 	u16 vector;
3141 	int i, q;
3142 	u32 qp;
3143 
3144 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
3145 	 * and PFINT_LNKLSTn registers, e.g.:
3146 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
3147 	 */
3148 	qp = vsi->base_queue;
3149 	vector = vsi->base_vector;
3150 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3151 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3152 
3153 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
3154 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3155 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
3156 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3157 		     q_vector->rx.itr);
3158 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3159 		q_vector->tx.latency_range = I40E_LOW_LATENCY;
3160 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3161 		     q_vector->tx.itr);
3162 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
3163 		     INTRL_USEC_TO_REG(vsi->int_rate_limit));
3164 
3165 		/* Linked list for the queuepairs assigned to this vector */
3166 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3167 		for (q = 0; q < q_vector->num_ringpairs; q++) {
3168 			u32 val;
3169 
3170 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3171 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
3172 			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3173 			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3174 			      (I40E_QUEUE_TYPE_TX
3175 				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3176 
3177 			wr32(hw, I40E_QINT_RQCTL(qp), val);
3178 
3179 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3180 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
3181 			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3182 			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
3183 			      (I40E_QUEUE_TYPE_RX
3184 				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3185 
3186 			/* Terminate the linked list */
3187 			if (q == (q_vector->num_ringpairs - 1))
3188 				val |= (I40E_QUEUE_END_OF_LIST
3189 					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3190 
3191 			wr32(hw, I40E_QINT_TQCTL(qp), val);
3192 			qp++;
3193 		}
3194 	}
3195 
3196 	i40e_flush(hw);
3197 }
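
/* The register linked list built above, simulated: PFINT_LNKLSTN names a
 * vector's first Rx queue, each RQCTL chains to the same-index Tx queue,
 * and each TQCTL chains to the next Rx queue or the end-of-list marker
 * (0x7FF is an assumed value for I40E_QUEUE_END_OF_LIST).
 */
#include <stdio.h>

#define EX_EOL	0x7FF	/* assumed end-of-list marker */

int main(void)
{
	int base_qp = 4, num_ringpairs = 3, qp;

	printf("lnklstn -> rx %d\n", base_qp);
	for (qp = base_qp; qp < base_qp + num_ringpairs; qp++) {
		int last = (qp == base_qp + num_ringpairs - 1);

		if (last)
			printf("rx %d -> tx %d -> end of list\n", qp, qp);
		else
			printf("rx %d -> tx %d -> rx %d\n", qp, qp, qp + 1);
	}
	return 0;
}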
3198 
3199 /**
3200  * i40e_enable_misc_int_causes - enable the non-queue interrupts
3201  * @pf: board private structure
3202  **/
3203 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3204 {
3205 	struct i40e_hw *hw = &pf->hw;
3206 	u32 val;
3207 
3208 	/* clear things first */
3209 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
3210 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
3211 
3212 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
3213 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
3214 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
3215 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3216 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
3217 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
3218 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
3219 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3220 
3221 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3222 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3223 
3224 	if (pf->flags & I40E_FLAG_PTP)
3225 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3226 
3227 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
3228 
3229 	/* SW_ITR_IDX = 0, but don't change INTENA */
3230 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3231 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3232 
3233 	/* OTHER_ITR_IDX = 0 */
3234 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3235 }
3236 
3237 /**
3238  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3239  * @vsi: the VSI being configured
3240  **/
3241 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3242 {
3243 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3244 	struct i40e_pf *pf = vsi->back;
3245 	struct i40e_hw *hw = &pf->hw;
3246 	u32 val;
3247 
3248 	/* set the ITR configuration */
3249 	q_vector->itr_countdown = ITR_COUNTDOWN_START;
3250 	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3251 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
3252 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3253 	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3254 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
3255 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3256 
3257 	i40e_enable_misc_int_causes(pf);
3258 
3259 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3260 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3261 
3262 	/* Associate the queue pair to the vector and enable the queue int */
3263 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |
3264 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3265 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3266 
3267 	wr32(hw, I40E_QINT_RQCTL(0), val);
3268 
3269 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3270 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3271 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3272 
3273 	wr32(hw, I40E_QINT_TQCTL(0), val);
3274 	i40e_flush(hw);
3275 }
3276 
3277 /**
3278  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3279  * @pf: board private structure
3280  **/
3281 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3282 {
3283 	struct i40e_hw *hw = &pf->hw;
3284 
3285 	wr32(hw, I40E_PFINT_DYN_CTL0,
3286 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3287 	i40e_flush(hw);
3288 }
3289 
3290 /**
3291  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3292  * @pf: board private structure
3293  * @clearpba: true when all pending interrupt events should be cleared
3294  **/
3295 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3296 {
3297 	struct i40e_hw *hw = &pf->hw;
3298 	u32 val;
3299 
3300 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3301 	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3302 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3303 
3304 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3305 	i40e_flush(hw);
3306 }
3307 
3308 /**
3309  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3310  * @irq: interrupt number
3311  * @data: pointer to a q_vector
3312  **/
3313 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3314 {
3315 	struct i40e_q_vector *q_vector = data;
3316 
3317 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3318 		return IRQ_HANDLED;
3319 
3320 	napi_schedule_irqoff(&q_vector->napi);
3321 
3322 	return IRQ_HANDLED;
3323 }
3324 
3325 /**
3326  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3327  * @vsi: the VSI being configured
3328  * @basename: name for the vector
3329  *
3330  * Allocates MSI-X vectors and requests interrupts from the kernel.
3331  **/
3332 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3333 {
3334 	int q_vectors = vsi->num_q_vectors;
3335 	struct i40e_pf *pf = vsi->back;
3336 	int base = vsi->base_vector;
3337 	int rx_int_idx = 0;
3338 	int tx_int_idx = 0;
3339 	int vector, err;
3340 
3341 	for (vector = 0; vector < q_vectors; vector++) {
3342 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3343 
3344 		if (q_vector->tx.ring && q_vector->rx.ring) {
3345 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3346 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3347 			tx_int_idx++;
3348 		} else if (q_vector->rx.ring) {
3349 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3350 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3351 		} else if (q_vector->tx.ring) {
3352 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3353 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3354 		} else {
3355 			/* skip this unused q_vector */
3356 			continue;
3357 		}
3358 		err = request_irq(pf->msix_entries[base + vector].vector,
3359 				  vsi->irq_handler,
3360 				  0,
3361 				  q_vector->name,
3362 				  q_vector);
3363 		if (err) {
3364 			dev_info(&pf->pdev->dev,
3365 				 "MSIX request_irq failed, error: %d\n", err);
3366 			goto free_queue_irqs;
3367 		}
3368 		/* assign the mask for this irq */
3369 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3370 				      &q_vector->affinity_mask);
3371 	}
3372 
3373 	vsi->irqs_ready = true;
3374 	return 0;
3375 
3376 free_queue_irqs:
3377 	while (vector) {
3378 		vector--;
3379 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3380 				      NULL);
		/* free_irq() must be passed the same cookie that was
		 * handed to request_irq(), i.e. the q_vector pointer itself
		 */
3381 		free_irq(pf->msix_entries[base + vector].vector,
3382 			 vsi->q_vectors[vector]);
3383 	}
3384 	return err;
3385 }
3386 
3387 /**
3388  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3389  * @vsi: the VSI being un-configured
3390  **/
3391 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3392 {
3393 	struct i40e_pf *pf = vsi->back;
3394 	struct i40e_hw *hw = &pf->hw;
3395 	int base = vsi->base_vector;
3396 	int i;
3397 
3398 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3399 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3400 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3401 	}
3402 
3403 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3404 		for (i = vsi->base_vector;
3405 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3406 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3407 
3408 		i40e_flush(hw);
3409 		for (i = 0; i < vsi->num_q_vectors; i++)
3410 			synchronize_irq(pf->msix_entries[i + base].vector);
3411 	} else {
3412 		/* Legacy and MSI mode - this stops all interrupt handling */
3413 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3414 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3415 		i40e_flush(hw);
3416 		synchronize_irq(pf->pdev->irq);
3417 	}
3418 }
3419 
3420 /**
3421  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3422  * @vsi: the VSI being configured
3423  **/
3424 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3425 {
3426 	struct i40e_pf *pf = vsi->back;
3427 	int i;
3428 
3429 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3430 		for (i = 0; i < vsi->num_q_vectors; i++)
3431 			i40e_irq_dynamic_enable(vsi, i);
3432 	} else {
3433 		i40e_irq_dynamic_enable_icr0(pf, true);
3434 	}
3435 
3436 	i40e_flush(&pf->hw);
3437 	return 0;
3438 }
3439 
3440 /**
3441  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3442  * @pf: board private structure
3443  **/
3444 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3445 {
3446 	/* Disable ICR 0 */
3447 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3448 	i40e_flush(&pf->hw);
3449 }
3450 
3451 /**
3452  * i40e_intr - MSI/Legacy and non-queue interrupt handler
3453  * @irq: interrupt number
3454  * @data: pointer to the PF structure
3455  *
3456  * This is the handler used for all MSI/Legacy interrupts, and deals
3457  * with both queue and non-queue interrupts.  This is also used in
3458  * MSIX mode to handle the non-queue interrupts.
3459  **/
3460 static irqreturn_t i40e_intr(int irq, void *data)
3461 {
3462 	struct i40e_pf *pf = (struct i40e_pf *)data;
3463 	struct i40e_hw *hw = &pf->hw;
3464 	irqreturn_t ret = IRQ_NONE;
3465 	u32 icr0, icr0_remaining;
3466 	u32 val, ena_mask;
3467 
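	/* ICR0 is a clear-on-read register, so every pending cause must
	 * be handled (or deliberately masked out) from this one snapshot
	 */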
3468 	icr0 = rd32(hw, I40E_PFINT_ICR0);
3469 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3470 
3471 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
3472 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3473 		goto enable_intr;
3474 
3475 	/* if interrupt but no bits showing, must be SWINT */
3476 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3477 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3478 		pf->sw_int_count++;
3479 
3480 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3481 	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3482 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3483 		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3484 		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3485 	}
3486 
3487 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3488 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3489 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3490 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3491 
3492 		/* There is no way to disarm the queue causes while leaving
3493 		 * the interrupt enabled for all other causes.  Ideally the
3494 		 * interrupt would be disabled while we are in NAPI, but this
3495 		 * is not a performance path and napi_schedule() can deal
3496 		 * with rescheduling.
3497 		 */
3498 		if (!test_bit(__I40E_DOWN, &pf->state))
3499 			napi_schedule_irqoff(&q_vector->napi);
3500 	}
3501 
3502 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3503 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3504 		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3505 		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3506 	}
3507 
3508 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3509 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3510 		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3511 	}
3512 
3513 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3514 		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3515 		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3516 	}
3517 
3518 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3519 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3520 			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3521 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3522 		val = rd32(hw, I40E_GLGEN_RSTAT);
3523 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3524 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3525 		if (val == I40E_RESET_CORER) {
3526 			pf->corer_count++;
3527 		} else if (val == I40E_RESET_GLOBR) {
3528 			pf->globr_count++;
3529 		} else if (val == I40E_RESET_EMPR) {
3530 			pf->empr_count++;
3531 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3532 		}
3533 	}
3534 
3535 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3536 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3537 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3538 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3539 			 rd32(hw, I40E_PFHMC_ERRORINFO),
3540 			 rd32(hw, I40E_PFHMC_ERRORDATA));
3541 	}
3542 
3543 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3544 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3545 
3546 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3547 			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3548 			i40e_ptp_tx_hwtstamp(pf);
3549 		}
3550 	}
3551 
3552 	/* If a critical error is pending we have no choice but to reset the
3553 	 * device.
3554 	 * Report and mask out any remaining unexpected interrupts.
3555 	 */
3556 	icr0_remaining = icr0 & ena_mask;
3557 	if (icr0_remaining) {
3558 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3559 			 icr0_remaining);
3560 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3561 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3562 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3563 			dev_info(&pf->pdev->dev, "device will be reset\n");
3564 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3565 			i40e_service_event_schedule(pf);
3566 		}
3567 		ena_mask &= ~icr0_remaining;
3568 	}
3569 	ret = IRQ_HANDLED;
3570 
3571 enable_intr:
3572 	/* re-enable interrupt causes */
3573 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3574 	if (!test_bit(__I40E_DOWN, &pf->state)) {
3575 		i40e_service_event_schedule(pf);
3576 		i40e_irq_dynamic_enable_icr0(pf, false);
3577 	}
3578 
3579 	return ret;
3580 }
3581 
3582 /**
3583  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3584  * @tx_ring:  tx ring to clean
3585  * @budget:   how many cleans we're allowed
3586  *
3587  * Returns true if there's any budget left (i.e. the clean is finished)
3588  **/
3589 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3590 {
3591 	struct i40e_vsi *vsi = tx_ring->vsi;
3592 	u16 i = tx_ring->next_to_clean;
3593 	struct i40e_tx_buffer *tx_buf;
3594 	struct i40e_tx_desc *tx_desc;
3595 
3596 	tx_buf = &tx_ring->tx_bi[i];
3597 	tx_desc = I40E_TX_DESC(tx_ring, i);
3598 	i -= tx_ring->count;
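	/* bias the index negative so the ring-wrap checks below reduce
	 * to a simple test for zero
	 */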
3599 
3600 	do {
3601 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3602 
3603 		/* if next_to_watch is not set then there is no work pending */
3604 		if (!eop_desc)
3605 			break;
3606 
3607 		/* prevent any other reads prior to eop_desc */
3608 		read_barrier_depends();
3609 
3610 		/* if the descriptor isn't done, no work yet to do */
3611 		if (!(eop_desc->cmd_type_offset_bsz &
3612 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3613 			break;
3614 
3615 		/* clear next_to_watch to prevent false hangs */
3616 		tx_buf->next_to_watch = NULL;
3617 
3618 		tx_desc->buffer_addr = 0;
3619 		tx_desc->cmd_type_offset_bsz = 0;
3620 		/* move past filter desc */
3621 		tx_buf++;
3622 		tx_desc++;
3623 		i++;
3624 		if (unlikely(!i)) {
3625 			i -= tx_ring->count;
3626 			tx_buf = tx_ring->tx_bi;
3627 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3628 		}
3629 		/* unmap skb header data */
3630 		dma_unmap_single(tx_ring->dev,
3631 				 dma_unmap_addr(tx_buf, dma),
3632 				 dma_unmap_len(tx_buf, len),
3633 				 DMA_TO_DEVICE);
3634 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3635 			kfree(tx_buf->raw_buf);
3636 
3637 		tx_buf->raw_buf = NULL;
3638 		tx_buf->tx_flags = 0;
3639 		tx_buf->next_to_watch = NULL;
3640 		dma_unmap_len_set(tx_buf, len, 0);
3641 		tx_desc->buffer_addr = 0;
3642 		tx_desc->cmd_type_offset_bsz = 0;
3643 
3644 		/* move us past the eop_desc for start of next FD desc */
3645 		tx_buf++;
3646 		tx_desc++;
3647 		i++;
3648 		if (unlikely(!i)) {
3649 			i -= tx_ring->count;
3650 			tx_buf = tx_ring->tx_bi;
3651 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3652 		}
3653 
3654 		/* update budget accounting */
3655 		budget--;
3656 	} while (likely(budget));
3657 
3658 	i += tx_ring->count;
3659 	tx_ring->next_to_clean = i;
3660 
3661 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3662 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3663 
3664 	return budget > 0;
3665 }
3666 
3667 /**
3668  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3669  * @irq: interrupt number
3670  * @data: pointer to a q_vector
3671  **/
3672 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3673 {
3674 	struct i40e_q_vector *q_vector = data;
3675 	struct i40e_vsi *vsi;
3676 
3677 	if (!q_vector->tx.ring)
3678 		return IRQ_HANDLED;
3679 
3680 	vsi = q_vector->tx.ring->vsi;
3681 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3682 
3683 	return IRQ_HANDLED;
3684 }
3685 
3686 /**
3687  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3688  * @vsi: the VSI being configured
3689  * @v_idx: vector index
3690  * @qp_idx: queue pair index
3691  **/
3692 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3693 {
3694 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3695 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3696 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3697 
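
	/* prepend each ring to the head of the vector's ring list */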
3698 	tx_ring->q_vector = q_vector;
3699 	tx_ring->next = q_vector->tx.ring;
3700 	q_vector->tx.ring = tx_ring;
3701 	q_vector->tx.count++;
3702 
3703 	rx_ring->q_vector = q_vector;
3704 	rx_ring->next = q_vector->rx.ring;
3705 	q_vector->rx.ring = rx_ring;
3706 	q_vector->rx.count++;
3707 }
3708 
3709 /**
3710  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3711  * @vsi: the VSI being configured
3712  *
3713  * This function maps descriptor rings to the queue-specific vectors
3714  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3715  * one vector per queue pair, but on a constrained vector budget, we
3716  * group the queue pairs as "efficiently" as possible.
3717  **/
3718 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3719 {
3720 	int qp_remaining = vsi->num_queue_pairs;
3721 	int q_vectors = vsi->num_q_vectors;
3722 	int num_ringpairs;
3723 	int v_start = 0;
3724 	int qp_idx = 0;
3725 
3726 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3727 	 * group them so there are multiple queues per vector.
3728 	 * It is also important to go through all the vectors available so
3729 	 * that any vectors left unused are still cleared.  This is
3730 	 * especially important when decreasing the number of queues
3731 	 * in use.
3732 	 */
3733 	for (; v_start < q_vectors; v_start++) {
3734 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3735 
3736 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
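		/* e.g. 10 queue pairs across 4 vectors come out as 3/3/2/2 */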
3737 
3738 		q_vector->num_ringpairs = num_ringpairs;
3739 
3740 		q_vector->rx.count = 0;
3741 		q_vector->tx.count = 0;
3742 		q_vector->rx.ring = NULL;
3743 		q_vector->tx.ring = NULL;
3744 
3745 		while (num_ringpairs--) {
3746 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3747 			qp_idx++;
3748 			qp_remaining--;
3749 		}
3750 	}
3751 }
3752 
3753 /**
3754  * i40e_vsi_request_irq - Request IRQ from the OS
3755  * @vsi: the VSI being configured
3756  * @basename: name for the vector
3757  **/
3758 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3759 {
3760 	struct i40e_pf *pf = vsi->back;
3761 	int err;
3762 
3763 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3764 		err = i40e_vsi_request_irq_msix(vsi, basename);
3765 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3766 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
3767 				  pf->int_name, pf);
3768 	else
3769 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3770 				  pf->int_name, pf);
3771 
3772 	if (err)
3773 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3774 
3775 	return err;
3776 }
3777 
3778 #ifdef CONFIG_NET_POLL_CONTROLLER
3779 /**
3780  * i40e_netpoll - A Polling 'interrupt' handler
3781  * @netdev: network interface device structure
3782  *
3783  * This is used by netconsole to send skbs without having to re-enable
3784  * interrupts.  It's not called while the normal interrupt routine is executing.
3785  **/
3786 #ifdef I40E_FCOE
3787 void i40e_netpoll(struct net_device *netdev)
3788 #else
3789 static void i40e_netpoll(struct net_device *netdev)
3790 #endif
3791 {
3792 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3793 	struct i40e_vsi *vsi = np->vsi;
3794 	struct i40e_pf *pf = vsi->back;
3795 	int i;
3796 
3797 	/* if interface is down do nothing */
3798 	if (test_bit(__I40E_DOWN, &vsi->state))
3799 		return;
3800 
3801 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
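		/* the irq argument is unused by the handler, so 0 is passed */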
3802 		for (i = 0; i < vsi->num_q_vectors; i++)
3803 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3804 	} else {
3805 		i40e_intr(pf->pdev->irq, netdev);
3806 	}
3807 }
3808 #endif
3809 
3810 /**
3811  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3812  * @pf: the PF being configured
3813  * @pf_q: the PF queue
3814  * @enable: enable or disable state of the queue
3815  *
3816  * This routine will wait for the given Tx queue of the PF to reach the
3817  * enabled or disabled state.
3818  * Returns -ETIMEDOUT if the queue fails to reach the requested state
3819  * after multiple retries; otherwise returns 0.
3820  **/
3821 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3822 {
3823 	int i;
3824 	u32 tx_reg;
3825 
3826 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
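		/* QENA_STAT reflects the queue's actual state; poll it
		 * until it matches the requested enable/disable
		 */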
3827 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3828 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3829 			break;
3830 
3831 		usleep_range(10, 20);
3832 	}
3833 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3834 		return -ETIMEDOUT;
3835 
3836 	return 0;
3837 }
3838 
3839 /**
3840  * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3841  * @vsi: the VSI being configured
3842  * @enable: start or stop the rings
3843  **/
3844 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3845 {
3846 	struct i40e_pf *pf = vsi->back;
3847 	struct i40e_hw *hw = &pf->hw;
3848 	int i, j, pf_q, ret = 0;
3849 	u32 tx_reg;
3850 
3851 	pf_q = vsi->base_queue;
3852 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3853 
3854 		/* warn the TX unit of coming changes */
3855 		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3856 		if (!enable)
3857 			usleep_range(10, 20);
3858 
3859 		for (j = 0; j < 50; j++) {
3860 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3861 			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3862 			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3863 				break;
3864 			usleep_range(1000, 2000);
3865 		}
3866 		/* Skip if the queue is already in the requested state */
3867 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3868 			continue;
3869 
3870 		/* turn on/off the queue */
3871 		if (enable) {
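			/* reset the queue's head pointer so the re-enabled
			 * queue starts fetching from descriptor 0
			 */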
3872 			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3873 			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3874 		} else {
3875 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3876 		}
3877 
3878 		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3879 		/* No waiting for the Tx queue to disable */
3880 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3881 			continue;
3882 
3883 		/* wait for the change to finish */
3884 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
3885 		if (ret) {
3886 			dev_info(&pf->pdev->dev,
3887 				 "VSI seid %d Tx ring %d %sable timeout\n",
3888 				 vsi->seid, pf_q, (enable ? "en" : "dis"));
3889 			break;
3890 		}
3891 	}
3892 
3893 	if (hw->revision_id == 0)
3894 		mdelay(50);
3895 	return ret;
3896 }
3897 
3898 /**
3899  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3900  * @pf: the PF being configured
3901  * @pf_q: the PF queue
3902  * @enable: enable or disable state of the queue
3903  *
3904  * This routine will wait for the given Rx queue of the PF to reach the
3905  * enabled or disabled state.
3906  * Returns -ETIMEDOUT if the queue fails to reach the requested state
3907  * after multiple retries; otherwise returns 0.
3908  **/
3909 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3910 {
3911 	int i;
3912 	u32 rx_reg;
3913 
3914 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3915 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3916 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3917 			break;
3918 
3919 		usleep_range(10, 20);
3920 	}
3921 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3922 		return -ETIMEDOUT;
3923 
3924 	return 0;
3925 }
3926 
3927 /**
3928  * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3929  * @vsi: the VSI being configured
3930  * @enable: start or stop the rings
3931  **/
3932 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3933 {
3934 	struct i40e_pf *pf = vsi->back;
3935 	struct i40e_hw *hw = &pf->hw;
3936 	int i, j, pf_q, ret = 0;
3937 	u32 rx_reg;
3938 
3939 	pf_q = vsi->base_queue;
3940 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3941 		for (j = 0; j < 50; j++) {
3942 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3943 			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3944 			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3945 				break;
3946 			usleep_range(1000, 2000);
3947 		}
3948 
3949 		/* Skip if the queue is already in the requested state */
3950 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3951 			continue;
3952 
3953 		/* turn on/off the queue */
3954 		if (enable)
3955 			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3956 		else
3957 			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3958 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3959 		/* No waiting for the Rx queue to disable */
3960 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3961 			continue;
3962 
3963 		/* wait for the change to finish */
3964 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3965 		if (ret) {
3966 			dev_info(&pf->pdev->dev,
3967 				 "VSI seid %d Rx ring %d %sable timeout\n",
3968 				 vsi->seid, pf_q, (enable ? "en" : "dis"));
3969 			break;
3970 		}
3971 	}
3972 
3973 	return ret;
3974 }
3975 
3976 /**
3977  * i40e_vsi_control_rings - Start or stop a VSI's rings
3978  * @vsi: the VSI being configured
3979  * @enable: start or stop the rings
3980  * @request: true to start the rings, false to stop them
3981 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3982 {
3983 	int ret = 0;
3984 
3985 	/* do rx first for enable and last for disable */
3986 	if (request) {
3987 		ret = i40e_vsi_control_rx(vsi, request);
3988 		if (ret)
3989 			return ret;
3990 		ret = i40e_vsi_control_tx(vsi, request);
3991 	} else {
3992 		/* Ignore return value, we need to shutdown whatever we can */
3993 		i40e_vsi_control_tx(vsi, request);
3994 		i40e_vsi_control_rx(vsi, request);
3995 	}
3996 
3997 	return ret;
3998 }
3999 
4000 /**
4001  * i40e_vsi_free_irq - Free the irq association with the OS
4002  * @vsi: the VSI being configured
4003  **/
4004 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4005 {
4006 	struct i40e_pf *pf = vsi->back;
4007 	struct i40e_hw *hw = &pf->hw;
4008 	int base = vsi->base_vector;
4009 	u32 val, qp;
4010 	int i;
4011 
4012 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4013 		if (!vsi->q_vectors)
4014 			return;
4015 
4016 		if (!vsi->irqs_ready)
4017 			return;
4018 
4019 		vsi->irqs_ready = false;
4020 		for (i = 0; i < vsi->num_q_vectors; i++) {
4021 			u16 vector = i + base;
4022 
4023 			/* free only the irqs that were actually requested */
4024 			if (!vsi->q_vectors[i] ||
4025 			    !vsi->q_vectors[i]->num_ringpairs)
4026 				continue;
4027 
4028 			/* clear the affinity_mask in the IRQ descriptor */
4029 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
4030 					      NULL);
4031 			synchronize_irq(pf->msix_entries[vector].vector);
4032 			free_irq(pf->msix_entries[vector].vector,
4033 				 vsi->q_vectors[i]);
4034 
4035 			/* Tear down the interrupt queue linked list
4036 			 *
4037 			 * We know that they come in pairs and always
4038 			 * the Rx first, then the Tx.  To clear the
4039 			 * linked list, stick the EOL value into the
4040 			 * next_q field of the registers.
4041 			 */
4042 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4043 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4044 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4045 			val |= I40E_QUEUE_END_OF_LIST
4046 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4047 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4048 
4049 			while (qp != I40E_QUEUE_END_OF_LIST) {
4050 				u32 next;
4051 
4052 				val = rd32(hw, I40E_QINT_RQCTL(qp));
4053 
4054 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4055 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4056 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4057 					 I40E_QINT_RQCTL_INTEVENT_MASK);
4058 
4059 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4060 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4061 
4062 				wr32(hw, I40E_QINT_RQCTL(qp), val);
4063 
4064 				val = rd32(hw, I40E_QINT_TQCTL(qp));
4065 
4066 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4067 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4068 
4069 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4070 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4071 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4072 					 I40E_QINT_TQCTL_INTEVENT_MASK);
4073 
4074 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4075 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4076 
4077 				wr32(hw, I40E_QINT_TQCTL(qp), val);
4078 				qp = next;
4079 			}
4080 		}
4081 	} else {
4082 		free_irq(pf->pdev->irq, pf);
4083 
4084 		val = rd32(hw, I40E_PFINT_LNKLST0);
4085 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4086 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4087 		val |= I40E_QUEUE_END_OF_LIST
4088 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4089 		wr32(hw, I40E_PFINT_LNKLST0, val);
4090 
4091 		val = rd32(hw, I40E_QINT_RQCTL(qp));
4092 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4093 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4094 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4095 			 I40E_QINT_RQCTL_INTEVENT_MASK);
4096 
4097 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4098 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4099 
4100 		wr32(hw, I40E_QINT_RQCTL(qp), val);
4101 
4102 		val = rd32(hw, I40E_QINT_TQCTL(qp));
4103 
4104 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4105 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4106 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4107 			 I40E_QINT_TQCTL_INTEVENT_MASK);
4108 
4109 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4110 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4111 
4112 		wr32(hw, I40E_QINT_TQCTL(qp), val);
4113 	}
4114 }
4115 
4116 /**
4117  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4118  * @vsi: the VSI being configured
4119  * @v_idx: Index of vector to be freed
4120  *
4121  * This function frees the memory allocated to the q_vector.  In addition if
4122  * NAPI is enabled it will delete any references to the NAPI struct prior
4123  * to freeing the q_vector.
4124  **/
4125 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4126 {
4127 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4128 	struct i40e_ring *ring;
4129 
4130 	if (!q_vector)
4131 		return;
4132 
4133 	/* disassociate q_vector from rings */
4134 	i40e_for_each_ring(ring, q_vector->tx)
4135 		ring->q_vector = NULL;
4136 
4137 	i40e_for_each_ring(ring, q_vector->rx)
4138 		ring->q_vector = NULL;
4139 
4140 	/* only VSI w/ an associated netdev is set up w/ NAPI */
4141 	if (vsi->netdev)
4142 		netif_napi_del(&q_vector->napi);
4143 
4144 	vsi->q_vectors[v_idx] = NULL;
4145 
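	/* defer the actual free by an RCU grace period in case a
	 * concurrent RCU reader still holds a reference to the q_vector
	 */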
4146 	kfree_rcu(q_vector, rcu);
4147 }
4148 
4149 /**
4150  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4151  * @vsi: the VSI being un-configured
4152  *
4153  * This frees the memory allocated to the q_vectors and
4154  * deletes references to the NAPI struct.
4155  **/
4156 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4157 {
4158 	int v_idx;
4159 
4160 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4161 		i40e_free_q_vector(vsi, v_idx);
4162 }
4163 
4164 /**
4165  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4166  * @pf: board private structure
4167  **/
4168 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4169 {
4170 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4171 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4172 		pci_disable_msix(pf->pdev);
4173 		kfree(pf->msix_entries);
4174 		pf->msix_entries = NULL;
4175 		kfree(pf->irq_pile);
4176 		pf->irq_pile = NULL;
4177 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4178 		pci_disable_msi(pf->pdev);
4179 	}
4180 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4181 }
4182 
4183 /**
4184  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4185  * @pf: board private structure
4186  *
4187  * We go through and clear interrupt specific resources and reset the structure
4188  * to pre-load conditions
4189  **/
4190 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4191 {
4192 	int i;
4193 
4194 	i40e_stop_misc_vector(pf);
4195 	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4196 		synchronize_irq(pf->msix_entries[0].vector);
4197 		free_irq(pf->msix_entries[0].vector, pf);
4198 	}
4199 
4200 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4201 		      I40E_IWARP_IRQ_PILE_ID);
4202 
4203 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4204 	for (i = 0; i < pf->num_alloc_vsi; i++)
4205 		if (pf->vsi[i])
4206 			i40e_vsi_free_q_vectors(pf->vsi[i]);
4207 	i40e_reset_interrupt_capability(pf);
4208 }
4209 
4210 /**
4211  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4212  * @vsi: the VSI being configured
4213  **/
4214 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4215 {
4216 	int q_idx;
4217 
4218 	if (!vsi->netdev)
4219 		return;
4220 
4221 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4222 		napi_enable(&vsi->q_vectors[q_idx]->napi);
4223 }
4224 
4225 /**
4226  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4227  * @vsi: the VSI being configured
4228  **/
4229 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4230 {
4231 	int q_idx;
4232 
4233 	if (!vsi->netdev)
4234 		return;
4235 
4236 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4237 		napi_disable(&vsi->q_vectors[q_idx]->napi);
4238 }
4239 
4240 /**
4241  * i40e_vsi_close - Shut down a VSI
4242  * @vsi: the vsi to be quelled
4243  **/
4244 static void i40e_vsi_close(struct i40e_vsi *vsi)
4245 {
4246 	bool reset = false;
4247 
4248 	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4249 		i40e_down(vsi);
4250 	i40e_vsi_free_irq(vsi);
4251 	i40e_vsi_free_tx_resources(vsi);
4252 	i40e_vsi_free_rx_resources(vsi);
4253 	vsi->current_netdev_flags = 0;
4254 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4255 		reset = true;
4256 	i40e_notify_client_of_netdev_close(vsi, reset);
4257 }
4258 
4259 /**
4260  * i40e_quiesce_vsi - Pause a given VSI
4261  * @vsi: the VSI being paused
4262  **/
4263 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4264 {
4265 	if (test_bit(__I40E_DOWN, &vsi->state))
4266 		return;
4267 
4268 	/* No need to disable FCoE VSI when Tx suspended */
4269 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4270 	    vsi->type == I40E_VSI_FCOE) {
4271 		dev_dbg(&vsi->back->pdev->dev,
4272 			 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4273 		return;
4274 	}
4275 
4276 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4277 	if (vsi->netdev && netif_running(vsi->netdev))
4278 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4279 	else
4280 		i40e_vsi_close(vsi);
4281 }
4282 
4283 /**
4284  * i40e_unquiesce_vsi - Resume a given VSI
4285  * @vsi: the VSI being resumed
4286  **/
4287 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4288 {
4289 	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4290 		return;
4291 
4292 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4293 	if (vsi->netdev && netif_running(vsi->netdev))
4294 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4295 	else
4296 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4297 }
4298 
4299 /**
4300  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4301  * @pf: the PF
4302  **/
4303 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4304 {
4305 	int v;
4306 
4307 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4308 		if (pf->vsi[v])
4309 			i40e_quiesce_vsi(pf->vsi[v]);
4310 	}
4311 }
4312 
4313 /**
4314  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4315  * @pf: the PF
4316  **/
4317 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4318 {
4319 	int v;
4320 
4321 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4322 		if (pf->vsi[v])
4323 			i40e_unquiesce_vsi(pf->vsi[v]);
4324 	}
4325 }
4326 
4327 #ifdef CONFIG_I40E_DCB
4328 /**
4329  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4330  * @vsi: the VSI being configured
4331  *
4332  * This function waits for the given VSI's queues to be disabled.
4333  **/
4334 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4335 {
4336 	struct i40e_pf *pf = vsi->back;
4337 	int i, pf_q, ret;
4338 
4339 	pf_q = vsi->base_queue;
4340 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4341 		/* Check and wait for the disable status of the queue */
4342 		ret = i40e_pf_txq_wait(pf, pf_q, false);
4343 		if (ret) {
4344 			dev_info(&pf->pdev->dev,
4345 				 "VSI seid %d Tx ring %d disable timeout\n",
4346 				 vsi->seid, pf_q);
4347 			return ret;
4348 		}
4349 	}
4350 
4351 	pf_q = vsi->base_queue;
4352 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4353 		/* Check and wait for the disable status of the queue */
4354 		ret = i40e_pf_rxq_wait(pf, pf_q, false);
4355 		if (ret) {
4356 			dev_info(&pf->pdev->dev,
4357 				 "VSI seid %d Rx ring %d disable timeout\n",
4358 				 vsi->seid, pf_q);
4359 			return ret;
4360 		}
4361 	}
4362 
4363 	return 0;
4364 }
4365 
4366 /**
4367  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4368  * @pf: the PF
4369  *
4370  * This function waits for the queues to be in disabled state for all the
4371  * VSIs that are managed by this PF.
4372  **/
4373 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4374 {
4375 	int v, ret = 0;
4376 
4377 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4378 		/* No need to wait for FCoE VSI queues */
4379 		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4380 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4381 			if (ret)
4382 				break;
4383 		}
4384 	}
4385 
4386 	return ret;
4387 }
4388 
4389 #endif
4390 
4391 /**
4392  * i40e_detect_recover_hung_queue - Detect and recover a hung Tx queue
4393  * @q_idx: TX queue number
4394  * @vsi: Pointer to VSI struct
4395  *
4396  * This function checks the specified Tx queue of the given VSI for a hung
4397  * condition.  Detection is a two-step process: this pass sets a 'hung' bit.
4398  * If napi_poll runs before the next service task, it clears the bit for the
4399  * respective q_vector; otherwise the bit remains set and, on the subsequent
4400  * run, this function issues a SW interrupt to recover the queue.
4401  **/
4402 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4403 {
4404 	struct i40e_ring *tx_ring = NULL;
4405 	struct i40e_pf	*pf;
4406 	u32 head, val, tx_pending_hw;
4407 	int i;
4408 
4409 	pf = vsi->back;
4410 
4411 	/* now that we have an index, find the tx_ring struct */
4412 	for (i = 0; i < vsi->num_queue_pairs; i++) {
4413 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4414 			if (q_idx == vsi->tx_rings[i]->queue_index) {
4415 				tx_ring = vsi->tx_rings[i];
4416 				break;
4417 			}
4418 		}
4419 	}
4420 
4421 	if (!tx_ring)
4422 		return;
4423 
4424 	/* Read interrupt register */
4425 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4426 		val = rd32(&pf->hw,
4427 			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4428 					       tx_ring->vsi->base_vector - 1));
4429 	else
4430 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4431 
4432 	head = i40e_get_head(tx_ring);
4433 
4434 	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
4435 
4436 	/* HW is done executing descriptors, updated HEAD write back,
4437 	 * but SW hasn't processed those descriptors. If interrupt is
4438 	 * not generated from this point ON, it could result into
4439 	 * dev_watchdog detecting timeout on those netdev_queue,
4440 	 * hence proactively trigger SW interrupt.
4441 	 */
4442 	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4443 		/* NAPI Poll didn't run and clear since it was set */
4444 		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4445 				       &tx_ring->q_vector->hung_detected)) {
4446 			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4447 				    vsi->seid, q_idx, tx_pending_hw,
4448 				    tx_ring->next_to_clean, head,
4449 				    tx_ring->next_to_use,
4450 				    readl(tx_ring->tail));
4451 			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4452 				    vsi->seid, q_idx, val);
4453 			i40e_force_wb(vsi, tx_ring->q_vector);
4454 		} else {
4455 			/* First Chance - detected possible hung */
4456 			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4457 				&tx_ring->q_vector->hung_detected);
4458 		}
4459 	}
4460 
4461 	/* This is the case where we have interrupts missing,
4462 	 * so the tx_pending in HW will most likely be 0, but we
4463 	 * will have tx_pending in SW since the WB happened but the
4464 	 * interrupt got lost.
4465 	 */
4466 	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4467 	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4468 		if (napi_reschedule(&tx_ring->q_vector->napi))
4469 			tx_ring->tx_stats.tx_lost_interrupt++;
4470 	}
4471 }
4472 
4473 /**
4474  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4475  * @pf:  pointer to PF struct
4476  *
4477  * The LAN VSI's netdev has TX queues.  This function checks each of
4478  * those TX queues for a hung condition and triggers recovery by
4479  * issuing a SW interrupt.
4480  **/
4481 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4482 {
4483 	struct net_device *netdev;
4484 	struct i40e_vsi *vsi;
4485 	int i;
4486 
4487 	/* Only for LAN VSI */
4488 	vsi = pf->vsi[pf->lan_vsi];
4489 
4490 	if (!vsi)
4491 		return;
4492 
4493 	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4494 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4495 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4496 		return;
4497 
4498 	/* Make sure type is MAIN VSI */
4499 	if (vsi->type != I40E_VSI_MAIN)
4500 		return;
4501 
4502 	netdev = vsi->netdev;
4503 	if (!netdev)
4504 		return;
4505 
4506 	/* Bail out if netif_carrier is not OK */
4507 	if (!netif_carrier_ok(netdev))
4508 		return;
4509 
4510 	/* Go through the netdev's TX queues */
4511 	for (i = 0; i < netdev->num_tx_queues; i++) {
4512 		struct netdev_queue *q;
4513 
4514 		q = netdev_get_tx_queue(netdev, i);
4515 		if (q)
4516 			i40e_detect_recover_hung_queue(i, vsi);
4517 	}
4518 }
4519 
4520 /**
4521  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4522  * @pf: pointer to PF
4523  *
4524  * Get the TC map for an iSCSI-enabled PF; it includes the iSCSI TC
4525  * as well as the LAN TC.
4526  **/
4527 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4528 {
4529 	struct i40e_dcb_app_priority_table app;
4530 	struct i40e_hw *hw = &pf->hw;
4531 	u8 enabled_tc = 1; /* TC0 is always enabled */
4532 	u8 tc, i;
4533 	/* Get the iSCSI APP TLV */
4534 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4535 
4536 	for (i = 0; i < dcbcfg->numapps; i++) {
4537 		app = dcbcfg->app[i];
4538 		if (app.selector == I40E_APP_SEL_TCPIP &&
4539 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
4540 			tc = dcbcfg->etscfg.prioritytable[app.priority];
4541 			enabled_tc |= BIT(tc);
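			/* e.g. an iSCSI APP priority mapped to TC2 yields
			 * enabled_tc = 0x5 (TC0 plus TC2)
			 */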
4542 			break;
4543 		}
4544 	}
4545 
4546 	return enabled_tc;
4547 }
4548 
4549 /**
4550  * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4551  * @dcbcfg: the corresponding DCBx configuration structure
4552  *
4553  * Return the number of TCs from given DCBx configuration
4554  **/
4555 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4556 {
4557 	int i, tc_unused = 0;
4558 	u8 num_tc = 0;
4559 	u8 ret = 0;
4560 
4561 	/* Scan the ETS Config Priority Table to find
4562 	 * traffic class enabled for a given priority
4563 	 * and create a bitmask of enabled TCs
4564 	 */
4565 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4566 		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
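	/* e.g. a priority table of {0,0,1,1,2,0,0,0} sets bits 0x7 (3 TCs) */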
4567 
4568 	/* Now scan the bitmask to check for
4569 	 * contiguous TCs starting with TC0
4570 	 */
4571 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4572 		if (num_tc & BIT(i)) {
4573 			if (!tc_unused) {
4574 				ret++;
4575 			} else {
4576 				pr_err("Non-contiguous TC - Disabling DCB\n");
4577 				return 1;
4578 			}
4579 		} else {
4580 			tc_unused = 1;
4581 		}
4582 	}
4583 
4584 	/* There is always at least TC0 */
4585 	if (!ret)
4586 		ret = 1;
4587 
4588 	return ret;
4589 }
4590 
4591 /**
4592  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4593  * @dcbcfg: the corresponding DCBx configuration structure
4594  *
4595  * Query the given DCBX configuration and return a bitmap of the
4596  * traffic classes it enables
4597  **/
4598 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4599 {
4600 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4601 	u8 enabled_tc = 1;
4602 	u8 i;
4603 
4604 	for (i = 0; i < num_tc; i++)
4605 		enabled_tc |= BIT(i);
4606 
4607 	return enabled_tc;
4608 }
4609 
4610 /**
4611  * i40e_pf_get_num_tc - Get the number of enabled traffic classes for the PF
4612  * @pf: PF being queried
4613  *
4614  * Return number of traffic classes enabled for the given PF
4615  **/
4616 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4617 {
4618 	struct i40e_hw *hw = &pf->hw;
4619 	u8 i, enabled_tc;
4620 	u8 num_tc = 0;
4621 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4622 
4623 	/* If DCB is not enabled then always in single TC */
4624 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4625 		return 1;
4626 
4627 	/* SFP mode will be enabled for all TCs on port */
4628 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4629 		return i40e_dcb_get_num_tc(dcbcfg);
4630 
4631 	/* In MFP mode, return the count of TCs enabled for this PF */
4632 	if (pf->hw.func_caps.iscsi)
4633 		enabled_tc = i40e_get_iscsi_tc_map(pf);
4634 	else
4635 		return 1; /* Only TC0 */
4636 
4637 	/* At least have TC0 */
4638 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4639 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4640 		if (enabled_tc & BIT(i))
4641 			num_tc++;
4642 	}
4643 	return num_tc;
4644 }
4645 
4646 /**
4647  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4648  * @pf: PF being queried
4649  *
4650  * Return a bitmap for first enabled traffic class for this PF.
4651  **/
4652 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4653 {
4654 	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4655 	u8 i = 0;
4656 
4657 	if (!enabled_tc)
4658 		return 0x1; /* TC0 */
4659 
4660 	/* Find the first enabled TC */
4661 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4662 		if (enabled_tc & BIT(i))
4663 			break;
4664 	}
4665 
4666 	return BIT(i);
4667 }
4668 
4669 /**
4670  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4671  * @pf: PF being queried
4672  *
4673  * Return a bitmap for enabled traffic classes for this PF.
4674  **/
4675 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4676 {
4677 	/* If DCB is not enabled for this PF then just return default TC */
4678 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4679 		return i40e_pf_get_default_tc(pf);
4680 
4681 	/* SFP mode we want PF to be enabled for all TCs */
4682 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4683 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4684 
4685 	/* MFP enabled and iSCSI PF type */
4686 	if (pf->hw.func_caps.iscsi)
4687 		return i40e_get_iscsi_tc_map(pf);
4688 	else
4689 		return i40e_pf_get_default_tc(pf);
4690 }
4691 
4692 /**
4693  * i40e_vsi_get_bw_info - Query VSI BW Information
4694  * @vsi: the VSI being queried
4695  *
4696  * Returns 0 on success, negative value on failure
4697  **/
4698 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4699 {
4700 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4701 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4702 	struct i40e_pf *pf = vsi->back;
4703 	struct i40e_hw *hw = &pf->hw;
4704 	i40e_status ret;
4705 	u32 tc_bw_max;
4706 	int i;
4707 
4708 	/* Get the VSI level BW configuration */
4709 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4710 	if (ret) {
4711 		dev_info(&pf->pdev->dev,
4712 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4713 			 i40e_stat_str(&pf->hw, ret),
4714 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4715 		return -EINVAL;
4716 	}
4717 
4718 	/* Get the VSI level BW configuration per TC */
4719 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4720 					       NULL);
4721 	if (ret) {
4722 		dev_info(&pf->pdev->dev,
4723 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4724 			 i40e_stat_str(&pf->hw, ret),
4725 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4726 		return -EINVAL;
4727 	}
4728 
4729 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4730 		dev_info(&pf->pdev->dev,
4731 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4732 			 bw_config.tc_valid_bits,
4733 			 bw_ets_config.tc_valid_bits);
4734 		/* Still continuing */
4735 	}
4736 
4737 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4738 	vsi->bw_max_quanta = bw_config.max_bw;
4739 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4740 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
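	/* the two 16-bit words combine into eight 4-bit fields, one
	 * max-quanta value per TC
	 */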
4741 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4742 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4743 		vsi->bw_ets_limit_credits[i] =
4744 					le16_to_cpu(bw_ets_config.credits[i]);
4745 		/* 3 bits out of 4 for each TC */
4746 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4747 	}
4748 
4749 	return 0;
4750 }
4751 
4752 /**
4753  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4754  * @vsi: the VSI being configured
4755  * @enabled_tc: TC bitmap
4756  * @bw_share: BW shared credits per TC
4757  *
4758  * Returns 0 on success, negative value on failure
4759  **/
4760 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4761 				       u8 *bw_share)
4762 {
4763 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4764 	i40e_status ret;
4765 	int i;
4766 
4767 	bw_data.tc_valid_bits = enabled_tc;
4768 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4769 		bw_data.tc_bw_credits[i] = bw_share[i];
4770 
4771 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4772 				       NULL);
4773 	if (ret) {
4774 		dev_info(&vsi->back->pdev->dev,
4775 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
4776 			 vsi->back->hw.aq.asq_last_status);
4777 		return -EINVAL;
4778 	}
4779 
4780 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4781 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4782 
4783 	return 0;
4784 }
4785 
4786 /**
4787  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4788  * @vsi: the VSI being configured
4789  * @enabled_tc: TC map to be enabled
4790  *
4791  **/
4792 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4793 {
4794 	struct net_device *netdev = vsi->netdev;
4795 	struct i40e_pf *pf = vsi->back;
4796 	struct i40e_hw *hw = &pf->hw;
4797 	u8 netdev_tc = 0;
4798 	int i;
4799 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4800 
4801 	if (!netdev)
4802 		return;
4803 
4804 	if (!enabled_tc) {
4805 		netdev_reset_tc(netdev);
4806 		return;
4807 	}
4808 
4809 	/* Set up actual enabled TCs on the VSI */
4810 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4811 		return;
4812 
4813 	/* set per TC queues for the VSI */
4814 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4815 		/* Only set TC queues for enabled tcs
4816 		 *
4817 		 * e.g. for a VSI that has TC0 and TC3 enabled, the
4818 		 * enabled_tc bitmap would be 0x9 (binary 1001); the
4819 		 * driver will set numtc for the netdev to 2, and those
4820 		 * will be referenced by the netdev layer as TC 0 and 1.
4821 		 */
4822 		if (vsi->tc_config.enabled_tc & BIT(i))
4823 			netdev_set_tc_queue(netdev,
4824 					vsi->tc_config.tc_info[i].netdev_tc,
4825 					vsi->tc_config.tc_info[i].qcount,
4826 					vsi->tc_config.tc_info[i].qoffset);
4827 	}
4828 
4829 	/* Assign UP2TC map for the VSI */
4830 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4831 		/* Get the actual TC# for the UP */
4832 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4833 		/* Get the mapped netdev TC# for the UP */
4834 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4835 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
4836 	}
4837 }
4838 
4839 /**
4840  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4841  * @vsi: the VSI being configured
4842  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4843  **/
4844 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4845 				      struct i40e_vsi_context *ctxt)
4846 {
4847 	/* copy just the sections touched not the entire info
4848 	 * since not all sections are valid as returned by
4849 	 * update vsi params
4850 	 */
4851 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
4852 	memcpy(&vsi->info.queue_mapping,
4853 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4854 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4855 	       sizeof(vsi->info.tc_mapping));
4856 }
4857 
4858 /**
4859  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4860  * @vsi: VSI to be configured
4861  * @enabled_tc: TC bitmap
4862  *
4863  * This configures a particular VSI for TCs that are mapped to the
4864  * given TC bitmap. It uses default bandwidth share for TCs across
4865  * VSIs to configure TC for a particular VSI.
4866  *
4867  * NOTE:
4868  * It is expected that the VSI queues have been quiesced before calling
4869  * this function.
4870  **/
4871 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4872 {
4873 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4874 	struct i40e_vsi_context ctxt;
4875 	int ret = 0;
4876 	int i;
4877 
4878 	/* Check if enabled_tc is same as existing or new TCs */
4879 	if (vsi->tc_config.enabled_tc == enabled_tc)
4880 		return ret;
4881 
4882 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
4883 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4884 		if (enabled_tc & BIT(i))
4885 			bw_share[i] = 1;
4886 	}
4887 
4888 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4889 	if (ret) {
4890 		dev_info(&vsi->back->pdev->dev,
4891 			 "Failed configuring TC map %d for VSI %d\n",
4892 			 enabled_tc, vsi->seid);
4893 		goto out;
4894 	}
4895 
4896 	/* Update Queue Pairs Mapping for currently enabled UPs */
4897 	ctxt.seid = vsi->seid;
4898 	ctxt.pf_num = vsi->back->hw.pf_id;
4899 	ctxt.vf_num = 0;
4900 	ctxt.uplink_seid = vsi->uplink_seid;
4901 	ctxt.info = vsi->info;
4902 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
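	/* the final 'false' asks i40e_vsi_setup_queue_map() to update the
	 * queue map of an existing VSI rather than set up a new one
	 */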
4903 
4904 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4905 		ctxt.info.valid_sections |=
4906 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4907 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4908 	}
4909 
4910 	/* Update the VSI after updating the VSI queue-mapping information */
4911 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4912 	if (ret) {
4913 		dev_info(&vsi->back->pdev->dev,
4914 			 "Update vsi tc config failed, err %s aq_err %s\n",
4915 			 i40e_stat_str(&vsi->back->hw, ret),
4916 			 i40e_aq_str(&vsi->back->hw,
4917 				     vsi->back->hw.aq.asq_last_status));
4918 		goto out;
4919 	}
4920 	/* update the local VSI info with updated queue map */
4921 	i40e_vsi_update_queue_map(vsi, &ctxt);
4922 	vsi->info.valid_sections = 0;
4923 
4924 	/* Update current VSI BW information */
4925 	ret = i40e_vsi_get_bw_info(vsi);
4926 	if (ret) {
4927 		dev_info(&vsi->back->pdev->dev,
4928 			 "Failed updating vsi bw info, err %s aq_err %s\n",
4929 			 i40e_stat_str(&vsi->back->hw, ret),
4930 			 i40e_aq_str(&vsi->back->hw,
4931 				     vsi->back->hw.aq.asq_last_status));
4932 		goto out;
4933 	}
4934 
4935 	/* Update the netdev TC setup */
4936 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4937 out:
4938 	return ret;
4939 }
4940 
4941 /**
4942  * i40e_veb_config_tc - Configure TCs for given VEB
4943  * @veb: given VEB
4944  * @enabled_tc: TC bitmap
4945  *
4946  * Configures given TC bitmap for VEB (switching) element
4947  **/
4948 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4949 {
4950 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4951 	struct i40e_pf *pf = veb->pf;
4952 	int ret = 0;
4953 	int i;
4954 
4955 	/* No TCs or already enabled TCs just return */
4956 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
4957 		return ret;
4958 
4959 	bw_data.tc_valid_bits = enabled_tc;
4960 	/* bw_data.absolute_credits is not set (relative) */
4961 
4962 	/* Enable ETS TCs with equal BW Share for now */
4963 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4964 		if (enabled_tc & BIT(i))
4965 			bw_data.tc_bw_share_credits[i] = 1;
4966 	}
4967 
4968 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4969 						   &bw_data, NULL);
4970 	if (ret) {
4971 		dev_info(&pf->pdev->dev,
4972 			 "VEB bw config failed, err %s aq_err %s\n",
4973 			 i40e_stat_str(&pf->hw, ret),
4974 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4975 		goto out;
4976 	}
4977 
4978 	/* Update the BW information */
4979 	ret = i40e_veb_get_bw_info(veb);
4980 	if (ret) {
4981 		dev_info(&pf->pdev->dev,
4982 			 "Failed getting veb bw config, err %s aq_err %s\n",
4983 			 i40e_stat_str(&pf->hw, ret),
4984 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4985 	}
4986 
4987 out:
4988 	return ret;
4989 }
4990 
4991 #ifdef CONFIG_I40E_DCB
4992 /**
4993  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4994  * @pf: PF struct
4995  *
4996  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4997  * the caller has quiesced all the VSIs before calling
4998  * this function
4999  **/
5000 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5001 {
5002 	u8 tc_map = 0;
5003 	int ret;
5004 	u8 v;
5005 
5006 	/* Enable the TCs available on PF to all VEBs */
5007 	tc_map = i40e_pf_get_tc_map(pf);
5008 	for (v = 0; v < I40E_MAX_VEB; v++) {
5009 		if (!pf->veb[v])
5010 			continue;
5011 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5012 		if (ret) {
5013 			dev_info(&pf->pdev->dev,
5014 				 "Failed configuring TC for VEB seid=%d\n",
5015 				 pf->veb[v]->seid);
5016 			/* Will try to configure as many components as possible */
5017 		}
5018 	}
5019 
5020 	/* Update each VSI */
5021 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5022 		if (!pf->vsi[v])
5023 			continue;
5024 
5025 		/* - Enable all TCs for the LAN VSI
5026 #ifdef I40E_FCOE
5027 		 * - For FCoE VSI only enable the TC configured
5028 		 *   as per the APP TLV
5029 #endif
5030 		 * - For all others keep them at TC0 for now
5031 		 */
5032 		if (v == pf->lan_vsi)
5033 			tc_map = i40e_pf_get_tc_map(pf);
5034 		else
5035 			tc_map = i40e_pf_get_default_tc(pf);
5036 #ifdef I40E_FCOE
5037 		if (pf->vsi[v]->type == I40E_VSI_FCOE)
5038 			tc_map = i40e_get_fcoe_tc_map(pf);
5039 #endif /* #ifdef I40E_FCOE */
5040 
5041 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5042 		if (ret) {
5043 			dev_info(&pf->pdev->dev,
5044 				 "Failed configuring TC for VSI seid=%d\n",
5045 				 pf->vsi[v]->seid);
5046 			/* Will try to configure as many components as possible */
5047 		} else {
5048 			/* Re-configure VSI vectors based on updated TC map */
5049 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5050 			if (pf->vsi[v]->netdev)
5051 				i40e_dcbnl_set_all(pf->vsi[v]);
5052 		}
5053 	}
5054 }
5055 
5056 /**
5057  * i40e_resume_port_tx - Resume port Tx
5058  * @pf: PF struct
5059  *
5060  * Resume a port's Tx and issue a PF reset in case of failure to
5061  * resume.
5062  **/
5063 static int i40e_resume_port_tx(struct i40e_pf *pf)
5064 {
5065 	struct i40e_hw *hw = &pf->hw;
5066 	int ret;
5067 
5068 	ret = i40e_aq_resume_port_tx(hw, NULL);
5069 	if (ret) {
5070 		dev_info(&pf->pdev->dev,
5071 			 "Resume Port Tx failed, err %s aq_err %s\n",
5072 			  i40e_stat_str(&pf->hw, ret),
5073 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5074 		/* Schedule PF reset to recover */
5075 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5076 		i40e_service_event_schedule(pf);
5077 	}
5078 
5079 	return ret;
5080 }
5081 
5082 /**
5083  * i40e_init_pf_dcb - Initialize DCB configuration
5084  * @pf: PF being configured
5085  *
5086  * Query the current DCB configuration and cache it
5087  * in the hardware structure
5088  **/
5089 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5090 {
5091 	struct i40e_hw *hw = &pf->hw;
5092 	int err = 0;
5093 
5094 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5095 	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5096 		goto out;
5097 
5098 	/* Get the initial DCB configuration */
5099 	err = i40e_init_dcb(hw);
5100 	if (!err) {
5101 		/* Device/Function is not DCBX capable */
5102 		if ((!hw->func_caps.dcb) ||
5103 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5104 			dev_info(&pf->pdev->dev,
5105 				 "DCBX offload is not supported or is disabled for this PF.\n");
5106 
5107 			if (pf->flags & I40E_FLAG_MFP_ENABLED)
5108 				goto out;
5109 
5110 		} else {
5111 			/* When status is not DISABLED then DCBX in FW */
5112 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5113 				       DCB_CAP_DCBX_VER_IEEE;
5114 
5115 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
5116 			/* Enable DCB tagging only when more than one TC
5117 			 * or explicitly disable if only one TC
5118 			 */
5119 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5120 				pf->flags |= I40E_FLAG_DCB_ENABLED;
5121 			else
5122 				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5123 			dev_dbg(&pf->pdev->dev,
5124 				"DCBX offload is supported for this PF.\n");
5125 		}
5126 	} else {
5127 		dev_info(&pf->pdev->dev,
5128 			 "Query for DCB configuration failed, err %s aq_err %s\n",
5129 			 i40e_stat_str(&pf->hw, err),
5130 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5131 	}
5132 
5133 out:
5134 	return err;
5135 }
5136 #endif /* CONFIG_I40E_DCB */
5137 #define SPEED_SIZE 14
5138 #define FC_SIZE 8
5139 /**
5140  * i40e_print_link_message - print link up or down
5141  * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false if it is down
5142  **/
5143 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5144 {
5145 	char *speed = "Unknown";
5146 	char *fc = "Unknown";
5147 
5148 	if (vsi->current_isup == isup)
5149 		return;
5150 	vsi->current_isup = isup;
5151 	if (!isup) {
5152 		netdev_info(vsi->netdev, "NIC Link is Down\n");
5153 		return;
5154 	}
5155 
5156 	/* Warn user if link speed on NPAR enabled partition is not at
5157 	 * least 10Gbps
5158 	 */
5159 	if (vsi->back->hw.func_caps.npar_enable &&
5160 	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5161 	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5162 		netdev_warn(vsi->netdev,
5163 			    "The partition detected link speed that is less than 10Gbps\n");
5164 
5165 	switch (vsi->back->hw.phy.link_info.link_speed) {
5166 	case I40E_LINK_SPEED_40GB:
5167 		speed = "40 G";
5168 		break;
5169 	case I40E_LINK_SPEED_20GB:
5170 		speed = "20 G";
5171 		break;
5172 	case I40E_LINK_SPEED_10GB:
5173 		speed = "10 G";
5174 		break;
5175 	case I40E_LINK_SPEED_1GB:
5176 		speed = "1000 M";
5177 		break;
5178 	case I40E_LINK_SPEED_100MB:
5179 		speed = "100 M";
5180 		break;
5181 	default:
5182 		break;
5183 	}
5184 
5185 	switch (vsi->back->hw.fc.current_mode) {
5186 	case I40E_FC_FULL:
5187 		fc = "RX/TX";
5188 		break;
5189 	case I40E_FC_TX_PAUSE:
5190 		fc = "TX";
5191 		break;
5192 	case I40E_FC_RX_PAUSE:
5193 		fc = "RX";
5194 		break;
5195 	default:
5196 		fc = "None";
5197 		break;
5198 	}
5199 
5200 	netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5201 		    speed, fc);
5202 }
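
/* Usage sketch (illustrative): the function de-duplicates against
 * vsi->current_isup, so callers may report the link state unconditionally
 * and a message is only printed on an actual transition:
 *
 *	i40e_print_link_message(vsi, true);	(logs once per up event)
 *	i40e_print_link_message(vsi, true);	(no-op, state unchanged)
 */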
5203 
5204 /**
5205  * i40e_up_complete - Finish the last steps of bringing up a connection
5206  * @vsi: the VSI being configured
5207  **/
5208 static int i40e_up_complete(struct i40e_vsi *vsi)
5209 {
5210 	struct i40e_pf *pf = vsi->back;
5211 	int err;
5212 
5213 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5214 		i40e_vsi_configure_msix(vsi);
5215 	else
5216 		i40e_configure_msi_and_legacy(vsi);
5217 
5218 	/* start rings */
5219 	err = i40e_vsi_control_rings(vsi, true);
5220 	if (err)
5221 		return err;
5222 
5223 	clear_bit(__I40E_DOWN, &vsi->state);
5224 	i40e_napi_enable_all(vsi);
5225 	i40e_vsi_enable_irq(vsi);
5226 
5227 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5228 	    (vsi->netdev)) {
5229 		i40e_print_link_message(vsi, true);
5230 		netif_tx_start_all_queues(vsi->netdev);
5231 		netif_carrier_on(vsi->netdev);
5232 	} else if (vsi->netdev) {
5233 		i40e_print_link_message(vsi, false);
5234 		/* need to check for qualified module here */
5235 		if ((pf->hw.phy.link_info.link_info &
5236 			I40E_AQ_MEDIA_AVAILABLE) &&
5237 		    (!(pf->hw.phy.link_info.an_info &
5238 			I40E_AQ_QUALIFIED_MODULE)))
5239 			netdev_err(vsi->netdev,
5240 				   "the driver failed to link because an unqualified module was detected.");
5241 	}
5242 
5243 	/* replay FDIR SB filters */
5244 	if (vsi->type == I40E_VSI_FDIR) {
5245 		/* reset fd counters */
5246 		pf->fd_add_err = pf->fd_atr_cnt = 0;
5247 		if (pf->fd_tcp_rule > 0) {
5248 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5249 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5250 				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5251 			pf->fd_tcp_rule = 0;
5252 		}
5253 		i40e_fdir_filter_restore(vsi);
5254 	}
5255 
5256 	/* On the next run of the service_task, notify any clients of the new
5257 	 * opened netdev
5258 	 */
5259 	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5260 	i40e_service_event_schedule(pf);
5261 
5262 	return 0;
5263 }
5264 
5265 /**
5266  * i40e_vsi_reinit_locked - Reset the VSI
5267  * @vsi: the VSI being configured
5268  *
5269  * Rebuild the ring structs after some configuration
5270  * has changed, e.g. MTU size.
5271  **/
5272 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5273 {
5274 	struct i40e_pf *pf = vsi->back;
5275 
5276 	WARN_ON(in_interrupt());
5277 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5278 		usleep_range(1000, 2000);
5279 	i40e_down(vsi);
5280 
5281 	i40e_up(vsi);
5282 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5283 }
5284 
5285 /**
5286  * i40e_up - Bring the connection back up after being down
5287  * @vsi: the VSI being configured
5288  **/
5289 int i40e_up(struct i40e_vsi *vsi)
5290 {
5291 	int err;
5292 
5293 	err = i40e_vsi_configure(vsi);
5294 	if (!err)
5295 		err = i40e_up_complete(vsi);
5296 
5297 	return err;
5298 }
5299 
5300 /**
5301  * i40e_down - Shutdown the connection processing
5302  * @vsi: the VSI being stopped
5303  **/
5304 void i40e_down(struct i40e_vsi *vsi)
5305 {
5306 	int i;
5307 
5308 	/* It is assumed that the caller of this function
5309 	 * sets the vsi->state __I40E_DOWN bit.
5310 	 */
5311 	if (vsi->netdev) {
5312 		netif_carrier_off(vsi->netdev);
5313 		netif_tx_disable(vsi->netdev);
5314 	}
5315 	i40e_vsi_disable_irq(vsi);
5316 	i40e_vsi_control_rings(vsi, false);
5317 	i40e_napi_disable_all(vsi);
5318 
5319 	for (i = 0; i < vsi->num_queue_pairs; i++) {
5320 		i40e_clean_tx_ring(vsi->tx_rings[i]);
5321 		i40e_clean_rx_ring(vsi->rx_rings[i]);
5322 	}
5323 
5324 	i40e_notify_client_of_netdev_close(vsi, false);
5326 }
5327 
5328 /**
5329  * i40e_setup_tc - configure multiple traffic classes
5330  * @netdev: net device to configure
5331  * @tc: number of traffic classes to enable
5332  **/
5333 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5334 {
5335 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5336 	struct i40e_vsi *vsi = np->vsi;
5337 	struct i40e_pf *pf = vsi->back;
5338 	u8 enabled_tc = 0;
5339 	int ret = -EINVAL;
5340 	int i;
5341 
5342 	/* Check if DCB enabled to continue */
5343 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5344 		netdev_info(netdev, "DCB is not enabled for adapter\n");
5345 		goto exit;
5346 	}
5347 
5348 	/* Check if MFP enabled */
5349 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5350 		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5351 		goto exit;
5352 	}
5353 
5354 	/* Check whether tc count is within enabled limit */
5355 	if (tc > i40e_pf_get_num_tc(pf)) {
5356 		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5357 		goto exit;
5358 	}
5359 
5360 	/* Generate TC map for number of tc requested */
5361 	for (i = 0; i < tc; i++)
5362 		enabled_tc |= BIT(i);
5363 
5364 	/* Requesting same TC configuration as already enabled */
5365 	if (enabled_tc == vsi->tc_config.enabled_tc)
5366 		return 0;
5367 
5368 	/* Quiesce VSI queues */
5369 	i40e_quiesce_vsi(vsi);
5370 
5371 	/* Configure VSI for enabled TCs */
5372 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
5373 	if (ret) {
5374 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5375 			    vsi->seid);
5376 		goto exit;
5377 	}
5378 
5379 	/* Unquiesce VSI */
5380 	i40e_unquiesce_vsi(vsi);
5381 
5382 exit:
5383 	return ret;
5384 }
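
/* Worked example (illustrative): the TC map built above is a contiguous
 * bitmap of the requested class count, so tc = 3 yields
 * enabled_tc = BIT(0) | BIT(1) | BIT(2) = 0x7, i.e. TCs 0-2.
 */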
5385 
5386 #ifdef I40E_FCOE
5387 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5388 		    struct tc_to_netdev *tc)
5389 #else
5390 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5391 			   struct tc_to_netdev *tc)
5392 #endif
5393 {
5394 	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
5395 		return -EINVAL;
5396 	return i40e_setup_tc(netdev, tc->tc);
5397 }
5398 
5399 /**
5400  * i40e_open - Called when a network interface is made active
5401  * @netdev: network interface device structure
5402  *
5403  * The open entry point is called when a network interface is made
5404  * active by the system (IFF_UP).  At this point all resources needed
5405  * for transmit and receive operations are allocated, the interrupt
5406  * handler is registered with the OS, the netdev watchdog subtask is
5407  * enabled, and the stack is notified that the interface is ready.
5408  *
5409  * Returns 0 on success, negative value on failure
5410  **/
5411 int i40e_open(struct net_device *netdev)
5412 {
5413 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5414 	struct i40e_vsi *vsi = np->vsi;
5415 	struct i40e_pf *pf = vsi->back;
5416 	int err;
5417 
5418 	/* disallow open during test or if eeprom is broken */
5419 	if (test_bit(__I40E_TESTING, &pf->state) ||
5420 	    test_bit(__I40E_BAD_EEPROM, &pf->state))
5421 		return -EBUSY;
5422 
5423 	netif_carrier_off(netdev);
5424 
5425 	err = i40e_vsi_open(vsi);
5426 	if (err)
5427 		return err;
5428 
5429 	/* configure global TSO hardware offload settings */
5430 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5431 						       TCP_FLAG_FIN) >> 16);
5432 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5433 						       TCP_FLAG_FIN |
5434 						       TCP_FLAG_CWR) >> 16);
5435 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5436 
5437 	udp_tunnel_get_rx_info(netdev);
5438 
5439 	return 0;
5440 }
5441 
5442 /**
5443  * i40e_vsi_open - Open and bring up a VSI
5444  * @vsi: the VSI to open
5445  *
5446  * Finish initialization of the VSI.
5447  *
5448  * Returns 0 on success, negative value on failure
5449  **/
5450 int i40e_vsi_open(struct i40e_vsi *vsi)
5451 {
5452 	struct i40e_pf *pf = vsi->back;
5453 	char int_name[I40E_INT_NAME_STR_LEN];
5454 	int err;
5455 
5456 	/* allocate descriptors */
5457 	err = i40e_vsi_setup_tx_resources(vsi);
5458 	if (err)
5459 		goto err_setup_tx;
5460 	err = i40e_vsi_setup_rx_resources(vsi);
5461 	if (err)
5462 		goto err_setup_rx;
5463 
5464 	err = i40e_vsi_configure(vsi);
5465 	if (err)
5466 		goto err_setup_rx;
5467 
5468 	if (vsi->netdev) {
5469 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5470 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5471 		err = i40e_vsi_request_irq(vsi, int_name);
5472 		if (err)
5473 			goto err_setup_rx;
5474 
5475 		/* Notify the stack of the actual queue counts. */
5476 		err = netif_set_real_num_tx_queues(vsi->netdev,
5477 						   vsi->num_queue_pairs);
5478 		if (err)
5479 			goto err_set_queues;
5480 
5481 		err = netif_set_real_num_rx_queues(vsi->netdev,
5482 						   vsi->num_queue_pairs);
5483 		if (err)
5484 			goto err_set_queues;
5485 
5486 	} else if (vsi->type == I40E_VSI_FDIR) {
5487 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5488 			 dev_driver_string(&pf->pdev->dev),
5489 			 dev_name(&pf->pdev->dev));
5490 		err = i40e_vsi_request_irq(vsi, int_name);
5491 
5492 	} else {
5493 		err = -EINVAL;
5494 		goto err_setup_rx;
5495 	}
5496 
5497 	err = i40e_up_complete(vsi);
5498 	if (err)
5499 		goto err_up_complete;
5500 
5501 	return 0;
5502 
5503 err_up_complete:
5504 	i40e_down(vsi);
5505 err_set_queues:
5506 	i40e_vsi_free_irq(vsi);
5507 err_setup_rx:
5508 	i40e_vsi_free_rx_resources(vsi);
5509 err_setup_tx:
5510 	i40e_vsi_free_tx_resources(vsi);
5511 	if (vsi == pf->vsi[pf->lan_vsi])
5512 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5513 
5514 	return err;
5515 }
5516 
5517 /**
5518  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5519  * @pf: Pointer to PF
5520  *
5521  * This function destroys the hlist where all the Flow Director
5522  * filters were saved.
5523  **/
5524 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5525 {
5526 	struct i40e_fdir_filter *filter;
5527 	struct hlist_node *node2;
5528 
5529 	hlist_for_each_entry_safe(filter, node2,
5530 				  &pf->fdir_filter_list, fdir_node) {
5531 		hlist_del(&filter->fdir_node);
5532 		kfree(filter);
5533 	}
5534 	pf->fdir_pf_active_filters = 0;
5535 }
5536 
5537 /**
5538  * i40e_close - Disables a network interface
5539  * @netdev: network interface device structure
5540  *
5541  * The close entry point is called when an interface is de-activated
5542  * by the OS.  The hardware is still under the driver's control, but
5543  * this netdev interface is disabled.
5544  *
5545  * Returns 0, this is not allowed to fail
5546  **/
5547 int i40e_close(struct net_device *netdev)
5548 {
5549 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5550 	struct i40e_vsi *vsi = np->vsi;
5551 
5552 	i40e_vsi_close(vsi);
5553 
5554 	return 0;
5555 }
5556 
5557 /**
5558  * i40e_do_reset - Start a PF or Core Reset sequence
5559  * @pf: board private structure
5560  * @reset_flags: which reset is requested
5561  *
5562  * The essential difference in resets is that the PF Reset
5563  * doesn't clear the packet buffers, doesn't reset the PE
5564  * firmware, and doesn't bother the other PFs on the chip.
5565  **/
5566 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5567 {
5568 	u32 val;
5569 
5570 	WARN_ON(in_interrupt());
5571 
5573 	/* do the biggest reset indicated */
5574 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5575 
5576 		/* Request a Global Reset
5577 		 *
5578 		 * This will start the chip's countdown to the actual full
5579 		 * chip reset event, and a warning interrupt to be sent
5580 		 * to all PFs, including the requestor.  Our handler
5581 		 * for the warning interrupt will deal with the shutdown
5582 		 * and recovery of the switch setup.
5583 		 */
5584 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5585 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5586 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5587 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5588 
5589 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5590 
5591 		/* Request a Core Reset
5592 		 *
5593 		 * Same as Global Reset, except does *not* include the MAC/PHY
5594 		 */
5595 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5596 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5597 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
5598 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5599 		i40e_flush(&pf->hw);
5600 
5601 	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5602 
5603 		/* Request a PF Reset
5604 		 *
5605 		 * Resets only the PF-specific registers
5606 		 *
5607 		 * This goes directly to the tear-down and rebuild of
5608 		 * the switch, since we need to do all the recovery as
5609 		 * for the Core Reset.
5610 		 */
5611 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
5612 		i40e_handle_reset_warning(pf);
5613 
5614 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5615 		int v;
5616 
5617 		/* Find the VSI(s) that requested a re-init */
5618 		dev_info(&pf->pdev->dev,
5619 			 "VSI reinit requested\n");
5620 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5621 			struct i40e_vsi *vsi = pf->vsi[v];
5622 
5623 			if (vsi != NULL &&
5624 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5625 				i40e_vsi_reinit_locked(pf->vsi[v]);
5626 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5627 			}
5628 		}
5629 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5630 		int v;
5631 
5632 		/* Find the VSI(s) that need to be brought down */
5633 		dev_info(&pf->pdev->dev, "VSI down requested\n");
5634 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5635 			struct i40e_vsi *vsi = pf->vsi[v];
5636 
5637 			if (vsi != NULL &&
5638 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5639 				set_bit(__I40E_DOWN, &vsi->state);
5640 				i40e_down(vsi);
5641 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5642 			}
5643 		}
5644 	} else {
5645 		dev_info(&pf->pdev->dev,
5646 			 "bad reset request 0x%08x\n", reset_flags);
5647 	}
5648 }
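
/* Minimal sketch of the two ways this file requests a reset from process
 * context: asynchronously, by setting a request bit and letting the
 * service task invoke i40e_do_reset() under rtnl, or synchronously via
 * the rtnl-protected wrapper defined below:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 *	i40e_service_event_schedule(pf);
 *
 *	i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 */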
5649 
5650 #ifdef CONFIG_I40E_DCB
5651 /**
5652  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5653  * @pf: board private structure
5654  * @old_cfg: current DCB config
5655  * @new_cfg: new DCB config
5656  **/
5657 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5658 			    struct i40e_dcbx_config *old_cfg,
5659 			    struct i40e_dcbx_config *new_cfg)
5660 {
5661 	bool need_reconfig = false;
5662 
5663 	/* Check if ETS configuration has changed */
5664 	if (memcmp(&new_cfg->etscfg,
5665 		   &old_cfg->etscfg,
5666 		   sizeof(new_cfg->etscfg))) {
5667 		/* If Priority Table has changed, reconfig is needed */
5668 		if (memcmp(&new_cfg->etscfg.prioritytable,
5669 			   &old_cfg->etscfg.prioritytable,
5670 			   sizeof(new_cfg->etscfg.prioritytable))) {
5671 			need_reconfig = true;
5672 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5673 		}
5674 
5675 		if (memcmp(&new_cfg->etscfg.tcbwtable,
5676 			   &old_cfg->etscfg.tcbwtable,
5677 			   sizeof(new_cfg->etscfg.tcbwtable)))
5678 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5679 
5680 		if (memcmp(&new_cfg->etscfg.tsatable,
5681 			   &old_cfg->etscfg.tsatable,
5682 			   sizeof(new_cfg->etscfg.tsatable)))
5683 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5684 	}
5685 
5686 	/* Check if PFC configuration has changed */
5687 	if (memcmp(&new_cfg->pfc,
5688 		   &old_cfg->pfc,
5689 		   sizeof(new_cfg->pfc))) {
5690 		need_reconfig = true;
5691 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5692 	}
5693 
5694 	/* Check if APP Table has changed */
5695 	if (memcmp(&new_cfg->app,
5696 		   &old_cfg->app,
5697 		   sizeof(new_cfg->app))) {
5698 		need_reconfig = true;
5699 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5700 	}
5701 
5702 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5703 	return need_reconfig;
5704 }
5705 
5706 /**
5707  * i40e_handle_lldp_event - Handle LLDP Change MIB event
5708  * @pf: board private structure
5709  * @e: event info posted on ARQ
5710  **/
5711 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5712 				  struct i40e_arq_event_info *e)
5713 {
5714 	struct i40e_aqc_lldp_get_mib *mib =
5715 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5716 	struct i40e_hw *hw = &pf->hw;
5717 	struct i40e_dcbx_config tmp_dcbx_cfg;
5718 	bool need_reconfig = false;
5719 	int ret = 0;
5720 	u8 type;
5721 
5722 	/* Not DCB capable or capability disabled */
5723 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5724 		return ret;
5725 
5726 	/* Ignore if event is not for Nearest Bridge */
5727 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5728 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5729 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5730 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5731 		return ret;
5732 
5733 	/* Check MIB Type and return if event for Remote MIB update */
5734 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5735 	dev_dbg(&pf->pdev->dev,
5736 		"LLDP event mib type %s\n", type ? "remote" : "local");
5737 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5738 		/* Update the remote cached instance and return */
5739 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5740 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5741 				&hw->remote_dcbx_config);
5742 		goto exit;
5743 	}
5744 
5745 	/* Store the old configuration */
5746 	tmp_dcbx_cfg = hw->local_dcbx_config;
5747 
5748 	/* Reset the old DCBx configuration data */
5749 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5750 	/* Get updated DCBX data from firmware */
5751 	ret = i40e_get_dcb_config(&pf->hw);
5752 	if (ret) {
5753 		dev_info(&pf->pdev->dev,
5754 			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5755 			 i40e_stat_str(&pf->hw, ret),
5756 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5757 		goto exit;
5758 	}
5759 
5760 	/* No change detected in DCBX configs */
5761 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5762 		    sizeof(tmp_dcbx_cfg))) {
5763 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5764 		goto exit;
5765 	}
5766 
5767 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5768 					       &hw->local_dcbx_config);
5769 
5770 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5771 
5772 	if (!need_reconfig)
5773 		goto exit;
5774 
5775 	/* Enable DCB tagging only when more than one TC */
5776 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5777 		pf->flags |= I40E_FLAG_DCB_ENABLED;
5778 	else
5779 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5780 
5781 	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5782 	/* Reconfiguration needed, quiesce all VSIs */
5783 	i40e_pf_quiesce_all_vsi(pf);
5784 
5785 	/* Changes in configuration update VEB/VSI */
5786 	i40e_dcb_reconfigure(pf);
5787 
5788 	ret = i40e_resume_port_tx(pf);
5789 
5790 	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5791 	/* In case of error no point in resuming VSIs */
5792 	if (ret)
5793 		goto exit;
5794 
5795 	/* Wait for the PF's queues to be disabled */
5796 	ret = i40e_pf_wait_queues_disabled(pf);
5797 	if (ret) {
5798 		/* Schedule PF reset to recover */
5799 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5800 		i40e_service_event_schedule(pf);
5801 	} else {
5802 		i40e_pf_unquiesce_all_vsi(pf);
5803 		/* Notify the client for the DCB changes */
5804 		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
5805 	}
5806 
5807 exit:
5808 	return ret;
5809 }
5810 #endif /* CONFIG_I40E_DCB */
5811 
5812 /**
5813  * i40e_do_reset_safe - Protected reset path for userland calls.
5814  * @pf: board private structure
5815  * @reset_flags: which reset is requested
5816  *
5817  **/
5818 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5819 {
5820 	rtnl_lock();
5821 	i40e_do_reset(pf, reset_flags);
5822 	rtnl_unlock();
5823 }
5824 
5825 /**
5826  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5827  * @pf: board private structure
5828  * @e: event info posted on ARQ
5829  *
5830  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5831  * and VF queues
5832  **/
5833 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5834 					   struct i40e_arq_event_info *e)
5835 {
5836 	struct i40e_aqc_lan_overflow *data =
5837 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5838 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
5839 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5840 	struct i40e_hw *hw = &pf->hw;
5841 	struct i40e_vf *vf;
5842 	u16 vf_id;
5843 
5844 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5845 		queue, qtx_ctl);
5846 
5847 	/* Queue belongs to VF, find the VF and issue VF reset */
5848 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5849 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5850 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5851 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5852 		vf_id -= hw->func_caps.vf_base_id;
5853 		vf = &pf->vf[vf_id];
5854 		i40e_vc_notify_vf_reset(vf);
5855 		/* Allow VF to process pending reset notification */
5856 		msleep(20);
5857 		i40e_reset_vf(vf, false);
5858 	}
5859 }
5860 
5861 /**
5862  * i40e_service_event_complete - Finish up the service event
5863  * @pf: board private structure
5864  **/
5865 static void i40e_service_event_complete(struct i40e_pf *pf)
5866 {
5867 	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5868 
5869 	/* flush memory to make sure state is correct before next watchdog */
5870 	smp_mb__before_atomic();
5871 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5872 }
5873 
5874 /**
5875  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5876  * @pf: board private structure
5877  **/
5878 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5879 {
5880 	u32 val, fcnt_prog;
5881 
5882 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5883 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5884 	return fcnt_prog;
5885 }
5886 
5887 /**
5888  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5889  * @pf: board private structure
5890  **/
5891 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5892 {
5893 	u32 val, fcnt_prog;
5894 
5895 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5896 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5897 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5898 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5899 	return fcnt_prog;
5900 }
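
/* Worked decode (illustrative values): I40E_PFQF_FDSTAT packs both counts
 * into one register, guaranteed filters in the low field and best-effort
 * filters in the field selected by I40E_PFQF_FDSTAT_BEST_CNT_MASK.  If
 * the guaranteed field holds 3 and the best-effort field holds 5, the
 * function above returns 3 + 5 = 8 programmed filters.
 */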
5901 
5902 /**
5903  * i40e_get_global_fd_count - Get total FD filters programmed on device
5904  * @pf: board private structure
5905  **/
5906 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5907 {
5908 	u32 val, fcnt_prog;
5909 
5910 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5911 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5912 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5913 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5914 	return fcnt_prog;
5915 }
5916 
5917 /**
5918  * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if it was auto-disabled
5919  * @pf: board private structure
5920  **/
5921 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5922 {
5923 	struct i40e_fdir_filter *filter;
5924 	u32 fcnt_prog, fcnt_avail;
5925 	struct hlist_node *node;
5926 
5927 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5928 		return;
5929 
5930 	/* Check if FD SB or ATR was auto-disabled and if there is enough room
5931 	 * to re-enable
5932 	 */
5933 	fcnt_prog = i40e_get_global_fd_count(pf);
5934 	fcnt_avail = pf->fdir_pf_filter_count;
5935 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5936 	    (pf->fd_add_err == 0) ||
5937 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5938 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5939 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5940 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5941 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5942 				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5943 		}
5944 	}
5945 	/* Wait for some more space to be available to turn on ATR */
5946 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5947 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5948 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5949 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5950 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5951 				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5952 		}
5953 	}
5954 
5955 	/* if hw had a problem adding a filter, delete it */
5956 	if (pf->fd_inv > 0) {
5957 		hlist_for_each_entry_safe(filter, node,
5958 					  &pf->fdir_filter_list, fdir_node) {
5959 			if (filter->fd_id == pf->fd_inv) {
5960 				hlist_del(&filter->fdir_node);
5961 				kfree(filter);
5962 				pf->fdir_pf_active_filters--;
5963 			}
5964 		}
5965 	}
5966 }
5967 
5968 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5969 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5970 /**
5971  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5972  * @pf: board private structure
5973  **/
5974 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5975 {
5976 	unsigned long min_flush_time;
5977 	int flush_wait_retry = 50;
5978 	bool disable_atr = false;
5979 	int fd_room;
5980 	int reg;
5981 
5982 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5983 		return;
5984 
5985 	if (!time_after(jiffies, pf->fd_flush_timestamp +
5986 				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5987 		return;
5988 
5989 	/* If the flush is happening too quickly and we have mostly SB rules, we
5990 	 * should not re-enable ATR for some time.
5991 	 */
5992 	min_flush_time = pf->fd_flush_timestamp +
5993 			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5994 	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5995 
5996 	if (!(time_after(jiffies, min_flush_time)) &&
5997 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5998 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
5999 			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6000 		disable_atr = true;
6001 	}
6002 
6003 	pf->fd_flush_timestamp = jiffies;
6004 	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6005 	/* flush all filters */
6006 	wr32(&pf->hw, I40E_PFQF_CTL_1,
6007 	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6008 	i40e_flush(&pf->hw);
6009 	pf->fd_flush_cnt++;
6010 	pf->fd_add_err = 0;
6011 	do {
6012 		/* Check FD flush status every 5-6msec */
6013 		usleep_range(5000, 6000);
6014 		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6015 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6016 			break;
6017 	} while (flush_wait_retry--);
6018 	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6019 		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6020 	} else {
6021 		/* replay sideband filters */
6022 		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6023 		if (!disable_atr)
6024 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6025 		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6026 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
6027 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6028 	}
6029 }
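
/* Timing note (derived from the constants above): the flush poll sleeps
 * 5-6 msec per iteration for up to 50 retries, so the driver waits
 * roughly 250-300 msec worst case before warning that the FD table did
 * not flush.
 */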
6030 
6031 /**
6032  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6033  * @pf: board private structure
6034  **/
6035 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6036 {
6037 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6038 }
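
/* Worked example (illustrative values): ATR filters are everything the
 * HW has programmed minus the sideband rules the driver tracks itself.
 * With 20 filters reported by i40e_get_current_fd_count() and 12 active
 * sideband filters, the ATR count is 20 - 12 = 8.
 */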
6039 
6040 /* We can see up to 256 filter programming descriptors in transit if the
6041  * filters are being applied really fast, before we see the first filter
6042  * miss error on Rx queue 0. Accumulating enough error messages before
6043  * reacting ensures we don't trigger a flush too often.
6044  */
6045 #define I40E_MAX_FD_PROGRAM_ERROR 256
6046 
6047 /**
6048  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6049  * @pf: board private structure
6050  **/
6051 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6052 {
6054 	/* if interface is down do nothing */
6055 	if (test_bit(__I40E_DOWN, &pf->state))
6056 		return;
6057 
6058 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
6059 		return;
6060 
6061 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6062 		i40e_fdir_flush_and_replay(pf);
6063 
6064 	i40e_fdir_check_and_reenable(pf);
6065 
6066 }
6067 
6068 /**
6069  * i40e_vsi_link_event - notify VSI of a link event
6070  * @vsi: vsi to be notified
6071  * @link_up: link up or down
6072  **/
6073 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6074 {
6075 	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6076 		return;
6077 
6078 	switch (vsi->type) {
6079 	case I40E_VSI_MAIN:
6080 #ifdef I40E_FCOE
6081 	case I40E_VSI_FCOE:
6082 #endif
6083 		if (!vsi->netdev || !vsi->netdev_registered)
6084 			break;
6085 
6086 		if (link_up) {
6087 			netif_carrier_on(vsi->netdev);
6088 			netif_tx_wake_all_queues(vsi->netdev);
6089 		} else {
6090 			netif_carrier_off(vsi->netdev);
6091 			netif_tx_stop_all_queues(vsi->netdev);
6092 		}
6093 		break;
6094 
6095 	case I40E_VSI_SRIOV:
6096 	case I40E_VSI_VMDQ2:
6097 	case I40E_VSI_CTRL:
6098 	case I40E_VSI_IWARP:
6099 	case I40E_VSI_MIRROR:
6100 	default:
6101 		/* there is no notification for other VSIs */
6102 		break;
6103 	}
6104 }
6105 
6106 /**
6107  * i40e_veb_link_event - notify elements on the veb of a link event
6108  * @veb: veb to be notified
6109  * @link_up: link up or down
6110  **/
6111 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6112 {
6113 	struct i40e_pf *pf;
6114 	int i;
6115 
6116 	if (!veb || !veb->pf)
6117 		return;
6118 	pf = veb->pf;
6119 
6120 	/* depth first... */
6121 	for (i = 0; i < I40E_MAX_VEB; i++)
6122 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6123 			i40e_veb_link_event(pf->veb[i], link_up);
6124 
6125 	/* ... now the local VSIs */
6126 	for (i = 0; i < pf->num_alloc_vsi; i++)
6127 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6128 			i40e_vsi_link_event(pf->vsi[i], link_up);
6129 }
6130 
6131 /**
6132  * i40e_link_event - Update netif_carrier status
6133  * @pf: board private structure
6134  **/
6135 static void i40e_link_event(struct i40e_pf *pf)
6136 {
6137 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6138 	u8 new_link_speed, old_link_speed;
6139 	i40e_status status;
6140 	bool new_link, old_link;
6141 
6142 	/* save off old link status information */
6143 	pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6144 
6145 	/* set this to force the get_link_status call to refresh state */
6146 	pf->hw.phy.get_link_info = true;
6147 
6148 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6149 
6150 	status = i40e_get_link_status(&pf->hw, &new_link);
6151 	if (status) {
6152 		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6153 			status);
6154 		return;
6155 	}
6156 
6157 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
6158 	new_link_speed = pf->hw.phy.link_info.link_speed;
6159 
6160 	if (new_link == old_link &&
6161 	    new_link_speed == old_link_speed &&
6162 	    (test_bit(__I40E_DOWN, &vsi->state) ||
6163 	     new_link == netif_carrier_ok(vsi->netdev)))
6164 		return;
6165 
6166 	if (!test_bit(__I40E_DOWN, &vsi->state))
6167 		i40e_print_link_message(vsi, new_link);
6168 
6169 	/* Notify the base of the switch tree connected to
6170 	 * the link.  Floating VEBs are not notified.
6171 	 */
6172 	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6173 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6174 	else
6175 		i40e_vsi_link_event(vsi, new_link);
6176 
6177 	if (pf->vf)
6178 		i40e_vc_notify_link_state(pf);
6179 
6180 	if (pf->flags & I40E_FLAG_PTP)
6181 		i40e_ptp_set_increment(pf);
6182 }
6183 
6184 /**
6185  * i40e_watchdog_subtask - periodic checks not using event driven response
6186  * @pf: board private structure
6187  **/
6188 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6189 {
6190 	int i;
6191 
6192 	/* if interface is down do nothing */
6193 	if (test_bit(__I40E_DOWN, &pf->state) ||
6194 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
6195 		return;
6196 
6197 	/* make sure we don't do these things too often */
6198 	if (time_before(jiffies, (pf->service_timer_previous +
6199 				  pf->service_timer_period)))
6200 		return;
6201 	pf->service_timer_previous = jiffies;
6202 
6203 	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6204 		i40e_link_event(pf);
6205 
6206 	/* Update the stats for active netdevs so the network stack
6207 	 * can look at updated numbers whenever it cares to
6208 	 */
6209 	for (i = 0; i < pf->num_alloc_vsi; i++)
6210 		if (pf->vsi[i] && pf->vsi[i]->netdev)
6211 			i40e_update_stats(pf->vsi[i]);
6212 
6213 	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6214 		/* Update the stats for the active switching components */
6215 		for (i = 0; i < I40E_MAX_VEB; i++)
6216 			if (pf->veb[i])
6217 				i40e_update_veb_stats(pf->veb[i]);
6218 	}
6219 
6220 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6221 }
6222 
6223 /**
6224  * i40e_reset_subtask - Set up for resetting the device and driver
6225  * @pf: board private structure
6226  **/
6227 static void i40e_reset_subtask(struct i40e_pf *pf)
6228 {
6229 	u32 reset_flags = 0;
6230 
6231 	rtnl_lock();
6232 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6233 		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6234 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6235 	}
6236 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6237 		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6238 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6239 	}
6240 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6241 		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6242 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6243 	}
6244 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6245 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6246 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6247 	}
6248 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6249 		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6250 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6251 	}
6252 
6253 	/* If there's a recovery already waiting, it takes
6254 	 * precedence over starting a new reset sequence.
6255 	 */
6256 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6257 		i40e_handle_reset_warning(pf);
6258 		goto unlock;
6259 	}
6260 
6261 	/* If we're already down or resetting, just bail */
6262 	if (reset_flags &&
6263 	    !test_bit(__I40E_DOWN, &pf->state) &&
6264 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6265 		i40e_do_reset(pf, reset_flags);
6266 
6267 unlock:
6268 	rtnl_unlock();
6269 }
6270 
6271 /**
6272  * i40e_handle_link_event - Handle link event
6273  * @pf: board private structure
6274  * @e: event info posted on ARQ
6275  **/
6276 static void i40e_handle_link_event(struct i40e_pf *pf,
6277 				   struct i40e_arq_event_info *e)
6278 {
6279 	struct i40e_aqc_get_link_status *status =
6280 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6281 
6282 	/* Do a new status request to re-enable LSE reporting
6283 	 * and load new status information into the hw struct
6284 	 * This completely ignores any state information
6285 	 * in the ARQ event info, instead choosing to always
6286 	 * issue the AQ update link status command.
6287 	 */
6288 	i40e_link_event(pf);
6289 
6290 	/* check for unqualified module, if link is down */
6291 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6292 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6293 	    (!(status->link_info & I40E_AQ_LINK_UP)))
6294 		dev_err(&pf->pdev->dev,
6295 			"The driver failed to link because an unqualified module was detected.\n");
6296 }
6297 
6298 /**
6299  * i40e_clean_adminq_subtask - Clean the AdminQ rings
6300  * @pf: board private structure
6301  **/
6302 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6303 {
6304 	struct i40e_arq_event_info event;
6305 	struct i40e_hw *hw = &pf->hw;
6306 	u16 pending, i = 0;
6307 	i40e_status ret;
6308 	u16 opcode;
6309 	u32 oldval;
6310 	u32 val;
6311 
6312 	/* Do not run clean AQ when PF reset fails */
6313 	if (test_bit(__I40E_RESET_FAILED, &pf->state))
6314 		return;
6315 
6316 	/* check for error indications */
6317 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
6318 	oldval = val;
6319 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6320 		if (hw->debug_mask & I40E_DEBUG_AQ)
6321 			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6322 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6323 	}
6324 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6325 		if (hw->debug_mask & I40E_DEBUG_AQ)
6326 			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6327 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6328 		pf->arq_overflows++;
6329 	}
6330 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6331 		if (hw->debug_mask & I40E_DEBUG_AQ)
6332 			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6333 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6334 	}
6335 	if (oldval != val)
6336 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
6337 
6338 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
6339 	oldval = val;
6340 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6341 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6342 			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6343 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6344 	}
6345 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6346 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6347 			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6348 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6349 	}
6350 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6351 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6352 			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6353 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6354 	}
6355 	if (oldval != val)
6356 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
6357 
6358 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6359 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6360 	if (!event.msg_buf)
6361 		return;
6362 
6363 	do {
6364 		ret = i40e_clean_arq_element(hw, &event, &pending);
6365 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6366 			break;
6367 		else if (ret) {
6368 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6369 			break;
6370 		}
6371 
6372 		opcode = le16_to_cpu(event.desc.opcode);
6373 		switch (opcode) {
6375 		case i40e_aqc_opc_get_link_status:
6376 			i40e_handle_link_event(pf, &event);
6377 			break;
6378 		case i40e_aqc_opc_send_msg_to_pf:
6379 			ret = i40e_vc_process_vf_msg(pf,
6380 					le16_to_cpu(event.desc.retval),
6381 					le32_to_cpu(event.desc.cookie_high),
6382 					le32_to_cpu(event.desc.cookie_low),
6383 					event.msg_buf,
6384 					event.msg_len);
6385 			break;
6386 		case i40e_aqc_opc_lldp_update_mib:
6387 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6388 #ifdef CONFIG_I40E_DCB
6389 			rtnl_lock();
6390 			ret = i40e_handle_lldp_event(pf, &event);
6391 			rtnl_unlock();
6392 #endif /* CONFIG_I40E_DCB */
6393 			break;
6394 		case i40e_aqc_opc_event_lan_overflow:
6395 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6396 			i40e_handle_lan_overflow_event(pf, &event);
6397 			break;
6398 		case i40e_aqc_opc_send_msg_to_peer:
6399 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6400 			break;
6401 		case i40e_aqc_opc_nvm_erase:
6402 		case i40e_aqc_opc_nvm_update:
6403 		case i40e_aqc_opc_oem_post_update:
6404 			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6405 				   "ARQ NVM operation 0x%04x completed\n",
6406 				   opcode);
6407 			break;
6408 		default:
6409 			dev_info(&pf->pdev->dev,
6410 				 "ARQ: Unknown event 0x%04x ignored\n",
6411 				 opcode);
6412 			break;
6413 		}
6414 	} while (pending && (i++ < pf->adminq_work_limit));
6415 
6416 	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6417 	/* re-enable Admin queue interrupt cause */
6418 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
6419 	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6420 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
6421 	i40e_flush(hw);
6422 
6423 	kfree(event.msg_buf);
6424 }
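
/* Extension sketch (hypothetical opcode and handler, for illustration
 * only): a new ARQ event would be handled by adding a case to the opcode
 * switch above, before the default arm:
 *
 *	case i40e_aqc_opc_example_event:		(hypothetical)
 *		i40e_handle_example_event(pf, &event);	(hypothetical)
 *		break;
 */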
6425 
6426 /**
6427  * i40e_verify_eeprom - make sure eeprom is good to use
6428  * @pf: board private structure
6429  **/
6430 static void i40e_verify_eeprom(struct i40e_pf *pf)
6431 {
6432 	int err;
6433 
6434 	err = i40e_diag_eeprom_test(&pf->hw);
6435 	if (err) {
6436 		/* retry in case of garbage read */
6437 		err = i40e_diag_eeprom_test(&pf->hw);
6438 		if (err) {
6439 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6440 				 err);
6441 			set_bit(__I40E_BAD_EEPROM, &pf->state);
6442 		}
6443 	}
6444 
6445 	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6446 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6447 		clear_bit(__I40E_BAD_EEPROM, &pf->state);
6448 	}
6449 }
6450 
6451 /**
6452  * i40e_enable_pf_switch_lb
6453  * @pf: pointer to the PF structure
6454  *
6455  * enable switch loop back or die - no point in a return value
6456  **/
6457 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6458 {
6459 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6460 	struct i40e_vsi_context ctxt;
6461 	int ret;
6462 
6463 	ctxt.seid = pf->main_vsi_seid;
6464 	ctxt.pf_num = pf->hw.pf_id;
6465 	ctxt.vf_num = 0;
6466 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6467 	if (ret) {
6468 		dev_info(&pf->pdev->dev,
6469 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6470 			 i40e_stat_str(&pf->hw, ret),
6471 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6472 		return;
6473 	}
6474 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6475 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6476 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6477 
6478 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6479 	if (ret) {
6480 		dev_info(&pf->pdev->dev,
6481 			 "update vsi switch failed, err %s aq_err %s\n",
6482 			 i40e_stat_str(&pf->hw, ret),
6483 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6484 	}
6485 }
6486 
6487 /**
6488  * i40e_disable_pf_switch_lb
6489  * @pf: pointer to the PF structure
6490  *
6491  * disable switch loop back or die - no point in a return value
6492  **/
6493 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6494 {
6495 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6496 	struct i40e_vsi_context ctxt;
6497 	int ret;
6498 
6499 	ctxt.seid = pf->main_vsi_seid;
6500 	ctxt.pf_num = pf->hw.pf_id;
6501 	ctxt.vf_num = 0;
6502 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6503 	if (ret) {
6504 		dev_info(&pf->pdev->dev,
6505 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6506 			 i40e_stat_str(&pf->hw, ret),
6507 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6508 		return;
6509 	}
6510 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6511 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6512 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6513 
6514 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6515 	if (ret) {
6516 		dev_info(&pf->pdev->dev,
6517 			 "update vsi switch failed, err %s aq_err %s\n",
6518 			 i40e_stat_str(&pf->hw, ret),
6519 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6520 	}
6521 }
6522 
6523 /**
6524  * i40e_config_bridge_mode - Configure the HW bridge mode
6525  * @veb: pointer to the bridge instance
6526  *
6527  * Configure the loop back mode for the LAN VSI that is downlink to the
6528  * specified HW bridge instance. It is expected this function is called
6529  * when a new HW bridge is instantiated.
6530  **/
6531 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6532 {
6533 	struct i40e_pf *pf = veb->pf;
6534 
6535 	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6536 		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6537 			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6538 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6539 		i40e_disable_pf_switch_lb(pf);
6540 	else
6541 		i40e_enable_pf_switch_lb(pf);
6542 }
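
/* For reference: the two helpers above differ only in the
 * read-modify-write of the switch_id word in the VSI context -- VEB mode
 * sets the loopback flag, VEPA mode clears it:
 *
 *	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 *	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 */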
6543 
6544 /**
6545  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6546  * @veb: pointer to the VEB instance
6547  *
6548  * This is a recursive function that first builds the attached VSIs then
6549  * recurses in to build the next layer of VEB.  We track the connections
6550  * through our own index numbers because the seids from the HW could
6551  * change across the reset.
6552  **/
6553 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6554 {
6555 	struct i40e_vsi *ctl_vsi = NULL;
6556 	struct i40e_pf *pf = veb->pf;
6557 	int v, veb_idx;
6558 	int ret;
6559 
6560 	/* build VSI that owns this VEB, temporarily attached to base VEB */
6561 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6562 		if (pf->vsi[v] &&
6563 		    pf->vsi[v]->veb_idx == veb->idx &&
6564 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6565 			ctl_vsi = pf->vsi[v];
6566 			break;
6567 		}
6568 	}
6569 	if (!ctl_vsi) {
6570 		dev_info(&pf->pdev->dev,
6571 			 "missing owner VSI for veb_idx %d\n", veb->idx);
6572 		ret = -ENOENT;
6573 		goto end_reconstitute;
6574 	}
6575 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
6576 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6577 	ret = i40e_add_vsi(ctl_vsi);
6578 	if (ret) {
6579 		dev_info(&pf->pdev->dev,
6580 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
6581 			 veb->idx, ret);
6582 		goto end_reconstitute;
6583 	}
6584 	i40e_vsi_reset_stats(ctl_vsi);
6585 
6586 	/* create the VEB in the switch and move the VSI onto the VEB */
6587 	ret = i40e_add_veb(veb, ctl_vsi);
6588 	if (ret)
6589 		goto end_reconstitute;
6590 
6591 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6592 		veb->bridge_mode = BRIDGE_MODE_VEB;
6593 	else
6594 		veb->bridge_mode = BRIDGE_MODE_VEPA;
6595 	i40e_config_bridge_mode(veb);
6596 
6597 	/* create the remaining VSIs attached to this VEB */
6598 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6599 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6600 			continue;
6601 
6602 		if (pf->vsi[v]->veb_idx == veb->idx) {
6603 			struct i40e_vsi *vsi = pf->vsi[v];
6604 
6605 			vsi->uplink_seid = veb->seid;
6606 			ret = i40e_add_vsi(vsi);
6607 			if (ret) {
6608 				dev_info(&pf->pdev->dev,
6609 					 "rebuild of vsi_idx %d failed: %d\n",
6610 					 v, ret);
6611 				goto end_reconstitute;
6612 			}
6613 			i40e_vsi_reset_stats(vsi);
6614 		}
6615 	}
6616 
6617 	/* create any VEBs attached to this VEB - RECURSION */
6618 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6619 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6620 			pf->veb[veb_idx]->uplink_seid = veb->seid;
6621 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6622 			if (ret)
6623 				break;
6624 		}
6625 	}
6626 
6627 end_reconstitute:
6628 	return ret;
6629 }
6630 
6631 /**
6632  * i40e_get_capabilities - get info about the HW
6633  * @pf: the PF struct
6634  **/
6635 static int i40e_get_capabilities(struct i40e_pf *pf)
6636 {
6637 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6638 	u16 data_size;
6639 	int buf_len;
6640 	int err;
6641 
6642 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6643 	do {
6644 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
6645 		if (!cap_buf)
6646 			return -ENOMEM;
6647 
6648 		/* this loads the data into the hw struct for us */
6649 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6650 					    &data_size,
6651 					    i40e_aqc_opc_list_func_capabilities,
6652 					    NULL);
6653 		/* data loaded, buffer no longer needed */
6654 		kfree(cap_buf);
6655 
6656 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6657 			/* retry with a larger buffer */
6658 			buf_len = data_size;
6659 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6660 			dev_info(&pf->pdev->dev,
6661 				 "capability discovery failed, err %s aq_err %s\n",
6662 				 i40e_stat_str(&pf->hw, err),
6663 				 i40e_aq_str(&pf->hw,
6664 					     pf->hw.aq.asq_last_status));
6665 			return -ENODEV;
6666 		}
6667 	} while (err);
6668 
6669 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
6670 		dev_info(&pf->pdev->dev,
6671 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6672 			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6673 			 pf->hw.func_caps.num_msix_vectors,
6674 			 pf->hw.func_caps.num_msix_vectors_vf,
6675 			 pf->hw.func_caps.fd_filters_guaranteed,
6676 			 pf->hw.func_caps.fd_filters_best_effort,
6677 			 pf->hw.func_caps.num_tx_qp,
6678 			 pf->hw.func_caps.num_vsis);
6679 
6680 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6681 		       + pf->hw.func_caps.num_vfs)
6682 	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6683 		dev_info(&pf->pdev->dev,
6684 			 "got num_vsis %d, setting num_vsis to %d\n",
6685 			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6686 		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6687 	}
6688 
6689 	return 0;
6690 }
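
/* Pattern note (sketch of the loop above): capability discovery uses a
 * grow-and-retry allocation -- start with room for 40 response elements,
 * and if the firmware answers I40E_AQ_RC_ENOMEM, retry with the data_size
 * it reported.  Any other AQ error aborts with -ENODEV.
 */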
6691 
6692 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6693 
6694 /**
6695  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6696  * @pf: board private structure
6697  **/
6698 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6699 {
6700 	struct i40e_vsi *vsi;
6701 	int i;
6702 
6703 	/* quick workaround for an NVM issue that leaves a critical register
6704 	 * uninitialized
6705 	 */
6706 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6707 		static const u32 hkey[] = {
6708 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6709 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6710 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6711 			0x95b3a76d};
6712 
6713 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6714 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6715 	}
6716 
6717 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6718 		return;
6719 
6720 	/* find existing VSI and see if it needs configuring */
6721 	vsi = NULL;
6722 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6723 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6724 			vsi = pf->vsi[i];
6725 			break;
6726 		}
6727 	}
6728 
6729 	/* create a new VSI if none exists */
6730 	if (!vsi) {
6731 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6732 				     pf->vsi[pf->lan_vsi]->seid, 0);
6733 		if (!vsi) {
6734 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6735 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6736 			return;
6737 		}
6738 	}
6739 
6740 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6741 }
6742 
6743 /**
6744  * i40e_fdir_teardown - release the Flow Director resources
6745  * @pf: board private structure
6746  **/
6747 static void i40e_fdir_teardown(struct i40e_pf *pf)
6748 {
6749 	int i;
6750 
6751 	i40e_fdir_filter_exit(pf);
6752 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6753 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6754 			i40e_vsi_release(pf->vsi[i]);
6755 			break;
6756 		}
6757 	}
6758 }
6759 
6760 /**
6761  * i40e_prep_for_reset - prep for the core to reset
6762  * @pf: board private structure
6763  *
6764  * Close up the VFs and other things in prep for PF Reset.
6765  **/
6766 static void i40e_prep_for_reset(struct i40e_pf *pf)
6767 {
6768 	struct i40e_hw *hw = &pf->hw;
6769 	i40e_status ret = 0;
6770 	u32 v;
6771 
6772 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6773 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6774 		return;
6775 	if (i40e_check_asq_alive(&pf->hw))
6776 		i40e_vc_notify_reset(pf);
6777 
6778 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6779 
6780 	/* quiesce the VSIs and their queues that are not already DOWN */
6781 	i40e_pf_quiesce_all_vsi(pf);
6782 
6783 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6784 		if (pf->vsi[v])
6785 			pf->vsi[v]->seid = 0;
6786 	}
6787 
6788 	i40e_shutdown_adminq(&pf->hw);
6789 
6790 	/* call shutdown HMC */
6791 	if (hw->hmc.hmc_obj) {
6792 		ret = i40e_shutdown_lan_hmc(hw);
6793 		if (ret)
6794 			dev_warn(&pf->pdev->dev,
6795 				 "shutdown_lan_hmc failed: %d\n", ret);
6796 	}
6797 }
6798 
6799 /**
6800  * i40e_send_version - update firmware with driver version
6801  * @pf: PF struct
6802  */
6803 static void i40e_send_version(struct i40e_pf *pf)
6804 {
6805 	struct i40e_driver_version dv;
6806 
6807 	dv.major_version = DRV_VERSION_MAJOR;
6808 	dv.minor_version = DRV_VERSION_MINOR;
6809 	dv.build_version = DRV_VERSION_BUILD;
6810 	dv.subbuild_version = 0;
6811 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6812 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6813 }
6814 
6815 /**
6816  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6817  * @pf: board private structure
6818  * @reinit: true if the Main VSI needs to be re-initialized
6819  **/
6820 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6821 {
6822 	struct i40e_hw *hw = &pf->hw;
6823 	u8 set_fc_aq_fail = 0;
6824 	i40e_status ret;
6825 	u32 val;
6826 	u32 v;
6827 
6828 	/* Now we wait for GRST to settle out.
6829 	 * We don't have to delete the VEBs or VSIs from the hw switch
6830 	 * because the reset will make them disappear.
6831 	 */
6832 	ret = i40e_pf_reset(hw);
6833 	if (ret) {
6834 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6835 		set_bit(__I40E_RESET_FAILED, &pf->state);
6836 		goto clear_recovery;
6837 	}
6838 	pf->pfr_count++;
6839 
6840 	if (test_bit(__I40E_DOWN, &pf->state))
6841 		goto clear_recovery;
6842 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6843 
6844 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6845 	ret = i40e_init_adminq(&pf->hw);
6846 	if (ret) {
6847 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6848 			 i40e_stat_str(&pf->hw, ret),
6849 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6850 		goto clear_recovery;
6851 	}
6852 
6853 	/* re-verify the eeprom if we just had an EMP reset */
6854 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6855 		i40e_verify_eeprom(pf);
6856 
6857 	i40e_clear_pxe_mode(hw);
6858 	ret = i40e_get_capabilities(pf);
6859 	if (ret)
6860 		goto end_core_reset;
6861 
6862 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6863 				hw->func_caps.num_rx_qp,
6864 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6865 	if (ret) {
6866 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6867 		goto end_core_reset;
6868 	}
6869 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6870 	if (ret) {
6871 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6872 		goto end_core_reset;
6873 	}
6874 
6875 #ifdef CONFIG_I40E_DCB
6876 	ret = i40e_init_pf_dcb(pf);
6877 	if (ret) {
6878 		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6879 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6880 		/* Continue without DCB enabled */
6881 	}
6882 #endif /* CONFIG_I40E_DCB */
6883 #ifdef I40E_FCOE
6884 	i40e_init_pf_fcoe(pf);
6885 
6886 #endif
6887 	/* do basic switch setup */
6888 	ret = i40e_setup_pf_switch(pf, reinit);
6889 	if (ret)
6890 		goto end_core_reset;
6891 
6892 	/* The driver only wants link up/down and module qualification
6893 	 * reports from firmware.  Note the negative logic.
6894 	 */
6895 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
6896 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
6897 					 I40E_AQ_EVENT_MEDIA_NA |
6898 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6899 	if (ret)
6900 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6901 			 i40e_stat_str(&pf->hw, ret),
6902 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6903 
6904 	/* make sure our flow control settings are restored */
6905 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6906 	if (ret)
6907 		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6908 			i40e_stat_str(&pf->hw, ret),
6909 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6910 
6911 	/* Rebuild the VSIs and VEBs that existed before reset.
6912 	 * They are still in our local switch element arrays, so only
6913 	 * need to rebuild the switch model in the HW.
6914 	 *
6915 	 * If there were VEBs but the reconstitution failed, we'll try
6916 	 * to recover minimal use by getting the basic PF VSI working.
6917 	 */
6918 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6919 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6920 		/* find the one VEB connected to the MAC, and find orphans */
6921 		for (v = 0; v < I40E_MAX_VEB; v++) {
6922 			if (!pf->veb[v])
6923 				continue;
6924 
6925 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6926 			    pf->veb[v]->uplink_seid == 0) {
6927 				ret = i40e_reconstitute_veb(pf->veb[v]);
6928 
6929 				if (!ret)
6930 					continue;
6931 
6932 				/* If Main VEB failed, we're in deep doodoo,
6933 				 * so give up rebuilding the switch and set up
6934 				 * for minimal rebuild of PF VSI.
6935 				 * If orphan failed, we'll report the error
6936 				 * but try to keep going.
6937 				 */
6938 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6939 					dev_info(&pf->pdev->dev,
6940 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6941 						 ret);
6942 					pf->vsi[pf->lan_vsi]->uplink_seid
6943 								= pf->mac_seid;
6944 					break;
6945 				} else if (pf->veb[v]->uplink_seid == 0) {
6946 					dev_info(&pf->pdev->dev,
6947 						 "rebuild of orphan VEB failed: %d\n",
6948 						 ret);
6949 				}
6950 			}
6951 		}
6952 	}
6953 
6954 	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6955 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6956 		/* no VEB, so rebuild only the Main VSI */
6957 		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6958 		if (ret) {
6959 			dev_info(&pf->pdev->dev,
6960 				 "rebuild of Main VSI failed: %d\n", ret);
6961 			goto end_core_reset;
6962 		}
6963 	}
6964 
6965 	/* Reconfigure hardware for allowing smaller MSS in the case
6966 	 * of TSO, so that we avoid the MDD being fired and causing
6967 	 * a reset in the case of small MSS+TSO.
6968 	 */
6969 #define I40E_REG_MSS          0x000E64DC
6970 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
6971 #define I40E_64BYTE_MSS       0x400000
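	/* The MIN_MSS field occupies bits 25:16 of I40E_REG_MSS, so
	 * I40E_64BYTE_MSS (0x40 << 16) encodes a 64-byte minimum MSS.
	 * The check below only ever lowers the field: a stored minimum
	 * above 64 bytes is clamped down, smaller values are left alone.
	 */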
6972 	val = rd32(hw, I40E_REG_MSS);
6973 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6974 		val &= ~I40E_REG_MSS_MIN_MASK;
6975 		val |= I40E_64BYTE_MSS;
6976 		wr32(hw, I40E_REG_MSS, val);
6977 	}
6978 
6979 	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
6980 		msleep(75);
6981 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6982 		if (ret)
6983 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6984 				 i40e_stat_str(&pf->hw, ret),
6985 				 i40e_aq_str(&pf->hw,
6986 					     pf->hw.aq.asq_last_status));
6987 	}
6988 	/* reinit the misc interrupt */
6989 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6990 		ret = i40e_setup_misc_vector(pf);
6991 
6992 	/* Add a filter to drop all Flow control frames from any VSI from being
6993 	 * transmitted. By doing so we stop a malicious VF from sending out
6994 	 * PAUSE or PFC frames and potentially controlling traffic for other
6995 	 * PF/VF VSIs.
6996 	 * The FW can still send Flow control frames if enabled.
6997 	 */
6998 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6999 						       pf->main_vsi_seid);
7000 
7001 	/* restart the VSIs that were rebuilt and running before the reset */
7002 	i40e_pf_unquiesce_all_vsi(pf);
7003 
7004 	if (pf->num_alloc_vfs) {
7005 		for (v = 0; v < pf->num_alloc_vfs; v++)
7006 			i40e_reset_vf(&pf->vf[v], true);
7007 	}
7008 
7009 	/* tell the firmware that we're starting */
7010 	i40e_send_version(pf);
7011 
7012 end_core_reset:
7013 	clear_bit(__I40E_RESET_FAILED, &pf->state);
7014 clear_recovery:
7015 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
7016 }
7017 
7018 /**
7019  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
7020  * @pf: board private structure
7021  *
7022  * Close up the VFs and other things in prep for a Core Reset,
7023  * then get ready to rebuild the world.
7024  **/
7025 static void i40e_handle_reset_warning(struct i40e_pf *pf)
7026 {
7027 	i40e_prep_for_reset(pf);
7028 	i40e_reset_and_rebuild(pf, false);
7029 }
7030 
7031 /**
7032  * i40e_handle_mdd_event
7033  * @pf: pointer to the PF structure
7034  *
7035  * Called from the MDD irq handler to identify possibly malicious VFs
7036  **/
7037 static void i40e_handle_mdd_event(struct i40e_pf *pf)
7038 {
7039 	struct i40e_hw *hw = &pf->hw;
7040 	bool mdd_detected = false;
7041 	bool pf_mdd_detected = false;
7042 	struct i40e_vf *vf;
7043 	u32 reg;
7044 	int i;
7045 
7046 	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
7047 		return;
7048 
7049 	/* find what triggered the MDD event */
7050 	reg = rd32(hw, I40E_GL_MDET_TX);
7051 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7052 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7053 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
7054 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7055 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
7056 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7057 				I40E_GL_MDET_TX_EVENT_SHIFT;
7058 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7059 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
7060 				pf->hw.func_caps.base_queue;
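		/* the QUEUE field carries an absolute (device-wide) queue
		 * number; subtracting func_caps.base_queue above converts
		 * it to an index relative to this PF
		 */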
7061 		if (netif_msg_tx_err(pf))
7062 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7063 				 event, queue, pf_num, vf_num);
7064 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7065 		mdd_detected = true;
7066 	}
7067 	reg = rd32(hw, I40E_GL_MDET_RX);
7068 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7069 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7070 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
7071 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7072 				I40E_GL_MDET_RX_EVENT_SHIFT;
7073 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7074 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
7075 				pf->hw.func_caps.base_queue;
7076 		if (netif_msg_rx_err(pf))
7077 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7078 				 event, queue, func);
7079 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7080 		mdd_detected = true;
7081 	}
7082 
7083 	if (mdd_detected) {
7084 		reg = rd32(hw, I40E_PF_MDET_TX);
7085 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7086 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7087 			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7088 			pf_mdd_detected = true;
7089 		}
7090 		reg = rd32(hw, I40E_PF_MDET_RX);
7091 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7092 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7093 			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7094 			pf_mdd_detected = true;
7095 		}
7096 		/* Queue belongs to the PF, initiate a reset */
7097 		if (pf_mdd_detected) {
7098 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7099 			i40e_service_event_schedule(pf);
7100 		}
7101 	}
7102 
7103 	/* see if one of the VFs needs its hand slapped */
7104 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7105 		vf = &(pf->vf[i]);
7106 		reg = rd32(hw, I40E_VP_MDET_TX(i));
7107 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7108 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7109 			vf->num_mdd_events++;
7110 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7111 				 i);
7112 		}
7113 
7114 		reg = rd32(hw, I40E_VP_MDET_RX(i));
7115 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7116 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7117 			vf->num_mdd_events++;
7118 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7119 				 i);
7120 		}
7121 
7122 		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7123 			dev_info(&pf->pdev->dev,
7124 				 "Too many MDD events on VF %d, disabled\n", i);
7125 			dev_info(&pf->pdev->dev,
7126 				 "Use PF Control I/F to re-enable the VF\n");
7127 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7128 		}
7129 	}
7130 
7131 	/* re-enable mdd interrupt cause */
7132 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7133 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7134 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7135 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7136 	i40e_flush(hw);
7137 }
7138 
7139 /**
7140  * i40e_sync_udp_filters_subtask - Sync the pending UDP tunnel port list with HW
7141  * @pf: board private structure
7142  **/
7143 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7144 {
7145 	struct i40e_hw *hw = &pf->hw;
7146 	i40e_status ret;
7147 	__be16 port;
7148 	int i;
7149 
7150 	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7151 		return;
7152 
7153 	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7154 
7155 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7156 		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7157 			pf->pending_udp_bitmap &= ~BIT_ULL(i);
7158 			port = pf->udp_ports[i].index;
7159 			if (port)
7160 				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
7161 						     pf->udp_ports[i].type,
7162 						     NULL, NULL);
7163 			else
7164 				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7165 
7166 			if (ret) {
7167 				dev_dbg(&pf->pdev->dev,
7168 					"%s %s port %d, index %d failed, err %s aq_err %s\n",
7169 					pf->udp_ports[i].type ? "vxlan" : "geneve",
7170 					port ? "add" : "delete",
7171 					ntohs(port), i,
7172 					i40e_stat_str(&pf->hw, ret),
7173 					i40e_aq_str(&pf->hw,
7174 						    pf->hw.aq.asq_last_status));
7175 				pf->udp_ports[i].index = 0;
7176 			}
7177 		}
7178 	}
7179 }
7180 
7181 /**
7182  * i40e_service_task - Run the driver's async subtasks
7183  * @work: pointer to work_struct containing our data
7184  **/
7185 static void i40e_service_task(struct work_struct *work)
7186 {
7187 	struct i40e_pf *pf = container_of(work,
7188 					  struct i40e_pf,
7189 					  service_task);
7190 	unsigned long start_time = jiffies;
7191 
7192 	/* don't bother with service tasks if a reset is in progress */
7193 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7194 		i40e_service_event_complete(pf);
7195 		return;
7196 	}
7197 
7198 	i40e_detect_recover_hung(pf);
7199 	i40e_sync_filters_subtask(pf);
7200 	i40e_reset_subtask(pf);
7201 	i40e_handle_mdd_event(pf);
7202 	i40e_vc_process_vflr_event(pf);
7203 	i40e_watchdog_subtask(pf);
7204 	i40e_fdir_reinit_subtask(pf);
7205 	i40e_client_subtask(pf);
7206 	i40e_sync_filters_subtask(pf);
7207 	i40e_sync_udp_filters_subtask(pf);
7208 	i40e_clean_adminq_subtask(pf);
7209 
7210 	i40e_service_event_complete(pf);
7211 
7212 	/* If the tasks have taken longer than one timer cycle or there
7213 	 * is more work to be done, reschedule the service task now
7214 	 * rather than wait for the timer to tick again.
7215 	 */
7216 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7217 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)		 ||
7218 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)		 ||
7219 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7220 		i40e_service_event_schedule(pf);
7221 }
7222 
7223 /**
7224  * i40e_service_timer - timer callback
7225  * @data: pointer to PF struct
7226  **/
7227 static void i40e_service_timer(unsigned long data)
7228 {
7229 	struct i40e_pf *pf = (struct i40e_pf *)data;
7230 
7231 	mod_timer(&pf->service_timer,
7232 		  round_jiffies(jiffies + pf->service_timer_period));
7233 	i40e_service_event_schedule(pf);
7234 }
7235 
7236 /**
7237  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7238  * @vsi: the VSI being configured
7239  **/
7240 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7241 {
7242 	struct i40e_pf *pf = vsi->back;
7243 
7244 	switch (vsi->type) {
7245 	case I40E_VSI_MAIN:
7246 		vsi->alloc_queue_pairs = pf->num_lan_qps;
7247 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7248 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7249 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7250 			vsi->num_q_vectors = pf->num_lan_msix;
7251 		else
7252 			vsi->num_q_vectors = 1;
7253 
7254 		break;
7255 
7256 	case I40E_VSI_FDIR:
7257 		vsi->alloc_queue_pairs = 1;
7258 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7259 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7260 		vsi->num_q_vectors = pf->num_fdsb_msix;
7261 		break;
7262 
7263 	case I40E_VSI_VMDQ2:
7264 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7265 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7266 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7267 		vsi->num_q_vectors = pf->num_vmdq_msix;
7268 		break;
7269 
7270 	case I40E_VSI_SRIOV:
7271 		vsi->alloc_queue_pairs = pf->num_vf_qps;
7272 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7273 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7274 		break;
7275 
7276 #ifdef I40E_FCOE
7277 	case I40E_VSI_FCOE:
7278 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7279 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7280 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7281 		vsi->num_q_vectors = pf->num_fcoe_msix;
7282 		break;
7283 
7284 #endif /* I40E_FCOE */
7285 	default:
7286 		WARN_ON(1);
7287 		return -ENODATA;
7288 	}
7289 
7290 	return 0;
7291 }
7292 
7293 /**
7294  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7295  * @vsi: pointer to the VSI being configured
7296  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7297  *
7298  * On error: returns error code (negative)
7299  * On success: returns 0
7300  **/
7301 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7302 {
7303 	int size;
7304 	int ret = 0;
7305 
7306 	/* allocate memory for both Tx and Rx ring pointers */
7307 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7308 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7309 	if (!vsi->tx_rings)
7310 		return -ENOMEM;
7311 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
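	/* a single allocation backs both arrays: the first half of
	 * tx_rings[] holds the Tx ring pointers and rx_rings aliases the
	 * second half, so one kfree() of tx_rings releases both
	 */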
7312 
7313 	if (alloc_qvectors) {
7314 		/* allocate memory for q_vector pointers */
7315 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7316 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7317 		if (!vsi->q_vectors) {
7318 			ret = -ENOMEM;
7319 			goto err_vectors;
7320 		}
7321 	}
7322 	return ret;
7323 
7324 err_vectors:
7325 	kfree(vsi->tx_rings);
7326 	return ret;
7327 }
7328 
7329 /**
7330  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7331  * @pf: board private structure
7332  * @type: type of VSI
7333  *
7334  * On error: returns error code (negative)
7335  * On success: returns vsi index in PF (positive)
7336  **/
7337 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7338 {
7339 	int ret = -ENODEV;
7340 	struct i40e_vsi *vsi;
7341 	int vsi_idx;
7342 	int i;
7343 
7344 	/* Need to protect the allocation of the VSIs at the PF level */
7345 	mutex_lock(&pf->switch_mutex);
7346 
7347 	/* VSI list may be fragmented if VSI creation/destruction has
7348 	 * been happening.  We can afford to do a quick scan to look
7349 	 * for any free VSIs in the list.
7350 	 *
7351 	 * find next empty vsi slot, looping back around if necessary
7352 	 */
7353 	i = pf->next_vsi;
7354 	while (i < pf->num_alloc_vsi && pf->vsi[i])
7355 		i++;
7356 	if (i >= pf->num_alloc_vsi) {
7357 		i = 0;
7358 		while (i < pf->next_vsi && pf->vsi[i])
7359 			i++;
7360 	}
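	/* example: with num_alloc_vsi = 8, next_vsi = 6 and slots 6 and 7
	 * occupied, the first scan runs off the end and the second scan
	 * restarts from slot 0, stopping before next_vsi
	 */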
7361 
7362 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7363 		vsi_idx = i;             /* Found one! */
7364 	} else {
7365 		ret = -ENODEV;
7366 		goto unlock_pf;  /* out of VSI slots! */
7367 	}
7368 	pf->next_vsi = ++i;
7369 
7370 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7371 	if (!vsi) {
7372 		ret = -ENOMEM;
7373 		goto unlock_pf;
7374 	}
7375 	vsi->type = type;
7376 	vsi->back = pf;
7377 	set_bit(__I40E_DOWN, &vsi->state);
7378 	vsi->flags = 0;
7379 	vsi->idx = vsi_idx;
7380 	vsi->int_rate_limit = 0;
7381 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7382 				pf->rss_table_size : 64;
7383 	vsi->netdev_registered = false;
7384 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7385 	INIT_LIST_HEAD(&vsi->mac_filter_list);
7386 	vsi->irqs_ready = false;
7387 
7388 	ret = i40e_set_num_rings_in_vsi(vsi);
7389 	if (ret)
7390 		goto err_rings;
7391 
7392 	ret = i40e_vsi_alloc_arrays(vsi, true);
7393 	if (ret)
7394 		goto err_rings;
7395 
7396 	/* Setup default MSIX irq handler for VSI */
7397 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7398 
7399 	/* Initialize VSI lock */
7400 	spin_lock_init(&vsi->mac_filter_list_lock);
7401 	pf->vsi[vsi_idx] = vsi;
7402 	ret = vsi_idx;
7403 	goto unlock_pf;
7404 
7405 err_rings:
7406 	pf->next_vsi = i - 1;
7407 	kfree(vsi);
7408 unlock_pf:
7409 	mutex_unlock(&pf->switch_mutex);
7410 	return ret;
7411 }
7412 
7413 /**
7414  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7415  * @vsi: pointer to the VSI whose arrays are being freed
7416  * @free_qvectors: a bool to specify if q_vectors need to be freed.
7417  **/
7421 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7422 {
7423 	/* free the ring and vector containers */
7424 	if (free_qvectors) {
7425 		kfree(vsi->q_vectors);
7426 		vsi->q_vectors = NULL;
7427 	}
7428 	kfree(vsi->tx_rings);
7429 	vsi->tx_rings = NULL;
7430 	vsi->rx_rings = NULL;
7431 }
7432 
7433 /**
7434  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7435  * and lookup table
7436  * @vsi: Pointer to VSI structure
7437  */
7438 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7439 {
7440 	if (!vsi)
7441 		return;
7442 
7443 	kfree(vsi->rss_hkey_user);
7444 	vsi->rss_hkey_user = NULL;
7445 
7446 	kfree(vsi->rss_lut_user);
7447 	vsi->rss_lut_user = NULL;
7448 }
7449 
7450 /**
7451  * i40e_vsi_clear - Deallocate the VSI provided
7452  * @vsi: the VSI being un-configured
7453  **/
7454 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7455 {
7456 	struct i40e_pf *pf;
7457 
7458 	if (!vsi)
7459 		return 0;
7460 
7461 	if (!vsi->back)
7462 		goto free_vsi;
7463 	pf = vsi->back;
7464 
7465 	mutex_lock(&pf->switch_mutex);
7466 	if (!pf->vsi[vsi->idx]) {
7467 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7468 			vsi->idx, vsi->idx, vsi, vsi->type);
7469 		goto unlock_vsi;
7470 	}
7471 
7472 	if (pf->vsi[vsi->idx] != vsi) {
7473 		dev_err(&pf->pdev->dev,
7474 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7475 			pf->vsi[vsi->idx]->idx,
7476 			pf->vsi[vsi->idx],
7477 			pf->vsi[vsi->idx]->type,
7478 			vsi->idx, vsi, vsi->type);
7479 		goto unlock_vsi;
7480 	}
7481 
7482 	/* updates the PF for this cleared vsi */
7483 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7484 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7485 
7486 	i40e_vsi_free_arrays(vsi, true);
7487 	i40e_clear_rss_config_user(vsi);
7488 
7489 	pf->vsi[vsi->idx] = NULL;
7490 	if (vsi->idx < pf->next_vsi)
7491 		pf->next_vsi = vsi->idx;
7492 
7493 unlock_vsi:
7494 	mutex_unlock(&pf->switch_mutex);
7495 free_vsi:
7496 	kfree(vsi);
7497 
7498 	return 0;
7499 }
7500 
7501 /**
7502  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7503  * @vsi: the VSI being cleaned
7504  **/
7505 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7506 {
7507 	int i;
7508 
7509 	if (vsi->tx_rings && vsi->tx_rings[0]) {
7510 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7511 			kfree_rcu(vsi->tx_rings[i], rcu);
7512 			vsi->tx_rings[i] = NULL;
7513 			vsi->rx_rings[i] = NULL;
7514 		}
7515 	}
7516 }
7517 
7518 /**
7519  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7520  * @vsi: the VSI being configured
7521  **/
7522 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7523 {
7524 	struct i40e_ring *tx_ring, *rx_ring;
7525 	struct i40e_pf *pf = vsi->back;
7526 	int i;
7527 
7528 	/* Set basic values in the rings to be used later during open() */
7529 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7530 		/* allocate space for both Tx and Rx in one shot */
7531 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7532 		if (!tx_ring)
7533 			goto err_out;
7534 
7535 		tx_ring->queue_index = i;
7536 		tx_ring->reg_idx = vsi->base_queue + i;
7537 		tx_ring->ring_active = false;
7538 		tx_ring->vsi = vsi;
7539 		tx_ring->netdev = vsi->netdev;
7540 		tx_ring->dev = &pf->pdev->dev;
7541 		tx_ring->count = vsi->num_desc;
7542 		tx_ring->size = 0;
7543 		tx_ring->dcb_tc = 0;
7544 		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7545 			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7546 		tx_ring->tx_itr_setting = pf->tx_itr_default;
7547 		vsi->tx_rings[i] = tx_ring;
7548 
7549 		rx_ring = &tx_ring[1];
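		/* rx_ring is the second i40e_ring of the two-ring block
		 * allocated above, so freeing the Tx ring releases both
		 */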
7550 		rx_ring->queue_index = i;
7551 		rx_ring->reg_idx = vsi->base_queue + i;
7552 		rx_ring->ring_active = false;
7553 		rx_ring->vsi = vsi;
7554 		rx_ring->netdev = vsi->netdev;
7555 		rx_ring->dev = &pf->pdev->dev;
7556 		rx_ring->count = vsi->num_desc;
7557 		rx_ring->size = 0;
7558 		rx_ring->dcb_tc = 0;
7559 		rx_ring->rx_itr_setting = pf->rx_itr_default;
7560 		vsi->rx_rings[i] = rx_ring;
7561 	}
7562 
7563 	return 0;
7564 
7565 err_out:
7566 	i40e_vsi_clear_rings(vsi);
7567 	return -ENOMEM;
7568 }
7569 
7570 /**
7571  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7572  * @pf: board private structure
7573  * @vectors: the number of MSI-X vectors to request
7574  *
7575  * Returns the number of vectors reserved, or error
7576  **/
7577 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7578 {
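	/* pci_enable_msix_range() grants anywhere between I40E_MIN_MSIX and
	 * the requested count; it returns the number of vectors actually
	 * reserved, or a negative errno if even the minimum cannot be met.
	 */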
7579 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7580 					I40E_MIN_MSIX, vectors);
7581 	if (vectors < 0) {
7582 		dev_info(&pf->pdev->dev,
7583 			 "MSI-X vector reservation failed: %d\n", vectors);
7584 		vectors = 0;
7585 	}
7586 
7587 	return vectors;
7588 }
7589 
7590 /**
7591  * i40e_init_msix - Setup the MSIX capability
7592  * @pf: board private structure
7593  *
7594  * Work with the OS to set up the MSIX vectors needed.
7595  *
7596  * Returns the number of vectors reserved or negative on failure
7597  **/
7598 static int i40e_init_msix(struct i40e_pf *pf)
7599 {
7600 	struct i40e_hw *hw = &pf->hw;
7601 	int vectors_left;
7602 	int v_budget, i;
7603 	int v_actual;
7604 	int iwarp_requested = 0;
7605 
7606 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7607 		return -ENODEV;
7608 
7609 	/* The number of vectors we'll request will be comprised of:
7610 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
7611 	 *   - The number of LAN queue pairs
7612 	 *	- Queues being used for RSS.
7613 	 *		We don't need as many as max_rss_size vectors;
7614 	 *		use rss_size instead in the calculation since that
7615 	 *		is governed by the number of CPUs in the system.
7616 	 *	- assumes symmetric Tx/Rx pairing
7617 	 *   - The number of VMDq pairs
7618 	 *   - The CPU count within the NUMA node if iWARP is enabled
7619 #ifdef I40E_FCOE
7620 	 *   - The number of FCOE qps.
7621 #endif
7622 	 * Once we count this up, try the request.
7623 	 *
7624 	 * If we can't get what we want, we'll simplify to nearly nothing
7625 	 * and try again.  If that still fails, we punt.
7626 	 */
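	/* Worked example, assuming the policy above: an 8-CPU system with
	 * flow director sideband enabled and two single-queue VMDq VSIs
	 * budgets 1 (misc) + 8 (LAN) + 1 (FD-SB) + 2 (VMDq) = 12 vectors,
	 * provided func_caps.num_msix_vectors can cover them.
	 */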
7627 	vectors_left = hw->func_caps.num_msix_vectors;
7628 	v_budget = 0;
7629 
7630 	/* reserve one vector for miscellaneous handler */
7631 	if (vectors_left) {
7632 		v_budget++;
7633 		vectors_left--;
7634 	}
7635 
7636 	/* reserve vectors for the main PF traffic queues */
7637 	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7638 	vectors_left -= pf->num_lan_msix;
7639 	v_budget += pf->num_lan_msix;
7640 
7641 	/* reserve one vector for sideband flow director */
7642 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7643 		if (vectors_left) {
7644 			pf->num_fdsb_msix = 1;
7645 			v_budget++;
7646 			vectors_left--;
7647 		} else {
7648 			pf->num_fdsb_msix = 0;
7649 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7650 		}
7651 	}
7652 
7653 #ifdef I40E_FCOE
7654 	/* can we reserve enough for FCoE? */
7655 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7656 		if (!vectors_left)
7657 			pf->num_fcoe_msix = 0;
7658 		else if (vectors_left >= pf->num_fcoe_qps)
7659 			pf->num_fcoe_msix = pf->num_fcoe_qps;
7660 		else
7661 			pf->num_fcoe_msix = 1;
7662 		v_budget += pf->num_fcoe_msix;
7663 		vectors_left -= pf->num_fcoe_msix;
7664 	}
7665 
7666 #endif
7667 	/* can we reserve enough for iWARP? */
7668 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7669 		if (!vectors_left)
7670 			pf->num_iwarp_msix = 0;
7671 		else if (vectors_left < pf->num_iwarp_msix)
7672 			pf->num_iwarp_msix = 1;
7673 		v_budget += pf->num_iwarp_msix;
7674 		vectors_left -= pf->num_iwarp_msix;
7675 	}
7676 
7677 	/* any vectors left over go for VMDq support */
7678 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7679 		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7680 		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7681 
7682 		/* if we're short on vectors for what's desired, we limit
7683 		 * the queues per vmdq.  If this is still more than are
7684 		 * available, the user will need to change the number of
7685 		 * queues/vectors used by the PF later with the ethtool
7686 		 * channels command
7687 		 */
7688 		if (vmdq_vecs < vmdq_vecs_wanted)
7689 			pf->num_vmdq_qps = 1;
7690 		pf->num_vmdq_msix = pf->num_vmdq_qps;
7691 
7692 		v_budget += vmdq_vecs;
7693 		vectors_left -= vmdq_vecs;
7694 	}
7695 
7696 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7697 				   GFP_KERNEL);
7698 	if (!pf->msix_entries)
7699 		return -ENOMEM;
7700 
7701 	for (i = 0; i < v_budget; i++)
7702 		pf->msix_entries[i].entry = i;
7703 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7704 
7705 	if (v_actual != v_budget) {
7706 		/* If we have limited resources, we will start with no vectors
7707 		 * for the special features and then allocate vectors to some
7708 		 * of these features based on the policy and at the end disable
7709 		 * the features that did not get any vectors.
7710 		 */
7711 		iwarp_requested = pf->num_iwarp_msix;
7712 		pf->num_iwarp_msix = 0;
7713 #ifdef I40E_FCOE
7714 		pf->num_fcoe_qps = 0;
7715 		pf->num_fcoe_msix = 0;
7716 #endif
7717 		pf->num_vmdq_msix = 0;
7718 	}
7719 
7720 	if (v_actual < I40E_MIN_MSIX) {
7721 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7722 		kfree(pf->msix_entries);
7723 		pf->msix_entries = NULL;
7724 		return -ENODEV;
7725 
7726 	} else if (v_actual == I40E_MIN_MSIX) {
7727 		/* Adjust for minimal MSIX use */
7728 		pf->num_vmdq_vsis = 0;
7729 		pf->num_vmdq_qps = 0;
7730 		pf->num_lan_qps = 1;
7731 		pf->num_lan_msix = 1;
7732 
7733 	} else if (v_actual != v_budget) {
7734 		int vec;
7735 
7736 		/* reserve the misc vector */
7737 		vec = v_actual - 1;
7738 
7739 		/* Scale vector usage down */
7740 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7741 		pf->num_vmdq_vsis = 1;
7742 		pf->num_vmdq_qps = 1;
7743 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7744 
7745 		/* partition out the remaining vectors */
7746 		switch (vec) {
7747 		case 2:
7748 			pf->num_lan_msix = 1;
7749 			break;
7750 		case 3:
7751 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7752 				pf->num_lan_msix = 1;
7753 				pf->num_iwarp_msix = 1;
7754 			} else {
7755 				pf->num_lan_msix = 2;
7756 			}
7757 #ifdef I40E_FCOE
7758 			/* give one vector to FCoE */
7759 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7760 				pf->num_lan_msix = 1;
7761 				pf->num_fcoe_msix = 1;
7762 			}
7763 #endif
7764 			break;
7765 		default:
7766 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7767 				pf->num_iwarp_msix = min_t(int, (vec / 3),
7768 						 iwarp_requested);
7769 				pf->num_vmdq_vsis = min_t(int, (vec / 3),
7770 						  I40E_DEFAULT_NUM_VMDQ_VSI);
7771 			} else {
7772 				pf->num_vmdq_vsis = min_t(int, (vec / 2),
7773 						  I40E_DEFAULT_NUM_VMDQ_VSI);
7774 			}
7775 			pf->num_lan_msix = min_t(int,
7776 			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7777 							      pf->num_lan_msix);
7778 #ifdef I40E_FCOE
7779 			/* give one vector to FCoE */
7780 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7781 				pf->num_fcoe_msix = 1;
7782 				vec--;
7783 			}
7784 #endif
7785 			break;
7786 		}
7787 	}
7788 
7789 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7790 	    (pf->num_vmdq_msix == 0)) {
7791 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7792 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7793 	}
7794 
7795 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7796 	    (pf->num_iwarp_msix == 0)) {
7797 		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7798 		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7799 	}
7800 #ifdef I40E_FCOE
7801 
7802 	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7803 		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7804 		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7805 	}
7806 #endif
7807 	return v_actual;
7808 }
7809 
7810 /**
7811  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7812  * @vsi: the VSI being configured
7813  * @v_idx: index of the vector in the vsi struct
7814  * @cpu: cpu to be used on affinity_mask
7815  *
7816  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7817  **/
7818 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
7819 {
7820 	struct i40e_q_vector *q_vector;
7821 
7822 	/* allocate q_vector */
7823 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7824 	if (!q_vector)
7825 		return -ENOMEM;
7826 
7827 	q_vector->vsi = vsi;
7828 	q_vector->v_idx = v_idx;
7829 	cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7830 
7831 	if (vsi->netdev)
7832 		netif_napi_add(vsi->netdev, &q_vector->napi,
7833 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
7834 
7835 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
7836 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
7837 
7838 	/* tie q_vector and vsi together */
7839 	vsi->q_vectors[v_idx] = q_vector;
7840 
7841 	return 0;
7842 }
7843 
7844 /**
7845  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7846  * @vsi: the VSI being configured
7847  *
7848  * We allocate one q_vector per queue interrupt.  If allocation fails we
7849  * return -ENOMEM.
7850  **/
7851 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7852 {
7853 	struct i40e_pf *pf = vsi->back;
7854 	int err, v_idx, num_q_vectors, current_cpu;
7855 
7856 	/* if not MSIX, give the one vector only to the LAN VSI */
7857 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7858 		num_q_vectors = vsi->num_q_vectors;
7859 	else if (vsi == pf->vsi[pf->lan_vsi])
7860 		num_q_vectors = 1;
7861 	else
7862 		return -EINVAL;
7863 
7864 	current_cpu = cpumask_first(cpu_online_mask);
7865 
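	/* hand out affinity hints round-robin across the online CPUs,
	 * wrapping back to the first CPU once the mask is exhausted
	 */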
7866 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7867 		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
7868 		if (err)
7869 			goto err_out;
7870 		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7871 		if (unlikely(current_cpu >= nr_cpu_ids))
7872 			current_cpu = cpumask_first(cpu_online_mask);
7873 	}
7874 
7875 	return 0;
7876 
7877 err_out:
7878 	while (v_idx--)
7879 		i40e_free_q_vector(vsi, v_idx);
7880 
7881 	return err;
7882 }
7883 
7884 /**
7885  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7886  * @pf: board private structure to initialize
7887  **/
7888 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7889 {
7890 	int vectors = 0;
7891 	ssize_t size;
7892 
7893 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7894 		vectors = i40e_init_msix(pf);
7895 		if (vectors < 0) {
7896 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
7897 				       I40E_FLAG_IWARP_ENABLED	|
7898 #ifdef I40E_FCOE
7899 				       I40E_FLAG_FCOE_ENABLED	|
7900 #endif
7901 				       I40E_FLAG_RSS_ENABLED	|
7902 				       I40E_FLAG_DCB_CAPABLE	|
7903 				       I40E_FLAG_DCB_ENABLED	|
7904 				       I40E_FLAG_SRIOV_ENABLED	|
7905 				       I40E_FLAG_FD_SB_ENABLED	|
7906 				       I40E_FLAG_FD_ATR_ENABLED	|
7907 				       I40E_FLAG_VMDQ_ENABLED);
7908 
7909 			/* rework the queue expectations without MSIX */
7910 			i40e_determine_queue_usage(pf);
7911 		}
7912 	}
7913 
7914 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7915 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7916 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7917 		vectors = pci_enable_msi(pf->pdev);
7918 		if (vectors < 0) {
7919 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7920 				 vectors);
7921 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7922 		}
7923 		vectors = 1;  /* one MSI or Legacy vector */
7924 	}
7925 
7926 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7927 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7928 
7929 	/* set up vector assignment tracking */
7930 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7931 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
7932 	if (!pf->irq_pile) {
7933 		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7934 		return -ENOMEM;
7935 	}
7936 	pf->irq_pile->num_entries = vectors;
7937 	pf->irq_pile->search_hint = 0;
7938 
7939 	/* track first vector for misc interrupts, ignore return */
7940 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7941 
7942 	return 0;
7943 }
7944 
7945 /**
7946  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7947  * @pf: board private structure
7948  *
7949  * This sets up the handler for MSIX 0, which is used to manage the
7950  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7951  * when in MSI or Legacy interrupt mode.
7952  **/
7953 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7954 {
7955 	struct i40e_hw *hw = &pf->hw;
7956 	int err = 0;
7957 
7958 	/* Only request the irq if this is the first time through, and
7959 	 * not when we're rebuilding after a Reset
7960 	 */
7961 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7962 		err = request_irq(pf->msix_entries[0].vector,
7963 				  i40e_intr, 0, pf->int_name, pf);
7964 		if (err) {
7965 			dev_info(&pf->pdev->dev,
7966 				 "request_irq for %s failed: %d\n",
7967 				 pf->int_name, err);
7968 			return -EFAULT;
7969 		}
7970 	}
7971 
7972 	i40e_enable_misc_int_causes(pf);
7973 
7974 	/* associate no queues to the misc vector */
7975 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7976 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7977 
7978 	i40e_flush(hw);
7979 
7980 	i40e_irq_dynamic_enable_icr0(pf, true);
7981 
7982 	return err;
7983 }
7984 
7985 /**
7986  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7987  * @vsi: vsi structure
7988  * @seed: RSS hash seed
7989  * @lut: pointer to the lookup table to program
7990  * @lut_size: size of the lookup table in bytes
7991  **/
7992 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7993 			      u8 *lut, u16 lut_size)
7994 {
7995 	struct i40e_pf *pf = vsi->back;
7996 	struct i40e_hw *hw = &pf->hw;
7997 	int ret = 0;
7998 
7999 	if (seed) {
8000 		struct i40e_aqc_get_set_rss_key_data *seed_dw =
8001 			(struct i40e_aqc_get_set_rss_key_data *)seed;
8002 
8003 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8004 		if (ret) {
8005 			dev_info(&pf->pdev->dev,
8006 				 "Cannot set RSS key, err %s aq_err %s\n",
8007 				 i40e_stat_str(hw, ret),
8008 				 i40e_aq_str(hw, hw->aq.asq_last_status));
8009 			return ret;
8010 		}
8011 	}
8012 	if (lut) {
8013 		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8014 
8015 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8016 		if (ret)
8017 			dev_info(&pf->pdev->dev,
8018 				 "Cannot set RSS lut, err %s aq_err %s\n",
8019 				 i40e_stat_str(hw, ret),
8020 				 i40e_aq_str(hw, hw->aq.asq_last_status));
8021 	}
8022 
8023 	return ret;
8024 }
8034 
8035 /**
8036  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8037  * @vsi: VSI structure
8038  **/
8039 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8040 {
8041 	u8 seed[I40E_HKEY_ARRAY_SIZE];
8042 	struct i40e_pf *pf = vsi->back;
8043 	u8 *lut;
8044 	int ret;
8045 
8046 	if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8047 		return 0;
8048 
8049 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8050 	if (!lut)
8051 		return -ENOMEM;
8052 
8053 	vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
8054 	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8055 	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8056 	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8057 	kfree(lut);
8058 
8059 	return ret;
8060 }
8061 
8062 /**
8063  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8064  * @vsi: Pointer to vsi structure
8065  * @seed: Buffer to store the hash keys
8066  * @lut: Buffer to store the lookup table entries
8067  * @lut_size: Size of buffer to store the lookup table entries
8068  *
8069  * Return 0 on success, negative on failure
8070  */
8071 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8072 			   u8 *lut, u16 lut_size)
8073 {
8074 	struct i40e_pf *pf = vsi->back;
8075 	struct i40e_hw *hw = &pf->hw;
8076 	int ret = 0;
8077 
8078 	if (seed) {
8079 		ret = i40e_aq_get_rss_key(hw, vsi->id,
8080 			(struct i40e_aqc_get_set_rss_key_data *)seed);
8081 		if (ret) {
8082 			dev_info(&pf->pdev->dev,
8083 				 "Cannot get RSS key, err %s aq_err %s\n",
8084 				 i40e_stat_str(&pf->hw, ret),
8085 				 i40e_aq_str(&pf->hw,
8086 					     pf->hw.aq.asq_last_status));
8087 			return ret;
8088 		}
8089 	}
8090 
8091 	if (lut) {
8092 		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8093 
8094 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8095 		if (ret) {
8096 			dev_info(&pf->pdev->dev,
8097 				 "Cannot get RSS lut, err %s aq_err %s\n",
8098 				 i40e_stat_str(&pf->hw, ret),
8099 				 i40e_aq_str(&pf->hw,
8100 					     pf->hw.aq.asq_last_status));
8101 			return ret;
8102 		}
8103 	}
8104 
8105 	return ret;
8106 }
8107 
8108 /**
8109  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8110  * @vsi: Pointer to vsi structure
8111  * @seed: RSS hash seed
8112  * @lut: Lookup table
8113  * @lut_size: Lookup table size
8114  *
8115  * Returns 0 on success, negative on failure
8116  **/
8117 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8118 			       const u8 *lut, u16 lut_size)
8119 {
8120 	struct i40e_pf *pf = vsi->back;
8121 	struct i40e_hw *hw = &pf->hw;
8122 	u16 vf_id = vsi->vf_id;
8123 	u8 i;
8124 
8125 	/* Fill out hash function seed */
8126 	if (seed) {
8127 		u32 *seed_dw = (u32 *)seed;
8128 
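		/* the key is written out one dword per HKEY register; the PF
		 * and VF register files each have their own MAX_INDEX bound
		 */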
8129 		if (vsi->type == I40E_VSI_MAIN) {
8130 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8131 				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
8132 						  seed_dw[i]);
8133 		} else if (vsi->type == I40E_VSI_SRIOV) {
8134 			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8135 				i40e_write_rx_ctl(hw,
8136 						  I40E_VFQF_HKEY1(i, vf_id),
8137 						  seed_dw[i]);
8138 		} else {
8139 			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8140 		}
8141 	}
8142 
8143 	if (lut) {
8144 		u32 *lut_dw = (u32 *)lut;
8145 
8146 		if (vsi->type == I40E_VSI_MAIN) {
8147 			if (lut_size != I40E_HLUT_ARRAY_SIZE)
8148 				return -EINVAL;
8149 			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8150 				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8151 		} else if (vsi->type == I40E_VSI_SRIOV) {
8152 			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8153 				return -EINVAL;
8154 			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8155 				i40e_write_rx_ctl(hw,
8156 						  I40E_VFQF_HLUT1(i, vf_id),
8157 						  lut_dw[i]);
8158 		} else {
8159 			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8160 		}
8161 	}
8162 	i40e_flush(hw);
8163 
8164 	return 0;
8165 }
8166 
8167 /**
8168  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8169  * @vsi: Pointer to VSI structure
8170  * @seed: Buffer to store the keys
8171  * @lut: Buffer to store the lookup table entries
8172  * @lut_size: Size of buffer to store the lookup table entries
8173  *
8174  * Returns 0 on success, negative on failure
8175  */
8176 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8177 			    u8 *lut, u16 lut_size)
8178 {
8179 	struct i40e_pf *pf = vsi->back;
8180 	struct i40e_hw *hw = &pf->hw;
8181 	u16 i;
8182 
8183 	if (seed) {
8184 		u32 *seed_dw = (u32 *)seed;
8185 
8186 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8187 			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8188 	}
8189 	if (lut) {
8190 		u32 *lut_dw = (u32 *)lut;
8191 
8192 		if (lut_size != I40E_HLUT_ARRAY_SIZE)
8193 			return -EINVAL;
8194 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8195 			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8196 	}
8197 
8198 	return 0;
8199 }
8200 
8201 /**
8202  * i40e_config_rss - Configure RSS keys and lut
8203  * @vsi: Pointer to VSI structure
8204  * @seed: RSS hash seed
8205  * @lut: Lookup table
8206  * @lut_size: Lookup table size
8207  *
8208  * Returns 0 on success, negative on failure
8209  */
8210 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8211 {
8212 	struct i40e_pf *pf = vsi->back;
8213 
8214 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8215 		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8216 	else
8217 		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8218 }
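/* Typical usage (cf. i40e_pf_config_rss() below): build a seed and a lut
 * sized to vsi->rss_table_size, then hand both to i40e_config_rss():
 *
 *	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 *	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
 *	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
 */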
8219 
8220 /**
8221  * i40e_get_rss - Get RSS keys and lut
8222  * @vsi: Pointer to VSI structure
8223  * @seed: Buffer to store the keys
8224  * @lut: Buffer to store the lookup table entries
8225  * @lut_size: Size of buffer to store the lookup table entries
8226  *
8227  * Returns 0 on success, negative on failure
8228  */
8229 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8230 {
8231 	struct i40e_pf *pf = vsi->back;
8232 
8233 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8234 		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8235 	else
8236 		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8237 }
8238 
8239 /**
8240  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8241  * @pf: Pointer to board private structure
8242  * @lut: Lookup table
8243  * @rss_table_size: Lookup table size
8244  * @rss_size: Number of queues to spread the hash across
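 *
 * Entries are assigned round-robin: e.g. rss_table_size = 512 with
 * rss_size = 4 produces 0, 1, 2, 3, 0, 1, 2, 3, ...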
8245  */
8246 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8247 			      u16 rss_table_size, u16 rss_size)
8248 {
8249 	u16 i;
8250 
8251 	for (i = 0; i < rss_table_size; i++)
8252 		lut[i] = i % rss_size;
8253 }
8254 
8255 /**
8256  * i40e_pf_config_rss - Prepare for RSS if used
8257  * @pf: board private structure
8258  **/
8259 static int i40e_pf_config_rss(struct i40e_pf *pf)
8260 {
8261 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8262 	u8 seed[I40E_HKEY_ARRAY_SIZE];
8263 	u8 *lut;
8264 	struct i40e_hw *hw = &pf->hw;
8265 	u32 reg_val;
8266 	u64 hena;
8267 	int ret;
8268 
8269 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8270 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8271 		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8272 	hena |= i40e_pf_get_default_rss_hena(pf);
8273 
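	/* hena is a 64-bit bitmap of enabled hash packet types; it spans two
	 * 32-bit registers, with HENA(0) taking the low dword and HENA(1)
	 * the high dword
	 */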
8274 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8275 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8276 
8277 	/* Determine the RSS table size based on the hardware capabilities */
8278 	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8279 	reg_val = (pf->rss_table_size == 512) ?
8280 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8281 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8282 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8283 
8284 	/* Determine the RSS size of the VSI */
8285 	if (!vsi->rss_size)
8286 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
8287 				      vsi->num_queue_pairs);
8288 
8289 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8290 	if (!lut)
8291 		return -ENOMEM;
8292 
8293 	/* Use user configured lut if there is one, otherwise use default */
8294 	if (vsi->rss_lut_user)
8295 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8296 	else
8297 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8298 
8299 	/* Use user configured hash key if there is one, otherwise
8300 	 * use default.
8301 	 */
8302 	if (vsi->rss_hkey_user)
8303 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8304 	else
8305 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8306 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8307 	kfree(lut);
8308 
8309 	return ret;
8310 }
8311 
8312 /**
8313  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8314  * @pf: board private structure
8315  * @queue_count: the requested queue count for rss.
8316  *
8317  * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
8318  * count, which may differ from the requested queue count.
8319  **/
8320 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8321 {
8322 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8323 	int new_rss_size;
8324 
8325 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8326 		return 0;
8327 
8328 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8329 
8330 	if (queue_count != vsi->num_queue_pairs) {
8331 		vsi->req_queue_pairs = queue_count;
8332 		i40e_prep_for_reset(pf);
8333 
8334 		pf->alloc_rss_size = new_rss_size;
8335 
8336 		i40e_reset_and_rebuild(pf, true);
8337 
8338 		/* Discard the user configured hash keys and lut, if less
8339 		 * queues are enabled.
8340 		 */
8341 		if (queue_count < vsi->rss_size) {
8342 			i40e_clear_rss_config_user(vsi);
8343 			dev_dbg(&pf->pdev->dev,
8344 				"discard user configured hash keys and lut\n");
8345 		}
8346 
8347 		/* Reset vsi->rss_size, as number of enabled queues changed */
8348 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
8349 				      vsi->num_queue_pairs);
8350 
8351 		i40e_pf_config_rss(pf);
8352 	}
8353 	dev_info(&pf->pdev->dev, "RSS count/HW max RSS count:  %d/%d\n",
8354 		 pf->alloc_rss_size, pf->rss_size_max);
8355 	return pf->alloc_rss_size;
8356 }
8357 
8358 /**
8359  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8360  * @pf: board private structure
8361  **/
8362 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8363 {
8364 	i40e_status status;
8365 	bool min_valid, max_valid;
8366 	u32 max_bw, min_bw;
8367 
8368 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8369 					   &min_valid, &max_valid);
8370 
8371 	if (!status) {
8372 		if (min_valid)
8373 			pf->npar_min_bw = min_bw;
8374 		if (max_valid)
8375 			pf->npar_max_bw = max_bw;
8376 	}
8377 
8378 	return status;
8379 }
8380 
8381 /**
8382  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8383  * @pf: board private structure
8384  **/
8385 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8386 {
8387 	struct i40e_aqc_configure_partition_bw_data bw_data;
8388 	i40e_status status;
8389 
8390 	/* Set the valid bit for this PF */
8391 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8392 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8393 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8394 
8395 	/* Set the new bandwidths */
8396 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8397 
8398 	return status;
8399 }
8400 
8401 /**
8402  * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
8403  * @pf: board private structure
8404  **/
8405 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8406 {
8407 	/* Commit temporary BW setting to permanent NVM image */
8408 	enum i40e_admin_queue_err last_aq_status;
8409 	i40e_status ret;
8410 	u16 nvm_word;
8411 
8412 	if (pf->hw.partition_id != 1) {
8413 		dev_info(&pf->pdev->dev,
8414 			 "Commit BW only works on partition 1! This is partition %d",
8415 			 pf->hw.partition_id);
8416 		ret = I40E_NOT_SUPPORTED;
8417 		goto bw_commit_out;
8418 	}
8419 
8420 	/* Acquire NVM for read access */
8421 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8422 	last_aq_status = pf->hw.aq.asq_last_status;
8423 	if (ret) {
8424 		dev_info(&pf->pdev->dev,
8425 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8426 			 i40e_stat_str(&pf->hw, ret),
8427 			 i40e_aq_str(&pf->hw, last_aq_status));
8428 		goto bw_commit_out;
8429 	}
8430 
8431 	/* Read word 0x10 of NVM - SW compatibility word 1 */
8432 	ret = i40e_aq_read_nvm(&pf->hw,
8433 			       I40E_SR_NVM_CONTROL_WORD,
8434 			       0x10, sizeof(nvm_word), &nvm_word,
8435 			       false, NULL);
8436 	/* Save off last admin queue command status before releasing
8437 	 * the NVM
8438 	 */
8439 	last_aq_status = pf->hw.aq.asq_last_status;
8440 	i40e_release_nvm(&pf->hw);
8441 	if (ret) {
8442 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8443 			 i40e_stat_str(&pf->hw, ret),
8444 			 i40e_aq_str(&pf->hw, last_aq_status));
8445 		goto bw_commit_out;
8446 	}
8447 
8448 	/* Wait a bit for NVM release to complete */
8449 	msleep(50);
8450 
8451 	/* Acquire NVM for write access */
8452 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8453 	last_aq_status = pf->hw.aq.asq_last_status;
8454 	if (ret) {
8455 		dev_info(&pf->pdev->dev,
8456 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8457 			 i40e_stat_str(&pf->hw, ret),
8458 			 i40e_aq_str(&pf->hw, last_aq_status));
8459 		goto bw_commit_out;
8460 	}
8461 	/* Write it back out unchanged to initiate update NVM,
8462 	 * which will force a write of the shadow (alt) RAM to
8463 	 * the NVM - thus storing the bandwidth values permanently.
8464 	 */
8465 	ret = i40e_aq_update_nvm(&pf->hw,
8466 				 I40E_SR_NVM_CONTROL_WORD,
8467 				 0x10, sizeof(nvm_word),
8468 				 &nvm_word, true, NULL);
8469 	/* Save off last admin queue command status before releasing
8470 	 * the NVM
8471 	 */
8472 	last_aq_status = pf->hw.aq.asq_last_status;
8473 	i40e_release_nvm(&pf->hw);
8474 	if (ret)
8475 		dev_info(&pf->pdev->dev,
8476 			 "BW settings NOT SAVED, err %s aq_err %s\n",
8477 			 i40e_stat_str(&pf->hw, ret),
8478 			 i40e_aq_str(&pf->hw, last_aq_status));
8479 bw_commit_out:
8480 
8481 	return ret;
8482 }
8483 
8484 /**
8485  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8486  * @pf: board private structure to initialize
8487  *
8488  * i40e_sw_init initializes the Adapter private data structure.
8489  * Fields are initialized based on PCI device information and
8490  * OS network device settings (MTU size).
8491  **/
8492 static int i40e_sw_init(struct i40e_pf *pf)
8493 {
8494 	int err = 0;
8495 	int size;
8496 
8497 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
8498 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
8499 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
8500 		if (I40E_DEBUG_USER & debug)
8501 			pf->hw.debug_mask = debug;
8502 		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
8503 						I40E_DEFAULT_MSG_ENABLE);
8504 	}
8505 
8506 	/* Set default capability flags */
8507 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8508 		    I40E_FLAG_MSI_ENABLED     |
8509 		    I40E_FLAG_MSIX_ENABLED;
8510 
8511 	/* Set default ITR */
8512 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8513 	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8514 
8515 	/* Depending on PF configurations, it is possible that the RSS
8516 	 * maximum might end up larger than the available queues
8517 	 */
8518 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
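	/* an n-bit LUT entry can address 2^n queues, e.g. a 7-bit entry
	 * width bounds RSS at 128 queues before the num_tx_qp clamp below
	 */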
8519 	pf->alloc_rss_size = 1;
8520 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8521 	pf->rss_size_max = min_t(int, pf->rss_size_max,
8522 				 pf->hw.func_caps.num_tx_qp);
8523 	if (pf->hw.func_caps.rss) {
8524 		pf->flags |= I40E_FLAG_RSS_ENABLED;
8525 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8526 					   num_online_cpus());
8527 	}
8528 
8529 	/* MFP mode enabled */
8530 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8531 		pf->flags |= I40E_FLAG_MFP_ENABLED;
8532 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8533 		if (i40e_get_npar_bw_setting(pf))
8534 			dev_warn(&pf->pdev->dev,
8535 				 "Could not get NPAR bw settings\n");
8536 		else
8537 			dev_info(&pf->pdev->dev,
8538 				 "Min BW = %8.8x, Max BW = %8.8x\n",
8539 				 pf->npar_min_bw, pf->npar_max_bw);
8540 	}
8541 
8542 	/* FW/NVM is not yet fixed in this regard */
8543 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8544 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8545 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8546 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8547 		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8548 		    pf->hw.num_partitions > 1)
8549 			dev_info(&pf->pdev->dev,
8550 				 "Flow Director Sideband mode Disabled in MFP mode\n");
8551 		else
8552 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8553 		pf->fdir_pf_filter_count =
8554 				 pf->hw.func_caps.fd_filters_guaranteed;
8555 		pf->hw.fdir_shared_filter_count =
8556 				 pf->hw.func_caps.fd_filters_best_effort;
8557 	}
8558 
8559 	if (i40e_is_mac_710(&pf->hw) &&
8560 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8561 	    (pf->hw.aq.fw_maj_ver < 4))) {
8562 		pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8563 		/* No DCB support  for FW < v4.33 */
8564 		pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8565 	}
8566 
8567 	/* Disable FW LLDP if FW < v4.3 */
8568 	if (i40e_is_mac_710(&pf->hw) &&
8569 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8570 	    (pf->hw.aq.fw_maj_ver < 4)))
8571 		pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8572 
8573 	/* Use the FW Set LLDP MIB API if FW > v4.40 */
8574 	if (i40e_is_mac_710(&pf->hw) &&
8575 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8576 	    (pf->hw.aq.fw_maj_ver >= 5)))
8577 		pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
8578 
8579 	if (pf->hw.func_caps.vmdq) {
8580 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8581 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8582 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8583 	}
8584 
8585 	if (pf->hw.func_caps.iwarp) {
8586 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
8587 		/* IWARP needs one extra vector for CQP just like MISC. */
8588 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8589 	}
8590 
8591 #ifdef I40E_FCOE
8592 	i40e_init_pf_fcoe(pf);
8593 
8594 #endif /* I40E_FCOE */
8595 #ifdef CONFIG_PCI_IOV
8596 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8597 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8598 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8599 		pf->num_req_vfs = min_t(int,
8600 					pf->hw.func_caps.num_vfs,
8601 					I40E_MAX_VF_COUNT);
8602 	}
8603 #endif /* CONFIG_PCI_IOV */
8604 	if (pf->hw.mac.type == I40E_MAC_X722) {
8605 		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8606 			     I40E_FLAG_128_QP_RSS_CAPABLE |
8607 			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8608 			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8609 			     I40E_FLAG_WB_ON_ITR_CAPABLE |
8610 			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8611 			     I40E_FLAG_NO_PCI_LINK_CHECK |
8612 			     I40E_FLAG_100M_SGMII_CAPABLE |
8613 			     I40E_FLAG_USE_SET_LLDP_MIB |
8614 			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8615 	} else if ((pf->hw.aq.api_maj_ver > 1) ||
8616 		   ((pf->hw.aq.api_maj_ver == 1) &&
8617 		    (pf->hw.aq.api_min_ver > 4))) {
8618 		/* Supported in FW API version higher than 1.4 */
8619 		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8620 		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8621 	} else {
8622 		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8623 	}
8624 
8625 	pf->eeprom_version = 0xDEAD;
8626 	pf->lan_veb = I40E_NO_VEB;
8627 	pf->lan_vsi = I40E_NO_VSI;
8628 
8629 	/* By default FW has this off for performance reasons */
8630 	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8631 
8632 	/* set up queue assignment tracking */
8633 	size = sizeof(struct i40e_lump_tracking)
8634 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8635 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
8636 	if (!pf->qp_pile) {
8637 		err = -ENOMEM;
8638 		goto sw_init_done;
8639 	}
8640 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8641 	pf->qp_pile->search_hint = 0;
8642 
8643 	pf->tx_timeout_recovery_level = 1;
8644 
8645 	mutex_init(&pf->switch_mutex);
8646 
8647 	/* If NPAR is enabled nudge the Tx scheduler */
8648 	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8649 		i40e_set_npar_bw_setting(pf);
8650 
8651 sw_init_done:
8652 	return err;
8653 }
8654 
8655 /**
8656  * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure
8658  * @features: the feature set that the stack is suggesting
8659  *
 * Returns true if a PF reset is needed to apply the change
8661  **/
8662 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8663 {
8664 	bool need_reset = false;
8665 
8666 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
8667 	 * the state changed, we need to reset.
8668 	 */
8669 	if (features & NETIF_F_NTUPLE) {
8670 		/* Enable filters and mark for reset */
8671 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8672 			need_reset = true;
8673 		/* enable FD_SB only if there is MSI-X vector */
8674 		if (pf->num_fdsb_msix > 0)
8675 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8676 	} else {
8677 		/* turn off filters, mark for reset and clear SW filter list */
8678 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8679 			need_reset = true;
8680 			i40e_fdir_filter_exit(pf);
8681 		}
8682 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8683 		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8684 		/* reset fd counters */
8685 		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8686 		pf->fdir_pf_active_filters = 0;
8687 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8688 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
8689 			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8690 		/* if ATR was auto disabled it can be re-enabled. */
8691 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8692 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8693 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8694 	}
8695 	return need_reset;
8696 }
8697 
8698 /**
8699  * i40e_set_features - set the netdev feature flags
8700  * @netdev: ptr to the netdev being adjusted
8701  * @features: the feature set that the stack is suggesting
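 *
 * Called when the stack toggles netdev features, e.g. via
 * "ethtool -K <ifname> ntuple on" or "ethtool -K <ifname> rxvlan off".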
8702  **/
8703 static int i40e_set_features(struct net_device *netdev,
8704 			     netdev_features_t features)
8705 {
8706 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8707 	struct i40e_vsi *vsi = np->vsi;
8708 	struct i40e_pf *pf = vsi->back;
8709 	bool need_reset;
8710 
8711 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
8712 		i40e_vlan_stripping_enable(vsi);
8713 	else
8714 		i40e_vlan_stripping_disable(vsi);
8715 
8716 	need_reset = i40e_set_ntuple(pf, features);
8717 
8718 	if (need_reset)
8719 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8720 
8721 	return 0;
8722 }
8723 
8724 /**
 * i40e_get_udp_port_idx - Look up a UDP port that may be offloaded for Rx
8726  * @pf: board private structure
8727  * @port: The UDP port to look up
8728  *
8729  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8730  **/
8731 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8732 {
8733 	u8 i;
8734 
8735 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8736 		if (pf->udp_ports[i].index == port)
8737 			return i;
8738 	}
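	/* i is now I40E_MAX_PF_UDP_OFFLOAD_PORTS if the port was not found */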
8739 
8740 	return i;
8741 }
8742 
8743 /**
8744  * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8745  * @netdev: This physical port's netdev
8746  * @ti: Tunnel endpoint information
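 *
 * Called when the stack opens a UDP tunnel socket, e.g. for a device
 * created with "ip link add vxlan0 type vxlan id 42 dstport 4789".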
8747  **/
8748 static void i40e_udp_tunnel_add(struct net_device *netdev,
8749 				struct udp_tunnel_info *ti)
8750 {
8751 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8752 	struct i40e_vsi *vsi = np->vsi;
8753 	struct i40e_pf *pf = vsi->back;
8754 	__be16 port = ti->port;
8755 	u8 next_idx;
8756 	u8 idx;
8757 
8758 	idx = i40e_get_udp_port_idx(pf, port);
8759 
8760 	/* Check if port already exists */
8761 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8762 		netdev_info(netdev, "port %d already offloaded\n",
8763 			    ntohs(port));
8764 		return;
8765 	}
8766 
8767 	/* Now check if there is space to add the new port */
8768 	next_idx = i40e_get_udp_port_idx(pf, 0);
8769 
8770 	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8771 		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8772 			    ntohs(port));
8773 		return;
8774 	}
8775 
8776 	switch (ti->type) {
8777 	case UDP_TUNNEL_TYPE_VXLAN:
8778 		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8779 		break;
8780 	case UDP_TUNNEL_TYPE_GENEVE:
8781 		if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8782 			return;
8783 		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8784 		break;
8785 	default:
8786 		return;
8787 	}
8788 
8789 	/* New port: add it and mark its index in the bitmap */
8790 	pf->udp_ports[next_idx].index = port;
8791 	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8792 	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8793 }
8794 
8795 /**
8796  * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8797  * @netdev: This physical port's netdev
8798  * @ti: Tunnel endpoint information
8799  **/
8800 static void i40e_udp_tunnel_del(struct net_device *netdev,
8801 				struct udp_tunnel_info *ti)
8802 {
8803 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8804 	struct i40e_vsi *vsi = np->vsi;
8805 	struct i40e_pf *pf = vsi->back;
8806 	__be16 port = ti->port;
8807 	u8 idx;
8808 
8809 	idx = i40e_get_udp_port_idx(pf, port);
8810 
	/* Check if the port exists */
8812 	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8813 		goto not_found;
8814 
8815 	switch (ti->type) {
8816 	case UDP_TUNNEL_TYPE_VXLAN:
8817 		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8818 			goto not_found;
8819 		break;
8820 	case UDP_TUNNEL_TYPE_GENEVE:
8821 		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8822 			goto not_found;
8823 		break;
8824 	default:
8825 		goto not_found;
8826 	}
8827 
8828 	/* if port exists, set it to 0 (mark for deletion)
8829 	 * and make it pending
8830 	 */
8831 	pf->udp_ports[idx].index = 0;
8832 	pf->pending_udp_bitmap |= BIT_ULL(idx);
8833 	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8834 
8835 	return;
8836 not_found:
8837 	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8838 		    ntohs(port));
8839 }
8840 
8841 static int i40e_get_phys_port_id(struct net_device *netdev,
8842 				 struct netdev_phys_item_id *ppid)
8843 {
8844 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8845 	struct i40e_pf *pf = np->vsi->back;
8846 	struct i40e_hw *hw = &pf->hw;
8847 
8848 	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8849 		return -EOPNOTSUPP;
8850 
	/* the port MAC address doubles as the unique physical port id */
	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8853 
8854 	return 0;
8855 }
8856 
8857 /**
8858  * i40e_ndo_fdb_add - add an entry to the hardware database
8859  * @ndm: the input from the stack
8860  * @tb: pointer to array of nladdr (unused)
8861  * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
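 *
 * Invoked from userspace via e.g. "bridge fdb add <mac> dev <ifname>".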
8864  */
8865 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8866 			    struct net_device *dev,
8867 			    const unsigned char *addr, u16 vid,
8868 			    u16 flags)
8869 {
8870 	struct i40e_netdev_priv *np = netdev_priv(dev);
8871 	struct i40e_pf *pf = np->vsi->back;
8872 	int err = 0;
8873 
8874 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8875 		return -EOPNOTSUPP;
8876 
8877 	if (vid) {
8878 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8879 		return -EINVAL;
8880 	}
8881 
	/* Hardware does not support aging addresses, so if an
	 * ndm_state is given, only allow permanent addresses
	 */
8885 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8886 		netdev_info(dev, "FDB only supports static addresses\n");
8887 		return -EINVAL;
8888 	}
8889 
8890 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8891 		err = dev_uc_add_excl(dev, addr);
8892 	else if (is_multicast_ether_addr(addr))
8893 		err = dev_mc_add_excl(dev, addr);
8894 	else
8895 		err = -EINVAL;
8896 
8897 	/* Only return duplicate errors if NLM_F_EXCL is set */
8898 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
8899 		err = 0;
8900 
8901 	return err;
8902 }
8903 
8904 /**
8905  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8906  * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
8908  *
8909  * Inserts a new hardware bridge if not already created and
8910  * enables the bridging mode requested (VEB or VEPA). If the
8911  * hardware bridge has already been inserted and the request
8912  * is to change the mode then that requires a PF reset to
8913  * allow rebuild of the components with required hardware
8914  * bridge mode enabled.
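 *
 * The mode is typically requested from userspace with iproute2, e.g.
 * "bridge link set dev <ifname> hwmode veb".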
8915  **/
8916 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8917 				   struct nlmsghdr *nlh,
8918 				   u16 flags)
8919 {
8920 	struct i40e_netdev_priv *np = netdev_priv(dev);
8921 	struct i40e_vsi *vsi = np->vsi;
8922 	struct i40e_pf *pf = vsi->back;
8923 	struct i40e_veb *veb = NULL;
8924 	struct nlattr *attr, *br_spec;
8925 	int i, rem;
8926 
8927 	/* Only for PF VSI for now */
8928 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8929 		return -EOPNOTSUPP;
8930 
8931 	/* Find the HW bridge for PF VSI */
8932 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8933 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8934 			veb = pf->veb[i];
8935 	}
8936 
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EOPNOTSUPP;

8939 	nla_for_each_nested(attr, br_spec, rem) {
8940 		__u16 mode;
8941 
8942 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
8943 			continue;
8944 
8945 		mode = nla_get_u16(attr);
8946 		if ((mode != BRIDGE_MODE_VEPA) &&
8947 		    (mode != BRIDGE_MODE_VEB))
8948 			return -EINVAL;
8949 
8950 		/* Insert a new HW bridge */
8951 		if (!veb) {
8952 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8953 					     vsi->tc_config.enabled_tc);
8954 			if (veb) {
8955 				veb->bridge_mode = mode;
8956 				i40e_config_bridge_mode(veb);
8957 			} else {
8958 				/* No Bridge HW offload available */
8959 				return -ENOENT;
8960 			}
8961 			break;
8962 		} else if (mode != veb->bridge_mode) {
8963 			/* Existing HW bridge but different mode needs reset */
8964 			veb->bridge_mode = mode;
8965 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8966 			if (mode == BRIDGE_MODE_VEB)
8967 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8968 			else
8969 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8970 			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8971 			break;
8972 		}
8973 	}
8974 
8975 	return 0;
8976 }
8977 
8978 /**
8979  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8980  * @skb: skb buff
8981  * @pid: process id
8982  * @seq: RTNL message seq #
8983  * @dev: the netdev being configured
8984  * @filter_mask: unused
8985  * @nlflags: netlink flags passed in
8986  *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
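 *
 * Queried from userspace via e.g. "bridge link show dev <ifname>".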
8989  **/
8990 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8991 				   struct net_device *dev,
8992 				   u32 __always_unused filter_mask,
8993 				   int nlflags)
8994 {
8995 	struct i40e_netdev_priv *np = netdev_priv(dev);
8996 	struct i40e_vsi *vsi = np->vsi;
8997 	struct i40e_pf *pf = vsi->back;
8998 	struct i40e_veb *veb = NULL;
8999 	int i;
9000 
9001 	/* Only for PF VSI for now */
9002 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9003 		return -EOPNOTSUPP;
9004 
9005 	/* Find the HW bridge for the PF VSI */
9006 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9007 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9008 			veb = pf->veb[i];
9009 	}
9010 
9011 	if (!veb)
9012 		return 0;
9013 
9014 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9015 				       nlflags, 0, 0, filter_mask, NULL);
9016 }
9017 
9018 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
9019  * inner mac plus all inner ethertypes.
9020  */
9021 #define I40E_MAX_TUNNEL_HDR_LEN 128
9022 /**
9023  * i40e_features_check - Validate encapsulated packet conforms to limits
9024  * @skb: skb buff
9025  * @dev: This physical port's netdev
9026  * @features: Offload features that the stack believes apply
9027  **/
9028 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9029 					     struct net_device *dev,
9030 					     netdev_features_t features)
9031 {
9032 	if (skb->encapsulation &&
9033 	    ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
9034 	     I40E_MAX_TUNNEL_HDR_LEN))
9035 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9036 
9037 	return features;
9038 }
9039 
9040 static const struct net_device_ops i40e_netdev_ops = {
9041 	.ndo_open		= i40e_open,
9042 	.ndo_stop		= i40e_close,
9043 	.ndo_start_xmit		= i40e_lan_xmit_frame,
9044 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
9045 	.ndo_set_rx_mode	= i40e_set_rx_mode,
9046 	.ndo_validate_addr	= eth_validate_addr,
9047 	.ndo_set_mac_address	= i40e_set_mac,
9048 	.ndo_change_mtu		= i40e_change_mtu,
9049 	.ndo_do_ioctl		= i40e_ioctl,
9050 	.ndo_tx_timeout		= i40e_tx_timeout,
9051 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
9052 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
9053 #ifdef CONFIG_NET_POLL_CONTROLLER
9054 	.ndo_poll_controller	= i40e_netpoll,
9055 #endif
9056 	.ndo_setup_tc		= __i40e_setup_tc,
9057 #ifdef I40E_FCOE
9058 	.ndo_fcoe_enable	= i40e_fcoe_enable,
9059 	.ndo_fcoe_disable	= i40e_fcoe_disable,
9060 #endif
9061 	.ndo_set_features	= i40e_set_features,
9062 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
9063 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
9064 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
9065 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
9066 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
9067 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
9068 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
9069 	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
9070 	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
9071 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
9072 	.ndo_fdb_add		= i40e_ndo_fdb_add,
9073 	.ndo_features_check	= i40e_features_check,
9074 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
9075 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
9076 };
9077 
9078 /**
9079  * i40e_config_netdev - Setup the netdev flags
9080  * @vsi: the VSI being configured
9081  *
9082  * Returns 0 on success, negative value on failure
9083  **/
9084 static int i40e_config_netdev(struct i40e_vsi *vsi)
9085 {
9086 	struct i40e_pf *pf = vsi->back;
9087 	struct i40e_hw *hw = &pf->hw;
9088 	struct i40e_netdev_priv *np;
9089 	struct net_device *netdev;
9090 	u8 mac_addr[ETH_ALEN];
9091 	int etherdev_size;
9092 
9093 	etherdev_size = sizeof(struct i40e_netdev_priv);
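	/* allocate a netdev with one queue set per allocated queue pair */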
9094 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9095 	if (!netdev)
9096 		return -ENOMEM;
9097 
9098 	vsi->netdev = netdev;
9099 	np = netdev_priv(netdev);
9100 	np->vsi = vsi;
9101 
9102 	netdev->hw_enc_features |= NETIF_F_SG			|
9103 				   NETIF_F_IP_CSUM		|
9104 				   NETIF_F_IPV6_CSUM		|
9105 				   NETIF_F_HIGHDMA		|
9106 				   NETIF_F_SOFT_FEATURES	|
9107 				   NETIF_F_TSO			|
9108 				   NETIF_F_TSO_ECN		|
9109 				   NETIF_F_TSO6			|
9110 				   NETIF_F_GSO_GRE		|
9111 				   NETIF_F_GSO_GRE_CSUM		|
9112 				   NETIF_F_GSO_IPXIP4		|
9113 				   NETIF_F_GSO_IPXIP6		|
9114 				   NETIF_F_GSO_UDP_TUNNEL	|
9115 				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
9116 				   NETIF_F_GSO_PARTIAL		|
9117 				   NETIF_F_SCTP_CRC		|
9118 				   NETIF_F_RXHASH		|
9119 				   NETIF_F_RXCSUM		|
9120 				   0;
9121 
	/* without HW outer UDP checksum support, offload via GSO partial only */
	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9124 
9125 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9126 
9127 	/* record features VLANs can make use of */
9128 	netdev->vlan_features |= netdev->hw_enc_features |
9129 				 NETIF_F_TSO_MANGLEID;
9130 
9131 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9132 		netdev->hw_features |= NETIF_F_NTUPLE;
9133 
9134 	netdev->hw_features |= netdev->hw_enc_features	|
9135 			       NETIF_F_HW_VLAN_CTAG_TX	|
9136 			       NETIF_F_HW_VLAN_CTAG_RX;
9137 
9138 	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9139 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9140 
9141 	if (vsi->type == I40E_VSI_MAIN) {
9142 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9143 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
9144 		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet,
		 * which must be replaced by a normal filter.
9148 		 */
9149 		i40e_rm_default_mac_filter(vsi, mac_addr);
9150 		spin_lock_bh(&vsi->mac_filter_list_lock);
9151 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
9152 		spin_unlock_bh(&vsi->mac_filter_list_lock);
9153 	} else {
9154 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
9155 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9156 			 pf->vsi[pf->lan_vsi]->netdev->name);
9157 		random_ether_addr(mac_addr);
9158 
9159 		spin_lock_bh(&vsi->mac_filter_list_lock);
9160 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
9161 		spin_unlock_bh(&vsi->mac_filter_list_lock);
9162 	}
9163 
9164 	ether_addr_copy(netdev->dev_addr, mac_addr);
9165 	ether_addr_copy(netdev->perm_addr, mac_addr);
9166 
9167 	netdev->priv_flags |= IFF_UNICAST_FLT;
9168 	netdev->priv_flags |= IFF_SUPP_NOFCS;
9169 	/* Setup netdev TC information */
9170 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9171 
9172 	netdev->netdev_ops = &i40e_netdev_ops;
9173 	netdev->watchdog_timeo = 5 * HZ;
9174 	i40e_set_ethtool_ops(netdev);
9175 #ifdef I40E_FCOE
9176 	i40e_fcoe_config_netdev(netdev, vsi);
9177 #endif
9178 
9179 	return 0;
9180 }
9181 
9182 /**
9183  * i40e_vsi_delete - Delete a VSI from the switch
9184  * @vsi: the VSI being removed
9187  **/
9188 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9189 {
	/* removing the default VSI is not allowed */
9191 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9192 		return;
9193 
9194 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9195 }
9196 
9197 /**
9198  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9199  * @vsi: the VSI being queried
9200  *
 * Returns 1 if the HW bridge mode is VEB, 0 in case of VEPA mode,
 * and -ENOENT if no VEB is associated with the uplink
9202  **/
9203 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9204 {
9205 	struct i40e_veb *veb;
9206 	struct i40e_pf *pf = vsi->back;
9207 
9208 	/* Uplink is not a bridge so default to VEB */
9209 	if (vsi->veb_idx == I40E_NO_VEB)
9210 		return 1;
9211 
9212 	veb = pf->veb[vsi->veb_idx];
9213 	if (!veb) {
9214 		dev_info(&pf->pdev->dev,
9215 			 "There is no veb associated with the bridge\n");
9216 		return -ENOENT;
9217 	}
9218 
	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
9229 }
9230 
9231 /**
9232  * i40e_add_vsi - Add a VSI to the switch
9233  * @vsi: the VSI being configured
9234  *
9235  * This initializes a VSI context depending on the VSI type to be added and
9236  * passes it down to the add_vsi aq command.
9237  **/
9238 static int i40e_add_vsi(struct i40e_vsi *vsi)
9239 {
9240 	int ret = -ENODEV;
9241 	i40e_status aq_ret = 0;
9242 	struct i40e_pf *pf = vsi->back;
9243 	struct i40e_hw *hw = &pf->hw;
9244 	struct i40e_vsi_context ctxt;
9245 	struct i40e_mac_filter *f, *ftmp;
9246 
9247 	u8 enabled_tc = 0x1; /* TC0 enabled */
9248 	int f_count = 0;
9249 
9250 	memset(&ctxt, 0, sizeof(ctxt));
9251 	switch (vsi->type) {
9252 	case I40E_VSI_MAIN:
9253 		/* The PF's main VSI is already setup as part of the
9254 		 * device initialization, so we'll not bother with
9255 		 * the add_vsi call, but we will retrieve the current
9256 		 * VSI context.
9257 		 */
9258 		ctxt.seid = pf->main_vsi_seid;
9259 		ctxt.pf_num = pf->hw.pf_id;
9260 		ctxt.vf_num = 0;
9261 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9262 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9263 		if (ret) {
9264 			dev_info(&pf->pdev->dev,
9265 				 "couldn't get PF vsi config, err %s aq_err %s\n",
9266 				 i40e_stat_str(&pf->hw, ret),
9267 				 i40e_aq_str(&pf->hw,
9268 					     pf->hw.aq.asq_last_status));
9269 			return -ENOENT;
9270 		}
9271 		vsi->info = ctxt.info;
9272 		vsi->info.valid_sections = 0;
9273 
9274 		vsi->seid = ctxt.seid;
9275 		vsi->id = ctxt.vsi_number;
9276 
9277 		enabled_tc = i40e_pf_get_tc_map(pf);
9278 
		/* In MFP mode, set up the queue map and update the VSI */
9280 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9281 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9282 			memset(&ctxt, 0, sizeof(ctxt));
9283 			ctxt.seid = pf->main_vsi_seid;
9284 			ctxt.pf_num = pf->hw.pf_id;
9285 			ctxt.vf_num = 0;
9286 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9287 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9288 			if (ret) {
9289 				dev_info(&pf->pdev->dev,
9290 					 "update vsi failed, err %s aq_err %s\n",
9291 					 i40e_stat_str(&pf->hw, ret),
9292 					 i40e_aq_str(&pf->hw,
9293 						    pf->hw.aq.asq_last_status));
9294 				ret = -ENOENT;
9295 				goto err;
9296 			}
9297 			/* update the local VSI info queue map */
9298 			i40e_vsi_update_queue_map(vsi, &ctxt);
9299 			vsi->info.valid_sections = 0;
9300 		} else {
			/* The Default/Main VSI is only enabled for TC0;
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * In the MFP case the iSCSI PF would use this
			 * flow to enable the LAN+iSCSI TCs.
9306 			 */
9307 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
9308 			if (ret) {
9309 				dev_info(&pf->pdev->dev,
9310 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9311 					 enabled_tc,
9312 					 i40e_stat_str(&pf->hw, ret),
9313 					 i40e_aq_str(&pf->hw,
9314 						    pf->hw.aq.asq_last_status));
9315 				ret = -ENOENT;
9316 			}
9317 		}
9318 		break;
9319 
9320 	case I40E_VSI_FDIR:
9321 		ctxt.pf_num = hw->pf_id;
9322 		ctxt.vf_num = 0;
9323 		ctxt.uplink_seid = vsi->uplink_seid;
9324 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9325 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
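		/* in VEB mode, allow loopback so traffic switched locally
		 * on the VEB can be delivered back to this VSI
		 */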
9326 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9327 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
9328 			ctxt.info.valid_sections |=
9329 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9330 			ctxt.info.switch_id =
9331 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9332 		}
9333 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9334 		break;
9335 
9336 	case I40E_VSI_VMDQ2:
9337 		ctxt.pf_num = hw->pf_id;
9338 		ctxt.vf_num = 0;
9339 		ctxt.uplink_seid = vsi->uplink_seid;
9340 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9341 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9342 
9343 		/* This VSI is connected to VEB so the switch_id
9344 		 * should be set to zero by default.
9345 		 */
9346 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9347 			ctxt.info.valid_sections |=
9348 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9349 			ctxt.info.switch_id =
9350 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9351 		}
9352 
9353 		/* Setup the VSI tx/rx queue map for TC0 only for now */
9354 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9355 		break;
9356 
9357 	case I40E_VSI_SRIOV:
9358 		ctxt.pf_num = hw->pf_id;
9359 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9360 		ctxt.uplink_seid = vsi->uplink_seid;
9361 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9362 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9363 
9364 		/* This VSI is connected to VEB so the switch_id
9365 		 * should be set to zero by default.
9366 		 */
9367 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9368 			ctxt.info.valid_sections |=
9369 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9370 			ctxt.info.switch_id =
9371 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9372 		}
9373 
9374 		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9375 			ctxt.info.valid_sections |=
9376 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9377 			ctxt.info.queueing_opt_flags |=
9378 				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9379 				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9380 		}
9381 
9382 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9383 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9384 		if (pf->vf[vsi->vf_id].spoofchk) {
9385 			ctxt.info.valid_sections |=
9386 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9387 			ctxt.info.sec_flags |=
9388 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9389 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9390 		}
9391 		/* Setup the VSI tx/rx queue map for TC0 only for now */
9392 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9393 		break;
9394 
9395 #ifdef I40E_FCOE
9396 	case I40E_VSI_FCOE:
9397 		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9398 		if (ret) {
9399 			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9400 			return ret;
9401 		}
9402 		break;
9403 
9404 #endif /* I40E_FCOE */
9405 	case I40E_VSI_IWARP:
9406 		/* send down message to iWARP */
9407 		break;
9408 
9409 	default:
9410 		return -ENODEV;
9411 	}
9412 
9413 	if (vsi->type != I40E_VSI_MAIN) {
9414 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9415 		if (ret) {
9416 			dev_info(&vsi->back->pdev->dev,
9417 				 "add vsi failed, err %s aq_err %s\n",
9418 				 i40e_stat_str(&pf->hw, ret),
9419 				 i40e_aq_str(&pf->hw,
9420 					     pf->hw.aq.asq_last_status));
9421 			ret = -ENOENT;
9422 			goto err;
9423 		}
9424 		vsi->info = ctxt.info;
9425 		vsi->info.valid_sections = 0;
9426 		vsi->seid = ctxt.seid;
9427 		vsi->id = ctxt.vsi_number;
9428 	}
	/* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
9430 	if (vsi->type != I40E_VSI_FDIR) {
9431 		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9432 		if (aq_ret) {
9433 			ret = i40e_aq_rc_to_posix(aq_ret,
9434 						  hw->aq.asq_last_status);
9435 			dev_info(&pf->pdev->dev,
9436 				 "set brdcast promisc failed, err %s, aq_err %s\n",
9437 				 i40e_stat_str(hw, aq_ret),
9438 				 i40e_aq_str(hw, hw->aq.asq_last_status));
9439 		}
9440 	}
9441 
9442 	vsi->active_filters = 0;
9443 	clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
9444 	spin_lock_bh(&vsi->mac_filter_list_lock);
9445 	/* If macvlan filters already exist, force them to get loaded */
9446 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
9447 		f->state = I40E_FILTER_NEW;
9448 		f_count++;
9449 	}
9450 	spin_unlock_bh(&vsi->mac_filter_list_lock);
9451 
9452 	if (f_count) {
9453 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9454 		pf->flags |= I40E_FLAG_FILTER_SYNC;
9455 	}
9456 
9457 	/* Update VSI BW information */
9458 	ret = i40e_vsi_get_bw_info(vsi);
9459 	if (ret) {
9460 		dev_info(&pf->pdev->dev,
9461 			 "couldn't get vsi bw info, err %s aq_err %s\n",
9462 			 i40e_stat_str(&pf->hw, ret),
9463 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9464 		/* VSI is already added so not tearing that up */
9465 		ret = 0;
9466 	}
9467 
9468 err:
9469 	return ret;
9470 }
9471 
9472 /**
9473  * i40e_vsi_release - Delete a VSI and free its resources
9474  * @vsi: the VSI being removed
9475  *
9476  * Returns 0 on success or < 0 on error
9477  **/
9478 int i40e_vsi_release(struct i40e_vsi *vsi)
9479 {
9480 	struct i40e_mac_filter *f, *ftmp;
9481 	struct i40e_veb *veb = NULL;
9482 	struct i40e_pf *pf;
9483 	u16 uplink_seid;
9484 	int i, n;
9485 
9486 	pf = vsi->back;
9487 
9488 	/* release of a VEB-owner or last VSI is not allowed */
9489 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9490 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9491 			 vsi->seid, vsi->uplink_seid);
9492 		return -ENODEV;
9493 	}
9494 	if (vsi == pf->vsi[pf->lan_vsi] &&
9495 	    !test_bit(__I40E_DOWN, &pf->state)) {
9496 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9497 		return -ENODEV;
9498 	}
9499 
9500 	uplink_seid = vsi->uplink_seid;
9501 	if (vsi->type != I40E_VSI_SRIOV) {
9502 		if (vsi->netdev_registered) {
9503 			vsi->netdev_registered = false;
9504 			if (vsi->netdev) {
9505 				/* results in a call to i40e_close() */
9506 				unregister_netdev(vsi->netdev);
9507 			}
9508 		} else {
9509 			i40e_vsi_close(vsi);
9510 		}
9511 		i40e_vsi_disable_irq(vsi);
9512 	}
9513 
9514 	spin_lock_bh(&vsi->mac_filter_list_lock);
9515 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
9516 		i40e_del_filter(vsi, f->macaddr, f->vlan,
9517 				f->is_vf, f->is_netdev);
9518 	spin_unlock_bh(&vsi->mac_filter_list_lock);
9519 
9520 	i40e_sync_vsi_filters(vsi);
9521 
9522 	i40e_vsi_delete(vsi);
9523 	i40e_vsi_free_q_vectors(vsi);
9524 	if (vsi->netdev) {
9525 		free_netdev(vsi->netdev);
9526 		vsi->netdev = NULL;
9527 	}
9528 	i40e_vsi_clear_rings(vsi);
9529 	i40e_vsi_clear(vsi);
9530 
9531 	/* If this was the last thing on the VEB, except for the
9532 	 * controlling VSI, remove the VEB, which puts the controlling
9533 	 * VSI onto the next level down in the switch.
9534 	 *
9535 	 * Well, okay, there's one more exception here: don't remove
9536 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
9537 	 * from up the network stack.
9538 	 */
9539 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9540 		if (pf->vsi[i] &&
9541 		    pf->vsi[i]->uplink_seid == uplink_seid &&
9542 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9543 			n++;      /* count the VSIs */
9544 		}
9545 	}
9546 	for (i = 0; i < I40E_MAX_VEB; i++) {
9547 		if (!pf->veb[i])
9548 			continue;
9549 		if (pf->veb[i]->uplink_seid == uplink_seid)
9550 			n++;     /* count the VEBs */
9551 		if (pf->veb[i]->seid == uplink_seid)
9552 			veb = pf->veb[i];
9553 	}
9554 	if (n == 0 && veb && veb->uplink_seid != 0)
9555 		i40e_veb_release(veb);
9556 
9557 	return 0;
9558 }
9559 
9560 /**
9561  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9562  * @vsi: ptr to the VSI
9563  *
9564  * This should only be called after i40e_vsi_mem_alloc() which allocates the
9565  * corresponding SW VSI structure and initializes num_queue_pairs for the
9566  * newly allocated VSI.
9567  *
9568  * Returns 0 on success or negative on failure
9569  **/
9570 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9571 {
9572 	int ret = -ENOENT;
9573 	struct i40e_pf *pf = vsi->back;
9574 
9575 	if (vsi->q_vectors[0]) {
9576 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9577 			 vsi->seid);
9578 		return -EEXIST;
9579 	}
9580 
9581 	if (vsi->base_vector) {
9582 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9583 			 vsi->seid, vsi->base_vector);
9584 		return -EEXIST;
9585 	}
9586 
9587 	ret = i40e_vsi_alloc_q_vectors(vsi);
9588 	if (ret) {
9589 		dev_info(&pf->pdev->dev,
9590 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9591 			 vsi->num_q_vectors, vsi->seid, ret);
9592 		vsi->num_q_vectors = 0;
9593 		goto vector_setup_out;
9594 	}
9595 
9596 	/* In Legacy mode, we do not have to get any other vector since we
9597 	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
9599 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9600 		return ret;
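
	/* reserve a contiguous block of MSI-X vectors for this VSI */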
9601 	if (vsi->num_q_vectors)
9602 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9603 						 vsi->num_q_vectors, vsi->idx);
9604 	if (vsi->base_vector < 0) {
9605 		dev_info(&pf->pdev->dev,
9606 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9607 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9608 		i40e_vsi_free_q_vectors(vsi);
9609 		ret = -ENOENT;
9610 		goto vector_setup_out;
9611 	}
9612 
9613 vector_setup_out:
9614 	return ret;
9615 }
9616 
9617 /**
 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
9619  * @vsi: pointer to the vsi.
9620  *
9621  * This re-allocates a vsi's queue resources.
9622  *
9623  * Returns pointer to the successfully allocated and configured VSI sw struct
9624  * on success, otherwise returns NULL on failure.
9625  **/
9626 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9627 {
9628 	struct i40e_pf *pf;
9629 	u8 enabled_tc;
9630 	int ret;
9631 
9632 	if (!vsi)
9633 		return NULL;
9634 
9635 	pf = vsi->back;
9636 
	/* give back the queue tracking and old rings before resizing */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);
9639 
9640 	i40e_vsi_free_arrays(vsi, false);
9641 	i40e_set_num_rings_in_vsi(vsi);
9642 	ret = i40e_vsi_alloc_arrays(vsi, false);
9643 	if (ret)
9644 		goto err_vsi;
9645 
9646 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9647 	if (ret < 0) {
9648 		dev_info(&pf->pdev->dev,
9649 			 "failed to get tracking for %d queues for VSI %d err %d\n",
9650 			 vsi->alloc_queue_pairs, vsi->seid, ret);
9651 		goto err_vsi;
9652 	}
9653 	vsi->base_queue = ret;
9654 
9655 	/* Update the FW view of the VSI. Force a reset of TC and queue
9656 	 * layout configurations.
9657 	 */
9658 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9659 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9660 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9661 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9662 	if (vsi->type == I40E_VSI_MAIN)
9663 		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
9664 
9665 	/* assign it some queues */
9666 	ret = i40e_alloc_rings(vsi);
9667 	if (ret)
9668 		goto err_rings;
9669 
9670 	/* map all of the rings to the q_vectors */
9671 	i40e_vsi_map_rings_to_vectors(vsi);
9672 	return vsi;
9673 
9674 err_rings:
9675 	i40e_vsi_free_q_vectors(vsi);
9676 	if (vsi->netdev_registered) {
9677 		vsi->netdev_registered = false;
9678 		unregister_netdev(vsi->netdev);
9679 		free_netdev(vsi->netdev);
9680 		vsi->netdev = NULL;
9681 	}
9682 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9683 err_vsi:
9684 	i40e_vsi_clear(vsi);
9685 	return NULL;
9686 }
9687 
9688 /**
9689  * i40e_vsi_setup - Set up a VSI by a given type
9690  * @pf: board private structure
9691  * @type: VSI type
9692  * @uplink_seid: the switch element to link to
9693  * @param1: usage depends upon VSI type. For VF types, indicates VF id
9694  *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
9700  **/
9701 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9702 				u16 uplink_seid, u32 param1)
9703 {
9704 	struct i40e_vsi *vsi = NULL;
9705 	struct i40e_veb *veb = NULL;
9706 	int ret, i;
9707 	int v_idx;
9708 
9709 	/* The requested uplink_seid must be either
9710 	 *     - the PF's port seid
9711 	 *              no VEB is needed because this is the PF
9712 	 *              or this is a Flow Director special case VSI
9713 	 *     - seid of an existing VEB
9714 	 *     - seid of a VSI that owns an existing VEB
9715 	 *     - seid of a VSI that doesn't own a VEB
9716 	 *              a new VEB is created and the VSI becomes the owner
9717 	 *     - seid of the PF VSI, which is what creates the first VEB
9718 	 *              this is a special case of the previous
9719 	 *
9720 	 * Find which uplink_seid we were given and create a new VEB if needed
9721 	 */
9722 	for (i = 0; i < I40E_MAX_VEB; i++) {
9723 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9724 			veb = pf->veb[i];
9725 			break;
9726 		}
9727 	}
9728 
9729 	if (!veb && uplink_seid != pf->mac_seid) {
9730 
9731 		for (i = 0; i < pf->num_alloc_vsi; i++) {
9732 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9733 				vsi = pf->vsi[i];
9734 				break;
9735 			}
9736 		}
9737 		if (!vsi) {
9738 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9739 				 uplink_seid);
9740 			return NULL;
9741 		}
9742 
9743 		if (vsi->uplink_seid == pf->mac_seid)
9744 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9745 					     vsi->tc_config.enabled_tc);
9746 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9747 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9748 					     vsi->tc_config.enabled_tc);
9749 		if (veb) {
9750 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9751 				dev_info(&vsi->back->pdev->dev,
9752 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9753 				return NULL;
9754 			}
9755 			/* We come up by default in VEPA mode if SRIOV is not
9756 			 * already enabled, in which case we can't force VEPA
9757 			 * mode.
9758 			 */
9759 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9760 				veb->bridge_mode = BRIDGE_MODE_VEPA;
9761 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9762 			}
9763 			i40e_config_bridge_mode(veb);
9764 		}
9765 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9766 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9767 				veb = pf->veb[i];
9768 		}
9769 		if (!veb) {
9770 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9771 			return NULL;
9772 		}
9773 
9774 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9775 		uplink_seid = veb->seid;
9776 	}
9777 
9778 	/* get vsi sw struct */
9779 	v_idx = i40e_vsi_mem_alloc(pf, type);
9780 	if (v_idx < 0)
9781 		goto err_alloc;
9782 	vsi = pf->vsi[v_idx];
9783 	if (!vsi)
9784 		goto err_alloc;
9785 	vsi->type = type;
9786 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9787 
9788 	if (type == I40E_VSI_MAIN)
9789 		pf->lan_vsi = v_idx;
9790 	else if (type == I40E_VSI_SRIOV)
9791 		vsi->vf_id = param1;
9792 	/* assign it some queues */
9793 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9794 				vsi->idx);
9795 	if (ret < 0) {
9796 		dev_info(&pf->pdev->dev,
9797 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
9798 			 vsi->alloc_queue_pairs, vsi->seid, ret);
9799 		goto err_vsi;
9800 	}
9801 	vsi->base_queue = ret;
9802 
9803 	/* get a VSI from the hardware */
9804 	vsi->uplink_seid = uplink_seid;
9805 	ret = i40e_add_vsi(vsi);
9806 	if (ret)
9807 		goto err_vsi;
9808 
9809 	switch (vsi->type) {
9810 	/* setup the netdev if needed */
9811 	case I40E_VSI_MAIN:
9812 		/* Apply relevant filters if a platform-specific mac
9813 		 * address was selected.
9814 		 */
		if (pf->flags & I40E_FLAG_PF_MAC) {
9816 			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9817 			if (ret) {
9818 				dev_warn(&pf->pdev->dev,
9819 					 "could not set up macaddr; err %d\n",
9820 					 ret);
9821 			}
9822 		}
		/* fall through */
	case I40E_VSI_VMDQ2:
9824 	case I40E_VSI_FCOE:
9825 		ret = i40e_config_netdev(vsi);
9826 		if (ret)
9827 			goto err_netdev;
9828 		ret = register_netdev(vsi->netdev);
9829 		if (ret)
9830 			goto err_netdev;
9831 		vsi->netdev_registered = true;
9832 		netif_carrier_off(vsi->netdev);
9833 #ifdef CONFIG_I40E_DCB
9834 		/* Setup DCB netlink interface */
9835 		i40e_dcbnl_setup(vsi);
9836 #endif /* CONFIG_I40E_DCB */
9837 		/* fall through */
9838 
9839 	case I40E_VSI_FDIR:
9840 		/* set up vectors and rings if needed */
9841 		ret = i40e_vsi_setup_vectors(vsi);
9842 		if (ret)
9843 			goto err_msix;
9844 
9845 		ret = i40e_alloc_rings(vsi);
9846 		if (ret)
9847 			goto err_rings;
9848 
9849 		/* map all of the rings to the q_vectors */
9850 		i40e_vsi_map_rings_to_vectors(vsi);
9851 
9852 		i40e_vsi_reset_stats(vsi);
9853 		break;
9854 
9855 	default:
9856 		/* no netdev or rings for the other VSI types */
9857 		break;
9858 	}
9859 
9860 	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9861 	    (vsi->type == I40E_VSI_VMDQ2)) {
9862 		ret = i40e_vsi_config_rss(vsi);
9863 	}
9864 	return vsi;
9865 
9866 err_rings:
9867 	i40e_vsi_free_q_vectors(vsi);
9868 err_msix:
9869 	if (vsi->netdev_registered) {
9870 		vsi->netdev_registered = false;
9871 		unregister_netdev(vsi->netdev);
9872 		free_netdev(vsi->netdev);
9873 		vsi->netdev = NULL;
9874 	}
9875 err_netdev:
9876 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9877 err_vsi:
9878 	i40e_vsi_clear(vsi);
9879 err_alloc:
9880 	return NULL;
9881 }
9882 
9883 /**
9884  * i40e_veb_get_bw_info - Query VEB BW information
9885  * @veb: the veb to query
9886  *
9887  * Query the Tx scheduler BW configuration data for given VEB
9888  **/
9889 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9890 {
9891 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9892 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9893 	struct i40e_pf *pf = veb->pf;
9894 	struct i40e_hw *hw = &pf->hw;
9895 	u32 tc_bw_max;
9896 	int ret = 0;
9897 	int i;
9898 
9899 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9900 						  &bw_data, NULL);
9901 	if (ret) {
9902 		dev_info(&pf->pdev->dev,
9903 			 "query veb bw config failed, err %s aq_err %s\n",
9904 			 i40e_stat_str(&pf->hw, ret),
9905 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9906 		goto out;
9907 	}
9908 
9909 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9910 						   &ets_data, NULL);
9911 	if (ret) {
9912 		dev_info(&pf->pdev->dev,
9913 			 "query veb bw ets config failed, err %s aq_err %s\n",
9914 			 i40e_stat_str(&pf->hw, ret),
9915 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9916 		goto out;
9917 	}
9918 
9919 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9920 	veb->bw_max_quanta = ets_data.tc_bw_max;
9921 	veb->is_abs_credits = bw_data.absolute_credits_enable;
9922 	veb->enabled_tc = ets_data.tc_valid_bits;
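	/* tc_bw_max is reported as two little-endian 16-bit words; merge
	 * them into one 32-bit value before extracting the per-TC fields
	 */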
9923 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9924 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9925 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9926 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9927 		veb->bw_tc_limit_credits[i] =
9928 					le16_to_cpu(bw_data.tc_bw_limits[i]);
9929 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9930 	}
9931 
9932 out:
9933 	return ret;
9934 }
9935 
9936 /**
9937  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9938  * @pf: board private structure
9939  *
9940  * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
9942  **/
9943 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9944 {
9945 	int ret = -ENOENT;
9946 	struct i40e_veb *veb;
9947 	int i;
9948 
9949 	/* Need to protect the allocation of switch elements at the PF level */
9950 	mutex_lock(&pf->switch_mutex);
9951 
9952 	/* VEB list may be fragmented if VEB creation/destruction has
9953 	 * been happening.  We can afford to do a quick scan to look
9954 	 * for any free slots in the list.
9955 	 *
9956 	 * find next empty veb slot, looping back around if necessary
9957 	 */
9958 	i = 0;
9959 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9960 		i++;
9961 	if (i >= I40E_MAX_VEB) {
9962 		ret = -ENOMEM;
9963 		goto err_alloc_veb;  /* out of VEB slots! */
9964 	}
9965 
9966 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9967 	if (!veb) {
9968 		ret = -ENOMEM;
9969 		goto err_alloc_veb;
9970 	}
9971 	veb->pf = pf;
9972 	veb->idx = i;
9973 	veb->enabled_tc = 1;
9974 
9975 	pf->veb[i] = veb;
9976 	ret = i;
9977 err_alloc_veb:
9978 	mutex_unlock(&pf->switch_mutex);
9979 	return ret;
9980 }
9981 
9982 /**
9983  * i40e_switch_branch_release - Delete a branch of the switch tree
9984  * @branch: where to start deleting
9985  *
9986  * This uses recursion to find the tips of the branch to be
9987  * removed, deleting until we get back to and can delete this VEB.
9988  **/
9989 static void i40e_switch_branch_release(struct i40e_veb *branch)
9990 {
9991 	struct i40e_pf *pf = branch->pf;
9992 	u16 branch_seid = branch->seid;
9993 	u16 veb_idx = branch->idx;
9994 	int i;
9995 
9996 	/* release any VEBs on this VEB - RECURSION */
9997 	for (i = 0; i < I40E_MAX_VEB; i++) {
9998 		if (!pf->veb[i])
9999 			continue;
10000 		if (pf->veb[i]->uplink_seid == branch->seid)
10001 			i40e_switch_branch_release(pf->veb[i]);
10002 	}
10003 
10004 	/* Release the VSIs on this VEB, but not the owner VSI.
10005 	 *
10006 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10007 	 *       the VEB itself, so don't use (*branch) after this loop.
10008 	 */
10009 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10010 		if (!pf->vsi[i])
10011 			continue;
10012 		if (pf->vsi[i]->uplink_seid == branch_seid &&
10013 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10014 			i40e_vsi_release(pf->vsi[i]);
10015 		}
10016 	}
10017 
10018 	/* There's one corner case where the VEB might not have been
10019 	 * removed, so double check it here and remove it if needed.
10020 	 * This case happens if the veb was created from the debugfs
10021 	 * commands and no VSIs were added to it.
10022 	 */
10023 	if (pf->veb[veb_idx])
10024 		i40e_veb_release(pf->veb[veb_idx]);
10025 }
10026 
10027 /**
10028  * i40e_veb_clear - remove veb struct
10029  * @veb: the veb to remove
10030  **/
10031 static void i40e_veb_clear(struct i40e_veb *veb)
10032 {
10033 	if (!veb)
10034 		return;
10035 
10036 	if (veb->pf) {
10037 		struct i40e_pf *pf = veb->pf;
10038 
10039 		mutex_lock(&pf->switch_mutex);
10040 		if (pf->veb[veb->idx] == veb)
10041 			pf->veb[veb->idx] = NULL;
10042 		mutex_unlock(&pf->switch_mutex);
10043 	}
10044 
10045 	kfree(veb);
10046 }
10047 
10048 /**
10049  * i40e_veb_release - Delete a VEB and free its resources
10050  * @veb: the VEB being removed
10051  **/
10052 void i40e_veb_release(struct i40e_veb *veb)
10053 {
10054 	struct i40e_vsi *vsi = NULL;
10055 	struct i40e_pf *pf;
10056 	int i, n = 0;
10057 
10058 	pf = veb->pf;
10059 
10060 	/* find the remaining VSI and check for extras */
10061 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10062 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10063 			n++;
10064 			vsi = pf->vsi[i];
10065 		}
10066 	}
10067 	if (n != 1) {
10068 		dev_info(&pf->pdev->dev,
10069 			 "can't remove VEB %d with %d VSIs left\n",
10070 			 veb->seid, n);
10071 		return;
10072 	}
10073 
10074 	/* move the remaining VSI to uplink veb */
10075 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10076 	if (veb->uplink_seid) {
10077 		vsi->uplink_seid = veb->uplink_seid;
10078 		if (veb->uplink_seid == pf->mac_seid)
10079 			vsi->veb_idx = I40E_NO_VEB;
10080 		else
10081 			vsi->veb_idx = veb->veb_idx;
10082 	} else {
10083 		/* floating VEB */
10084 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10085 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10086 	}
10087 
10088 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10089 	i40e_veb_clear(veb);
10090 }
10091 
10092 /**
10093  * i40e_add_veb - create the VEB in the switch
10094  * @veb: the VEB to be instantiated
10095  * @vsi: the controlling VSI
10096  **/
10097 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10098 {
10099 	struct i40e_pf *pf = veb->pf;
10100 	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10101 	int ret;
10102 
	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);
	if (ret) {
10109 		dev_info(&pf->pdev->dev,
10110 			 "couldn't add VEB, err %s aq_err %s\n",
10111 			 i40e_stat_str(&pf->hw, ret),
10112 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10113 		return -EPERM;
10114 	}
10115 
10116 	/* get statistics counter */
10117 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10118 					 &veb->stats_idx, NULL, NULL, NULL);
10119 	if (ret) {
10120 		dev_info(&pf->pdev->dev,
10121 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10122 			 i40e_stat_str(&pf->hw, ret),
10123 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10124 		return -EPERM;
10125 	}
10126 	ret = i40e_veb_get_bw_info(veb);
10127 	if (ret) {
10128 		dev_info(&pf->pdev->dev,
10129 			 "couldn't get VEB bw info, err %s aq_err %s\n",
10130 			 i40e_stat_str(&pf->hw, ret),
10131 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10132 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10133 		return -ENOENT;
10134 	}
10135 
10136 	vsi->uplink_seid = veb->seid;
10137 	vsi->veb_idx = veb->idx;
10138 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10139 
10140 	return 0;
10141 }
10142 
10143 /**
10144  * i40e_veb_setup - Set up a VEB
10145  * @pf: board private structure
10146  * @flags: VEB setup flags
10147  * @uplink_seid: the switch element to link to
10148  * @vsi_seid: the initial VSI seid
10149  * @enabled_tc: Enabled TC bit-map
10150  *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
10153  * existing VEB.  It is also possible for both uplink and vsi seids
10154  * to be zero, in order to create a floating VEB.
10155  *
10156  * Returns pointer to the successfully allocated VEB sw struct on
10157  * success, otherwise returns NULL on failure.
10158  **/
10159 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10160 				u16 uplink_seid, u16 vsi_seid,
10161 				u8 enabled_tc)
10162 {
10163 	struct i40e_veb *veb, *uplink_veb = NULL;
10164 	int vsi_idx, veb_idx;
10165 	int ret;
10166 
10167 	/* if one seid is 0, the other must be 0 to create a floating relay */
10168 	if ((uplink_seid == 0 || vsi_seid == 0) &&
10169 	    (uplink_seid + vsi_seid != 0)) {
10170 		dev_info(&pf->pdev->dev,
10171 			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10172 			 uplink_seid, vsi_seid);
10173 		return NULL;
10174 	}
10175 
10176 	/* make sure there is such a vsi and uplink */
10177 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10178 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10179 			break;
10180 	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10181 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10182 			 vsi_seid);
10183 		return NULL;
10184 	}
10185 
10186 	if (uplink_seid && uplink_seid != pf->mac_seid) {
10187 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10188 			if (pf->veb[veb_idx] &&
10189 			    pf->veb[veb_idx]->seid == uplink_seid) {
10190 				uplink_veb = pf->veb[veb_idx];
10191 				break;
10192 			}
10193 		}
10194 		if (!uplink_veb) {
10195 			dev_info(&pf->pdev->dev,
10196 				 "uplink seid %d not found\n", uplink_seid);
10197 			return NULL;
10198 		}
10199 	}
10200 
10201 	/* get veb sw struct */
10202 	veb_idx = i40e_veb_mem_alloc(pf);
10203 	if (veb_idx < 0)
10204 		goto err_alloc;
10205 	veb = pf->veb[veb_idx];
10206 	veb->flags = flags;
10207 	veb->uplink_seid = uplink_seid;
10208 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10209 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10210 
10211 	/* create the VEB in the switch */
10212 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10213 	if (ret)
10214 		goto err_veb;
10215 	if (vsi_idx == pf->lan_vsi)
10216 		pf->lan_veb = veb->idx;
10217 
10218 	return veb;
10219 
10220 err_veb:
10221 	i40e_veb_clear(veb);
10222 err_alloc:
10223 	return NULL;
10224 }
10225 
10226 /**
10227  * i40e_setup_pf_switch_element - set PF vars based on switch type
10228  * @pf: board private structure
10229  * @ele: element we are building info from
10230  * @num_reported: total number of elements
10231  * @printconfig: should we print the contents
10232  *
10233  * helper function to assist in extracting a few useful SEID values.
10234  **/
10235 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10236 				struct i40e_aqc_switch_config_element_resp *ele,
10237 				u16 num_reported, bool printconfig)
10238 {
10239 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10240 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10241 	u8 element_type = ele->element_type;
10242 	u16 seid = le16_to_cpu(ele->seid);
10243 
10244 	if (printconfig)
10245 		dev_info(&pf->pdev->dev,
10246 			 "type=%d seid=%d uplink=%d downlink=%d\n",
10247 			 element_type, seid, uplink_seid, downlink_seid);
10248 
10249 	switch (element_type) {
10250 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
10251 		pf->mac_seid = seid;
10252 		break;
10253 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
10254 		/* Main VEB? */
10255 		if (uplink_seid != pf->mac_seid)
10256 			break;
10257 		if (pf->lan_veb == I40E_NO_VEB) {
10258 			int v;
10259 
10260 			/* find existing or else empty VEB */
10261 			for (v = 0; v < I40E_MAX_VEB; v++) {
10262 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10263 					pf->lan_veb = v;
10264 					break;
10265 				}
10266 			}
10267 			if (pf->lan_veb == I40E_NO_VEB) {
10268 				v = i40e_veb_mem_alloc(pf);
10269 				if (v < 0)
10270 					break;
10271 				pf->lan_veb = v;
10272 			}
10273 		}
10274 
10275 		pf->veb[pf->lan_veb]->seid = seid;
10276 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10277 		pf->veb[pf->lan_veb]->pf = pf;
10278 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10279 		break;
10280 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
10281 		if (num_reported != 1)
10282 			break;
10283 		/* This is immediately after a reset so we can assume this is
10284 		 * the PF's VSI
10285 		 */
10286 		pf->mac_seid = uplink_seid;
10287 		pf->pf_seid = downlink_seid;
10288 		pf->main_vsi_seid = seid;
10289 		if (printconfig)
10290 			dev_info(&pf->pdev->dev,
10291 				 "pf_seid=%d main_vsi_seid=%d\n",
10292 				 pf->pf_seid, pf->main_vsi_seid);
10293 		break;
10294 	case I40E_SWITCH_ELEMENT_TYPE_PF:
10295 	case I40E_SWITCH_ELEMENT_TYPE_VF:
10296 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
10297 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
10298 	case I40E_SWITCH_ELEMENT_TYPE_PE:
10299 	case I40E_SWITCH_ELEMENT_TYPE_PA:
10300 		/* ignore these for now */
10301 		break;
10302 	default:
10303 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10304 			 element_type, seid);
10305 		break;
10306 	}
10307 }
10308 
10309 /**
10310  * i40e_fetch_switch_configuration - Get switch config from firmware
10311  * @pf: board private structure
10312  * @printconfig: should we print the contents
10313  *
10314  * Get the current switch configuration from the device and
10315  * extract a few useful SEID values.
10316  **/
10317 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10318 {
10319 	struct i40e_aqc_get_switch_config_resp *sw_config;
10320 	u16 next_seid = 0;
10321 	int ret = 0;
10322 	u8 *aq_buf;
10323 	int i;
10324 
10325 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10326 	if (!aq_buf)
10327 		return -ENOMEM;
10328 
10329 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
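	/* the switch configuration may span several AQ responses; next_seid
	 * carries the continuation point from one request to the next
	 */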
10330 	do {
10331 		u16 num_reported, num_total;
10332 
10333 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10334 						I40E_AQ_LARGE_BUF,
10335 						&next_seid, NULL);
10336 		if (ret) {
10337 			dev_info(&pf->pdev->dev,
10338 				 "get switch config failed err %s aq_err %s\n",
10339 				 i40e_stat_str(&pf->hw, ret),
10340 				 i40e_aq_str(&pf->hw,
10341 					     pf->hw.aq.asq_last_status));
10342 			kfree(aq_buf);
10343 			return -ENOENT;
10344 		}
10345 
10346 		num_reported = le16_to_cpu(sw_config->header.num_reported);
10347 		num_total = le16_to_cpu(sw_config->header.num_total);
10348 
10349 		if (printconfig)
10350 			dev_info(&pf->pdev->dev,
10351 				 "header: %d reported %d total\n",
10352 				 num_reported, num_total);
10353 
10354 		for (i = 0; i < num_reported; i++) {
10355 			struct i40e_aqc_switch_config_element_resp *ele =
10356 				&sw_config->element[i];
10357 
10358 			i40e_setup_pf_switch_element(pf, ele, num_reported,
10359 						     printconfig);
10360 		}
10361 	} while (next_seid != 0);
10362 
10363 	kfree(aq_buf);
10364 	return ret;
10365 }
10366 
10367 /**
10368  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10369  * @pf: board private structure
 * @reinit: true if the Main VSI needs to be re-initialized
10371  *
10372  * Returns 0 on success, negative value on failure
10373  **/
10374 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10375 {
10376 	u16 flags = 0;
10377 	int ret;
10378 
10379 	/* find out what's out there already */
10380 	ret = i40e_fetch_switch_configuration(pf, false);
10381 	if (ret) {
10382 		dev_info(&pf->pdev->dev,
10383 			 "couldn't fetch switch config, err %s aq_err %s\n",
10384 			 i40e_stat_str(&pf->hw, ret),
10385 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10386 		return ret;
10387 	}
10388 	i40e_pf_reset_stats(pf);
10389 
	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when the user requests promisc. The default is limited
	 * promisc.
	 */
10396 	if ((pf->hw.pf_id == 0) &&
10397 	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10398 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10399 
10400 	if (pf->hw.pf_id == 0) {
10401 		u16 valid_flags;
10402 
10403 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10404 		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10405 						NULL);
10406 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10407 			dev_info(&pf->pdev->dev,
10408 				 "couldn't set switch config bits, err %s aq_err %s\n",
10409 				 i40e_stat_str(&pf->hw, ret),
10410 				 i40e_aq_str(&pf->hw,
10411 					     pf->hw.aq.asq_last_status));
10412 			/* not a fatal problem, just keep going */
10413 		}
10414 	}
10415 
10416 	/* first time setup */
10417 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10418 		struct i40e_vsi *vsi = NULL;
10419 		u16 uplink_seid;
10420 
10421 		/* Attach the main PF VSI to the uplink already in the HW
10422 		 * switch: the LAN VEB if one exists, else the MAC itself.
10423 		 */
10424 		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10425 			uplink_seid = pf->veb[pf->lan_veb]->seid;
10426 		else
10427 			uplink_seid = pf->mac_seid;
10428 		if (pf->lan_vsi == I40E_NO_VSI)
10429 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10430 		else if (reinit)
10431 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10432 		if (!vsi) {
10433 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10434 			i40e_fdir_teardown(pf);
10435 			return -EAGAIN;
10436 		}
10437 	} else {
10438 		/* force a reset of TC and queue layout configurations */
10439 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10440 
10441 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10442 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10443 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10444 	}
10445 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10446 
10447 	i40e_fdir_sb_setup(pf);
10448 
10449 	/* Setup static PF queue filter control settings */
10450 	ret = i40e_setup_pf_filter_control(pf);
10451 	if (ret) {
10452 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10453 			 ret);
10454 		/* Failure here need not stop the remaining setup steps */
10455 	}
10456 
10457 	/* enable RSS in the HW, even for only one queue, as the stack can use
10458 	 * the hash
10459 	 */
10460 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
10461 		i40e_pf_config_rss(pf);
10462 
10463 	/* fill in link information and enable LSE reporting */
10464 	i40e_update_link_info(&pf->hw);
10465 	i40e_link_event(pf);
10466 
10467 	/* Initialize user-specific link properties */
10468 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10469 				  I40E_AQ_AN_COMPLETED) ? true : false);
10470 
10471 	i40e_ptp_init(pf);
10472 
10473 	return ret;
10474 }
10475 
10476 /**
10477  * i40e_determine_queue_usage - Work out queue distribution
10478  * @pf: board private structure
10479  **/
10480 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10481 {
10482 	int queues_left;
10483 
10484 	pf->num_lan_qps = 0;
10485 #ifdef I40E_FCOE
10486 	pf->num_fcoe_qps = 0;
10487 #endif
10488 
10489 	/* Find the max queues to be put into basic use.  We'll always be
10490 	 * using TC0, whether or not DCB is running, and TC0 will get the
10491 	 * big RSS set.
10492 	 */
10493 	queues_left = pf->hw.func_caps.num_tx_qp;
10494 
10495 	if ((queues_left == 1) ||
10496 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10497 		/* one qp for PF, no queues for anything else */
10498 		queues_left = 0;
10499 		pf->alloc_rss_size = pf->num_lan_qps = 1;
10500 
10501 		/* make sure all the fancies are disabled */
10502 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
10503 			       I40E_FLAG_IWARP_ENABLED	|
10504 #ifdef I40E_FCOE
10505 			       I40E_FLAG_FCOE_ENABLED	|
10506 #endif
10507 			       I40E_FLAG_FD_SB_ENABLED	|
10508 			       I40E_FLAG_FD_ATR_ENABLED	|
10509 			       I40E_FLAG_DCB_CAPABLE	|
10510 			       I40E_FLAG_DCB_ENABLED	|
10511 			       I40E_FLAG_SRIOV_ENABLED	|
10512 			       I40E_FLAG_VMDQ_ENABLED);
10513 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10514 				  I40E_FLAG_FD_SB_ENABLED |
10515 				  I40E_FLAG_FD_ATR_ENABLED |
10516 				  I40E_FLAG_DCB_CAPABLE))) {
10517 		/* one qp for PF */
10518 		pf->alloc_rss_size = pf->num_lan_qps = 1;
10519 		queues_left -= pf->num_lan_qps;
10520 
10521 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
10522 			       I40E_FLAG_IWARP_ENABLED	|
10523 #ifdef I40E_FCOE
10524 			       I40E_FLAG_FCOE_ENABLED	|
10525 #endif
10526 			       I40E_FLAG_FD_SB_ENABLED	|
10527 			       I40E_FLAG_FD_ATR_ENABLED	|
10528 			       I40E_FLAG_DCB_ENABLED	|
10529 			       I40E_FLAG_VMDQ_ENABLED);
10530 	} else {
10531 		/* Not enough queues for all TCs */
10532 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10533 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10534 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10535 					I40E_FLAG_DCB_ENABLED);
10536 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10537 		}
10538 		pf->num_lan_qps = max_t(int, pf->rss_size_max,
10539 					num_online_cpus());
10540 		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10541 					pf->hw.func_caps.num_tx_qp);
10542 
10543 		queues_left -= pf->num_lan_qps;
10544 	}
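	/* Whatever is left over is now doled out in priority order: FCoE
	 * when built in, one sideband queue for Flow Director, then the
	 * requested VF queue pairs, and finally VMDq, trimming each user
	 * back when the pool runs short.
	 */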
10545 
10546 #ifdef I40E_FCOE
10547 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10548 		if (I40E_DEFAULT_FCOE <= queues_left) {
10549 			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10550 		} else if (I40E_MINIMUM_FCOE <= queues_left) {
10551 			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10552 		} else {
10553 			pf->num_fcoe_qps = 0;
10554 			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10555 			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10556 		}
10557 
10558 		queues_left -= pf->num_fcoe_qps;
10559 	}
10560 
10561 #endif
10562 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10563 		if (queues_left > 1) {
10564 			queues_left -= 1; /* save 1 queue for FD */
10565 		} else {
10566 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10567 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10568 		}
10569 	}
10570 
10571 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10572 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10573 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10574 					(queues_left / pf->num_vf_qps));
10575 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10576 	}
10577 
10578 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10579 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10580 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10581 					  (queues_left / pf->num_vmdq_qps));
10582 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10583 	}
10584 
10585 	pf->queues_left = queues_left;
10586 	dev_dbg(&pf->pdev->dev,
10587 		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10588 		pf->hw.func_caps.num_tx_qp,
10589 		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10590 		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10591 		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10592 		queues_left);
10593 #ifdef I40E_FCOE
10594 	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10595 #endif
10596 }
10597 
10598 /**
10599  * i40e_setup_pf_filter_control - Setup PF static filter control
10600  * @pf: PF to be set up
10601  *
10602  * i40e_setup_pf_filter_control sets up a PF's initial filter control
10603  * settings. If PE/FCoE are enabled then it will also set the per-PF
10604  * filter sizes required for them. It also enables Flow Director,
10605  * ethertype and macvlan type filter settings for the PF.
10606  *
10607  * Returns 0 on success, negative on failure
10608  **/
10609 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10610 {
10611 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
10612 
10613 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10614 
10615 	/* Flow Director is enabled */
10616 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10617 		settings->enable_fdir = true;
10618 
10619 	/* Ethtype and MACVLAN filters enabled for PF */
10620 	settings->enable_ethtype = true;
10621 	settings->enable_macvlan = true;
10622 
10623 	if (i40e_set_filter_control(&pf->hw, settings))
10624 		return -ENOENT;
10625 
10626 	return 0;
10627 }
10628 
10629 #define INFO_STRING_LEN 255
10630 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
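/* Note: snprintf() returns the number of characters it would have written,
 * so 'i' can exceed the space actually consumed; the WARN_ON() at the end
 * of i40e_print_features() flags the case where the feature string would
 * have been truncated.
 */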
10631 static void i40e_print_features(struct i40e_pf *pf)
10632 {
10633 	struct i40e_hw *hw = &pf->hw;
10634 	char *buf;
10635 	int i;
10636 
10637 	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10638 	if (!buf)
10639 		return;
10640 
10641 	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10642 #ifdef CONFIG_PCI_IOV
10643 	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10644 #endif
10645 	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
10646 		      pf->hw.func_caps.num_vsis,
10647 		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
10648 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
10649 		i += snprintf(&buf[i], REMAIN(i), " RSS");
10650 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10651 		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10652 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10653 		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10654 		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10655 	}
10656 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10657 		i += snprintf(&buf[i], REMAIN(i), " DCB");
10658 	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10659 	i += snprintf(&buf[i], REMAIN(i), " Geneve");
10660 	if (pf->flags & I40E_FLAG_PTP)
10661 		i += snprintf(&buf[i], REMAIN(i), " PTP");
10662 #ifdef I40E_FCOE
10663 	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10664 		i += snprintf(&buf[i], REMAIN(i), " FCOE");
10665 #endif
10666 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10667 		i += snprintf(&buf[i], REMAIN(i), " VEB");
10668 	else
10669 		i += snprintf(&buf[i], REMAIN(i), " VEPA");
10670 
10671 	dev_info(&pf->pdev->dev, "%s\n", buf);
10672 	kfree(buf);
10673 	WARN_ON(i > INFO_STRING_LEN);
10674 }
10675 
10676 /**
10677  * i40e_get_platform_mac_addr - get platform-specific MAC address
10679  * @pdev: PCI device information struct
10680  * @pf: board private structure
10681  *
10682  * Look up the MAC address in Open Firmware on systems that support it,
10683  * and use IDPROM on SPARC if no OF address is found. On return,
10684  * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10685  * has been selected.
10686  **/
10687 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10688 {
10689 	pf->flags &= ~I40E_FLAG_PF_MAC;
10690 	if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10691 		pf->flags |= I40E_FLAG_PF_MAC;
10692 }
10693 
10694 /**
10695  * i40e_probe - Device initialization routine
10696  * @pdev: PCI device information struct
10697  * @ent: entry in i40e_pci_tbl
10698  *
10699  * i40e_probe initializes a PF identified by a pci_dev structure.
10700  * The OS initialization, configuring of the PF private structure,
10701  * and a hardware reset occur.
10702  *
10703  * Returns 0 on success, negative on failure
10704  **/
10705 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10706 {
10707 	struct i40e_aq_get_phy_abilities_resp abilities;
10708 	struct i40e_pf *pf;
10709 	struct i40e_hw *hw;
10710 	static u16 pfs_found;
10711 	u16 wol_nvm_bits;
10712 	u16 link_status;
10713 	int err;
10714 	u32 val;
10715 	u32 i;
10716 	u8 set_fc_aq_fail;
10717 
10718 	err = pci_enable_device_mem(pdev);
10719 	if (err)
10720 		return err;
10721 
10722 	/* set up for high or low dma */
10723 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10724 	if (err) {
10725 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10726 		if (err) {
10727 			dev_err(&pdev->dev,
10728 				"DMA configuration failed: 0x%x\n", err);
10729 			goto err_dma;
10730 		}
10731 	}
10732 
10733 	/* set up pci connections */
10734 	err = pci_request_mem_regions(pdev, i40e_driver_name);
10735 	if (err) {
10736 		dev_info(&pdev->dev,
10737 			 "pci_request_selected_regions failed %d\n", err);
10738 		goto err_pci_reg;
10739 	}
10740 
10741 	pci_enable_pcie_error_reporting(pdev);
10742 	pci_set_master(pdev);
10743 
10744 	/* Now that we have a PCI connection, we need to do the
10745 	 * low level device setup.  This is primarily setting up
10746 	 * the Admin Queue structures and then querying for the
10747 	 * device's current profile information.
10748 	 */
10749 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10750 	if (!pf) {
10751 		err = -ENOMEM;
10752 		goto err_pf_alloc;
10753 	}
10754 	pf->next_vsi = 0;
10755 	pf->pdev = pdev;
10756 	set_bit(__I40E_DOWN, &pf->state);
10757 
10758 	hw = &pf->hw;
10759 	hw->back = pf;
10760 
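	/* Map only as much of BAR0 as the driver's CSR space can use; the
	 * BAR itself may be larger than I40E_MAX_CSR_SPACE.
	 */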
10761 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10762 				I40E_MAX_CSR_SPACE);
10763 
10764 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10765 	if (!hw->hw_addr) {
10766 		err = -EIO;
10767 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10768 			 (unsigned int)pci_resource_start(pdev, 0),
10769 			 pf->ioremap_len, err);
10770 		goto err_ioremap;
10771 	}
10772 	hw->vendor_id = pdev->vendor;
10773 	hw->device_id = pdev->device;
10774 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10775 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
10776 	hw->subsystem_device_id = pdev->subsystem_device;
10777 	hw->bus.device = PCI_SLOT(pdev->devfn);
10778 	hw->bus.func = PCI_FUNC(pdev->devfn);
10779 	pf->instance = pfs_found;
10780 
10781 	/* set up the locks for the AQ; do this only once in probe
10782 	 * and destroy them only once in remove
10783 	 */
10784 	mutex_init(&hw->aq.asq_mutex);
10785 	mutex_init(&hw->aq.arq_mutex);
10786 
10787 	/* let the 'debug' module parameter override the message level */
10788 	if (debug != -1)
10789 		pf->msg_enable = debug;
10791 
10792 	/* do a special CORER for clearing PXE mode once at init */
10793 	if (hw->revision_id == 0 &&
10794 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10795 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10796 		i40e_flush(hw);
10797 		msleep(200);
10798 		pf->corer_count++;
10799 
10800 		i40e_clear_pxe_mode(hw);
10801 	}
10802 
10803 	/* Reset here to make sure all is clean and to define PF 'n' */
10804 	i40e_clear_hw(hw);
10805 	err = i40e_pf_reset(hw);
10806 	if (err) {
10807 		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10808 		goto err_pf_reset;
10809 	}
10810 	pf->pfr_count++;
10811 
10812 	hw->aq.num_arq_entries = I40E_AQ_LEN;
10813 	hw->aq.num_asq_entries = I40E_AQ_LEN;
10814 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10815 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10816 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10817 
10818 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10819 		 "%s-%s:misc",
10820 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10821 
10822 	err = i40e_init_shared_code(hw);
10823 	if (err) {
10824 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10825 			 err);
10826 		goto err_pf_reset;
10827 	}
10828 
10829 	/* set up a default setting for link flow control */
10830 	pf->hw.fc.requested_mode = I40E_FC_NONE;
10831 
10832 	err = i40e_init_adminq(hw);
10833 	if (err) {
10834 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
10835 			dev_info(&pdev->dev,
10836 				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10837 		else
10838 			dev_info(&pdev->dev,
10839 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10840 
10841 		goto err_pf_reset;
10842 	}
10843 
10844 	/* provide nvm, fw, api versions */
10845 	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10846 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10847 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10848 		 i40e_nvm_version_str(hw));
10849 
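	/* The driver tolerates a firmware API one minor version behind the
	 * one it was built against; anything newer than the driver, or more
	 * than one minor version older, draws a warning below.
	 */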
10850 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10851 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10852 		dev_info(&pdev->dev,
10853 			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10854 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10855 		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10856 		dev_info(&pdev->dev,
10857 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10858 
10859 	i40e_verify_eeprom(pf);
10860 
10861 	/* Rev 0 hardware was never productized */
10862 	if (hw->revision_id < 1)
10863 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10864 
10865 	i40e_clear_pxe_mode(hw);
10866 	err = i40e_get_capabilities(pf);
10867 	if (err)
10868 		goto err_adminq_setup;
10869 
10870 	err = i40e_sw_init(pf);
10871 	if (err) {
10872 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10873 		goto err_sw_init;
10874 	}
10875 
10876 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10877 				hw->func_caps.num_rx_qp,
10878 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10879 	if (err) {
10880 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10881 		goto err_init_lan_hmc;
10882 	}
10883 
10884 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10885 	if (err) {
10886 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10887 		err = -ENOENT;
10888 		goto err_configure_lan_hmc;
10889 	}
10890 
10891 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
10892 	 * Ignore error return codes; if LLDP was already disabled via
10893 	 * hardware settings, this call will fail.
10894 	 */
10895 	if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
10896 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10897 		i40e_aq_stop_lldp(hw, true, NULL);
10898 	}
10899 
10900 	i40e_get_mac_addr(hw, hw->mac.addr);
10901 	/* allow a platform config to override the HW addr */
10902 	i40e_get_platform_mac_addr(pdev, pf);
10903 	if (!is_valid_ether_addr(hw->mac.addr)) {
10904 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10905 		err = -EIO;
10906 		goto err_mac_addr;
10907 	}
10908 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10909 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10910 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10911 	if (is_valid_ether_addr(hw->mac.port_addr))
10912 		pf->flags |= I40E_FLAG_PORT_ID_VALID;
10913 #ifdef I40E_FCOE
10914 	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10915 	if (err)
10916 		dev_info(&pdev->dev,
10917 			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10918 	if (!is_valid_ether_addr(hw->mac.san_addr)) {
10919 		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10920 			 hw->mac.san_addr);
10921 		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10922 	}
10923 	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10924 #endif /* I40E_FCOE */
10925 
10926 	pci_set_drvdata(pdev, pf);
10927 	pci_save_state(pdev);
10928 #ifdef CONFIG_I40E_DCB
10929 	err = i40e_init_pf_dcb(pf);
10930 	if (err) {
10931 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10932 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
10933 		/* Continue without DCB enabled */
10934 	}
10935 #endif /* CONFIG_I40E_DCB */
10936 
10937 	/* set up periodic task facility */
10938 	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10939 	pf->service_timer_period = HZ;
10940 
10941 	INIT_WORK(&pf->service_task, i40e_service_task);
10942 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10943 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10944 
10945 	/* NVM bit on means WoL disabled for the port */
10946 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
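	/* WoL is likewise unavailable to a function that is not on the
	 * first partition of its port.
	 */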
10947 	if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1))
10948 		pf->wol_en = false;
10949 	else
10950 		pf->wol_en = true;
10951 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10952 
10953 	/* set up the main switch operations */
10954 	i40e_determine_queue_usage(pf);
10955 	err = i40e_init_interrupt_scheme(pf);
10956 	if (err)
10957 		goto err_switch_setup;
10958 
10959 	/* The number of VSIs reported by the FW is the minimum guaranteed
10960 	 * to us; HW supports far more and we share the remaining pool with
10961 	 * the other PFs. We allocate space for more than the guarantee with
10962 	 * the understanding that we might not get them all later.
10963 	 */
10964 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10965 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10966 	else
10967 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10968 
10969 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10970 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
10971 			  GFP_KERNEL);
10972 	if (!pf->vsi) {
10973 		err = -ENOMEM;
10974 		goto err_switch_setup;
10975 	}
10976 
10977 #ifdef CONFIG_PCI_IOV
10978 	/* prep for VF support */
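	/* If VFs survived a previous driver instance, the switch was
	 * presumably running in VEB mode; note that before it is rebuilt.
	 */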
10979 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10980 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10981 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10982 		if (pci_num_vf(pdev))
10983 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10984 	}
10985 #endif
10986 	err = i40e_setup_pf_switch(pf, false);
10987 	if (err) {
10988 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10989 		goto err_vsis;
10990 	}
10991 
10992 	/* Make sure flow control is set according to current settings */
10993 	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
10994 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
10995 		dev_dbg(&pf->pdev->dev,
10996 			"Set fc with err %s aq_err %s on get_phy_cap\n",
10997 			i40e_stat_str(hw, err),
10998 			i40e_aq_str(hw, hw->aq.asq_last_status));
10999 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11000 		dev_dbg(&pf->pdev->dev,
11001 			"Set fc with err %s aq_err %s on set_phy_config\n",
11002 			i40e_stat_str(hw, err),
11003 			i40e_aq_str(hw, hw->aq.asq_last_status));
11004 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11005 		dev_dbg(&pf->pdev->dev,
11006 			"Set fc with err %s aq_err %s on get_link_info\n",
11007 			i40e_stat_str(hw, err),
11008 			i40e_aq_str(hw, hw->aq.asq_last_status));
11009 
11010 	/* if FDIR VSI was set up, start it now */
11011 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11012 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11013 			i40e_vsi_open(pf->vsi[i]);
11014 			break;
11015 		}
11016 	}
11017 
11018 	/* The driver only wants link up/down and module qualification
11019 	 * reports from firmware.  Note the negative logic.
11020 	 */
11021 	err = i40e_aq_set_phy_int_mask(&pf->hw,
11022 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
11023 					 I40E_AQ_EVENT_MEDIA_NA |
11024 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11025 	if (err)
11026 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11027 			 i40e_stat_str(&pf->hw, err),
11028 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11029 
11030 	/* Reconfigure hardware for allowing smaller MSS in the case
11031 	 * of TSO, so that we avoid the MDD being fired and causing
11032 	 * a reset in the case of small MSS+TSO.
11033 	 */
11034 	val = rd32(hw, I40E_REG_MSS);
11035 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11036 		val &= ~I40E_REG_MSS_MIN_MASK;
11037 		val |= I40E_64BYTE_MSS;
11038 		wr32(hw, I40E_REG_MSS, val);
11039 	}
11040 
11041 	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
11042 		msleep(75);
11043 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11044 		if (err)
11045 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11046 				 i40e_stat_str(&pf->hw, err),
11047 				 i40e_aq_str(&pf->hw,
11048 					     pf->hw.aq.asq_last_status));
11049 	}
11050 	/* The main driver is (mostly) up and happy. We need to set this state
11051 	 * before setting up the misc vector or we get a race and the vector
11052 	 * ends up disabled forever.
11053 	 */
11054 	clear_bit(__I40E_DOWN, &pf->state);
11055 
11056 	/* In case of MSIX we are going to setup the misc vector right here
11057 	 * to handle admin queue events etc. In case of legacy and MSI
11058 	 * the misc functionality and queue processing is combined in
11059 	 * the same vector and that gets setup at open.
11060 	 */
11061 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11062 		err = i40e_setup_misc_vector(pf);
11063 		if (err) {
11064 			dev_info(&pdev->dev,
11065 				 "setup of misc vector failed: %d\n", err);
11066 			goto err_vsis;
11067 		}
11068 	}
11069 
11070 #ifdef CONFIG_PCI_IOV
11071 	/* prep for VF support */
11072 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11073 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11074 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11075 		/* disable link interrupts for VFs */
11076 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11077 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11078 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11079 		i40e_flush(hw);
11080 
11081 		if (pci_num_vf(pdev)) {
11082 			dev_info(&pdev->dev,
11083 				 "Active VFs found, allocating resources.\n");
11084 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11085 			if (err)
11086 				dev_info(&pdev->dev,
11087 					 "Error %d allocating resources for existing VFs\n",
11088 					 err);
11089 		}
11090 	}
11091 #endif /* CONFIG_PCI_IOV */
11092 
11093 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11094 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11095 						      pf->num_iwarp_msix,
11096 						      I40E_IWARP_IRQ_PILE_ID);
11097 		if (pf->iwarp_base_vector < 0) {
11098 			dev_info(&pdev->dev,
11099 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
11100 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
11101 			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11102 		}
11103 	}
11104 
11105 	i40e_dbg_pf_init(pf);
11106 
11107 	/* tell the firmware that we're starting */
11108 	i40e_send_version(pf);
11109 
11110 	/* since everything's happy, start the service_task timer */
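	/* round_jiffies() rounds the first expiry to a whole second so that
	 * periodic timers across the system tend to fire together.
	 */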
11111 	mod_timer(&pf->service_timer,
11112 		  round_jiffies(jiffies + pf->service_timer_period));
11113 
11114 	/* add this PF to the client device list and launch a client service task */
11115 	err = i40e_lan_add_device(pf);
11116 	if (err)
11117 		dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11118 			 err);
11119 
11120 #ifdef I40E_FCOE
11121 	/* create FCoE interface */
11122 	i40e_fcoe_vsi_setup(pf);
11123 
11124 #endif
11125 #define PCI_SPEED_SIZE 8
11126 #define PCI_WIDTH_SIZE 8
11127 	/* Devices on the IOSF bus do not have this information
11128 	 * and will report PCI Gen 1 x 1 by default so don't bother
11129 	 * checking them.
11130 	 */
11131 	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11132 		char speed[PCI_SPEED_SIZE] = "Unknown";
11133 		char width[PCI_WIDTH_SIZE] = "Unknown";
11134 
11135 		/* Get the negotiated link width and speed from PCI config
11136 		 * space
11137 		 */
11138 		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11139 					  &link_status);
11140 
11141 		i40e_set_pci_config_data(hw, link_status);
11142 
11143 		switch (hw->bus.speed) {
11144 		case i40e_bus_speed_8000:
11145 			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11146 		case i40e_bus_speed_5000:
11147 			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11148 		case i40e_bus_speed_2500:
11149 			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11150 		default:
11151 			break;
11152 		}
11153 		switch (hw->bus.width) {
11154 		case i40e_bus_width_pcie_x8:
11155 			strncpy(width, "8", PCI_WIDTH_SIZE); break;
11156 		case i40e_bus_width_pcie_x4:
11157 			strncpy(width, "4", PCI_WIDTH_SIZE); break;
11158 		case i40e_bus_width_pcie_x2:
11159 			strncpy(width, "2", PCI_WIDTH_SIZE); break;
11160 		case i40e_bus_width_pcie_x1:
11161 			strncpy(width, "1", PCI_WIDTH_SIZE); break;
11162 		default:
11163 			break;
11164 		}
11165 
11166 		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11167 			 speed, width);
11168 
11169 		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11170 		    hw->bus.speed < i40e_bus_speed_8000) {
11171 			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11172 			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11173 		}
11174 	}
11175 
11176 	/* get the requested speeds from the fw */
11177 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11178 	if (err)
11179 		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11180 			i40e_stat_str(&pf->hw, err),
11181 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11182 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11183 
11184 	/* get the supported phy types from the fw */
11185 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11186 	if (err)
11187 		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11188 			i40e_stat_str(&pf->hw, err),
11189 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11190 	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11191 
11192 	/* Add a filter to drop all Flow control frames from any VSI from being
11193 	 * transmitted. By doing so we stop a malicious VF from sending out
11194 	 * PAUSE or PFC frames and potentially controlling traffic for other
11195 	 * PF/VF VSIs.
11196 	 * The FW can still send Flow control frames if enabled.
11197 	 */
11198 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11199 						       pf->main_vsi_seid);
11200 
11201 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11202 	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11203 		pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11204 
11205 	/* print a string summarizing features */
11206 	i40e_print_features(pf);
11207 
11208 	return 0;
11209 
11210 	/* Unwind what we've done if something failed in the setup */
11211 err_vsis:
11212 	set_bit(__I40E_DOWN, &pf->state);
11213 	i40e_clear_interrupt_scheme(pf);
11214 	kfree(pf->vsi);
11215 err_switch_setup:
11216 	i40e_reset_interrupt_capability(pf);
11217 	del_timer_sync(&pf->service_timer);
11218 err_mac_addr:
11219 err_configure_lan_hmc:
11220 	(void)i40e_shutdown_lan_hmc(hw);
11221 err_init_lan_hmc:
11222 	kfree(pf->qp_pile);
11223 err_sw_init:
11224 err_adminq_setup:
11225 err_pf_reset:
11226 	iounmap(hw->hw_addr);
11227 err_ioremap:
11228 	kfree(pf);
11229 err_pf_alloc:
11230 	pci_disable_pcie_error_reporting(pdev);
11231 	pci_release_mem_regions(pdev);
11232 err_pci_reg:
11233 err_dma:
11234 	pci_disable_device(pdev);
11235 	return err;
11236 }
11237 
11238 /**
11239  * i40e_remove - Device removal routine
11240  * @pdev: PCI device information struct
11241  *
11242  * i40e_remove is called by the PCI subsystem to alert the driver
11243  * that it should release a PCI device.  This could be caused by a
11244  * Hot-Plug event, or because the driver is going to be removed from
11245  * memory.
11246  **/
11247 static void i40e_remove(struct pci_dev *pdev)
11248 {
11249 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11250 	struct i40e_hw *hw = &pf->hw;
11251 	i40e_status ret_code;
11252 	int i;
11253 
11254 	i40e_dbg_pf_exit(pf);
11255 
11256 	i40e_ptp_stop(pf);
11257 
11258 	/* Disable RSS in hw */
11259 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11260 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11261 
11262 	/* no more scheduling of any task */
11263 	set_bit(__I40E_SUSPENDED, &pf->state);
11264 	set_bit(__I40E_DOWN, &pf->state);
11265 	if (pf->service_timer.data)
11266 		del_timer_sync(&pf->service_timer);
11267 	if (pf->service_task.func)
11268 		cancel_work_sync(&pf->service_task);
11269 
11270 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11271 		i40e_free_vfs(pf);
11272 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11273 	}
11274 
11275 	i40e_fdir_teardown(pf);
11276 
11277 	/* If there is a switch structure or any orphans, remove them.
11278 	 * This will leave only the PF's VSI remaining.
11279 	 */
11280 	for (i = 0; i < I40E_MAX_VEB; i++) {
11281 		if (!pf->veb[i])
11282 			continue;
11283 
11284 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11285 		    pf->veb[i]->uplink_seid == 0)
11286 			i40e_switch_branch_release(pf->veb[i]);
11287 	}
11288 
11289 	/* Now we can shut down the PF's VSI, just before we kill
11290 	 * adminq and hmc.
11291 	 */
11292 	if (pf->vsi[pf->lan_vsi])
11293 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11294 
11295 	/* remove attached clients */
11296 	ret_code = i40e_lan_del_device(pf);
11297 	if (ret_code) {
11298 		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11299 			 ret_code);
11300 	}
11301 
11302 	/* shut down and destroy the HMC */
11303 	if (hw->hmc.hmc_obj) {
11304 		ret_code = i40e_shutdown_lan_hmc(hw);
11305 		if (ret_code)
11306 			dev_warn(&pdev->dev,
11307 				 "Failed to destroy the HMC resources: %d\n",
11308 				 ret_code);
11309 	}
11310 
11311 	/* shut down the adminq */
11312 	ret_code = i40e_shutdown_adminq(hw);
11313 	if (ret_code)
11314 		dev_warn(&pdev->dev,
11315 			 "Failed to destroy the Admin Queue resources: %d\n",
11316 			 ret_code);
11317 
11318 	/* destroy the locks only once, here */
11319 	mutex_destroy(&hw->aq.arq_mutex);
11320 	mutex_destroy(&hw->aq.asq_mutex);
11321 
11322 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11323 	i40e_clear_interrupt_scheme(pf);
11324 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11325 		if (pf->vsi[i]) {
11326 			i40e_vsi_clear_rings(pf->vsi[i]);
11327 			i40e_vsi_clear(pf->vsi[i]);
11328 			pf->vsi[i] = NULL;
11329 		}
11330 	}
11331 
11332 	for (i = 0; i < I40E_MAX_VEB; i++) {
11333 		kfree(pf->veb[i]);
11334 		pf->veb[i] = NULL;
11335 	}
11336 
11337 	kfree(pf->qp_pile);
11338 	kfree(pf->vsi);
11339 
11340 	iounmap(hw->hw_addr);
11341 	kfree(pf);
11342 	pci_release_mem_regions(pdev);
11343 
11344 	pci_disable_pcie_error_reporting(pdev);
11345 	pci_disable_device(pdev);
11346 }
11347 
11348 /**
11349  * i40e_pci_error_detected - warning that something funky happened in PCI land
11350  * @pdev: PCI device information struct
11351  *
11352  * Called to warn that something happened and the error handling steps
11353  * are in progress.  Allows the driver to quiesce things and be ready
11354  * for remediation.
11355  **/
11356 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11357 						enum pci_channel_state error)
11358 {
11359 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11360 
11361 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11362 
11363 	/* shut down all operations */
11364 	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11365 		rtnl_lock();
11366 		i40e_prep_for_reset(pf);
11367 		rtnl_unlock();
11368 	}
11369 
11370 	/* Request a slot reset */
11371 	return PCI_ERS_RESULT_NEED_RESET;
11372 }
11373 
11374 /**
11375  * i40e_pci_error_slot_reset - a PCI slot reset just happened
11376  * @pdev: PCI device information struct
11377  *
11378  * Called to find if the driver can work with the device now that
11379  * the pci slot has been reset.  If a basic connection seems good
11380  * (registers are readable and have sane content) then return a
11381  * happy little PCI_ERS_RESULT_xxx.
11382  **/
11383 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11384 {
11385 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11386 	pci_ers_result_t result;
11387 	int err;
11388 	u32 reg;
11389 
11390 	dev_dbg(&pdev->dev, "%s\n", __func__);
11391 	if (pci_enable_device_mem(pdev)) {
11392 		dev_info(&pdev->dev,
11393 			 "Cannot re-enable PCI device after reset.\n");
11394 		result = PCI_ERS_RESULT_DISCONNECT;
11395 	} else {
11396 		pci_set_master(pdev);
11397 		pci_restore_state(pdev);
11398 		pci_save_state(pdev);
11399 		pci_wake_from_d3(pdev, false);
11400 
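		/* A quiescent reset-trigger register is taken as evidence
		 * that the device's registers are readable and sane again.
		 */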
11401 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11402 		if (reg == 0)
11403 			result = PCI_ERS_RESULT_RECOVERED;
11404 		else
11405 			result = PCI_ERS_RESULT_DISCONNECT;
11406 	}
11407 
11408 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
11409 	if (err) {
11410 		dev_info(&pdev->dev,
11411 			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11412 			 err);
11413 		/* non-fatal, continue */
11414 	}
11415 
11416 	return result;
11417 }
11418 
11419 /**
11420  * i40e_pci_error_resume - restart operations after PCI error recovery
11421  * @pdev: PCI device information struct
11422  *
11423  * Called to allow the driver to bring things back up after PCI error
11424  * and/or reset recovery has finished.
11425  **/
11426 static void i40e_pci_error_resume(struct pci_dev *pdev)
11427 {
11428 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11429 
11430 	dev_dbg(&pdev->dev, "%s\n", __func__);
11431 	if (test_bit(__I40E_SUSPENDED, &pf->state))
11432 		return;
11433 
11434 	rtnl_lock();
11435 	i40e_handle_reset_warning(pf);
11436 	rtnl_unlock();
11437 }
11438 
11439 /**
11440  * i40e_shutdown - PCI callback for shutting down
11441  * @pdev: PCI device information struct
11442  **/
11443 static void i40e_shutdown(struct pci_dev *pdev)
11444 {
11445 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11446 	struct i40e_hw *hw = &pf->hw;
11447 
11448 	set_bit(__I40E_SUSPENDED, &pf->state);
11449 	set_bit(__I40E_DOWN, &pf->state);
11450 	rtnl_lock();
11451 	i40e_prep_for_reset(pf);
11452 	rtnl_unlock();
11453 
11454 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11455 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11456 
11457 	del_timer_sync(&pf->service_timer);
11458 	cancel_work_sync(&pf->service_task);
11459 	i40e_fdir_teardown(pf);
11460 
11461 	rtnl_lock();
11462 	i40e_prep_for_reset(pf);
11463 	rtnl_unlock();
11464 
11465 	wr32(hw, I40E_PFPM_APM,
11466 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11467 	wr32(hw, I40E_PFPM_WUFC,
11468 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11469 
11470 	i40e_clear_interrupt_scheme(pf);
11471 
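	/* Arm PCI wakeup and drop to D3hot only when the system is really
	 * powering off; reboot paths leave the device as it is.
	 */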
11472 	if (system_state == SYSTEM_POWER_OFF) {
11473 		pci_wake_from_d3(pdev, pf->wol_en);
11474 		pci_set_power_state(pdev, PCI_D3hot);
11475 	}
11476 }
11477 
11478 #ifdef CONFIG_PM
11479 /**
11480  * i40e_suspend - PCI callback for moving to D3
11481  * @pdev: PCI device information struct
11482  **/
11483 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11484 {
11485 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11486 	struct i40e_hw *hw = &pf->hw;
11487 	int retval = 0;
11488 
11489 	set_bit(__I40E_SUSPENDED, &pf->state);
11490 	set_bit(__I40E_DOWN, &pf->state);
11491 
11492 	rtnl_lock();
11493 	i40e_prep_for_reset(pf);
11494 	rtnl_unlock();
11495 
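	/* When WoL is enabled for this port, arm the APM wake enable and
	 * the magic-packet wake-up filter; otherwise clear both.
	 */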
11496 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11497 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11498 
11499 	i40e_stop_misc_vector(pf);
11500 
11501 	retval = pci_save_state(pdev);
11502 	if (retval)
11503 		return retval;
11504 
11505 	pci_wake_from_d3(pdev, pf->wol_en);
11506 	pci_set_power_state(pdev, PCI_D3hot);
11507 
11508 	return retval;
11509 }
11510 
11511 /**
11512  * i40e_resume - PCI callback for waking up from D3
11513  * @pdev: PCI device information struct
11514  **/
11515 static int i40e_resume(struct pci_dev *pdev)
11516 {
11517 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11518 	int err;
11519 
11520 	pci_set_power_state(pdev, PCI_D0);
11521 	pci_restore_state(pdev);
11522 	/* pci_restore_state() clears dev->state_saved, so
11523 	 * call pci_save_state() again to restore it.
11524 	 */
11525 	pci_save_state(pdev);
11526 
11527 	err = pci_enable_device_mem(pdev);
11528 	if (err) {
11529 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11530 		return err;
11531 	}
11532 	pci_set_master(pdev);
11533 
11534 	/* no wakeup events while running */
11535 	pci_wake_from_d3(pdev, false);
11536 
11537 	/* handling the reset will rebuild the device state */
11538 	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11539 		clear_bit(__I40E_DOWN, &pf->state);
11540 		rtnl_lock();
11541 		i40e_reset_and_rebuild(pf, false);
11542 		rtnl_unlock();
11543 	}
11544 
11545 	return 0;
11546 }
11547 
11548 #endif
11549 static const struct pci_error_handlers i40e_err_handler = {
11550 	.error_detected = i40e_pci_error_detected,
11551 	.slot_reset = i40e_pci_error_slot_reset,
11552 	.resume = i40e_pci_error_resume,
11553 };
11554 
11555 static struct pci_driver i40e_driver = {
11556 	.name     = i40e_driver_name,
11557 	.id_table = i40e_pci_tbl,
11558 	.probe    = i40e_probe,
11559 	.remove   = i40e_remove,
11560 #ifdef CONFIG_PM
11561 	.suspend  = i40e_suspend,
11562 	.resume   = i40e_resume,
11563 #endif
11564 	.shutdown = i40e_shutdown,
11565 	.err_handler = &i40e_err_handler,
11566 	.sriov_configure = i40e_pci_sriov_configure,
11567 };
11568 
11569 /**
11570  * i40e_init_module - Driver registration routine
11571  *
11572  * i40e_init_module is the first routine called when the driver is
11573  * loaded. All it does is register with the PCI subsystem.
11574  **/
11575 static int __init i40e_init_module(void)
11576 {
11577 	pr_info("%s: %s - version %s\n", i40e_driver_name,
11578 		i40e_driver_string, i40e_driver_version_str);
11579 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11580 
11581 	/* We'll see if a single thread per module is enough for now;
11582 	 * it can't be any worse than using the system workqueue, which
11583 	 * was already single threaded.
11584 	 */
11585 	i40e_wq = create_singlethread_workqueue(i40e_driver_name);
11586 	if (!i40e_wq) {
11587 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11588 		return -ENOMEM;
11589 	}
11590 
11591 	i40e_dbg_init();
11592 	return pci_register_driver(&i40e_driver);
11593 }
11594 module_init(i40e_init_module);
11595 
11596 /**
11597  * i40e_exit_module - Driver exit cleanup routine
11598  *
11599  * i40e_exit_module is called just before the driver is removed
11600  * from memory.
11601  **/
11602 static void __exit i40e_exit_module(void)
11603 {
11604 	pci_unregister_driver(&i40e_driver);
11605 	destroy_workqueue(i40e_wq);
11606 	i40e_dbg_exit();
11607 }
11608 module_exit(i40e_exit_module);
11609