xref: /linux/drivers/net/ethernet/intel/igc/igc_main.c (revision 860a9bed265146b10311bcadbbcef59c3af4454d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c)  2018 Intel Corporation */
3 
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/if_vlan.h>
7 #include <linux/tcp.h>
8 #include <linux/udp.h>
9 #include <linux/ip.h>
10 #include <linux/pm_runtime.h>
11 #include <net/pkt_sched.h>
12 #include <linux/bpf_trace.h>
13 #include <net/xdp_sock_drv.h>
14 #include <linux/pci.h>
15 
16 #include <net/ipv6.h>
17 
18 #include "igc.h"
19 #include "igc_hw.h"
20 #include "igc_tsn.h"
21 #include "igc_xdp.h"
22 
23 #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"
24 
25 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
26 
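/* Verdicts returned by the driver's XDP handling paths: PASS hands the frame
 * to the network stack, CONSUMED means it was dropped, TX/REDIRECT mean it
 * was transmitted or redirected by the XDP program.
 */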
27 #define IGC_XDP_PASS		0
28 #define IGC_XDP_CONSUMED	BIT(0)
29 #define IGC_XDP_TX		BIT(1)
30 #define IGC_XDP_REDIRECT	BIT(2)
31 
32 static int debug = -1;
33 
34 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
35 MODULE_DESCRIPTION(DRV_SUMMARY);
36 MODULE_LICENSE("GPL v2");
37 module_param(debug, int, 0);
38 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
39 
40 char igc_driver_name[] = "igc";
41 static const char igc_driver_string[] = DRV_SUMMARY;
42 static const char igc_copyright[] =
43 	"Copyright(c) 2018 Intel Corporation.";
44 
45 static const struct igc_info *igc_info_tbl[] = {
46 	[board_base] = &igc_base_info,
47 };
48 
49 static const struct pci_device_id igc_pci_tbl[] = {
50 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
51 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
52 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
53 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
54 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
55 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
56 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
57 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
58 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
59 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
60 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
61 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
62 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
63 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
64 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
65 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
66 	/* required last entry */
67 	{0, }
68 };
69 
70 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
71 
72 enum latency_range {
73 	lowest_latency = 0,
74 	low_latency = 1,
75 	bulk_latency = 2,
76 	latency_invalid = 255
77 };
78 
79 void igc_reset(struct igc_adapter *adapter)
80 {
81 	struct net_device *dev = adapter->netdev;
82 	struct igc_hw *hw = &adapter->hw;
83 	struct igc_fc_info *fc = &hw->fc;
84 	u32 pba, hwm;
85 
86 	/* Repartition PBA for greater than 9k MTU if required */
87 	pba = IGC_PBA_34K;
88 
89 	/* flow control settings
90 	 * The high water mark must be low enough to fit one full frame
91 	 * after transmitting the pause frame.  As such we must have enough
92 	 * space to allow for us to complete our current transmit and then
93 	 * receive the frame that is in progress from the link partner.
94 	 * Set it to:
95 	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
96 	 */
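	/* pba is expressed in KB, so shift left by 10 to convert it to bytes */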
97 	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
98 
99 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
100 	fc->low_water = fc->high_water - 16;
101 	fc->pause_time = 0xFFFF;
102 	fc->send_xon = 1;
103 	fc->current_mode = fc->requested_mode;
104 
105 	hw->mac.ops.reset_hw(hw);
106 
107 	if (hw->mac.ops.init_hw(hw))
108 		netdev_err(dev, "Error on hardware initialization\n");
109 
110 	/* Re-establish EEE setting */
111 	igc_set_eee_i225(hw, true, true, true);
112 
113 	if (!netif_running(adapter->netdev))
114 		igc_power_down_phy_copper_base(&adapter->hw);
115 
116 	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
117 	wr32(IGC_VET, ETH_P_8021Q);
118 
119 	/* Re-enable PTP, where applicable. */
120 	igc_ptp_reset(adapter);
121 
122 	/* Re-enable TSN offloading, where applicable. */
123 	igc_tsn_reset(adapter);
124 
125 	igc_get_phy_info(hw);
126 }
127 
128 /**
129  * igc_power_up_link - Power up the phy link
130  * @adapter: address of board private structure
131  */
132 static void igc_power_up_link(struct igc_adapter *adapter)
133 {
134 	igc_reset_phy(&adapter->hw);
135 
136 	igc_power_up_phy_copper(&adapter->hw);
137 
138 	igc_setup_link(&adapter->hw);
139 }
140 
141 /**
142  * igc_release_hw_control - release control of the h/w to f/w
143  * @adapter: address of board private structure
144  *
145  * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
146  * For ASF and Pass Through versions of f/w this means that the
147  * driver is no longer loaded.
148  */
149 static void igc_release_hw_control(struct igc_adapter *adapter)
150 {
151 	struct igc_hw *hw = &adapter->hw;
152 	u32 ctrl_ext;
153 
154 	if (!pci_device_is_present(adapter->pdev))
155 		return;
156 
157 	/* Let firmware take over control of h/w */
158 	ctrl_ext = rd32(IGC_CTRL_EXT);
159 	wr32(IGC_CTRL_EXT,
160 	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
161 }
162 
163 /**
164  * igc_get_hw_control - get control of the h/w from f/w
165  * @adapter: address of board private structure
166  *
167  * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
168  * For ASF and Pass Through versions of f/w this means that
169  * the driver is loaded.
170  */
171 static void igc_get_hw_control(struct igc_adapter *adapter)
172 {
173 	struct igc_hw *hw = &adapter->hw;
174 	u32 ctrl_ext;
175 
176 	/* Let firmware know the driver has taken over */
177 	ctrl_ext = rd32(IGC_CTRL_EXT);
178 	wr32(IGC_CTRL_EXT,
179 	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
180 }
181 
182 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
183 {
184 	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
185 			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
186 
187 	dma_unmap_len_set(buf, len, 0);
188 }
189 
190 /**
191  * igc_clean_tx_ring - Free Tx Buffers
192  * @tx_ring: ring to be cleaned
193  */
194 static void igc_clean_tx_ring(struct igc_ring *tx_ring)
195 {
196 	u16 i = tx_ring->next_to_clean;
197 	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
198 	u32 xsk_frames = 0;
199 
200 	while (i != tx_ring->next_to_use) {
201 		union igc_adv_tx_desc *eop_desc, *tx_desc;
202 
203 		switch (tx_buffer->type) {
204 		case IGC_TX_BUFFER_TYPE_XSK:
205 			xsk_frames++;
206 			break;
207 		case IGC_TX_BUFFER_TYPE_XDP:
208 			xdp_return_frame(tx_buffer->xdpf);
209 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
210 			break;
211 		case IGC_TX_BUFFER_TYPE_SKB:
212 			dev_kfree_skb_any(tx_buffer->skb);
213 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
214 			break;
215 		default:
216 			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
217 			break;
218 		}
219 
220 		/* check for eop_desc to determine the end of the packet */
221 		eop_desc = tx_buffer->next_to_watch;
222 		tx_desc = IGC_TX_DESC(tx_ring, i);
223 
224 		/* unmap remaining buffers */
225 		while (tx_desc != eop_desc) {
226 			tx_buffer++;
227 			tx_desc++;
228 			i++;
229 			if (unlikely(i == tx_ring->count)) {
230 				i = 0;
231 				tx_buffer = tx_ring->tx_buffer_info;
232 				tx_desc = IGC_TX_DESC(tx_ring, 0);
233 			}
234 
235 			/* unmap any remaining paged data */
236 			if (dma_unmap_len(tx_buffer, len))
237 				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
238 		}
239 
240 		tx_buffer->next_to_watch = NULL;
241 
242 		/* move us one more past the eop_desc for start of next pkt */
243 		tx_buffer++;
244 		i++;
245 		if (unlikely(i == tx_ring->count)) {
246 			i = 0;
247 			tx_buffer = tx_ring->tx_buffer_info;
248 		}
249 	}
250 
251 	if (tx_ring->xsk_pool && xsk_frames)
252 		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
253 
254 	/* reset BQL for queue */
255 	netdev_tx_reset_queue(txring_txq(tx_ring));
256 
257 	/* Zero out the buffer ring */
258 	memset(tx_ring->tx_buffer_info, 0,
259 	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
260 
261 	/* Zero out the descriptor ring */
262 	memset(tx_ring->desc, 0, tx_ring->size);
263 
264 	/* reset next_to_use and next_to_clean */
265 	tx_ring->next_to_use = 0;
266 	tx_ring->next_to_clean = 0;
267 }
268 
269 /**
270  * igc_free_tx_resources - Free Tx Resources per Queue
271  * @tx_ring: Tx descriptor ring for a specific queue
272  *
273  * Free all transmit software resources
274  */
275 void igc_free_tx_resources(struct igc_ring *tx_ring)
276 {
277 	igc_disable_tx_ring(tx_ring);
278 
279 	vfree(tx_ring->tx_buffer_info);
280 	tx_ring->tx_buffer_info = NULL;
281 
282 	/* if not set, then don't free */
283 	if (!tx_ring->desc)
284 		return;
285 
286 	dma_free_coherent(tx_ring->dev, tx_ring->size,
287 			  tx_ring->desc, tx_ring->dma);
288 
289 	tx_ring->desc = NULL;
290 }
291 
292 /**
293  * igc_free_all_tx_resources - Free Tx Resources for All Queues
294  * @adapter: board private structure
295  *
296  * Free all transmit software resources
297  */
298 static void igc_free_all_tx_resources(struct igc_adapter *adapter)
299 {
300 	int i;
301 
302 	for (i = 0; i < adapter->num_tx_queues; i++)
303 		igc_free_tx_resources(adapter->tx_ring[i]);
304 }
305 
306 /**
307  * igc_clean_all_tx_rings - Free Tx Buffers for all queues
308  * @adapter: board private structure
309  */
310 static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
311 {
312 	int i;
313 
314 	for (i = 0; i < adapter->num_tx_queues; i++)
315 		if (adapter->tx_ring[i])
316 			igc_clean_tx_ring(adapter->tx_ring[i]);
317 }
318 
319 static void igc_disable_tx_ring_hw(struct igc_ring *ring)
320 {
321 	struct igc_hw *hw = &ring->q_vector->adapter->hw;
322 	u8 idx = ring->reg_idx;
323 	u32 txdctl;
324 
325 	txdctl = rd32(IGC_TXDCTL(idx));
326 	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
327 	txdctl |= IGC_TXDCTL_SWFLUSH;
328 	wr32(IGC_TXDCTL(idx), txdctl);
329 }
330 
331 /**
332  * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
333  * @adapter: board private structure
334  */
335 static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
336 {
337 	int i;
338 
339 	for (i = 0; i < adapter->num_tx_queues; i++) {
340 		struct igc_ring *tx_ring = adapter->tx_ring[i];
341 
342 		igc_disable_tx_ring_hw(tx_ring);
343 	}
344 }
345 
346 /**
347  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
348  * @tx_ring: tx descriptor ring (for a specific queue) to setup
349  *
350  * Return 0 on success, negative on failure
351  */
352 int igc_setup_tx_resources(struct igc_ring *tx_ring)
353 {
354 	struct net_device *ndev = tx_ring->netdev;
355 	struct device *dev = tx_ring->dev;
356 	int size = 0;
357 
358 	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
359 	tx_ring->tx_buffer_info = vzalloc(size);
360 	if (!tx_ring->tx_buffer_info)
361 		goto err;
362 
363 	/* round up to nearest 4K */
364 	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
365 	tx_ring->size = ALIGN(tx_ring->size, 4096);
366 
367 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
368 					   &tx_ring->dma, GFP_KERNEL);
369 
370 	if (!tx_ring->desc)
371 		goto err;
372 
373 	tx_ring->next_to_use = 0;
374 	tx_ring->next_to_clean = 0;
375 
376 	return 0;
377 
378 err:
379 	vfree(tx_ring->tx_buffer_info);
380 	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
381 	return -ENOMEM;
382 }
383 
384 /**
385  * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
386  * @adapter: board private structure
387  *
388  * Return 0 on success, negative on failure
389  */
390 static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
391 {
392 	struct net_device *dev = adapter->netdev;
393 	int i, err = 0;
394 
395 	for (i = 0; i < adapter->num_tx_queues; i++) {
396 		err = igc_setup_tx_resources(adapter->tx_ring[i]);
397 		if (err) {
398 			netdev_err(dev, "Error on Tx queue %u setup\n", i);
399 			for (i--; i >= 0; i--)
400 				igc_free_tx_resources(adapter->tx_ring[i]);
401 			break;
402 		}
403 	}
404 
405 	return err;
406 }
407 
408 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
409 {
410 	u16 i = rx_ring->next_to_clean;
411 
412 	dev_kfree_skb(rx_ring->skb);
413 	rx_ring->skb = NULL;
414 
415 	/* Free all the Rx ring sk_buffs */
416 	while (i != rx_ring->next_to_alloc) {
417 		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
418 
419 		/* Invalidate cache lines that may have been written to by
420 		 * device so that we avoid corrupting memory.
421 		 */
422 		dma_sync_single_range_for_cpu(rx_ring->dev,
423 					      buffer_info->dma,
424 					      buffer_info->page_offset,
425 					      igc_rx_bufsz(rx_ring),
426 					      DMA_FROM_DEVICE);
427 
428 		/* free resources associated with mapping */
429 		dma_unmap_page_attrs(rx_ring->dev,
430 				     buffer_info->dma,
431 				     igc_rx_pg_size(rx_ring),
432 				     DMA_FROM_DEVICE,
433 				     IGC_RX_DMA_ATTR);
434 		__page_frag_cache_drain(buffer_info->page,
435 					buffer_info->pagecnt_bias);
436 
437 		i++;
438 		if (i == rx_ring->count)
439 			i = 0;
440 	}
441 }
442 
443 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
444 {
445 	struct igc_rx_buffer *bi;
446 	u16 i;
447 
448 	for (i = 0; i < ring->count; i++) {
449 		bi = &ring->rx_buffer_info[i];
450 		if (!bi->xdp)
451 			continue;
452 
453 		xsk_buff_free(bi->xdp);
454 		bi->xdp = NULL;
455 	}
456 }
457 
458 /**
459  * igc_clean_rx_ring - Free Rx Buffers per Queue
460  * @ring: ring to free buffers from
461  */
462 static void igc_clean_rx_ring(struct igc_ring *ring)
463 {
464 	if (ring->xsk_pool)
465 		igc_clean_rx_ring_xsk_pool(ring);
466 	else
467 		igc_clean_rx_ring_page_shared(ring);
468 
469 	clear_ring_uses_large_buffer(ring);
470 
471 	ring->next_to_alloc = 0;
472 	ring->next_to_clean = 0;
473 	ring->next_to_use = 0;
474 }
475 
476 /**
477  * igc_clean_all_rx_rings - Free Rx Buffers for all queues
478  * @adapter: board private structure
479  */
480 static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
481 {
482 	int i;
483 
484 	for (i = 0; i < adapter->num_rx_queues; i++)
485 		if (adapter->rx_ring[i])
486 			igc_clean_rx_ring(adapter->rx_ring[i]);
487 }
488 
489 /**
490  * igc_free_rx_resources - Free Rx Resources
491  * @rx_ring: ring to clean the resources from
492  *
493  * Free all receive software resources
494  */
495 void igc_free_rx_resources(struct igc_ring *rx_ring)
496 {
497 	igc_clean_rx_ring(rx_ring);
498 
499 	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
500 
501 	vfree(rx_ring->rx_buffer_info);
502 	rx_ring->rx_buffer_info = NULL;
503 
504 	/* if not set, then don't free */
505 	if (!rx_ring->desc)
506 		return;
507 
508 	dma_free_coherent(rx_ring->dev, rx_ring->size,
509 			  rx_ring->desc, rx_ring->dma);
510 
511 	rx_ring->desc = NULL;
512 }
513 
514 /**
515  * igc_free_all_rx_resources - Free Rx Resources for All Queues
516  * @adapter: board private structure
517  *
518  * Free all receive software resources
519  */
520 static void igc_free_all_rx_resources(struct igc_adapter *adapter)
521 {
522 	int i;
523 
524 	for (i = 0; i < adapter->num_rx_queues; i++)
525 		igc_free_rx_resources(adapter->rx_ring[i]);
526 }
527 
528 /**
529  * igc_setup_rx_resources - allocate Rx resources (Descriptors)
530  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
531  *
532  * Returns 0 on success, negative on failure
533  */
534 int igc_setup_rx_resources(struct igc_ring *rx_ring)
535 {
536 	struct net_device *ndev = rx_ring->netdev;
537 	struct device *dev = rx_ring->dev;
538 	u8 index = rx_ring->queue_index;
539 	int size, desc_len, res;
540 
541 	/* XDP RX-queue info */
542 	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
543 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
544 	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
545 			       rx_ring->q_vector->napi.napi_id);
546 	if (res < 0) {
547 		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
548 			   index);
549 		return res;
550 	}
551 
552 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
553 	rx_ring->rx_buffer_info = vzalloc(size);
554 	if (!rx_ring->rx_buffer_info)
555 		goto err;
556 
557 	desc_len = sizeof(union igc_adv_rx_desc);
558 
559 	/* Round up to nearest 4K */
560 	rx_ring->size = rx_ring->count * desc_len;
561 	rx_ring->size = ALIGN(rx_ring->size, 4096);
562 
563 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
564 					   &rx_ring->dma, GFP_KERNEL);
565 
566 	if (!rx_ring->desc)
567 		goto err;
568 
569 	rx_ring->next_to_alloc = 0;
570 	rx_ring->next_to_clean = 0;
571 	rx_ring->next_to_use = 0;
572 
573 	return 0;
574 
575 err:
576 	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
577 	vfree(rx_ring->rx_buffer_info);
578 	rx_ring->rx_buffer_info = NULL;
579 	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
580 	return -ENOMEM;
581 }
582 
583 /**
584  * igc_setup_all_rx_resources - wrapper to allocate Rx resources
585  *                                (Descriptors) for all queues
586  * @adapter: board private structure
587  *
588  * Return 0 on success, negative on failure
589  */
590 static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
591 {
592 	struct net_device *dev = adapter->netdev;
593 	int i, err = 0;
594 
595 	for (i = 0; i < adapter->num_rx_queues; i++) {
596 		err = igc_setup_rx_resources(adapter->rx_ring[i]);
597 		if (err) {
598 			netdev_err(dev, "Error on Rx queue %u setup\n", i);
599 			for (i--; i >= 0; i--)
600 				igc_free_rx_resources(adapter->rx_ring[i]);
601 			break;
602 		}
603 	}
604 
605 	return err;
606 }
607 
608 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
609 					      struct igc_ring *ring)
610 {
611 	if (!igc_xdp_is_enabled(adapter) ||
612 	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
613 		return NULL;
614 
615 	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
616 }
617 
618 /**
619  * igc_configure_rx_ring - Configure a receive ring after Reset
620  * @adapter: board private structure
621  * @ring: receive ring to be configured
622  *
623  * Configure the Rx unit of the MAC after a reset.
624  */
625 static void igc_configure_rx_ring(struct igc_adapter *adapter,
626 				  struct igc_ring *ring)
627 {
628 	struct igc_hw *hw = &adapter->hw;
629 	union igc_adv_rx_desc *rx_desc;
630 	int reg_idx = ring->reg_idx;
631 	u32 srrctl = 0, rxdctl = 0;
632 	u64 rdba = ring->dma;
633 	u32 buf_size;
634 
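	/* Re-register the XDP memory model for this queue: rings bound to an
	 * AF_XDP zero-copy pool use MEM_TYPE_XSK_BUFF_POOL, all others fall
	 * back to the page-shared model.
	 */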
635 	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
636 	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
637 	if (ring->xsk_pool) {
638 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
639 						   MEM_TYPE_XSK_BUFF_POOL,
640 						   NULL));
641 		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
642 	} else {
643 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
644 						   MEM_TYPE_PAGE_SHARED,
645 						   NULL));
646 	}
647 
648 	if (igc_xdp_is_enabled(adapter))
649 		set_ring_uses_large_buffer(ring);
650 
651 	/* disable the queue */
652 	wr32(IGC_RXDCTL(reg_idx), 0);
653 
654 	/* Set DMA base address registers */
655 	wr32(IGC_RDBAL(reg_idx),
656 	     rdba & 0x00000000ffffffffULL);
657 	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
658 	wr32(IGC_RDLEN(reg_idx),
659 	     ring->count * sizeof(union igc_adv_rx_desc));
660 
661 	/* initialize head and tail */
662 	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
663 	wr32(IGC_RDH(reg_idx), 0);
664 	writel(0, ring->tail);
665 
666 	/* reset next-to-use/clean to place SW in sync with hardware */
667 	ring->next_to_clean = 0;
668 	ring->next_to_use = 0;
669 
670 	if (ring->xsk_pool)
671 		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
672 	else if (ring_uses_large_buffer(ring))
673 		buf_size = IGC_RXBUFFER_3072;
674 	else
675 		buf_size = IGC_RXBUFFER_2048;
676 
677 	srrctl = rd32(IGC_SRRCTL(reg_idx));
678 	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
679 		    IGC_SRRCTL_DESCTYPE_MASK);
680 	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
681 	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
682 	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
683 
684 	wr32(IGC_SRRCTL(reg_idx), srrctl);
685 
686 	rxdctl |= IGC_RX_PTHRESH;
687 	rxdctl |= IGC_RX_HTHRESH << 8;
688 	rxdctl |= IGC_RX_WTHRESH << 16;
689 
690 	/* initialize rx_buffer_info */
691 	memset(ring->rx_buffer_info, 0,
692 	       sizeof(struct igc_rx_buffer) * ring->count);
693 
694 	/* initialize Rx descriptor 0 */
695 	rx_desc = IGC_RX_DESC(ring, 0);
696 	rx_desc->wb.upper.length = 0;
697 
698 	/* enable receive descriptor fetching */
699 	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
700 
701 	wr32(IGC_RXDCTL(reg_idx), rxdctl);
702 }
703 
704 /**
705  * igc_configure_rx - Configure receive Unit after Reset
706  * @adapter: board private structure
707  *
708  * Configure the Rx unit of the MAC after a reset.
709  */
710 static void igc_configure_rx(struct igc_adapter *adapter)
711 {
712 	int i;
713 
714 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
715 	 * the Base and Length of the Rx Descriptor Ring
716 	 */
717 	for (i = 0; i < adapter->num_rx_queues; i++)
718 		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
719 }
720 
721 /**
722  * igc_configure_tx_ring - Configure transmit ring after Reset
723  * @adapter: board private structure
724  * @ring: tx ring to configure
725  *
726  * Configure a transmit ring after a reset.
727  */
728 static void igc_configure_tx_ring(struct igc_adapter *adapter,
729 				  struct igc_ring *ring)
730 {
731 	struct igc_hw *hw = &adapter->hw;
732 	int reg_idx = ring->reg_idx;
733 	u64 tdba = ring->dma;
734 	u32 txdctl = 0;
735 
736 	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
737 
738 	/* disable the queue */
739 	wr32(IGC_TXDCTL(reg_idx), 0);
740 	wrfl();
741 
742 	wr32(IGC_TDLEN(reg_idx),
743 	     ring->count * sizeof(union igc_adv_tx_desc));
744 	wr32(IGC_TDBAL(reg_idx),
745 	     tdba & 0x00000000ffffffffULL);
746 	wr32(IGC_TDBAH(reg_idx), tdba >> 32);
747 
748 	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
749 	wr32(IGC_TDH(reg_idx), 0);
750 	writel(0, ring->tail);
751 
752 	txdctl |= IGC_TX_PTHRESH;
753 	txdctl |= IGC_TX_HTHRESH << 8;
754 	txdctl |= IGC_TX_WTHRESH << 16;
755 
756 	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
757 	wr32(IGC_TXDCTL(reg_idx), txdctl);
758 }
759 
760 /**
761  * igc_configure_tx - Configure transmit Unit after Reset
762  * @adapter: board private structure
763  *
764  * Configure the Tx unit of the MAC after a reset.
765  */
766 static void igc_configure_tx(struct igc_adapter *adapter)
767 {
768 	int i;
769 
770 	for (i = 0; i < adapter->num_tx_queues; i++)
771 		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
772 }
773 
774 /**
775  * igc_setup_mrqc - configure the multiple receive queue control registers
776  * @adapter: Board private structure
777  */
778 static void igc_setup_mrqc(struct igc_adapter *adapter)
779 {
780 	struct igc_hw *hw = &adapter->hw;
781 	u32 j, num_rx_queues;
782 	u32 mrqc, rxcsum;
783 	u32 rss_key[10];
784 
785 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
786 	for (j = 0; j < 10; j++)
787 		wr32(IGC_RSSRK(j), rss_key[j]);
788 
789 	num_rx_queues = adapter->rss_queues;
790 
791 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
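		/* Spread the IGC_RETA_SIZE indirection table entries evenly
		 * across the enabled RSS queues.
		 */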
792 		for (j = 0; j < IGC_RETA_SIZE; j++)
793 			adapter->rss_indir_tbl[j] =
794 			(j * num_rx_queues) / IGC_RETA_SIZE;
795 		adapter->rss_indir_tbl_init = num_rx_queues;
796 	}
797 	igc_write_rss_indir_tbl(adapter);
798 
799 	/* Disable raw packet checksumming so that RSS hash is placed in
800 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
801 	 * offloads as they are enabled by default
802 	 */
803 	rxcsum = rd32(IGC_RXCSUM);
804 	rxcsum |= IGC_RXCSUM_PCSD;
805 
806 	/* Enable Receive Checksum Offload for SCTP */
807 	rxcsum |= IGC_RXCSUM_CRCOFL;
808 
809 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
810 	wr32(IGC_RXCSUM, rxcsum);
811 
812 	/* Generate RSS hash based on packet types, TCP/UDP
813 	 * port numbers and/or IPv4/v6 src and dst addresses
814 	 */
815 	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
816 	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
817 	       IGC_MRQC_RSS_FIELD_IPV6 |
818 	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
819 	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
820 
821 	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
822 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
823 	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
824 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
825 
826 	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
827 
828 	wr32(IGC_MRQC, mrqc);
829 }
830 
831 /**
832  * igc_setup_rctl - configure the receive control registers
833  * @adapter: Board private structure
834  */
835 static void igc_setup_rctl(struct igc_adapter *adapter)
836 {
837 	struct igc_hw *hw = &adapter->hw;
838 	u32 rctl;
839 
840 	rctl = rd32(IGC_RCTL);
841 
842 	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
843 	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
844 
845 	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
846 		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
847 
848 	/* enable stripping of CRC. Newer features require
849 	 * that the HW strips the CRC.
850 	 */
851 	rctl |= IGC_RCTL_SECRC;
852 
853 	/* disable store bad packets and clear size bits. */
854 	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
855 
856 	/* enable LPE to allow for reception of jumbo frames */
857 	rctl |= IGC_RCTL_LPE;
858 
859 	/* disable queue 0 to prevent tail write w/o re-config */
860 	wr32(IGC_RXDCTL(0), 0);
861 
862 	/* This is useful for sniffing bad packets. */
863 	if (adapter->netdev->features & NETIF_F_RXALL) {
864 		/* UPE and MPE will be handled by normal PROMISC logic
865 		 * in set_rx_mode
866 		 */
867 		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
868 			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
869 			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
870 
871 		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
872 			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
873 	}
874 
875 	wr32(IGC_RCTL, rctl);
876 }
877 
878 /**
879  * igc_setup_tctl - configure the transmit control registers
880  * @adapter: Board private structure
881  */
882 static void igc_setup_tctl(struct igc_adapter *adapter)
883 {
884 	struct igc_hw *hw = &adapter->hw;
885 	u32 tctl;
886 
887 	/* disable queue 0 which could be enabled by default */
888 	wr32(IGC_TXDCTL(0), 0);
889 
890 	/* Program the Transmit Control Register */
891 	tctl = rd32(IGC_TCTL);
892 	tctl &= ~IGC_TCTL_CT;
893 	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
894 		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
895 
896 	/* Enable transmits */
897 	tctl |= IGC_TCTL_EN;
898 
899 	wr32(IGC_TCTL, tctl);
900 }
901 
902 /**
903  * igc_set_mac_filter_hw() - Set MAC address filter in hardware
904  * @adapter: Pointer to adapter where the filter should be set
905  * @index: Filter index
906  * @type: MAC address filter type (source or destination)
907  * @addr: MAC address
908  * @queue: If non-negative, queue assignment feature is enabled and frames
909  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
910  *         assignment is disabled.
911  */
912 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
913 				  enum igc_mac_filter_type type,
914 				  const u8 *addr, int queue)
915 {
916 	struct net_device *dev = adapter->netdev;
917 	struct igc_hw *hw = &adapter->hw;
918 	u32 ral, rah;
919 
920 	if (WARN_ON(index >= hw->mac.rar_entry_count))
921 		return;
922 
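	/* The first four bytes of the MAC address are programmed into RAL,
	 * the remaining two bytes into the low half of RAH.
	 */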
923 	ral = le32_to_cpup((__le32 *)(addr));
924 	rah = le16_to_cpup((__le16 *)(addr + 4));
925 
926 	if (type == IGC_MAC_FILTER_TYPE_SRC) {
927 		rah &= ~IGC_RAH_ASEL_MASK;
928 		rah |= IGC_RAH_ASEL_SRC_ADDR;
929 	}
930 
931 	if (queue >= 0) {
932 		rah &= ~IGC_RAH_QSEL_MASK;
933 		rah |= (queue << IGC_RAH_QSEL_SHIFT);
934 		rah |= IGC_RAH_QSEL_ENABLE;
935 	}
936 
937 	rah |= IGC_RAH_AV;
938 
939 	wr32(IGC_RAL(index), ral);
940 	wr32(IGC_RAH(index), rah);
941 
942 	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
943 }
944 
945 /**
946  * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
947  * @adapter: Pointer to adapter where the filter should be cleared
948  * @index: Filter index
949  */
950 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
951 {
952 	struct net_device *dev = adapter->netdev;
953 	struct igc_hw *hw = &adapter->hw;
954 
955 	if (WARN_ON(index >= hw->mac.rar_entry_count))
956 		return;
957 
958 	wr32(IGC_RAL(index), 0);
959 	wr32(IGC_RAH(index), 0);
960 
961 	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
962 }
963 
964 /* Set default MAC address for the PF in the first RAR entry */
965 static void igc_set_default_mac_filter(struct igc_adapter *adapter)
966 {
967 	struct net_device *dev = adapter->netdev;
968 	u8 *addr = adapter->hw.mac.addr;
969 
970 	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
971 
972 	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
973 }
974 
975 /**
976  * igc_set_mac - Change the Ethernet Address of the NIC
977  * @netdev: network interface device structure
978  * @p: pointer to an address structure
979  *
980  * Returns 0 on success, negative on failure
981  */
982 static int igc_set_mac(struct net_device *netdev, void *p)
983 {
984 	struct igc_adapter *adapter = netdev_priv(netdev);
985 	struct igc_hw *hw = &adapter->hw;
986 	struct sockaddr *addr = p;
987 
988 	if (!is_valid_ether_addr(addr->sa_data))
989 		return -EADDRNOTAVAIL;
990 
991 	eth_hw_addr_set(netdev, addr->sa_data);
992 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
993 
994 	/* set the correct pool for the new PF MAC address in entry 0 */
995 	igc_set_default_mac_filter(adapter);
996 
997 	return 0;
998 }
999 
1000 /**
1001  *  igc_write_mc_addr_list - write multicast addresses to MTA
1002  *  @netdev: network interface device structure
1003  *
1004  *  Writes multicast address list to the MTA hash table.
1005  *  Returns: -ENOMEM on failure
1006  *           0 on no addresses written
1007  *           X on writing X addresses to MTA
1008  **/
1009 static int igc_write_mc_addr_list(struct net_device *netdev)
1010 {
1011 	struct igc_adapter *adapter = netdev_priv(netdev);
1012 	struct igc_hw *hw = &adapter->hw;
1013 	struct netdev_hw_addr *ha;
1014 	u8  *mta_list;
1015 	int i;
1016 
1017 	if (netdev_mc_empty(netdev)) {
1018 		/* nothing to program, so clear mc list */
1019 		igc_update_mc_addr_list(hw, NULL, 0);
1020 		return 0;
1021 	}
1022 
1023 	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
1024 	if (!mta_list)
1025 		return -ENOMEM;
1026 
1027 	/* The shared function expects a packed array of only addresses. */
1028 	i = 0;
1029 	netdev_for_each_mc_addr(ha, netdev)
1030 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1031 
1032 	igc_update_mc_addr_list(hw, mta_list, i);
1033 	kfree(mta_list);
1034 
1035 	return netdev_mc_count(netdev);
1036 }
1037 
1038 static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
1039 				bool *first_flag, bool *insert_empty)
1040 {
1041 	struct igc_adapter *adapter = netdev_priv(ring->netdev);
1042 	ktime_t cycle_time = adapter->cycle_time;
1043 	ktime_t base_time = adapter->base_time;
1044 	ktime_t now = ktime_get_clocktai();
1045 	ktime_t baset_est, end_of_cycle;
1046 	s32 launchtime;
1047 	s64 n;
1048 
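	/* n is the number of whole cycles that have elapsed since base_time;
	 * baset_est is therefore the estimated start of the current cycle.
	 */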
1049 	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
1050 
1051 	baset_est = ktime_add_ns(base_time, cycle_time * (n));
1052 	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
1053 
1054 	if (ktime_compare(txtime, end_of_cycle) >= 0) {
1055 		if (baset_est != ring->last_ff_cycle) {
1056 			*first_flag = true;
1057 			ring->last_ff_cycle = baset_est;
1058 
1059 			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
1060 				*insert_empty = true;
1061 		}
1062 	}
1063 
1064 	/* Introduce a window at the end of the cycle in which packets
1065 	 * may not honor their launchtime. A 5us window is chosen to
1066 	 * account for the time software needs to update the tail pointer
1067 	 * and for the packet to be DMA'ed into the packet buffer.
1068 	 */
1069 	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
1070 		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
1071 			    txtime);
1072 
1073 	ring->last_tx_cycle = end_of_cycle;
1074 
1075 	launchtime = ktime_sub_ns(txtime, baset_est);
1076 	if (launchtime > 0)
1077 		div_s64_rem(launchtime, cycle_time, &launchtime);
1078 	else
1079 		launchtime = 0;
1080 
1081 	return cpu_to_le32(launchtime);
1082 }
1083 
1084 static int igc_init_empty_frame(struct igc_ring *ring,
1085 				struct igc_tx_buffer *buffer,
1086 				struct sk_buff *skb)
1087 {
1088 	unsigned int size;
1089 	dma_addr_t dma;
1090 
1091 	size = skb_headlen(skb);
1092 
1093 	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
1094 	if (dma_mapping_error(ring->dev, dma)) {
1095 		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
1096 		return -ENOMEM;
1097 	}
1098 
1099 	buffer->skb = skb;
1100 	buffer->protocol = 0;
1101 	buffer->bytecount = skb->len;
1102 	buffer->gso_segs = 1;
1103 	buffer->time_stamp = jiffies;
1104 	dma_unmap_len_set(buffer, len, skb->len);
1105 	dma_unmap_addr_set(buffer, dma, dma);
1106 
1107 	return 0;
1108 }
1109 
1110 static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
1111 					struct sk_buff *skb,
1112 					struct igc_tx_buffer *first)
1113 {
1114 	union igc_adv_tx_desc *desc;
1115 	u32 cmd_type, olinfo_status;
1116 	int err;
1117 
1118 	if (!igc_desc_unused(ring))
1119 		return -EBUSY;
1120 
1121 	err = igc_init_empty_frame(ring, first, skb);
1122 	if (err)
1123 		return err;
1124 
1125 	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
1126 		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
1127 		   first->bytecount;
1128 	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
1129 
1130 	desc = IGC_TX_DESC(ring, ring->next_to_use);
1131 	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1132 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1133 	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
1134 
1135 	netdev_tx_sent_queue(txring_txq(ring), skb->len);
1136 
1137 	first->next_to_watch = desc;
1138 
1139 	ring->next_to_use++;
1140 	if (ring->next_to_use == ring->count)
1141 		ring->next_to_use = 0;
1142 
1143 	return 0;
1144 }
1145 
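/* Size of the dummy frame inserted ahead of the first packet of a new cycle
 * when launchtime is enabled (minimum Ethernet frame length without FCS).
 */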
1146 #define IGC_EMPTY_FRAME_SIZE 60
1147 
1148 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1149 			    __le32 launch_time, bool first_flag,
1150 			    u32 vlan_macip_lens, u32 type_tucmd,
1151 			    u32 mss_l4len_idx)
1152 {
1153 	struct igc_adv_tx_context_desc *context_desc;
1154 	u16 i = tx_ring->next_to_use;
1155 
1156 	context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1157 
1158 	i++;
1159 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1160 
1161 	/* set bits to identify this as an advanced context descriptor */
1162 	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1163 
1164 	/* For i225, context index must be unique per ring. */
1165 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1166 		mss_l4len_idx |= tx_ring->reg_idx << 4;
1167 
1168 	if (first_flag)
1169 		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
1170 
1171 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
1172 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
1173 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
1174 	context_desc->launch_time	= launch_time;
1175 }
1176 
1177 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
1178 			__le32 launch_time, bool first_flag)
1179 {
1180 	struct sk_buff *skb = first->skb;
1181 	u32 vlan_macip_lens = 0;
1182 	u32 type_tucmd = 0;
1183 
1184 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
1185 csum_failed:
1186 		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1187 		    !tx_ring->launchtime_enable)
1188 			return;
1189 		goto no_csum;
1190 	}
1191 
1192 	switch (skb->csum_offset) {
1193 	case offsetof(struct tcphdr, check):
1194 		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1195 		fallthrough;
1196 	case offsetof(struct udphdr, check):
1197 		break;
1198 	case offsetof(struct sctphdr, checksum):
1199 		/* validate that this is actually an SCTP request */
1200 		if (skb_csum_is_sctp(skb)) {
1201 			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1202 			break;
1203 		}
1204 		fallthrough;
1205 	default:
1206 		skb_checksum_help(skb);
1207 		goto csum_failed;
1208 	}
1209 
1210 	/* update TX checksum flag */
1211 	first->tx_flags |= IGC_TX_FLAGS_CSUM;
1212 	vlan_macip_lens = skb_checksum_start_offset(skb) -
1213 			  skb_network_offset(skb);
1214 no_csum:
1215 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1216 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1217 
1218 	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1219 			vlan_macip_lens, type_tucmd, 0);
1220 }
1221 
1222 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1223 {
1224 	struct net_device *netdev = tx_ring->netdev;
1225 
1226 	netif_stop_subqueue(netdev, tx_ring->queue_index);
1227 
1228 	/* memory barrier: order the queue stop against the re-check below */
1229 	smp_mb();
1230 
1231 	/* We need to check again in case another CPU has just
1232 	 * made room available.
1233 	 */
1234 	if (igc_desc_unused(tx_ring) < size)
1235 		return -EBUSY;
1236 
1237 	/* A reprieve! */
1238 	netif_wake_subqueue(netdev, tx_ring->queue_index);
1239 
1240 	u64_stats_update_begin(&tx_ring->tx_syncp2);
1241 	tx_ring->tx_stats.restart_queue2++;
1242 	u64_stats_update_end(&tx_ring->tx_syncp2);
1243 
1244 	return 0;
1245 }
1246 
1247 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1248 {
1249 	if (igc_desc_unused(tx_ring) >= size)
1250 		return 0;
1251 	return __igc_maybe_stop_tx(tx_ring, size);
1252 }
1253 
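/* Translate the _flag bit of _input into the _result bit by scaling with the
 * ratio of the two bit values, avoiding a conditional branch.
 */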
1254 #define IGC_SET_FLAG(_input, _flag, _result) \
1255 	(((_flag) <= (_result)) ?				\
1256 	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
1257 	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
1258 
1259 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1260 {
1261 	/* set type for advanced descriptor with frame checksum insertion */
1262 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1263 		       IGC_ADVTXD_DCMD_DEXT |
1264 		       IGC_ADVTXD_DCMD_IFCS;
1265 
1266 	/* set HW vlan bit if vlan is present */
1267 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1268 				 IGC_ADVTXD_DCMD_VLE);
1269 
1270 	/* set segmentation bits for TSO */
1271 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1272 				 (IGC_ADVTXD_DCMD_TSE));
1273 
1274 	/* set timestamp bit if present, will select the register set
1275 	 * based on the _TSTAMP(_X) bit.
1276 	 */
1277 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1278 				 (IGC_ADVTXD_MAC_TSTAMP));
1279 
1280 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
1281 				 (IGC_ADVTXD_TSTAMP_REG_1));
1282 
1283 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
1284 				 (IGC_ADVTXD_TSTAMP_REG_2));
1285 
1286 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
1287 				 (IGC_ADVTXD_TSTAMP_REG_3));
1288 
1289 	/* clear frame checksum insertion (IFCS) when skb->no_fcs is set */
1290 	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1291 
1292 	return cmd_type;
1293 }
1294 
1295 static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1296 				 union igc_adv_tx_desc *tx_desc,
1297 				 u32 tx_flags, unsigned int paylen)
1298 {
1299 	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1300 
1301 	/* insert L4 checksum */
1302 	olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
1303 				      (IGC_TXD_POPTS_TXSM << 8));
1304 
1305 	/* insert IPv4 checksum */
1306 	olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
1307 				      (IGC_TXD_POPTS_IXSM << 8));
1308 
1309 	/* Use the second timer (free running, in general) for the timestamp */
1310 	olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
1311 				      IGC_TXD_PTP2_TIMER_1);
1312 
1313 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1314 }
1315 
1316 static int igc_tx_map(struct igc_ring *tx_ring,
1317 		      struct igc_tx_buffer *first,
1318 		      const u8 hdr_len)
1319 {
1320 	struct sk_buff *skb = first->skb;
1321 	struct igc_tx_buffer *tx_buffer;
1322 	union igc_adv_tx_desc *tx_desc;
1323 	u32 tx_flags = first->tx_flags;
1324 	skb_frag_t *frag;
1325 	u16 i = tx_ring->next_to_use;
1326 	unsigned int data_len, size;
1327 	dma_addr_t dma;
1328 	u32 cmd_type;
1329 
1330 	cmd_type = igc_tx_cmd_type(skb, tx_flags);
1331 	tx_desc = IGC_TX_DESC(tx_ring, i);
1332 
1333 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1334 
1335 	size = skb_headlen(skb);
1336 	data_len = skb->data_len;
1337 
1338 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1339 
1340 	tx_buffer = first;
1341 
1342 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1343 		if (dma_mapping_error(tx_ring->dev, dma))
1344 			goto dma_error;
1345 
1346 		/* record length, and DMA address */
1347 		dma_unmap_len_set(tx_buffer, len, size);
1348 		dma_unmap_addr_set(tx_buffer, dma, dma);
1349 
1350 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
1351 
1352 		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1353 			tx_desc->read.cmd_type_len =
1354 				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1355 
1356 			i++;
1357 			tx_desc++;
1358 			if (i == tx_ring->count) {
1359 				tx_desc = IGC_TX_DESC(tx_ring, 0);
1360 				i = 0;
1361 			}
1362 			tx_desc->read.olinfo_status = 0;
1363 
1364 			dma += IGC_MAX_DATA_PER_TXD;
1365 			size -= IGC_MAX_DATA_PER_TXD;
1366 
1367 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
1368 		}
1369 
1370 		if (likely(!data_len))
1371 			break;
1372 
1373 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1374 
1375 		i++;
1376 		tx_desc++;
1377 		if (i == tx_ring->count) {
1378 			tx_desc = IGC_TX_DESC(tx_ring, 0);
1379 			i = 0;
1380 		}
1381 		tx_desc->read.olinfo_status = 0;
1382 
1383 		size = skb_frag_size(frag);
1384 		data_len -= size;
1385 
1386 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1387 				       size, DMA_TO_DEVICE);
1388 
1389 		tx_buffer = &tx_ring->tx_buffer_info[i];
1390 	}
1391 
1392 	/* write last descriptor with RS and EOP bits */
1393 	cmd_type |= size | IGC_TXD_DCMD;
1394 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1395 
1396 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1397 
1398 	/* set the timestamp */
1399 	first->time_stamp = jiffies;
1400 
1401 	skb_tx_timestamp(skb);
1402 
1403 	/* Force memory writes to complete before letting h/w know there
1404 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
1405 	 * memory model archs, such as IA-64).
1406 	 *
1407 	 * We also need this memory barrier to make certain all of the
1408 	 * status bits have been updated before next_to_watch is written.
1409 	 */
1410 	wmb();
1411 
1412 	/* set next_to_watch value indicating a packet is present */
1413 	first->next_to_watch = tx_desc;
1414 
1415 	i++;
1416 	if (i == tx_ring->count)
1417 		i = 0;
1418 
1419 	tx_ring->next_to_use = i;
1420 
1421 	/* Make sure there is space in the ring for the next send. */
1422 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1423 
1424 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1425 		writel(i, tx_ring->tail);
1426 	}
1427 
1428 	return 0;
1429 dma_error:
1430 	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1431 	tx_buffer = &tx_ring->tx_buffer_info[i];
1432 
1433 	/* clear dma mappings for failed tx_buffer_info map */
1434 	while (tx_buffer != first) {
1435 		if (dma_unmap_len(tx_buffer, len))
1436 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1437 
1438 		if (i-- == 0)
1439 			i += tx_ring->count;
1440 		tx_buffer = &tx_ring->tx_buffer_info[i];
1441 	}
1442 
1443 	if (dma_unmap_len(tx_buffer, len))
1444 		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1445 
1446 	dev_kfree_skb_any(tx_buffer->skb);
1447 	tx_buffer->skb = NULL;
1448 
1449 	tx_ring->next_to_use = i;
1450 
1451 	return -1;
1452 }
1453 
1454 static int igc_tso(struct igc_ring *tx_ring,
1455 		   struct igc_tx_buffer *first,
1456 		   __le32 launch_time, bool first_flag,
1457 		   u8 *hdr_len)
1458 {
1459 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1460 	struct sk_buff *skb = first->skb;
1461 	union {
1462 		struct iphdr *v4;
1463 		struct ipv6hdr *v6;
1464 		unsigned char *hdr;
1465 	} ip;
1466 	union {
1467 		struct tcphdr *tcp;
1468 		struct udphdr *udp;
1469 		unsigned char *hdr;
1470 	} l4;
1471 	u32 paylen, l4_offset;
1472 	int err;
1473 
1474 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1475 		return 0;
1476 
1477 	if (!skb_is_gso(skb))
1478 		return 0;
1479 
1480 	err = skb_cow_head(skb, 0);
1481 	if (err < 0)
1482 		return err;
1483 
1484 	ip.hdr = skb_network_header(skb);
1485 	l4.hdr = skb_checksum_start(skb);
1486 
1487 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1488 	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1489 
1490 	/* initialize outer IP header fields */
1491 	if (ip.v4->version == 4) {
1492 		unsigned char *csum_start = skb_checksum_start(skb);
1493 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1494 
1495 		/* IP header will have to cancel out any data that
1496 		 * is not a part of the outer IP header
1497 		 */
1498 		ip.v4->check = csum_fold(csum_partial(trans_start,
1499 						      csum_start - trans_start,
1500 						      0));
1501 		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1502 
1503 		ip.v4->tot_len = 0;
1504 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1505 				   IGC_TX_FLAGS_CSUM |
1506 				   IGC_TX_FLAGS_IPV4;
1507 	} else {
1508 		ip.v6->payload_len = 0;
1509 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1510 				   IGC_TX_FLAGS_CSUM;
1511 	}
1512 
1513 	/* determine offset of inner transport header */
1514 	l4_offset = l4.hdr - skb->data;
1515 
1516 	/* remove payload length from inner checksum */
1517 	paylen = skb->len - l4_offset;
1518 	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1519 		/* compute length of segmentation header */
1520 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
1521 		csum_replace_by_diff(&l4.tcp->check,
1522 				     (__force __wsum)htonl(paylen));
1523 	} else {
1524 		/* compute length of segmentation header */
1525 		*hdr_len = sizeof(*l4.udp) + l4_offset;
1526 		csum_replace_by_diff(&l4.udp->check,
1527 				     (__force __wsum)htonl(paylen));
1528 	}
1529 
1530 	/* update gso size and bytecount with header size */
1531 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1532 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
1533 
1534 	/* MSS L4LEN IDX */
1535 	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1536 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1537 
1538 	/* VLAN MACLEN IPLEN */
1539 	vlan_macip_lens = l4.hdr - ip.hdr;
1540 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1541 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1542 
1543 	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1544 			vlan_macip_lens, type_tucmd, mss_l4len_idx);
1545 
1546 	return 1;
1547 }
1548 
1549 static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
1550 {
1551 	int i;
1552 
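	/* Claim the first free timestamp register slot; the skb reference is
	 * held until the hardware timestamp is retrieved.
	 */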
1553 	for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
1554 		struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
1555 
1556 		if (tstamp->skb)
1557 			continue;
1558 
1559 		tstamp->skb = skb_get(skb);
1560 		tstamp->start = jiffies;
1561 		*flags = tstamp->flags;
1562 
1563 		return true;
1564 	}
1565 
1566 	return false;
1567 }
1568 
1569 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1570 				       struct igc_ring *tx_ring)
1571 {
1572 	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1573 	bool first_flag = false, insert_empty = false;
1574 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
1575 	__be16 protocol = vlan_get_protocol(skb);
1576 	struct igc_tx_buffer *first;
1577 	__le32 launch_time = 0;
1578 	u32 tx_flags = 0;
1579 	unsigned short f;
1580 	ktime_t txtime;
1581 	u8 hdr_len = 0;
1582 	int tso = 0;
1583 
1584 	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1585 	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1586 	 *	+ 2 desc gap to keep tail from touching head,
1587 	 *	+ 1 desc for context descriptor,
1588 	 * otherwise try next time
1589 	 */
1590 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1591 		count += TXD_USE_COUNT(skb_frag_size(
1592 						&skb_shinfo(skb)->frags[f]));
1593 
1594 	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
1595 		/* this is a hard error */
1596 		return NETDEV_TX_BUSY;
1597 	}
1598 
1599 	if (!tx_ring->launchtime_enable)
1600 		goto done;
1601 
1602 	txtime = skb->tstamp;
1603 	skb->tstamp = ktime_set(0, 0);
1604 	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
1605 
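	/* When a new cycle begins, a dummy frame (with its own context
	 * descriptor) is queued ahead of the real packet.
	 */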
1606 	if (insert_empty) {
1607 		struct igc_tx_buffer *empty_info;
1608 		struct sk_buff *empty;
1609 		void *data;
1610 
1611 		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1612 		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
1613 		if (!empty)
1614 			goto done;
1615 
1616 		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
1617 		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
1618 
1619 		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
1620 
1621 		if (igc_init_tx_empty_descriptor(tx_ring,
1622 						 empty,
1623 						 empty_info) < 0)
1624 			dev_kfree_skb_any(empty);
1625 	}
1626 
1627 done:
1628 	/* record the location of the first descriptor for this packet */
1629 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1630 	first->type = IGC_TX_BUFFER_TYPE_SKB;
1631 	first->skb = skb;
1632 	first->bytecount = skb->len;
1633 	first->gso_segs = 1;
1634 
1635 	if (adapter->qbv_transition || tx_ring->oper_gate_closed)
1636 		goto out_drop;
1637 
1638 	if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
1639 		adapter->stats.txdrop++;
1640 		goto out_drop;
1641 	}
1642 
1643 	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
1644 		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1645 		unsigned long flags;
1646 		u32 tstamp_flags;
1647 
1648 		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
1649 		if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
1650 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1651 			tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
1652 			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
1653 				tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
1654 		} else {
1655 			adapter->tx_hwtstamp_skipped++;
1656 		}
1657 
1658 		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
1659 	}
1660 
1661 	if (skb_vlan_tag_present(skb)) {
1662 		tx_flags |= IGC_TX_FLAGS_VLAN;
1663 		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1664 	}
1665 
1666 	/* record initial flags and protocol */
1667 	first->tx_flags = tx_flags;
1668 	first->protocol = protocol;
1669 
1670 	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
1671 	if (tso < 0)
1672 		goto out_drop;
1673 	else if (!tso)
1674 		igc_tx_csum(tx_ring, first, launch_time, first_flag);
1675 
1676 	igc_tx_map(tx_ring, first, hdr_len);
1677 
1678 	return NETDEV_TX_OK;
1679 
1680 out_drop:
1681 	dev_kfree_skb_any(first->skb);
1682 	first->skb = NULL;
1683 
1684 	return NETDEV_TX_OK;
1685 }
1686 
1687 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1688 						    struct sk_buff *skb)
1689 {
1690 	unsigned int r_idx = skb->queue_mapping;
1691 
1692 	if (r_idx >= adapter->num_tx_queues)
1693 		r_idx = r_idx % adapter->num_tx_queues;
1694 
1695 	return adapter->tx_ring[r_idx];
1696 }
1697 
1698 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1699 				  struct net_device *netdev)
1700 {
1701 	struct igc_adapter *adapter = netdev_priv(netdev);
1702 
1703 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1704 	 * in order to meet this minimum size requirement.
1705 	 */
1706 	if (skb->len < 17) {
1707 		if (skb_padto(skb, 17))
1708 			return NETDEV_TX_OK;
1709 		skb->len = 17;
1710 	}
1711 
1712 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1713 }
1714 
1715 static void igc_rx_checksum(struct igc_ring *ring,
1716 			    union igc_adv_rx_desc *rx_desc,
1717 			    struct sk_buff *skb)
1718 {
1719 	skb_checksum_none_assert(skb);
1720 
1721 	/* Ignore Checksum bit is set */
1722 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1723 		return;
1724 
1725 	/* Rx checksum disabled via ethtool */
1726 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
1727 		return;
1728 
1729 	/* TCP/UDP checksum error bit is set */
1730 	if (igc_test_staterr(rx_desc,
1731 			     IGC_RXDEXT_STATERR_L4E |
1732 			     IGC_RXDEXT_STATERR_IPE)) {
1733 		/* work around errata with sctp packets where the TCPE aka
1734 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1735 		 * packets (aka let the stack check the crc32c)
1736 		 */
1737 		if (!(skb->len == 60 &&
1738 		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1739 			u64_stats_update_begin(&ring->rx_syncp);
1740 			ring->rx_stats.csum_err++;
1741 			u64_stats_update_end(&ring->rx_syncp);
1742 		}
1743 		/* let the stack verify checksum errors */
1744 		return;
1745 	}
1746 	/* It must be a TCP or UDP packet with a valid checksum */
1747 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1748 				      IGC_RXD_STAT_UDPCS))
1749 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1750 
1751 	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1752 		   le32_to_cpu(rx_desc->wb.upper.status_error));
1753 }
1754 
1755 /* Mapping HW RSS Type to enum pkt_hash_types */
1756 static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
1757 	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
1758 	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
1759 	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
1760 	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
1761 	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
1762 	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
1763 	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
1764 	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
1765 	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
1766 	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
1767 	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
1768 	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
1769 	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
1770 	[13] = PKT_HASH_TYPE_NONE,
1771 	[14] = PKT_HASH_TYPE_NONE,
1772 	[15] = PKT_HASH_TYPE_NONE,
1773 };
1774 
1775 static inline void igc_rx_hash(struct igc_ring *ring,
1776 			       union igc_adv_rx_desc *rx_desc,
1777 			       struct sk_buff *skb)
1778 {
1779 	if (ring->netdev->features & NETIF_F_RXHASH) {
1780 		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1781 		u32 rss_type = igc_rss_type(rx_desc);
1782 
1783 		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
1784 	}
1785 }
1786 
1787 static void igc_rx_vlan(struct igc_ring *rx_ring,
1788 			union igc_adv_rx_desc *rx_desc,
1789 			struct sk_buff *skb)
1790 {
1791 	struct net_device *dev = rx_ring->netdev;
1792 	u16 vid;
1793 
1794 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1795 	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1796 		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1797 		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1798 			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1799 		else
1800 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1801 
1802 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1803 	}
1804 }
1805 
1806 /**
1807  * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1808  * @rx_ring: rx descriptor ring packet is being transacted on
1809  * @rx_desc: pointer to the EOP Rx descriptor
1810  * @skb: pointer to current skb being populated
1811  *
1812  * This function checks the ring, descriptor, and packet information in order
1813  * to populate the hash, checksum, VLAN, protocol, and other fields within the
1814  * skb.
1815  */
1816 static void igc_process_skb_fields(struct igc_ring *rx_ring,
1817 				   union igc_adv_rx_desc *rx_desc,
1818 				   struct sk_buff *skb)
1819 {
1820 	igc_rx_hash(rx_ring, rx_desc, skb);
1821 
1822 	igc_rx_checksum(rx_ring, rx_desc, skb);
1823 
1824 	igc_rx_vlan(rx_ring, rx_desc, skb);
1825 
1826 	skb_record_rx_queue(skb, rx_ring->queue_index);
1827 
1828 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1829 }
1830 
1831 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1832 {
1833 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1834 	struct igc_adapter *adapter = netdev_priv(netdev);
1835 	struct igc_hw *hw = &adapter->hw;
1836 	u32 ctrl;
1837 
1838 	ctrl = rd32(IGC_CTRL);
1839 
1840 	if (enable) {
1841 		/* enable VLAN tag insert/strip */
1842 		ctrl |= IGC_CTRL_VME;
1843 	} else {
1844 		/* disable VLAN tag insert/strip */
1845 		ctrl &= ~IGC_CTRL_VME;
1846 	}
1847 	wr32(IGC_CTRL, ctrl);
1848 }
1849 
1850 static void igc_restore_vlan(struct igc_adapter *adapter)
1851 {
1852 	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1853 }
1854 
1855 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1856 					       const unsigned int size,
1857 					       int *rx_buffer_pgcnt)
1858 {
1859 	struct igc_rx_buffer *rx_buffer;
1860 
1861 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1862 	*rx_buffer_pgcnt =
1863 #if (PAGE_SIZE < 8192)
1864 		page_count(rx_buffer->page);
1865 #else
1866 		0;
1867 #endif
1868 	prefetchw(rx_buffer->page);
1869 
1870 	/* we are reusing so sync this buffer for CPU use */
1871 	dma_sync_single_range_for_cpu(rx_ring->dev,
1872 				      rx_buffer->dma,
1873 				      rx_buffer->page_offset,
1874 				      size,
1875 				      DMA_FROM_DEVICE);
1876 
1877 	rx_buffer->pagecnt_bias--;
1878 
1879 	return rx_buffer;
1880 }
1881 
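/* Advance the buffer offset past the data just used: on small (<8K) pages the
 * offset toggles between the two page halves, on larger pages it moves forward
 * by truesize.
 */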
1882 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1883 			       unsigned int truesize)
1884 {
1885 #if (PAGE_SIZE < 8192)
1886 	buffer->page_offset ^= truesize;
1887 #else
1888 	buffer->page_offset += truesize;
1889 #endif
1890 }
1891 
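/* Compute the truesize charged for a received frame: half a page on small
 * (<8K) pages, otherwise the aligned data size plus build_skb overhead
 * (shared info and IGC_SKB_PAD headroom) when the ring uses build_skb.
 */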
1892 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1893 					      unsigned int size)
1894 {
1895 	unsigned int truesize;
1896 
1897 #if (PAGE_SIZE < 8192)
1898 	truesize = igc_rx_pg_size(ring) / 2;
1899 #else
1900 	truesize = ring_uses_build_skb(ring) ?
1901 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1902 		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1903 		   SKB_DATA_ALIGN(size);
1904 #endif
1905 	return truesize;
1906 }
1907 
1908 /**
1909  * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1910  * @rx_ring: rx descriptor ring to transact packets on
1911  * @rx_buffer: buffer containing page to add
1912  * @skb: sk_buff to place the data into
1913  * @size: size of buffer to be added
1914  *
1915  * This function will add the data contained in rx_buffer->page to the skb.
1916  */
1917 static void igc_add_rx_frag(struct igc_ring *rx_ring,
1918 			    struct igc_rx_buffer *rx_buffer,
1919 			    struct sk_buff *skb,
1920 			    unsigned int size)
1921 {
1922 	unsigned int truesize;
1923 
1924 #if (PAGE_SIZE < 8192)
1925 	truesize = igc_rx_pg_size(rx_ring) / 2;
1926 #else
1927 	truesize = ring_uses_build_skb(rx_ring) ?
1928 		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1929 		   SKB_DATA_ALIGN(size);
1930 #endif
1931 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1932 			rx_buffer->page_offset, size, truesize);
1933 
1934 	igc_rx_buffer_flip(rx_buffer, truesize);
1935 }
1936 
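/* Build an skb directly around the existing page buffer without copying. The
 * headroom in front of xdp->data is preserved via skb_reserve() and any XDP
 * metadata is carried over with skb_metadata_set().
 */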
1937 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1938 				     struct igc_rx_buffer *rx_buffer,
1939 				     struct xdp_buff *xdp)
1940 {
1941 	unsigned int size = xdp->data_end - xdp->data;
1942 	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1943 	unsigned int metasize = xdp->data - xdp->data_meta;
1944 	struct sk_buff *skb;
1945 
1946 	/* prefetch first cache line of first page */
1947 	net_prefetch(xdp->data_meta);
1948 
1949 	/* build an skb around the page buffer */
1950 	skb = napi_build_skb(xdp->data_hard_start, truesize);
1951 	if (unlikely(!skb))
1952 		return NULL;
1953 
1954 	/* update pointers within the skb to store the data */
1955 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
1956 	__skb_put(skb, size);
1957 	if (metasize)
1958 		skb_metadata_set(skb, metasize);
1959 
1960 	igc_rx_buffer_flip(rx_buffer, truesize);
1961 	return skb;
1962 }
1963 
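/* Allocate a small skb, copy the packet headers (and XDP metadata) into its
 * linear area and attach the remaining payload as a page fragment. Used when
 * the ring does not use build_skb.
 */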
1964 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1965 					 struct igc_rx_buffer *rx_buffer,
1966 					 struct igc_xdp_buff *ctx)
1967 {
1968 	struct xdp_buff *xdp = &ctx->xdp;
1969 	unsigned int metasize = xdp->data - xdp->data_meta;
1970 	unsigned int size = xdp->data_end - xdp->data;
1971 	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1972 	void *va = xdp->data;
1973 	unsigned int headlen;
1974 	struct sk_buff *skb;
1975 
1976 	/* prefetch first cache line of first page */
1977 	net_prefetch(xdp->data_meta);
1978 
1979 	/* allocate a skb to store the frags */
1980 	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1981 			     IGC_RX_HDR_LEN + metasize);
1982 	if (unlikely(!skb))
1983 		return NULL;
1984 
1985 	if (ctx->rx_ts) {
1986 		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
1987 		skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
1988 	}
1989 
1990 	/* Determine available headroom for copy */
1991 	headlen = size;
1992 	if (headlen > IGC_RX_HDR_LEN)
1993 		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1994 
1995 	/* align pull length to size of long to optimize memcpy performance */
1996 	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1997 	       ALIGN(headlen + metasize, sizeof(long)));
1998 
1999 	if (metasize) {
2000 		skb_metadata_set(skb, metasize);
2001 		__skb_pull(skb, metasize);
2002 	}
2003 
2004 	/* update all of the pointers */
2005 	size -= headlen;
2006 	if (size) {
2007 		skb_add_rx_frag(skb, 0, rx_buffer->page,
2008 				(va + headlen) - page_address(rx_buffer->page),
2009 				size, truesize);
2010 		igc_rx_buffer_flip(rx_buffer, truesize);
2011 	} else {
2012 		rx_buffer->pagecnt_bias++;
2013 	}
2014 
2015 	return skb;
2016 }
2017 
2018 /**
2019  * igc_reuse_rx_page - page flip buffer and store it back on the ring
2020  * @rx_ring: rx descriptor ring to store buffers on
2021  * @old_buff: donor buffer to have page reused
2022  *
2023  * Synchronizes page for reuse by the adapter
2024  */
2025 static void igc_reuse_rx_page(struct igc_ring *rx_ring,
2026 			      struct igc_rx_buffer *old_buff)
2027 {
2028 	u16 nta = rx_ring->next_to_alloc;
2029 	struct igc_rx_buffer *new_buff;
2030 
2031 	new_buff = &rx_ring->rx_buffer_info[nta];
2032 
2033 	/* update, and store next to alloc */
2034 	nta++;
2035 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
2036 
2037 	/* Transfer page from old buffer to new buffer.
2038 	 * Move each member individually to avoid possible store
2039 	 * forwarding stalls.
2040 	 */
2041 	new_buff->dma		= old_buff->dma;
2042 	new_buff->page		= old_buff->page;
2043 	new_buff->page_offset	= old_buff->page_offset;
2044 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
2045 }
2046 
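/* Decide whether the half-page buffer can be recycled: the page must be local
 * and not pfmemalloc, and the driver must be the only remaining owner (or, on
 * large pages, there must still be room for another buffer).
 */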
2047 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
2048 				  int rx_buffer_pgcnt)
2049 {
2050 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
2051 	struct page *page = rx_buffer->page;
2052 
2053 	/* avoid re-using remote and pfmemalloc pages */
2054 	if (!dev_page_is_reusable(page))
2055 		return false;
2056 
2057 #if (PAGE_SIZE < 8192)
2058 	/* if we are the only owner of the page we can reuse it */
2059 	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
2060 		return false;
2061 #else
2062 #define IGC_LAST_OFFSET \
2063 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
2064 
2065 	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
2066 		return false;
2067 #endif
2068 
2069 	/* If we have drained the page fragment pool we need to update
2070 	 * the pagecnt_bias and page count so that we fully restock the
2071 	 * number of references the driver holds.
2072 	 */
2073 	if (unlikely(pagecnt_bias == 1)) {
2074 		page_ref_add(page, USHRT_MAX - 1);
2075 		rx_buffer->pagecnt_bias = USHRT_MAX;
2076 	}
2077 
2078 	return true;
2079 }
2080 
2081 /**
2082  * igc_is_non_eop - process handling of non-EOP buffers
2083  * @rx_ring: Rx ring being processed
2084  * @rx_desc: Rx descriptor for current buffer
2085  *
2086  * This function updates next to clean.  If the buffer is an EOP buffer
2087  * this function exits returning false, otherwise it will place the
2088  * sk_buff in the next buffer to be chained and return true indicating
2089  * that this is in fact a non-EOP buffer.
2090  */
2091 static bool igc_is_non_eop(struct igc_ring *rx_ring,
2092 			   union igc_adv_rx_desc *rx_desc)
2093 {
2094 	u32 ntc = rx_ring->next_to_clean + 1;
2095 
2096 	/* fetch, update, and store next to clean */
2097 	ntc = (ntc < rx_ring->count) ? ntc : 0;
2098 	rx_ring->next_to_clean = ntc;
2099 
2100 	prefetch(IGC_RX_DESC(rx_ring, ntc));
2101 
2102 	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
2103 		return false;
2104 
2105 	return true;
2106 }
2107 
2108 /**
2109  * igc_cleanup_headers - Correct corrupted or empty headers
2110  * @rx_ring: rx descriptor ring packet is being transacted on
2111  * @rx_desc: pointer to the EOP Rx descriptor
2112  * @skb: pointer to current skb being fixed
2113  *
2114  * Address the case where we are pulling data in on pages only
2115  * and as such no data is present in the skb header.
2116  *
2117  * In addition if skb is not at least 60 bytes we need to pad it so that
2118  * it is large enough to qualify as a valid Ethernet frame.
2119  *
2120  * Returns true if an error was encountered and skb was freed.
2121  */
2122 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
2123 				union igc_adv_rx_desc *rx_desc,
2124 				struct sk_buff *skb)
2125 {
2126 	/* XDP packets use error pointer so abort at this point */
2127 	if (IS_ERR(skb))
2128 		return true;
2129 
2130 	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
2131 		struct net_device *netdev = rx_ring->netdev;
2132 
2133 		if (!(netdev->features & NETIF_F_RXALL)) {
2134 			dev_kfree_skb_any(skb);
2135 			return true;
2136 		}
2137 	}
2138 
2139 	/* if eth_skb_pad returns an error the skb was freed */
2140 	if (eth_skb_pad(skb))
2141 		return true;
2142 
2143 	return false;
2144 }
2145 
2146 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
2147 			      struct igc_rx_buffer *rx_buffer,
2148 			      int rx_buffer_pgcnt)
2149 {
2150 	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2151 		/* hand second half of page back to the ring */
2152 		igc_reuse_rx_page(rx_ring, rx_buffer);
2153 	} else {
2154 		/* We are not reusing the buffer so unmap it and free
2155 		 * any references we are holding to it
2156 		 */
2157 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2158 				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
2159 				     IGC_RX_DMA_ATTR);
2160 		__page_frag_cache_drain(rx_buffer->page,
2161 					rx_buffer->pagecnt_bias);
2162 	}
2163 
2164 	/* clear contents of rx_buffer */
2165 	rx_buffer->page = NULL;
2166 }
2167 
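/* Headroom reserved in front of the packet data: IGC_SKB_PAD on build_skb
 * rings, XDP_PACKET_HEADROOM when an XDP program is attached, otherwise 0.
 */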
2168 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
2169 {
2170 	struct igc_adapter *adapter = rx_ring->q_vector->adapter;
2171 
2172 	if (ring_uses_build_skb(rx_ring))
2173 		return IGC_SKB_PAD;
2174 	if (igc_xdp_is_enabled(adapter))
2175 		return XDP_PACKET_HEADROOM;
2176 
2177 	return 0;
2178 }
2179 
2180 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
2181 				  struct igc_rx_buffer *bi)
2182 {
2183 	struct page *page = bi->page;
2184 	dma_addr_t dma;
2185 
2186 	/* since we are recycling buffers we should seldom need to alloc */
2187 	if (likely(page))
2188 		return true;
2189 
2190 	/* alloc new page for storage */
2191 	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
2192 	if (unlikely(!page)) {
2193 		rx_ring->rx_stats.alloc_failed++;
2194 		return false;
2195 	}
2196 
2197 	/* map page for use */
2198 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
2199 				 igc_rx_pg_size(rx_ring),
2200 				 DMA_FROM_DEVICE,
2201 				 IGC_RX_DMA_ATTR);
2202 
2203 	/* if mapping failed free memory back to system since
2204 	 * there isn't much point in holding memory we can't use
2205 	 */
2206 	if (dma_mapping_error(rx_ring->dev, dma)) {
2207 		__free_page(page);
2208 
2209 		rx_ring->rx_stats.alloc_failed++;
2210 		return false;
2211 	}
2212 
2213 	bi->dma = dma;
2214 	bi->page = page;
2215 	bi->page_offset = igc_rx_offset(rx_ring);
2216 	page_ref_add(page, USHRT_MAX - 1);
2217 	bi->pagecnt_bias = USHRT_MAX;
2218 
2219 	return true;
2220 }
2221 
2222 /**
2223  * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2224  * @rx_ring: rx descriptor ring
2225  * @cleaned_count: number of descriptors to refill with fresh buffers
2226  */
2227 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
2228 {
2229 	union igc_adv_rx_desc *rx_desc;
2230 	u16 i = rx_ring->next_to_use;
2231 	struct igc_rx_buffer *bi;
2232 	u16 bufsz;
2233 
2234 	/* nothing to do */
2235 	if (!cleaned_count)
2236 		return;
2237 
2238 	rx_desc = IGC_RX_DESC(rx_ring, i);
2239 	bi = &rx_ring->rx_buffer_info[i];
2240 	i -= rx_ring->count;
2241 
2242 	bufsz = igc_rx_bufsz(rx_ring);
2243 
2244 	do {
2245 		if (!igc_alloc_mapped_page(rx_ring, bi))
2246 			break;
2247 
2248 		/* sync the buffer for use by the device */
2249 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2250 						 bi->page_offset, bufsz,
2251 						 DMA_FROM_DEVICE);
2252 
2253 		/* Refresh the desc even if buffer_addrs didn't change
2254 		 * because each write-back erases this info.
2255 		 */
2256 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2257 
2258 		rx_desc++;
2259 		bi++;
2260 		i++;
2261 		if (unlikely(!i)) {
2262 			rx_desc = IGC_RX_DESC(rx_ring, 0);
2263 			bi = rx_ring->rx_buffer_info;
2264 			i -= rx_ring->count;
2265 		}
2266 
2267 		/* clear the length for the next_to_use descriptor */
2268 		rx_desc->wb.upper.length = 0;
2269 
2270 		cleaned_count--;
2271 	} while (cleaned_count);
2272 
2273 	i += rx_ring->count;
2274 
2275 	if (rx_ring->next_to_use != i) {
2276 		/* record the next descriptor to use */
2277 		rx_ring->next_to_use = i;
2278 
2279 		/* update next to alloc since we have filled the ring */
2280 		rx_ring->next_to_alloc = i;
2281 
2282 		/* Force memory writes to complete before letting h/w
2283 		 * know there are new descriptors to fetch.  (Only
2284 		 * applicable for weak-ordered memory model archs,
2285 		 * such as IA-64).
2286 		 */
2287 		wmb();
2288 		writel(i, rx_ring->tail);
2289 	}
2290 }
2291 
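/* AF_XDP zero-copy counterpart of igc_alloc_rx_buffers(): refill the Rx ring
 * with buffers taken from the XSK pool. Returns false if the pool ran out of
 * buffers before 'count' descriptors could be posted.
 */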
2292 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2293 {
2294 	union igc_adv_rx_desc *desc;
2295 	u16 i = ring->next_to_use;
2296 	struct igc_rx_buffer *bi;
2297 	dma_addr_t dma;
2298 	bool ok = true;
2299 
2300 	if (!count)
2301 		return ok;
2302 
2303 	XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
2304 
2305 	desc = IGC_RX_DESC(ring, i);
2306 	bi = &ring->rx_buffer_info[i];
2307 	i -= ring->count;
2308 
2309 	do {
2310 		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2311 		if (!bi->xdp) {
2312 			ok = false;
2313 			break;
2314 		}
2315 
2316 		dma = xsk_buff_xdp_get_dma(bi->xdp);
2317 		desc->read.pkt_addr = cpu_to_le64(dma);
2318 
2319 		desc++;
2320 		bi++;
2321 		i++;
2322 		if (unlikely(!i)) {
2323 			desc = IGC_RX_DESC(ring, 0);
2324 			bi = ring->rx_buffer_info;
2325 			i -= ring->count;
2326 		}
2327 
2328 		/* Clear the length for the next_to_use descriptor. */
2329 		desc->wb.upper.length = 0;
2330 
2331 		count--;
2332 	} while (count);
2333 
2334 	i += ring->count;
2335 
2336 	if (ring->next_to_use != i) {
2337 		ring->next_to_use = i;
2338 
2339 		/* Force memory writes to complete before letting h/w
2340 		 * know there are new descriptors to fetch.  (Only
2341 		 * applicable for weak-ordered memory model archs,
2342 		 * such as IA-64).
2343 		 */
2344 		wmb();
2345 		writel(i, ring->tail);
2346 	}
2347 
2348 	return ok;
2349 }
2350 
2351 /* This function requires __netif_tx_lock is held by the caller. */
2352 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2353 				      struct xdp_frame *xdpf)
2354 {
2355 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2356 	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2357 	u16 count, index = ring->next_to_use;
2358 	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
2359 	struct igc_tx_buffer *buffer = head;
2360 	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
2361 	u32 olinfo_status, len = xdpf->len, cmd_type;
2362 	void *data = xdpf->data;
2363 	u16 i;
2364 
2365 	count = TXD_USE_COUNT(len);
2366 	for (i = 0; i < nr_frags; i++)
2367 		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
2368 
2369 	if (igc_maybe_stop_tx(ring, count + 3)) {
2370 		/* this is a hard error */
2371 		return -EBUSY;
2372 	}
2373 
2374 	i = 0;
2375 	head->bytecount = xdp_get_frame_len(xdpf);
2376 	head->type = IGC_TX_BUFFER_TYPE_XDP;
2377 	head->gso_segs = 1;
2378 	head->xdpf = xdpf;
2379 
2380 	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2381 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2382 
2383 	for (;;) {
2384 		dma_addr_t dma;
2385 
2386 		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
2387 		if (dma_mapping_error(ring->dev, dma)) {
2388 			netdev_err_once(ring->netdev,
2389 					"Failed to map DMA for TX\n");
2390 			goto unmap;
2391 		}
2392 
2393 		dma_unmap_len_set(buffer, len, len);
2394 		dma_unmap_addr_set(buffer, dma, dma);
2395 
2396 		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2397 			   IGC_ADVTXD_DCMD_IFCS | len;
2398 
2399 		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2400 		desc->read.buffer_addr = cpu_to_le64(dma);
2401 
2402 		buffer->protocol = 0;
2403 
2404 		if (++index == ring->count)
2405 			index = 0;
2406 
2407 		if (i == nr_frags)
2408 			break;
2409 
2410 		buffer = &ring->tx_buffer_info[index];
2411 		desc = IGC_TX_DESC(ring, index);
2412 		desc->read.olinfo_status = 0;
2413 
2414 		data = skb_frag_address(&sinfo->frags[i]);
2415 		len = skb_frag_size(&sinfo->frags[i]);
2416 		i++;
2417 	}
2418 	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
2419 
2420 	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
2421 	/* set the timestamp */
2422 	head->time_stamp = jiffies;
2423 	/* set next_to_watch value indicating a packet is present */
2424 	head->next_to_watch = desc;
2425 	ring->next_to_use = index;
2426 
2427 	return 0;
2428 
2429 unmap:
2430 	for (;;) {
2431 		buffer = &ring->tx_buffer_info[index];
2432 		if (dma_unmap_len(buffer, len))
2433 			dma_unmap_page(ring->dev,
2434 				       dma_unmap_addr(buffer, dma),
2435 				       dma_unmap_len(buffer, len),
2436 				       DMA_TO_DEVICE);
2437 		dma_unmap_len_set(buffer, len, 0);
2438 		if (buffer == head)
2439 			break;
2440 
2441 		if (!index)
2442 			index += ring->count;
2443 		index--;
2444 	}
2445 
2446 	return -ENOMEM;
2447 }
2448 
2449 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2450 					    int cpu)
2451 {
2452 	int index = cpu;
2453 
2454 	if (unlikely(index < 0))
2455 		index = 0;
2456 
2457 	while (index >= adapter->num_tx_queues)
2458 		index -= adapter->num_tx_queues;
2459 
2460 	return adapter->tx_ring[index];
2461 }
2462 
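/* Transmit an XDP_TX buffer back out of this device: convert the xdp_buff to
 * an xdp_frame and queue it on the Tx ring selected for the current CPU,
 * under the netdev Tx queue lock.
 */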
2463 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2464 {
2465 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2466 	int cpu = smp_processor_id();
2467 	struct netdev_queue *nq;
2468 	struct igc_ring *ring;
2469 	int res;
2470 
2471 	if (unlikely(!xdpf))
2472 		return -EFAULT;
2473 
2474 	ring = igc_xdp_get_tx_ring(adapter, cpu);
2475 	nq = txring_txq(ring);
2476 
2477 	__netif_tx_lock(nq, cpu);
2478 	/* Avoid transmit queue timeout since we share it with the slow path */
2479 	txq_trans_cond_update(nq);
2480 	res = igc_xdp_init_tx_descriptor(ring, xdpf);
2481 	__netif_tx_unlock(nq);
2482 	return res;
2483 }
2484 
2485 /* This function assumes rcu_read_lock() is held by the caller. */
2486 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2487 			      struct bpf_prog *prog,
2488 			      struct xdp_buff *xdp)
2489 {
2490 	u32 act = bpf_prog_run_xdp(prog, xdp);
2491 
2492 	switch (act) {
2493 	case XDP_PASS:
2494 		return IGC_XDP_PASS;
2495 	case XDP_TX:
2496 		if (igc_xdp_xmit_back(adapter, xdp) < 0)
2497 			goto out_failure;
2498 		return IGC_XDP_TX;
2499 	case XDP_REDIRECT:
2500 		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2501 			goto out_failure;
2502 		return IGC_XDP_REDIRECT;
2504 	default:
2505 		bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
2506 		fallthrough;
2507 	case XDP_ABORTED:
2508 out_failure:
2509 		trace_xdp_exception(adapter->netdev, prog, act);
2510 		fallthrough;
2511 	case XDP_DROP:
2512 		return IGC_XDP_CONSUMED;
2513 	}
2514 }
2515 
2516 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2517 					struct xdp_buff *xdp)
2518 {
2519 	struct bpf_prog *prog;
2520 	int res;
2521 
2522 	prog = READ_ONCE(adapter->xdp_prog);
2523 	if (!prog) {
2524 		res = IGC_XDP_PASS;
2525 		goto out;
2526 	}
2527 
2528 	res = __igc_xdp_run_prog(adapter, prog, xdp);
2529 
2530 out:
2531 	return ERR_PTR(-res);
2532 }
2533 
2534 /* This function assumes __netif_tx_lock is held by the caller. */
2535 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2536 {
2537 	/* Once the tail pointer is updated, the hardware can fetch the
2538 	 * descriptors at any time, so issue a write memory barrier here to
2539 	 * ensure all memory writes are complete before the tail is updated.
2540 	 */
2541 	wmb();
2542 	writel(ring->next_to_use, ring->tail);
2543 }
2544 
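/* Called once at the end of an Rx poll: kick the Tx tail for any XDP_TX
 * descriptors queued during the poll and flush outstanding XDP redirects.
 */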
2545 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2546 {
2547 	int cpu = smp_processor_id();
2548 	struct netdev_queue *nq;
2549 	struct igc_ring *ring;
2550 
2551 	if (status & IGC_XDP_TX) {
2552 		ring = igc_xdp_get_tx_ring(adapter, cpu);
2553 		nq = txring_txq(ring);
2554 
2555 		__netif_tx_lock(nq, cpu);
2556 		igc_flush_tx_descriptors(ring);
2557 		__netif_tx_unlock(nq);
2558 	}
2559 
2560 	if (status & IGC_XDP_REDIRECT)
2561 		xdp_do_flush();
2562 }
2563 
2564 static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2565 				unsigned int packets, unsigned int bytes)
2566 {
2567 	struct igc_ring *ring = q_vector->rx.ring;
2568 
2569 	u64_stats_update_begin(&ring->rx_syncp);
2570 	ring->rx_stats.packets += packets;
2571 	ring->rx_stats.bytes += bytes;
2572 	u64_stats_update_end(&ring->rx_syncp);
2573 
2574 	q_vector->rx.total_packets += packets;
2575 	q_vector->rx.total_bytes += bytes;
2576 }
2577 
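/* Main Rx NAPI poll routine for the normal (non zero-copy) path: run the XDP
 * program on each completed descriptor, then build or extend an skb, recycle
 * or release the page buffer, and refill the ring as buffers are consumed.
 */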
2578 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2579 {
2580 	unsigned int total_bytes = 0, total_packets = 0;
2581 	struct igc_adapter *adapter = q_vector->adapter;
2582 	struct igc_ring *rx_ring = q_vector->rx.ring;
2583 	struct sk_buff *skb = rx_ring->skb;
2584 	u16 cleaned_count = igc_desc_unused(rx_ring);
2585 	int xdp_status = 0, rx_buffer_pgcnt;
2586 
2587 	while (likely(total_packets < budget)) {
2588 		struct igc_xdp_buff ctx = { .rx_ts = NULL };
2589 		struct igc_rx_buffer *rx_buffer;
2590 		union igc_adv_rx_desc *rx_desc;
2591 		unsigned int size, truesize;
2592 		int pkt_offset = 0;
2593 		void *pktbuf;
2594 
2595 		/* return some buffers to hardware, one at a time is too slow */
2596 		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2597 			igc_alloc_rx_buffers(rx_ring, cleaned_count);
2598 			cleaned_count = 0;
2599 		}
2600 
2601 		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2602 		size = le16_to_cpu(rx_desc->wb.upper.length);
2603 		if (!size)
2604 			break;
2605 
2606 		/* This memory barrier is needed to keep us from reading
2607 		 * any other fields out of the rx_desc until we know the
2608 		 * descriptor has been written back
2609 		 */
2610 		dma_rmb();
2611 
2612 		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2613 		truesize = igc_get_rx_frame_truesize(rx_ring, size);
2614 
2615 		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2616 
2617 		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2618 			ctx.rx_ts = pktbuf;
2619 			pkt_offset = IGC_TS_HDR_LEN;
2620 			size -= IGC_TS_HDR_LEN;
2621 		}
2622 
2623 		if (!skb) {
2624 			xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
2625 			xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
2626 					 igc_rx_offset(rx_ring) + pkt_offset,
2627 					 size, true);
2628 			xdp_buff_clear_frags_flag(&ctx.xdp);
2629 			ctx.rx_desc = rx_desc;
2630 
2631 			skb = igc_xdp_run_prog(adapter, &ctx.xdp);
2632 		}
2633 
2634 		if (IS_ERR(skb)) {
2635 			unsigned int xdp_res = -PTR_ERR(skb);
2636 
2637 			switch (xdp_res) {
2638 			case IGC_XDP_CONSUMED:
2639 				rx_buffer->pagecnt_bias++;
2640 				break;
2641 			case IGC_XDP_TX:
2642 			case IGC_XDP_REDIRECT:
2643 				igc_rx_buffer_flip(rx_buffer, truesize);
2644 				xdp_status |= xdp_res;
2645 				break;
2646 			}
2647 
2648 			total_packets++;
2649 			total_bytes += size;
2650 		} else if (skb)
2651 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2652 		else if (ring_uses_build_skb(rx_ring))
2653 			skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
2654 		else
2655 			skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
2656 
2657 		/* exit if we failed to retrieve a buffer */
2658 		if (!skb) {
2659 			rx_ring->rx_stats.alloc_failed++;
2660 			rx_buffer->pagecnt_bias++;
2661 			break;
2662 		}
2663 
2664 		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2665 		cleaned_count++;
2666 
2667 		/* fetch next buffer in frame if non-eop */
2668 		if (igc_is_non_eop(rx_ring, rx_desc))
2669 			continue;
2670 
2671 		/* verify the packet layout is correct */
2672 		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2673 			skb = NULL;
2674 			continue;
2675 		}
2676 
2677 		/* probably a little skewed due to removing CRC */
2678 		total_bytes += skb->len;
2679 
2680 		/* populate checksum, VLAN, and protocol */
2681 		igc_process_skb_fields(rx_ring, rx_desc, skb);
2682 
2683 		napi_gro_receive(&q_vector->napi, skb);
2684 
2685 		/* reset skb pointer */
2686 		skb = NULL;
2687 
2688 		/* update budget accounting */
2689 		total_packets++;
2690 	}
2691 
2692 	if (xdp_status)
2693 		igc_finalize_xdp(adapter, xdp_status);
2694 
2695 	/* place incomplete frames back on ring for completion */
2696 	rx_ring->skb = skb;
2697 
2698 	igc_update_rx_stats(q_vector, total_packets, total_bytes);
2699 
2700 	if (cleaned_count)
2701 		igc_alloc_rx_buffers(rx_ring, cleaned_count);
2702 
2703 	return total_packets;
2704 }
2705 
2706 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2707 					    struct xdp_buff *xdp)
2708 {
2709 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
2710 	unsigned int metasize = xdp->data - xdp->data_meta;
2711 	struct sk_buff *skb;
2712 
2713 	net_prefetch(xdp->data_meta);
2714 
2715 	skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
2716 	if (unlikely(!skb))
2717 		return NULL;
2718 
2719 	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
2720 	       ALIGN(totalsize, sizeof(long)));
2721 
2722 	if (metasize) {
2723 		skb_metadata_set(skb, metasize);
2724 		__skb_pull(skb, metasize);
2725 	}
2726 
2727 	return skb;
2728 }
2729 
2730 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2731 				union igc_adv_rx_desc *desc,
2732 				struct xdp_buff *xdp,
2733 				ktime_t timestamp)
2734 {
2735 	struct igc_ring *ring = q_vector->rx.ring;
2736 	struct sk_buff *skb;
2737 
2738 	skb = igc_construct_skb_zc(ring, xdp);
2739 	if (!skb) {
2740 		ring->rx_stats.alloc_failed++;
2741 		return;
2742 	}
2743 
2744 	if (timestamp)
2745 		skb_hwtstamps(skb)->hwtstamp = timestamp;
2746 
2747 	if (igc_cleanup_headers(ring, desc, skb))
2748 		return;
2749 
2750 	igc_process_skb_fields(ring, desc, skb);
2751 	napi_gro_receive(&q_vector->napi, skb);
2752 }
2753 
2754 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
2755 {
2756 	/* The xdp_buff used by the ZC code path is allocated as xdp_buff_xsk.
2757 	 * igc_xdp_buff shares its layout with xdp_buff_xsk, and the private
2758 	 * igc_xdp_buff fields fall into xdp_buff_xsk->cb.
2759 	 */
2760 	return (struct igc_xdp_buff *)xdp;
2761 }
2762 
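/* Rx NAPI poll routine for the AF_XDP zero-copy path: frames are received
 * directly into XSK pool buffers, and the XDP verdict decides whether they
 * are passed up as skbs, transmitted, redirected or dropped.
 */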
2763 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2764 {
2765 	struct igc_adapter *adapter = q_vector->adapter;
2766 	struct igc_ring *ring = q_vector->rx.ring;
2767 	u16 cleaned_count = igc_desc_unused(ring);
2768 	int total_bytes = 0, total_packets = 0;
2769 	u16 ntc = ring->next_to_clean;
2770 	struct bpf_prog *prog;
2771 	bool failure = false;
2772 	int xdp_status = 0;
2773 
2774 	rcu_read_lock();
2775 
2776 	prog = READ_ONCE(adapter->xdp_prog);
2777 
2778 	while (likely(total_packets < budget)) {
2779 		union igc_adv_rx_desc *desc;
2780 		struct igc_rx_buffer *bi;
2781 		struct igc_xdp_buff *ctx;
2782 		ktime_t timestamp = 0;
2783 		unsigned int size;
2784 		int res;
2785 
2786 		desc = IGC_RX_DESC(ring, ntc);
2787 		size = le16_to_cpu(desc->wb.upper.length);
2788 		if (!size)
2789 			break;
2790 
2791 		/* This memory barrier is needed to keep us from reading
2792 		 * any other fields out of the rx_desc until we know the
2793 		 * descriptor has been written back
2794 		 */
2795 		dma_rmb();
2796 
2797 		bi = &ring->rx_buffer_info[ntc];
2798 
2799 		ctx = xsk_buff_to_igc_ctx(bi->xdp);
2800 		ctx->rx_desc = desc;
2801 
2802 		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2803 			ctx->rx_ts = bi->xdp->data;
2804 
2805 			bi->xdp->data += IGC_TS_HDR_LEN;
2806 
2807 			/* ctx->rx_ts above records the HW timestamp header. Advance
2808 			 * data_meta too so the XDP program sees a metadata length of 0.
2809 			 */
2810 			bi->xdp->data_meta += IGC_TS_HDR_LEN;
2811 			size -= IGC_TS_HDR_LEN;
2812 		}
2813 
2814 		bi->xdp->data_end = bi->xdp->data + size;
2815 		xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2816 
2817 		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2818 		switch (res) {
2819 		case IGC_XDP_PASS:
2820 			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2821 			fallthrough;
2822 		case IGC_XDP_CONSUMED:
2823 			xsk_buff_free(bi->xdp);
2824 			break;
2825 		case IGC_XDP_TX:
2826 		case IGC_XDP_REDIRECT:
2827 			xdp_status |= res;
2828 			break;
2829 		}
2830 
2831 		bi->xdp = NULL;
2832 		total_bytes += size;
2833 		total_packets++;
2834 		cleaned_count++;
2835 		ntc++;
2836 		if (ntc == ring->count)
2837 			ntc = 0;
2838 	}
2839 
2840 	ring->next_to_clean = ntc;
2841 	rcu_read_unlock();
2842 
2843 	if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2844 		failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2845 
2846 	if (xdp_status)
2847 		igc_finalize_xdp(adapter, xdp_status);
2848 
2849 	igc_update_rx_stats(q_vector, total_packets, total_bytes);
2850 
2851 	if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2852 		if (failure || ring->next_to_clean == ring->next_to_use)
2853 			xsk_set_rx_need_wakeup(ring->xsk_pool);
2854 		else
2855 			xsk_clear_rx_need_wakeup(ring->xsk_pool);
2856 		return total_packets;
2857 	}
2858 
2859 	return failure ? budget : total_packets;
2860 }
2861 
2862 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2863 				unsigned int packets, unsigned int bytes)
2864 {
2865 	struct igc_ring *ring = q_vector->tx.ring;
2866 
2867 	u64_stats_update_begin(&ring->tx_syncp);
2868 	ring->tx_stats.bytes += bytes;
2869 	ring->tx_stats.packets += packets;
2870 	u64_stats_update_end(&ring->tx_syncp);
2871 
2872 	q_vector->tx.total_bytes += bytes;
2873 	q_vector->tx.total_packets += packets;
2874 }
2875 
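/* Transmit frames from the XSK Tx ring in zero-copy mode: peek descriptors
 * from the pool and post them to the hardware ring, bounded by the number of
 * free Tx descriptors.
 */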
2876 static void igc_xdp_xmit_zc(struct igc_ring *ring)
2877 {
2878 	struct xsk_buff_pool *pool = ring->xsk_pool;
2879 	struct netdev_queue *nq = txring_txq(ring);
2880 	union igc_adv_tx_desc *tx_desc = NULL;
2881 	int cpu = smp_processor_id();
2882 	struct xdp_desc xdp_desc;
2883 	u16 budget, ntu;
2884 
2885 	if (!netif_carrier_ok(ring->netdev))
2886 		return;
2887 
2888 	__netif_tx_lock(nq, cpu);
2889 
2890 	/* Avoid transmit queue timeout since we share it with the slow path */
2891 	txq_trans_cond_update(nq);
2892 
2893 	ntu = ring->next_to_use;
2894 	budget = igc_desc_unused(ring);
2895 
2896 	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2897 		u32 cmd_type, olinfo_status;
2898 		struct igc_tx_buffer *bi;
2899 		dma_addr_t dma;
2900 
2901 		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2902 			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2903 			   xdp_desc.len;
2904 		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2905 
2906 		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2907 		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2908 
2909 		tx_desc = IGC_TX_DESC(ring, ntu);
2910 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2911 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2912 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
2913 
2914 		bi = &ring->tx_buffer_info[ntu];
2915 		bi->type = IGC_TX_BUFFER_TYPE_XSK;
2916 		bi->protocol = 0;
2917 		bi->bytecount = xdp_desc.len;
2918 		bi->gso_segs = 1;
2919 		bi->time_stamp = jiffies;
2920 		bi->next_to_watch = tx_desc;
2921 
2922 		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2923 
2924 		ntu++;
2925 		if (ntu == ring->count)
2926 			ntu = 0;
2927 	}
2928 
2929 	ring->next_to_use = ntu;
2930 	if (tx_desc) {
2931 		igc_flush_tx_descriptors(ring);
2932 		xsk_tx_release(pool);
2933 	}
2934 
2935 	__netif_tx_unlock(nq);
2936 }
2937 
2938 /**
2939  * igc_clean_tx_irq - Reclaim resources after transmit completes
2940  * @q_vector: pointer to q_vector containing needed info
2941  * @napi_budget: Used to determine if we are in netpoll
2942  *
2943  * returns true if ring is completely cleaned
2944  */
2945 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2946 {
2947 	struct igc_adapter *adapter = q_vector->adapter;
2948 	unsigned int total_bytes = 0, total_packets = 0;
2949 	unsigned int budget = q_vector->tx.work_limit;
2950 	struct igc_ring *tx_ring = q_vector->tx.ring;
2951 	unsigned int i = tx_ring->next_to_clean;
2952 	struct igc_tx_buffer *tx_buffer;
2953 	union igc_adv_tx_desc *tx_desc;
2954 	u32 xsk_frames = 0;
2955 
2956 	if (test_bit(__IGC_DOWN, &adapter->state))
2957 		return true;
2958 
2959 	tx_buffer = &tx_ring->tx_buffer_info[i];
2960 	tx_desc = IGC_TX_DESC(tx_ring, i);
2961 	i -= tx_ring->count;
2962 
2963 	do {
2964 		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2965 
2966 		/* if next_to_watch is not set then there is no work pending */
2967 		if (!eop_desc)
2968 			break;
2969 
2970 		/* prevent any other reads prior to eop_desc */
2971 		smp_rmb();
2972 
2973 		/* if DD is not set pending work has not been completed */
2974 		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2975 			break;
2976 
2977 		/* clear next_to_watch to prevent false hangs */
2978 		tx_buffer->next_to_watch = NULL;
2979 
2980 		/* update the statistics for this packet */
2981 		total_bytes += tx_buffer->bytecount;
2982 		total_packets += tx_buffer->gso_segs;
2983 
2984 		switch (tx_buffer->type) {
2985 		case IGC_TX_BUFFER_TYPE_XSK:
2986 			xsk_frames++;
2987 			break;
2988 		case IGC_TX_BUFFER_TYPE_XDP:
2989 			xdp_return_frame(tx_buffer->xdpf);
2990 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2991 			break;
2992 		case IGC_TX_BUFFER_TYPE_SKB:
2993 			napi_consume_skb(tx_buffer->skb, napi_budget);
2994 			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2995 			break;
2996 		default:
2997 			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2998 			break;
2999 		}
3000 
3001 		/* clear last DMA location and unmap remaining buffers */
3002 		while (tx_desc != eop_desc) {
3003 			tx_buffer++;
3004 			tx_desc++;
3005 			i++;
3006 			if (unlikely(!i)) {
3007 				i -= tx_ring->count;
3008 				tx_buffer = tx_ring->tx_buffer_info;
3009 				tx_desc = IGC_TX_DESC(tx_ring, 0);
3010 			}
3011 
3012 			/* unmap any remaining paged data */
3013 			if (dma_unmap_len(tx_buffer, len))
3014 				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3015 		}
3016 
3017 		/* move us one more past the eop_desc for start of next pkt */
3018 		tx_buffer++;
3019 		tx_desc++;
3020 		i++;
3021 		if (unlikely(!i)) {
3022 			i -= tx_ring->count;
3023 			tx_buffer = tx_ring->tx_buffer_info;
3024 			tx_desc = IGC_TX_DESC(tx_ring, 0);
3025 		}
3026 
3027 		/* issue prefetch for next Tx descriptor */
3028 		prefetch(tx_desc);
3029 
3030 		/* update budget accounting */
3031 		budget--;
3032 	} while (likely(budget));
3033 
3034 	netdev_tx_completed_queue(txring_txq(tx_ring),
3035 				  total_packets, total_bytes);
3036 
3037 	i += tx_ring->count;
3038 	tx_ring->next_to_clean = i;
3039 
3040 	igc_update_tx_stats(q_vector, total_packets, total_bytes);
3041 
3042 	if (tx_ring->xsk_pool) {
3043 		if (xsk_frames)
3044 			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
3045 		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
3046 			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
3047 		igc_xdp_xmit_zc(tx_ring);
3048 	}
3049 
3050 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
3051 		struct igc_hw *hw = &adapter->hw;
3052 
3053 		/* Detect a transmit hang in hardware; this serializes the
3054 		 * check with the clearing of time_stamp and movement of i.
3055 		 */
3056 		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3057 		if (tx_buffer->next_to_watch &&
3058 		    time_after(jiffies, tx_buffer->time_stamp +
3059 		    (adapter->tx_timeout_factor * HZ)) &&
3060 		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
3061 		    (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
3062 		    !tx_ring->oper_gate_closed) {
3063 			/* detected Tx unit hang */
3064 			netdev_err(tx_ring->netdev,
3065 				   "Detected Tx Unit Hang\n"
3066 				   "  Tx Queue             <%d>\n"
3067 				   "  TDH                  <%x>\n"
3068 				   "  TDT                  <%x>\n"
3069 				   "  next_to_use          <%x>\n"
3070 				   "  next_to_clean        <%x>\n"
3071 				   "buffer_info[next_to_clean]\n"
3072 				   "  time_stamp           <%lx>\n"
3073 				   "  next_to_watch        <%p>\n"
3074 				   "  jiffies              <%lx>\n"
3075 				   "  desc.status          <%x>\n",
3076 				   tx_ring->queue_index,
3077 				   rd32(IGC_TDH(tx_ring->reg_idx)),
3078 				   readl(tx_ring->tail),
3079 				   tx_ring->next_to_use,
3080 				   tx_ring->next_to_clean,
3081 				   tx_buffer->time_stamp,
3082 				   tx_buffer->next_to_watch,
3083 				   jiffies,
3084 				   tx_buffer->next_to_watch->wb.status);
3085 			netif_stop_subqueue(tx_ring->netdev,
3086 					    tx_ring->queue_index);
3087 
3088 			/* we are about to reset, no point in enabling stuff */
3089 			return true;
3090 		}
3091 	}
3092 
3093 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3094 	if (unlikely(total_packets &&
3095 		     netif_carrier_ok(tx_ring->netdev) &&
3096 		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
3097 		/* Make sure that anybody stopping the queue after this
3098 		 * sees the new next_to_clean.
3099 		 */
3100 		smp_mb();
3101 		if (__netif_subqueue_stopped(tx_ring->netdev,
3102 					     tx_ring->queue_index) &&
3103 		    !(test_bit(__IGC_DOWN, &adapter->state))) {
3104 			netif_wake_subqueue(tx_ring->netdev,
3105 					    tx_ring->queue_index);
3106 
3107 			u64_stats_update_begin(&tx_ring->tx_syncp);
3108 			tx_ring->tx_stats.restart_queue++;
3109 			u64_stats_update_end(&tx_ring->tx_syncp);
3110 		}
3111 	}
3112 
3113 	return !!budget;
3114 }
3115 
3116 static int igc_find_mac_filter(struct igc_adapter *adapter,
3117 			       enum igc_mac_filter_type type, const u8 *addr)
3118 {
3119 	struct igc_hw *hw = &adapter->hw;
3120 	int max_entries = hw->mac.rar_entry_count;
3121 	u32 ral, rah;
3122 	int i;
3123 
3124 	for (i = 0; i < max_entries; i++) {
3125 		ral = rd32(IGC_RAL(i));
3126 		rah = rd32(IGC_RAH(i));
3127 
3128 		if (!(rah & IGC_RAH_AV))
3129 			continue;
3130 		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
3131 			continue;
3132 		if ((rah & IGC_RAH_RAH_MASK) !=
3133 		    le16_to_cpup((__le16 *)(addr + 4)))
3134 			continue;
3135 		if (ral != le32_to_cpup((__le32 *)(addr)))
3136 			continue;
3137 
3138 		return i;
3139 	}
3140 
3141 	return -1;
3142 }
3143 
3144 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
3145 {
3146 	struct igc_hw *hw = &adapter->hw;
3147 	int max_entries = hw->mac.rar_entry_count;
3148 	u32 rah;
3149 	int i;
3150 
3151 	for (i = 0; i < max_entries; i++) {
3152 		rah = rd32(IGC_RAH(i));
3153 
3154 		if (!(rah & IGC_RAH_AV))
3155 			return i;
3156 	}
3157 
3158 	return -1;
3159 }
3160 
3161 /**
3162  * igc_add_mac_filter() - Add MAC address filter
3163  * @adapter: Pointer to adapter where the filter should be added
3164  * @type: MAC address filter type (source or destination)
3165  * @addr: MAC address
3166  * @queue: If non-negative, queue assignment feature is enabled and frames
3167  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
3168  *         assignment is disabled.
3169  *
3170  * Return: 0 in case of success, negative errno code otherwise.
3171  */
3172 static int igc_add_mac_filter(struct igc_adapter *adapter,
3173 			      enum igc_mac_filter_type type, const u8 *addr,
3174 			      int queue)
3175 {
3176 	struct net_device *dev = adapter->netdev;
3177 	int index;
3178 
3179 	index = igc_find_mac_filter(adapter, type, addr);
3180 	if (index >= 0)
3181 		goto update_filter;
3182 
3183 	index = igc_get_avail_mac_filter_slot(adapter);
3184 	if (index < 0)
3185 		return -ENOSPC;
3186 
3187 	netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
3188 		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3189 		   addr, queue);
3190 
3191 update_filter:
3192 	igc_set_mac_filter_hw(adapter, index, type, addr, queue);
3193 	return 0;
3194 }
3195 
3196 /**
3197  * igc_del_mac_filter() - Delete MAC address filter
3198  * @adapter: Pointer to adapter where the filter should be deleted from
3199  * @type: MAC address filter type (source or destination)
3200  * @addr: MAC address
3201  */
3202 static void igc_del_mac_filter(struct igc_adapter *adapter,
3203 			       enum igc_mac_filter_type type, const u8 *addr)
3204 {
3205 	struct net_device *dev = adapter->netdev;
3206 	int index;
3207 
3208 	index = igc_find_mac_filter(adapter, type, addr);
3209 	if (index < 0)
3210 		return;
3211 
3212 	if (index == 0) {
3213 		/* If this is the default filter, we don't actually delete it.
3214 		 * We just reset it to its default value, i.e. disable queue
3215 		 * assignment.
3216 		 */
3217 		netdev_dbg(dev, "Disable default MAC filter queue assignment");
3218 
3219 		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
3220 	} else {
3221 		netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
3222 			   index,
3223 			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3224 			   addr);
3225 
3226 		igc_clear_mac_filter_hw(adapter, index);
3227 	}
3228 }
3229 
3230 /**
3231  * igc_add_vlan_prio_filter() - Add VLAN priority filter
3232  * @adapter: Pointer to adapter where the filter should be added
3233  * @prio: VLAN priority value
3234  * @queue: Queue number which matching frames are assigned to
3235  *
3236  * Return: 0 in case of success, negative errno code otherwise.
3237  */
3238 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
3239 				    int queue)
3240 {
3241 	struct net_device *dev = adapter->netdev;
3242 	struct igc_hw *hw = &adapter->hw;
3243 	u32 vlanpqf;
3244 
3245 	vlanpqf = rd32(IGC_VLANPQF);
3246 
3247 	if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
3248 		netdev_dbg(dev, "VLAN priority filter already in use\n");
3249 		return -EEXIST;
3250 	}
3251 
3252 	vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
3253 	vlanpqf |= IGC_VLANPQF_VALID(prio);
3254 
3255 	wr32(IGC_VLANPQF, vlanpqf);
3256 
3257 	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
3258 		   prio, queue);
3259 	return 0;
3260 }
3261 
3262 /**
3263  * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3264  * @adapter: Pointer to adapter where the filter should be deleted from
3265  * @prio: VLAN priority value
3266  */
3267 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3268 {
3269 	struct igc_hw *hw = &adapter->hw;
3270 	u32 vlanpqf;
3271 
3272 	vlanpqf = rd32(IGC_VLANPQF);
3273 
3274 	vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3275 	vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3276 
3277 	wr32(IGC_VLANPQF, vlanpqf);
3278 
3279 	netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
3280 		   prio);
3281 }
3282 
3283 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
3284 {
3285 	struct igc_hw *hw = &adapter->hw;
3286 	int i;
3287 
3288 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3289 		u32 etqf = rd32(IGC_ETQF(i));
3290 
3291 		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
3292 			return i;
3293 	}
3294 
3295 	return -1;
3296 }
3297 
3298 /**
3299  * igc_add_etype_filter() - Add ethertype filter
3300  * @adapter: Pointer to adapter where the filter should be added
3301  * @etype: Ethertype value
3302  * @queue: If non-negative, queue assignment feature is enabled and frames
3303  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
3304  *         assignment is disabled.
3305  *
3306  * Return: 0 in case of success, negative errno code otherwise.
3307  */
3308 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3309 				int queue)
3310 {
3311 	struct igc_hw *hw = &adapter->hw;
3312 	int index;
3313 	u32 etqf;
3314 
3315 	index = igc_get_avail_etype_filter_slot(adapter);
3316 	if (index < 0)
3317 		return -ENOSPC;
3318 
3319 	etqf = rd32(IGC_ETQF(index));
3320 
3321 	etqf &= ~IGC_ETQF_ETYPE_MASK;
3322 	etqf |= etype;
3323 
3324 	if (queue >= 0) {
3325 		etqf &= ~IGC_ETQF_QUEUE_MASK;
3326 		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3327 		etqf |= IGC_ETQF_QUEUE_ENABLE;
3328 	}
3329 
3330 	etqf |= IGC_ETQF_FILTER_ENABLE;
3331 
3332 	wr32(IGC_ETQF(index), etqf);
3333 
3334 	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3335 		   etype, queue);
3336 	return 0;
3337 }
3338 
3339 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3340 {
3341 	struct igc_hw *hw = &adapter->hw;
3342 	int i;
3343 
3344 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3345 		u32 etqf = rd32(IGC_ETQF(i));
3346 
3347 		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3348 			return i;
3349 	}
3350 
3351 	return -1;
3352 }
3353 
3354 /**
3355  * igc_del_etype_filter() - Delete ethertype filter
3356  * @adapter: Pointer to adapter where the filter should be deleted from
3357  * @etype: Ethertype value
3358  */
3359 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3360 {
3361 	struct igc_hw *hw = &adapter->hw;
3362 	int index;
3363 
3364 	index = igc_find_etype_filter(adapter, etype);
3365 	if (index < 0)
3366 		return;
3367 
3368 	wr32(IGC_ETQF(index), 0);
3369 
3370 	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3371 		   etype);
3372 }
3373 
3374 static int igc_flex_filter_select(struct igc_adapter *adapter,
3375 				  struct igc_flex_filter *input,
3376 				  u32 *fhft)
3377 {
3378 	struct igc_hw *hw = &adapter->hw;
3379 	u8 fhft_index;
3380 	u32 fhftsl;
3381 
3382 	if (input->index >= MAX_FLEX_FILTER) {
3383 		netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
3384 		return -EINVAL;
3385 	}
3386 
3387 	/* Indirect table select register */
3388 	fhftsl = rd32(IGC_FHFTSL);
3389 	fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
3390 	switch (input->index) {
3391 	case 0 ... 7:
3392 		fhftsl |= 0x00;
3393 		break;
3394 	case 8 ... 15:
3395 		fhftsl |= 0x01;
3396 		break;
3397 	case 16 ... 23:
3398 		fhftsl |= 0x02;
3399 		break;
3400 	case 24 ... 31:
3401 		fhftsl |= 0x03;
3402 		break;
3403 	}
3404 	wr32(IGC_FHFTSL, fhftsl);
3405 
3406 	/* Normalize index down to host table register */
3407 	fhft_index = input->index % 8;
3408 
3409 	*fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
3410 		IGC_FHFT_EXT(fhft_index - 4);
3411 
3412 	return 0;
3413 }
3414 
3415 static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3416 				    struct igc_flex_filter *input)
3417 {
3418 	struct igc_hw *hw = &adapter->hw;
3419 	u8 *data = input->data;
3420 	u8 *mask = input->mask;
3421 	u32 queuing;
3422 	u32 fhft;
3423 	u32 wufc;
3424 	int ret;
3425 	int i;
3426 
3427 	/* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3428 	 * out early to avoid surprises later.
3429 	 */
3430 	if (input->length % 8 != 0) {
3431 		netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
3432 		return -EINVAL;
3433 	}
3434 
3435 	/* Select corresponding flex filter register and get base for host table. */
3436 	ret = igc_flex_filter_select(adapter, input, &fhft);
3437 	if (ret)
3438 		return ret;
3439 
3440 	/* When adding a filter, globally disable the flex filter feature. That
3441 	 * is recommended in the datasheet.
3442 	 */
3443 	wufc = rd32(IGC_WUFC);
3444 	wufc &= ~IGC_WUFC_FLEX_HQ;
3445 	wr32(IGC_WUFC, wufc);
3446 
3447 	/* Configure filter */
3448 	queuing = input->length & IGC_FHFT_LENGTH_MASK;
3449 	queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
3450 	queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
3451 
3452 	if (input->immediate_irq)
3453 		queuing |= IGC_FHFT_IMM_INT;
3454 
3455 	if (input->drop)
3456 		queuing |= IGC_FHFT_DROP;
3457 
3458 	wr32(fhft + 0xFC, queuing);
3459 
3460 	/* Write data (128 byte) and mask (128 bit) */
3461 	for (i = 0; i < 16; ++i) {
3462 		const size_t data_idx = i * 8;
3463 		const size_t row_idx = i * 16;
3464 		u32 dw0 =
3465 			(data[data_idx + 0] << 0) |
3466 			(data[data_idx + 1] << 8) |
3467 			(data[data_idx + 2] << 16) |
3468 			(data[data_idx + 3] << 24);
3469 		u32 dw1 =
3470 			(data[data_idx + 4] << 0) |
3471 			(data[data_idx + 5] << 8) |
3472 			(data[data_idx + 6] << 16) |
3473 			(data[data_idx + 7] << 24);
3474 		u32 tmp;
3475 
3476 		/* Write row: dw0, dw1 and mask */
3477 		wr32(fhft + row_idx, dw0);
3478 		wr32(fhft + row_idx + 4, dw1);
3479 
3480 		/* mask is only valid for MASK(7, 0) */
3481 		tmp = rd32(fhft + row_idx + 8);
3482 		tmp &= ~GENMASK(7, 0);
3483 		tmp |= mask[i];
3484 		wr32(fhft + row_idx + 8, tmp);
3485 	}
3486 
3487 	/* Enable filter. */
3488 	wufc |= IGC_WUFC_FLEX_HQ;
3489 	if (input->index > 8) {
3490 		/* Filters 0-7 are enabled via WUFC. The other 24 filters are not. */
3491 		u32 wufc_ext = rd32(IGC_WUFC_EXT);
3492 
3493 		wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
3494 
3495 		wr32(IGC_WUFC_EXT, wufc_ext);
3496 	} else {
3497 		wufc |= (IGC_WUFC_FLX0 << input->index);
3498 	}
3499 	wr32(IGC_WUFC, wufc);
3500 
3501 	netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
3502 		   input->index);
3503 
3504 	return 0;
3505 }
3506 
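/* Copy one match field into the flex filter data array at 'offset' and set
 * the corresponding per-byte enable bits in the mask; a NULL mask matches
 * every byte of the field.
 */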
3507 static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3508 				      const void *src, unsigned int offset,
3509 				      size_t len, const void *mask)
3510 {
3511 	int i;
3512 
3513 	/* data */
3514 	memcpy(&flex->data[offset], src, len);
3515 
3516 	/* mask */
3517 	for (i = 0; i < len; ++i) {
3518 		const unsigned int idx = i + offset;
3519 		const u8 *ptr = mask;
3520 
3521 		if (mask) {
3522 			if (ptr[i] & 0xff)
3523 				flex->mask[idx / 8] |= BIT(idx % 8);
3524 
3525 			continue;
3526 		}
3527 
3528 		flex->mask[idx / 8] |= BIT(idx % 8);
3529 	}
3530 }
3531 
3532 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3533 {
3534 	struct igc_hw *hw = &adapter->hw;
3535 	u32 wufc, wufc_ext;
3536 	int i;
3537 
3538 	wufc = rd32(IGC_WUFC);
3539 	wufc_ext = rd32(IGC_WUFC_EXT);
3540 
3541 	for (i = 0; i < MAX_FLEX_FILTER; i++) {
3542 		if (i < 8) {
3543 			if (!(wufc & (IGC_WUFC_FLX0 << i)))
3544 				return i;
3545 		} else {
3546 			if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
3547 				return i;
3548 		}
3549 	}
3550 
3551 	return -ENOSPC;
3552 }
3553 
3554 static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3555 {
3556 	struct igc_hw *hw = &adapter->hw;
3557 	u32 wufc, wufc_ext;
3558 
3559 	wufc = rd32(IGC_WUFC);
3560 	wufc_ext = rd32(IGC_WUFC_EXT);
3561 
3562 	if (wufc & IGC_WUFC_FILTER_MASK)
3563 		return true;
3564 
3565 	if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
3566 		return true;
3567 
3568 	return false;
3569 }
3570 
3571 static int igc_add_flex_filter(struct igc_adapter *adapter,
3572 			       struct igc_nfc_rule *rule)
3573 {
3574 	struct igc_nfc_filter *filter = &rule->filter;
3575 	unsigned int eth_offset, user_offset;
3576 	struct igc_flex_filter flex = { };
3577 	int ret, index;
3578 	bool vlan;
3579 
3580 	index = igc_find_avail_flex_filter_slot(adapter);
3581 	if (index < 0)
3582 		return -ENOSPC;
3583 
3584 	/* Construct the flex filter:
3585 	 *  -> dest_mac [6]
3586 	 *  -> src_mac [6]
3587 	 *  -> tpid [2]
3588 	 *  -> vlan tci [2]
3589 	 *  -> ether type [2]
3590 	 *  -> user data [8]
3591 	 *  -> = 26 bytes => 32 length
3592 	 */
3593 	flex.index    = index;
3594 	flex.length   = 32;
3595 	flex.rx_queue = rule->action;
3596 
3597 	vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3598 	eth_offset = vlan ? 16 : 12;
3599 	user_offset = vlan ? 18 : 14;
3600 
3601 	/* Add destination MAC  */
3602 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3603 		igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
3604 					  ETH_ALEN, NULL);
3605 
3606 	/* Add source MAC */
3607 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3608 		igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
3609 					  ETH_ALEN, NULL);
3610 
3611 	/* Add VLAN etype */
3612 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
3613 		__be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
3614 
3615 		igc_flex_filter_add_field(&flex, &vlan_etype, 12,
3616 					  sizeof(vlan_etype), NULL);
3617 	}
3618 
3619 	/* Add VLAN TCI */
3620 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3621 		igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3622 					  sizeof(filter->vlan_tci), NULL);
3623 
3624 	/* Add Ether type */
3625 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3626 		__be16 etype = cpu_to_be16(filter->etype);
3627 
3628 		igc_flex_filter_add_field(&flex, &etype, eth_offset,
3629 					  sizeof(etype), NULL);
3630 	}
3631 
3632 	/* Add user data */
3633 	if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3634 		igc_flex_filter_add_field(&flex, &filter->user_data,
3635 					  user_offset,
3636 					  sizeof(filter->user_data),
3637 					  filter->user_mask);
3638 
3639 	/* Add it down to the hardware and enable it. */
3640 	ret = igc_write_flex_filter_ll(adapter, &flex);
3641 	if (ret)
3642 		return ret;
3643 
3644 	filter->flex_index = index;
3645 
3646 	return 0;
3647 }
3648 
3649 static void igc_del_flex_filter(struct igc_adapter *adapter,
3650 				u16 reg_index)
3651 {
3652 	struct igc_hw *hw = &adapter->hw;
3653 	u32 wufc;
3654 
3655 	/* Just disable the filter. The filter table itself is kept
3656 	 * intact. Another flex_filter_add() should override the "old" data
3657 	 * then.
3658 	 */
3659 	if (reg_index > 8) {
3660 		u32 wufc_ext = rd32(IGC_WUFC_EXT);
3661 
3662 		wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
3663 		wr32(IGC_WUFC_EXT, wufc_ext);
3664 	} else {
3665 		wufc = rd32(IGC_WUFC);
3666 
3667 		wufc &= ~(IGC_WUFC_FLX0 << reg_index);
3668 		wr32(IGC_WUFC, wufc);
3669 	}
3670 
3671 	if (igc_flex_filter_in_use(adapter))
3672 		return;
3673 
3674 	/* No filters are in use, we may disable flex filters */
3675 	wufc = rd32(IGC_WUFC);
3676 	wufc &= ~IGC_WUFC_FLEX_HQ;
3677 	wr32(IGC_WUFC, wufc);
3678 }
3679 
3680 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3681 			       struct igc_nfc_rule *rule)
3682 {
3683 	int err;
3684 
3685 	if (rule->flex)
3686 		return igc_add_flex_filter(adapter, rule);
3688 
3689 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3690 		err = igc_add_etype_filter(adapter, rule->filter.etype,
3691 					   rule->action);
3692 		if (err)
3693 			return err;
3694 	}
3695 
3696 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3697 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3698 					 rule->filter.src_addr, rule->action);
3699 		if (err)
3700 			return err;
3701 	}
3702 
3703 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3704 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3705 					 rule->filter.dst_addr, rule->action);
3706 		if (err)
3707 			return err;
3708 	}
3709 
3710 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3711 		int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
3712 
3713 		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3714 		if (err)
3715 			return err;
3716 	}
3717 
3718 	return 0;
3719 }
3720 
3721 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3722 				 const struct igc_nfc_rule *rule)
3723 {
3724 	if (rule->flex) {
3725 		igc_del_flex_filter(adapter, rule->filter.flex_index);
3726 		return;
3727 	}
3728 
3729 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3730 		igc_del_etype_filter(adapter, rule->filter.etype);
3731 
3732 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3733 		int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
3734 
3735 		igc_del_vlan_prio_filter(adapter, prio);
3736 	}
3737 
3738 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3739 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3740 				   rule->filter.src_addr);
3741 
3742 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3743 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3744 				   rule->filter.dst_addr);
3745 }
3746 
3747 /**
3748  * igc_get_nfc_rule() - Get NFC rule
3749  * @adapter: Pointer to adapter
3750  * @location: Rule location
3751  *
3752  * Context: Expects adapter->nfc_rule_lock to be held by caller.
3753  *
3754  * Return: Pointer to NFC rule at @location. If not found, NULL.
3755  */
3756 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3757 				      u32 location)
3758 {
3759 	struct igc_nfc_rule *rule;
3760 
3761 	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3762 		if (rule->location == location)
3763 			return rule;
3764 		if (rule->location > location)
3765 			break;
3766 	}
3767 
3768 	return NULL;
3769 }
3770 
3771 /**
3772  * igc_del_nfc_rule() - Delete NFC rule
3773  * @adapter: Pointer to adapter
3774  * @rule: Pointer to rule to be deleted
3775  *
3776  * Disable NFC rule in hardware and delete it from adapter.
3777  *
3778  * Context: Expects adapter->nfc_rule_lock to be held by caller.
3779  */
3780 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3781 {
3782 	igc_disable_nfc_rule(adapter, rule);
3783 
3784 	list_del(&rule->list);
3785 	adapter->nfc_rule_count--;
3786 
3787 	kfree(rule);
3788 }
3789 
3790 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3791 {
3792 	struct igc_nfc_rule *rule, *tmp;
3793 
3794 	mutex_lock(&adapter->nfc_rule_lock);
3795 
3796 	list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3797 		igc_del_nfc_rule(adapter, rule);
3798 
3799 	mutex_unlock(&adapter->nfc_rule_lock);
3800 }
3801 
3802 /**
3803  * igc_add_nfc_rule() - Add NFC rule
3804  * @adapter: Pointer to adapter
3805  * @rule: Pointer to rule to be added
3806  *
3807  * Enable NFC rule in hardware and add it to adapter.
3808  *
3809  * Context: Expects adapter->nfc_rule_lock to be held by caller.
3810  *
3811  * Return: 0 on success, negative errno on failure.
3812  */
3813 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3814 {
3815 	struct igc_nfc_rule *pred, *cur;
3816 	int err;
3817 
3818 	err = igc_enable_nfc_rule(adapter, rule);
3819 	if (err)
3820 		return err;
3821 
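	/* The rule list is kept sorted by ascending location, which lets
	 * igc_get_nfc_rule() stop scanning as soon as it passes the
	 * requested location.
	 */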
3822 	pred = NULL;
3823 	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3824 		if (cur->location >= rule->location)
3825 			break;
3826 		pred = cur;
3827 	}
3828 
3829 	list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3830 	adapter->nfc_rule_count++;
3831 	return 0;
3832 }
3833 
3834 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3835 {
3836 	struct igc_nfc_rule *rule;
3837 
3838 	mutex_lock(&adapter->nfc_rule_lock);
3839 
3840 	list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3841 		igc_enable_nfc_rule(adapter, rule);
3842 
3843 	mutex_unlock(&adapter->nfc_rule_lock);
3844 }
3845 
3846 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3847 {
3848 	struct igc_adapter *adapter = netdev_priv(netdev);
3849 
3850 	return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3851 }
3852 
3853 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3854 {
3855 	struct igc_adapter *adapter = netdev_priv(netdev);
3856 
3857 	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
3858 	return 0;
3859 }
3860 
3861 /**
3862  * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3863  * @netdev: network interface device structure
3864  *
3865  * The set_rx_mode entry point is called whenever the unicast or multicast
3866  * address lists or the network interface flags are updated.  This routine is
3867  * responsible for configuring the hardware for proper unicast, multicast,
3868  * promiscuous mode, and all-multi behavior.
3869  */
3870 static void igc_set_rx_mode(struct net_device *netdev)
3871 {
3872 	struct igc_adapter *adapter = netdev_priv(netdev);
3873 	struct igc_hw *hw = &adapter->hw;
3874 	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3875 	int count;
3876 
3877 	/* Check for Promiscuous and All Multicast modes */
3878 	if (netdev->flags & IFF_PROMISC) {
3879 		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3880 	} else {
3881 		if (netdev->flags & IFF_ALLMULTI) {
3882 			rctl |= IGC_RCTL_MPE;
3883 		} else {
3884 			/* Write addresses to the MTA, if the attempt fails
3885 			 * then we should just turn on promiscuous mode so
3886 			 * that we can at least receive multicast traffic
3887 			 */
3888 			count = igc_write_mc_addr_list(netdev);
3889 			if (count < 0)
3890 				rctl |= IGC_RCTL_MPE;
3891 		}
3892 	}
3893 
3894 	/* Write addresses to available RAR registers, if there is not
3895 	 * sufficient space to store all the addresses then enable
3896 	 * unicast promiscuous mode
3897 	 */
3898 	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3899 		rctl |= IGC_RCTL_UPE;
3900 
3901 	/* update state of unicast and multicast */
3902 	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3903 	wr32(IGC_RCTL, rctl);
3904 
3905 #if (PAGE_SIZE < 8192)
3906 	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3907 		rlpml = IGC_MAX_FRAME_BUILD_SKB;
3908 #endif
3909 	wr32(IGC_RLPML, rlpml);
3910 }
3911 
3912 /**
3913  * igc_configure - configure the hardware for RX and TX
3914  * @adapter: private board structure
3915  */
3916 static void igc_configure(struct igc_adapter *adapter)
3917 {
3918 	struct net_device *netdev = adapter->netdev;
3919 	int i = 0;
3920 
3921 	igc_get_hw_control(adapter);
3922 	igc_set_rx_mode(netdev);
3923 
3924 	igc_restore_vlan(adapter);
3925 
3926 	igc_setup_tctl(adapter);
3927 	igc_setup_mrqc(adapter);
3928 	igc_setup_rctl(adapter);
3929 
3930 	igc_set_default_mac_filter(adapter);
3931 	igc_restore_nfc_rules(adapter);
3932 
3933 	igc_configure_tx(adapter);
3934 	igc_configure_rx(adapter);
3935 
3936 	igc_rx_fifo_flush_base(&adapter->hw);
3937 
3938 	/* call igc_desc_unused which always leaves
3939 	 * at least 1 descriptor unused to make sure
3940 	 * next_to_use != next_to_clean
3941 	 */
3942 	for (i = 0; i < adapter->num_rx_queues; i++) {
3943 		struct igc_ring *ring = adapter->rx_ring[i];
3944 
3945 		if (ring->xsk_pool)
3946 			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3947 		else
3948 			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3949 	}
3950 }
3951 
3952 /**
3953  * igc_write_ivar - configure ivar for given MSI-X vector
3954  * @hw: pointer to the HW structure
3955  * @msix_vector: vector number we are allocating to a given ring
3956  * @index: row index of IVAR register to write within IVAR table
3957  * @offset: column offset within the IVAR entry, should be a multiple of 8
3958  *
3959  * The IVAR table consists of 2 columns,
3960  * each containing a cause allocation for an Rx and a Tx ring, and a
3961  * variable number of rows depending on the number of queues supported.
3962  */
3963 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3964 			   int index, int offset)
3965 {
3966 	u32 ivar = array_rd32(IGC_IVAR0, index);
3967 
3968 	/* clear any bits that are currently set */
3969 	ivar &= ~((u32)0xFF << offset);
3970 
3971 	/* write vector and valid bit */
3972 	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3973 
3974 	array_wr32(IGC_IVAR0, index, ivar);
3975 }
3976 
3977 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3978 {
3979 	struct igc_adapter *adapter = q_vector->adapter;
3980 	struct igc_hw *hw = &adapter->hw;
3981 	int rx_queue = IGC_N0_QUEUE;
3982 	int tx_queue = IGC_N0_QUEUE;
3983 
3984 	if (q_vector->rx.ring)
3985 		rx_queue = q_vector->rx.ring->reg_idx;
3986 	if (q_vector->tx.ring)
3987 		tx_queue = q_vector->tx.ring->reg_idx;
3988 
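	/* Each IVAR register carries the cause allocations for two queues:
	 * Rx entries sit at bit offsets 0 and 16, Tx entries at 8 and 24.
	 * E.g. Rx queue 3 maps to IVAR index 1 (3 >> 1), bit offset 16
	 * ((3 & 0x1) << 4), and Tx queue 3 to bit offset 24.
	 */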
3989 	switch (hw->mac.type) {
3990 	case igc_i225:
3991 		if (rx_queue > IGC_N0_QUEUE)
3992 			igc_write_ivar(hw, msix_vector,
3993 				       rx_queue >> 1,
3994 				       (rx_queue & 0x1) << 4);
3995 		if (tx_queue > IGC_N0_QUEUE)
3996 			igc_write_ivar(hw, msix_vector,
3997 				       tx_queue >> 1,
3998 				       ((tx_queue & 0x1) << 4) + 8);
3999 		q_vector->eims_value = BIT(msix_vector);
4000 		break;
4001 	default:
4002 		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
4003 		break;
4004 	}
4005 
4006 	/* add q_vector eims value to global eims_enable_mask */
4007 	adapter->eims_enable_mask |= q_vector->eims_value;
4008 
4009 	/* configure q_vector to set itr on first interrupt */
4010 	q_vector->set_itr = 1;
4011 }
4012 
4013 /**
4014  * igc_configure_msix - Configure MSI-X hardware
4015  * @adapter: Pointer to adapter structure
4016  *
4017  * igc_configure_msix sets up the hardware to properly
4018  * generate MSI-X interrupts.
4019  */
4020 static void igc_configure_msix(struct igc_adapter *adapter)
4021 {
4022 	struct igc_hw *hw = &adapter->hw;
4023 	int i, vector = 0;
4024 	u32 tmp;
4025 
4026 	adapter->eims_enable_mask = 0;
4027 
4028 	/* set vector for other causes, i.e. link changes */
4029 	switch (hw->mac.type) {
4030 	case igc_i225:
4031 		/* Turn on MSI-X capability first, or our settings
4032 		 * won't stick.  And it will take days to debug.
4033 		 */
4034 		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
4035 		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
4036 		     IGC_GPIE_NSICR);
4037 
4038 		/* enable msix_other interrupt */
4039 		adapter->eims_other = BIT(vector);
4040 		tmp = (vector++ | IGC_IVAR_VALID) << 8;
4041 
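		/* The shift by 8 places the vector/valid pair in the second
		 * byte of IVAR_MISC, i.e. the cause allocation used for the
		 * msix_other vector.
		 */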
4042 		wr32(IGC_IVAR_MISC, tmp);
4043 		break;
4044 	default:
4045 		/* do nothing, since nothing else supports MSI-X */
4046 		break;
4047 	} /* switch (hw->mac.type) */
4048 
4049 	adapter->eims_enable_mask |= adapter->eims_other;
4050 
4051 	for (i = 0; i < adapter->num_q_vectors; i++)
4052 		igc_assign_vector(adapter->q_vector[i], vector++);
4053 
4054 	wrfl();
4055 }
4056 
4057 /**
4058  * igc_irq_enable - Enable default interrupt generation settings
4059  * @adapter: board private structure
4060  */
4061 static void igc_irq_enable(struct igc_adapter *adapter)
4062 {
4063 	struct igc_hw *hw = &adapter->hw;
4064 
4065 	if (adapter->msix_entries) {
4066 		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
4067 		u32 regval = rd32(IGC_EIAC);
4068 
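		/* EIAC/EIAM auto-clear and auto-mask the queue vector causes
		 * for MSI-X, so the ring interrupt handlers do not need to
		 * acknowledge them explicitly.
		 */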
4069 		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
4070 		regval = rd32(IGC_EIAM);
4071 		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
4072 		wr32(IGC_EIMS, adapter->eims_enable_mask);
4073 		wr32(IGC_IMS, ims);
4074 	} else {
4075 		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4076 		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4077 	}
4078 }
4079 
4080 /**
4081  * igc_irq_disable - Mask off interrupt generation on the NIC
4082  * @adapter: board private structure
4083  */
4084 static void igc_irq_disable(struct igc_adapter *adapter)
4085 {
4086 	struct igc_hw *hw = &adapter->hw;
4087 
4088 	if (adapter->msix_entries) {
4089 		u32 regval = rd32(IGC_EIAM);
4090 
4091 		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
4092 		wr32(IGC_EIMC, adapter->eims_enable_mask);
4093 		regval = rd32(IGC_EIAC);
4094 		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
4095 	}
4096 
4097 	wr32(IGC_IAM, 0);
4098 	wr32(IGC_IMC, ~0);
4099 	wrfl();
4100 
4101 	if (adapter->msix_entries) {
4102 		int vector = 0, i;
4103 
4104 		synchronize_irq(adapter->msix_entries[vector++].vector);
4105 
4106 		for (i = 0; i < adapter->num_q_vectors; i++)
4107 			synchronize_irq(adapter->msix_entries[vector++].vector);
4108 	} else {
4109 		synchronize_irq(adapter->pdev->irq);
4110 	}
4111 }
4112 
4113 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4114 			      const u32 max_rss_queues)
4115 {
4116 	/* Determine if we need to pair queues. */
4117 	/* If rss_queues > half of max_rss_queues, pair the queues in
4118 	 * order to conserve interrupts due to limited supply.
4119 	 */
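	/* E.g. with max_rss_queues == 4, three or four RSS queues enable
	 * pairing (a queue's Tx and Rx share one vector), while one or two
	 * RSS queues keep separate Tx and Rx vectors.
	 */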
4120 	if (adapter->rss_queues > (max_rss_queues / 2))
4121 		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4122 	else
4123 		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
4124 }
4125 
4126 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
4127 {
4128 	return IGC_MAX_RX_QUEUES;
4129 }
4130 
4131 static void igc_init_queue_configuration(struct igc_adapter *adapter)
4132 {
4133 	u32 max_rss_queues;
4134 
4135 	max_rss_queues = igc_get_max_rss_queues(adapter);
4136 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
4137 
4138 	igc_set_flag_queue_pairs(adapter, max_rss_queues);
4139 }
4140 
4141 /**
4142  * igc_reset_q_vector - Reset config for interrupt vector
4143  * @adapter: board private structure to initialize
4144  * @v_idx: Index of vector to be reset
4145  *
4146  * If NAPI is enabled it will delete any references to the
4147  * NAPI struct. This is preparation for igc_free_q_vector.
4148  */
4149 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
4150 {
4151 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4152 
4153 	/* if we're coming from igc_set_interrupt_capability, the vectors are
4154 	 * not yet allocated
4155 	 */
4156 	if (!q_vector)
4157 		return;
4158 
4159 	if (q_vector->tx.ring)
4160 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4161 
4162 	if (q_vector->rx.ring)
4163 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
4164 
4165 	netif_napi_del(&q_vector->napi);
4166 }
4167 
4168 /**
4169  * igc_free_q_vector - Free memory allocated for specific interrupt vector
4170  * @adapter: board private structure to initialize
4171  * @v_idx: Index of vector to be freed
4172  *
4173  * This function frees the memory allocated to the q_vector.
4174  */
4175 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
4176 {
4177 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4178 
4179 	adapter->q_vector[v_idx] = NULL;
4180 
4181 	/* igc_get_stats64() might access the rings on this vector,
4182 	 * we must wait a grace period before freeing it.
4183 	 */
4184 	if (q_vector)
4185 		kfree_rcu(q_vector, rcu);
4186 }
4187 
4188 /**
4189  * igc_free_q_vectors - Free memory allocated for interrupt vectors
4190  * @adapter: board private structure to initialize
4191  *
4192  * This function frees the memory allocated to the q_vectors.  In addition if
4193  * NAPI is enabled it will delete any references to the NAPI struct prior
4194  * to freeing the q_vector.
4195  */
4196 static void igc_free_q_vectors(struct igc_adapter *adapter)
4197 {
4198 	int v_idx = adapter->num_q_vectors;
4199 
4200 	adapter->num_tx_queues = 0;
4201 	adapter->num_rx_queues = 0;
4202 	adapter->num_q_vectors = 0;
4203 
4204 	while (v_idx--) {
4205 		igc_reset_q_vector(adapter, v_idx);
4206 		igc_free_q_vector(adapter, v_idx);
4207 	}
4208 }
4209 
4210 /**
4211  * igc_update_itr - update the dynamic ITR value based on statistics
4212  * @q_vector: pointer to q_vector
4213  * @ring_container: ring info to update the itr for
4214  *
4215  * Stores a new ITR value based on packets and byte
4216  * counts during the last interrupt.  The advantage of per interrupt
4217  * computation is faster updates and more accurate ITR for the current
4218  * traffic pattern.  Constants in this function were computed
4219  * based on theoretical maximum wire speed and thresholds were set based
4220  * on testing data as well as attempting to minimize response time
4221  * while increasing bulk throughput.
4222  * NOTE: These calculations are only valid when operating in a single-
4223  * queue environment.
4224  */
4225 static void igc_update_itr(struct igc_q_vector *q_vector,
4226 			   struct igc_ring_container *ring_container)
4227 {
4228 	unsigned int packets = ring_container->total_packets;
4229 	unsigned int bytes = ring_container->total_bytes;
4230 	u8 itrval = ring_container->itr;
4231 
4232 	/* no packets, exit with status unchanged */
4233 	if (packets == 0)
4234 		return;
4235 
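	/* E.g. an interrupt that saw 40 packets totalling 12000 bytes while
	 * in low_latency moves to lowest_latency: bytes > 10000, the packets
	 * are small on average, and packets > 35.
	 */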
4236 	switch (itrval) {
4237 	case lowest_latency:
4238 		/* handle TSO and jumbo frames */
4239 		if (bytes / packets > 8000)
4240 			itrval = bulk_latency;
4241 		else if ((packets < 5) && (bytes > 512))
4242 			itrval = low_latency;
4243 		break;
4244 	case low_latency:  /* 50 usec aka 20000 ints/s */
4245 		if (bytes > 10000) {
4246 			/* this if handles the TSO accounting */
4247 			if (bytes / packets > 8000)
4248 				itrval = bulk_latency;
4249 			else if ((packets < 10) || ((bytes / packets) > 1200))
4250 				itrval = bulk_latency;
4251 			else if (packets > 35)
4252 				itrval = lowest_latency;
4253 		} else if (bytes / packets > 2000) {
4254 			itrval = bulk_latency;
4255 		} else if (packets <= 2 && bytes < 512) {
4256 			itrval = lowest_latency;
4257 		}
4258 		break;
4259 	case bulk_latency: /* 250 usec aka 4000 ints/s */
4260 		if (bytes > 25000) {
4261 			if (packets > 35)
4262 				itrval = low_latency;
4263 		} else if (bytes < 1500) {
4264 			itrval = low_latency;
4265 		}
4266 		break;
4267 	}
4268 
4269 	/* clear work counters since we have the values we need */
4270 	ring_container->total_bytes = 0;
4271 	ring_container->total_packets = 0;
4272 
4273 	/* write updated itr to ring container */
4274 	ring_container->itr = itrval;
4275 }
4276 
4277 static void igc_set_itr(struct igc_q_vector *q_vector)
4278 {
4279 	struct igc_adapter *adapter = q_vector->adapter;
4280 	u32 new_itr = q_vector->itr_val;
4281 	u8 current_itr = 0;
4282 
4283 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4284 	switch (adapter->link_speed) {
4285 	case SPEED_10:
4286 	case SPEED_100:
4287 		current_itr = 0;
4288 		new_itr = IGC_4K_ITR;
4289 		goto set_itr_now;
4290 	default:
4291 		break;
4292 	}
4293 
4294 	igc_update_itr(q_vector, &q_vector->tx);
4295 	igc_update_itr(q_vector, &q_vector->rx);
4296 
4297 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4298 
4299 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4300 	if (current_itr == lowest_latency &&
4301 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4302 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4303 		current_itr = low_latency;
4304 
4305 	switch (current_itr) {
4306 	/* counts and packets in update_itr are dependent on these numbers */
4307 	case lowest_latency:
4308 		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
4309 		break;
4310 	case low_latency:
4311 		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
4312 		break;
4313 	case bulk_latency:
4314 		new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
4315 		break;
4316 	default:
4317 		break;
4318 	}
4319 
4320 set_itr_now:
4321 	if (new_itr != q_vector->itr_val) {
4322 		/* this attempts to bias the interrupt rate towards Bulk
4323 		 * by adding intermediate steps when interrupt rate is
4324 		 * increasing
4325 		 */
4326 		new_itr = new_itr > q_vector->itr_val ?
4327 			  max((new_itr * q_vector->itr_val) /
4328 			  (new_itr + (q_vector->itr_val >> 2)),
4329 			  new_itr) : new_itr;
4330 		/* Don't write the value here; it resets the adapter's
4331 		 * internal timer, and causes us to delay far longer than
4332 		 * we should between interrupts.  Instead, we write the ITR
4333 		 * value at the beginning of the next interrupt so the timing
4334 		 * ends up being correct.
4335 		 */
4336 		q_vector->itr_val = new_itr;
4337 		q_vector->set_itr = 1;
4338 	}
4339 }
4340 
4341 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
4342 {
4343 	int v_idx = adapter->num_q_vectors;
4344 
4345 	if (adapter->msix_entries) {
4346 		pci_disable_msix(adapter->pdev);
4347 		kfree(adapter->msix_entries);
4348 		adapter->msix_entries = NULL;
4349 	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4350 		pci_disable_msi(adapter->pdev);
4351 	}
4352 
4353 	while (v_idx--)
4354 		igc_reset_q_vector(adapter, v_idx);
4355 }
4356 
4357 /**
4358  * igc_set_interrupt_capability - set MSI or MSI-X if supported
4359  * @adapter: Pointer to adapter structure
4360  * @msix: boolean value for MSI-X capability
4361  *
4362  * Attempt to configure interrupts using the best available
4363  * capabilities of the hardware and kernel.
4364  */
4365 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
4366 					 bool msix)
4367 {
4368 	int numvecs, i;
4369 	int err;
4370 
4371 	if (!msix)
4372 		goto msi_only;
4373 	adapter->flags |= IGC_FLAG_HAS_MSIX;
4374 
4375 	/* Number of supported queues. */
4376 	adapter->num_rx_queues = adapter->rss_queues;
4377 
4378 	adapter->num_tx_queues = adapter->rss_queues;
4379 
4380 	/* start with one vector for every Rx queue */
4381 	numvecs = adapter->num_rx_queues;
4382 
4383 	/* if Tx handler is separate add 1 for every Tx queue */
4384 	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4385 		numvecs += adapter->num_tx_queues;
4386 
4387 	/* store the number of vectors reserved for queues */
4388 	adapter->num_q_vectors = numvecs;
4389 
4390 	/* add 1 vector for link status interrupts */
4391 	numvecs++;
4392 
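	/* E.g. with 4 RSS queues and queue pairing enabled this requests
	 * 4 queue vectors + 1 "other" vector = 5 MSI-X entries; without
	 * pairing it would be 4 + 4 + 1 = 9.
	 */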
4393 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
4394 					GFP_KERNEL);
4395 
4396 	if (!adapter->msix_entries)
4397 		return;
4398 
4399 	/* populate entry values */
4400 	for (i = 0; i < numvecs; i++)
4401 		adapter->msix_entries[i].entry = i;
4402 
4403 	err = pci_enable_msix_range(adapter->pdev,
4404 				    adapter->msix_entries,
4405 				    numvecs,
4406 				    numvecs);
4407 	if (err > 0)
4408 		return;
4409 
4410 	kfree(adapter->msix_entries);
4411 	adapter->msix_entries = NULL;
4412 
4413 	igc_reset_interrupt_capability(adapter);
4414 
4415 msi_only:
4416 	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
4417 
4418 	adapter->rss_queues = 1;
4419 	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4420 	adapter->num_rx_queues = 1;
4421 	adapter->num_tx_queues = 1;
4422 	adapter->num_q_vectors = 1;
4423 	if (!pci_enable_msi(adapter->pdev))
4424 		adapter->flags |= IGC_FLAG_HAS_MSI;
4425 }
4426 
4427 /**
4428  * igc_update_ring_itr - update the dynamic ITR value based on packet size
4429  * @q_vector: pointer to q_vector
4430  *
4431  * Stores a new ITR value based strictly on packet size.  This
4432  * algorithm is less sophisticated than that used in igc_update_itr,
4433  * due to the difficulty of synchronizing statistics across multiple
4434  * receive rings.  The divisors and thresholds used by this function
4435  * were determined based on theoretical maximum wire speed and testing
4436  * data, in order to minimize response time while increasing bulk
4437  * throughput.
4438  * NOTE: This function is called only when operating in a multiqueue
4439  * receive environment.
4440  */
4441 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
4442 {
4443 	struct igc_adapter *adapter = q_vector->adapter;
4444 	int new_val = q_vector->itr_val;
4445 	int avg_wire_size = 0;
4446 	unsigned int packets;
4447 
4448 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
4449 	 * ints/sec - ITR timer value of 120 ticks.
4450 	 */
4451 	switch (adapter->link_speed) {
4452 	case SPEED_10:
4453 	case SPEED_100:
4454 		new_val = IGC_4K_ITR;
4455 		goto set_itr_val;
4456 	default:
4457 		break;
4458 	}
4459 
4460 	packets = q_vector->rx.total_packets;
4461 	if (packets)
4462 		avg_wire_size = q_vector->rx.total_bytes / packets;
4463 
4464 	packets = q_vector->tx.total_packets;
4465 	if (packets)
4466 		avg_wire_size = max_t(u32, avg_wire_size,
4467 				      q_vector->tx.total_bytes / packets);
4468 
4469 	/* if avg_wire_size isn't set no work was done */
4470 	if (!avg_wire_size)
4471 		goto clear_counts;
4472 
4473 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
4474 	avg_wire_size += 24;
4475 
4476 	/* Don't starve jumbo frames */
4477 	avg_wire_size = min(avg_wire_size, 3000);
4478 
4479 	/* Give a little boost to mid-size frames */
4480 	if (avg_wire_size > 300 && avg_wire_size < 1200)
4481 		new_val = avg_wire_size / 3;
4482 	else
4483 		new_val = avg_wire_size / 2;
4484 
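	/* E.g. a stream of 1500-byte frames gives avg_wire_size = 1524
	 * after the +24 adjustment, which is above the mid-size boost
	 * window, so new_val = 1524 / 2 = 762.
	 */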
4485 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4486 	if (new_val < IGC_20K_ITR &&
4487 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4488 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4489 		new_val = IGC_20K_ITR;
4490 
4491 set_itr_val:
4492 	if (new_val != q_vector->itr_val) {
4493 		q_vector->itr_val = new_val;
4494 		q_vector->set_itr = 1;
4495 	}
4496 clear_counts:
4497 	q_vector->rx.total_bytes = 0;
4498 	q_vector->rx.total_packets = 0;
4499 	q_vector->tx.total_bytes = 0;
4500 	q_vector->tx.total_packets = 0;
4501 }
4502 
4503 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
4504 {
4505 	struct igc_adapter *adapter = q_vector->adapter;
4506 	struct igc_hw *hw = &adapter->hw;
4507 
4508 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4509 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4510 		if (adapter->num_q_vectors == 1)
4511 			igc_set_itr(q_vector);
4512 		else
4513 			igc_update_ring_itr(q_vector);
4514 	}
4515 
4516 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
4517 		if (adapter->msix_entries)
4518 			wr32(IGC_EIMS, q_vector->eims_value);
4519 		else
4520 			igc_irq_enable(adapter);
4521 	}
4522 }
4523 
4524 static void igc_add_ring(struct igc_ring *ring,
4525 			 struct igc_ring_container *head)
4526 {
4527 	head->ring = ring;
4528 	head->count++;
4529 }
4530 
4531 /**
4532  * igc_cache_ring_register - Descriptor ring to register mapping
4533  * @adapter: board private structure to initialize
4534  *
4535  * Once we know the feature-set enabled for the device, we'll cache
4536  * the register offset the descriptor ring is assigned to.
4537  */
4538 static void igc_cache_ring_register(struct igc_adapter *adapter)
4539 {
4540 	int i = 0, j = 0;
4541 
4542 	switch (adapter->hw.mac.type) {
4543 	case igc_i225:
4544 	default:
4545 		for (; i < adapter->num_rx_queues; i++)
4546 			adapter->rx_ring[i]->reg_idx = i;
4547 		for (; j < adapter->num_tx_queues; j++)
4548 			adapter->tx_ring[j]->reg_idx = j;
4549 		break;
4550 	}
4551 }
4552 
4553 /**
4554  * igc_poll - NAPI Rx polling callback
4555  * @napi: napi polling structure
4556  * @budget: count of how many packets we should handle
4557  */
4558 static int igc_poll(struct napi_struct *napi, int budget)
4559 {
4560 	struct igc_q_vector *q_vector = container_of(napi,
4561 						     struct igc_q_vector,
4562 						     napi);
4563 	struct igc_ring *rx_ring = q_vector->rx.ring;
4564 	bool clean_complete = true;
4565 	int work_done = 0;
4566 
4567 	if (q_vector->tx.ring)
4568 		clean_complete = igc_clean_tx_irq(q_vector, budget);
4569 
4570 	if (rx_ring) {
4571 		int cleaned = rx_ring->xsk_pool ?
4572 			      igc_clean_rx_irq_zc(q_vector, budget) :
4573 			      igc_clean_rx_irq(q_vector, budget);
4574 
4575 		work_done += cleaned;
4576 		if (cleaned >= budget)
4577 			clean_complete = false;
4578 	}
4579 
4580 	/* If all work not completed, return budget and keep polling */
4581 	if (!clean_complete)
4582 		return budget;
4583 
4584 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4585 	 * poll us due to busy-polling
4586 	 */
4587 	if (likely(napi_complete_done(napi, work_done)))
4588 		igc_ring_irq_enable(q_vector);
4589 
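	/* Once napi_complete_done() has been called the return value must
	 * be strictly less than budget, hence the min() below.
	 */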
4590 	return min(work_done, budget - 1);
4591 }
4592 
4593 /**
4594  * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4595  * @adapter: board private structure to initialize
4596  * @v_count: q_vectors allocated on adapter, used for ring interleaving
4597  * @v_idx: index of vector in adapter struct
4598  * @txr_count: total number of Tx rings to allocate
4599  * @txr_idx: index of first Tx ring to allocate
4600  * @rxr_count: total number of Rx rings to allocate
4601  * @rxr_idx: index of first Rx ring to allocate
4602  *
4603  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
4604  */
4605 static int igc_alloc_q_vector(struct igc_adapter *adapter,
4606 			      unsigned int v_count, unsigned int v_idx,
4607 			      unsigned int txr_count, unsigned int txr_idx,
4608 			      unsigned int rxr_count, unsigned int rxr_idx)
4609 {
4610 	struct igc_q_vector *q_vector;
4611 	struct igc_ring *ring;
4612 	int ring_count;
4613 
4614 	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
4615 	if (txr_count > 1 || rxr_count > 1)
4616 		return -ENOMEM;
4617 
4618 	ring_count = txr_count + rxr_count;
4619 
4620 	/* allocate q_vector and rings */
4621 	q_vector = adapter->q_vector[v_idx];
4622 	if (!q_vector)
4623 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
4624 				   GFP_KERNEL);
4625 	else
4626 		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4627 	if (!q_vector)
4628 		return -ENOMEM;
4629 
4630 	/* initialize NAPI */
4631 	netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
4632 
4633 	/* tie q_vector and adapter together */
4634 	adapter->q_vector[v_idx] = q_vector;
4635 	q_vector->adapter = adapter;
4636 
4637 	/* initialize work limits */
4638 	q_vector->tx.work_limit = adapter->tx_work_limit;
4639 
4640 	/* initialize ITR configuration */
4641 	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4642 	q_vector->itr_val = IGC_START_ITR;
4643 
4644 	/* initialize pointer to rings */
4645 	ring = q_vector->ring;
4646 
4647 	/* initialize ITR */
4648 	if (rxr_count) {
4649 		/* rx or rx/tx vector */
4650 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4651 			q_vector->itr_val = adapter->rx_itr_setting;
4652 	} else {
4653 		/* tx only vector */
4654 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4655 			q_vector->itr_val = adapter->tx_itr_setting;
4656 	}
4657 
4658 	if (txr_count) {
4659 		/* assign generic ring traits */
4660 		ring->dev = &adapter->pdev->dev;
4661 		ring->netdev = adapter->netdev;
4662 
4663 		/* configure backlink on ring */
4664 		ring->q_vector = q_vector;
4665 
4666 		/* update q_vector Tx values */
4667 		igc_add_ring(ring, &q_vector->tx);
4668 
4669 		/* apply Tx specific ring traits */
4670 		ring->count = adapter->tx_ring_count;
4671 		ring->queue_index = txr_idx;
4672 
4673 		/* assign ring to adapter */
4674 		adapter->tx_ring[txr_idx] = ring;
4675 
4676 		/* push pointer to next ring */
4677 		ring++;
4678 	}
4679 
4680 	if (rxr_count) {
4681 		/* assign generic ring traits */
4682 		ring->dev = &adapter->pdev->dev;
4683 		ring->netdev = adapter->netdev;
4684 
4685 		/* configure backlink on ring */
4686 		ring->q_vector = q_vector;
4687 
4688 		/* update q_vector Rx values */
4689 		igc_add_ring(ring, &q_vector->rx);
4690 
4691 		/* apply Rx specific ring traits */
4692 		ring->count = adapter->rx_ring_count;
4693 		ring->queue_index = rxr_idx;
4694 
4695 		/* assign ring to adapter */
4696 		adapter->rx_ring[rxr_idx] = ring;
4697 	}
4698 
4699 	return 0;
4700 }
4701 
4702 /**
4703  * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4704  * @adapter: board private structure to initialize
4705  *
4706  * We allocate one q_vector per queue interrupt.  If allocation fails we
4707  * return -ENOMEM.
4708  */
4709 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4710 {
4711 	int rxr_remaining = adapter->num_rx_queues;
4712 	int txr_remaining = adapter->num_tx_queues;
4713 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4714 	int q_vectors = adapter->num_q_vectors;
4715 	int err;
4716 
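	/* With enough vectors for every ring (queue pairing disabled),
	 * create Rx-only vectors first; the loop below then creates the
	 * Tx-only ones.  With pairing enabled the loop below packs one Tx
	 * and one Rx ring into each vector instead.
	 */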
4717 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
4718 		for (; rxr_remaining; v_idx++) {
4719 			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4720 						 0, 0, 1, rxr_idx);
4721 
4722 			if (err)
4723 				goto err_out;
4724 
4725 			/* update counts and index */
4726 			rxr_remaining--;
4727 			rxr_idx++;
4728 		}
4729 	}
4730 
4731 	for (; v_idx < q_vectors; v_idx++) {
4732 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4733 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4734 
4735 		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4736 					 tqpv, txr_idx, rqpv, rxr_idx);
4737 
4738 		if (err)
4739 			goto err_out;
4740 
4741 		/* update counts and index */
4742 		rxr_remaining -= rqpv;
4743 		txr_remaining -= tqpv;
4744 		rxr_idx++;
4745 		txr_idx++;
4746 	}
4747 
4748 	return 0;
4749 
4750 err_out:
4751 	adapter->num_tx_queues = 0;
4752 	adapter->num_rx_queues = 0;
4753 	adapter->num_q_vectors = 0;
4754 
4755 	while (v_idx--)
4756 		igc_free_q_vector(adapter, v_idx);
4757 
4758 	return -ENOMEM;
4759 }
4760 
4761 /**
4762  * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4763  * @adapter: Pointer to adapter structure
4764  * @msix: boolean for MSI-X capability
4765  *
4766  * This function initializes the interrupts and allocates all of the queues.
4767  */
4768 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4769 {
4770 	struct net_device *dev = adapter->netdev;
4771 	int err = 0;
4772 
4773 	igc_set_interrupt_capability(adapter, msix);
4774 
4775 	err = igc_alloc_q_vectors(adapter);
4776 	if (err) {
4777 		netdev_err(dev, "Unable to allocate memory for vectors\n");
4778 		goto err_alloc_q_vectors;
4779 	}
4780 
4781 	igc_cache_ring_register(adapter);
4782 
4783 	return 0;
4784 
4785 err_alloc_q_vectors:
4786 	igc_reset_interrupt_capability(adapter);
4787 	return err;
4788 }
4789 
4790 /**
4791  * igc_sw_init - Initialize general software structures (struct igc_adapter)
4792  * @adapter: board private structure to initialize
4793  *
4794  * igc_sw_init initializes the Adapter private data structure.
4795  * Fields are initialized based on PCI device information and
4796  * OS network device settings (MTU size).
4797  */
4798 static int igc_sw_init(struct igc_adapter *adapter)
4799 {
4800 	struct net_device *netdev = adapter->netdev;
4801 	struct pci_dev *pdev = adapter->pdev;
4802 	struct igc_hw *hw = &adapter->hw;
4803 
4804 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4805 
4806 	/* set default ring sizes */
4807 	adapter->tx_ring_count = IGC_DEFAULT_TXD;
4808 	adapter->rx_ring_count = IGC_DEFAULT_RXD;
4809 
4810 	/* set default ITR values */
4811 	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4812 	adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4813 
4814 	/* set default work limits */
4815 	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4816 
4817 	/* adjust max frame to be at least the size of a standard frame */
4818 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4819 				VLAN_HLEN;
4820 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
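	/* E.g. for the default 1500 byte MTU this yields a max_frame_size of
	 * 1500 + 14 + 4 + 4 = 1522 bytes and a min_frame_size of 64 bytes.
	 */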
4821 
4822 	mutex_init(&adapter->nfc_rule_lock);
4823 	INIT_LIST_HEAD(&adapter->nfc_rule_list);
4824 	adapter->nfc_rule_count = 0;
4825 
4826 	spin_lock_init(&adapter->stats64_lock);
4827 	spin_lock_init(&adapter->qbv_tx_lock);
4828 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
4829 	adapter->flags |= IGC_FLAG_HAS_MSIX;
4830 
4831 	igc_init_queue_configuration(adapter);
4832 
4833 	/* This call may decrease the number of queues */
4834 	if (igc_init_interrupt_scheme(adapter, true)) {
4835 		netdev_err(netdev, "Unable to allocate memory for queues\n");
4836 		return -ENOMEM;
4837 	}
4838 
4839 	/* Explicitly disable IRQ since the NIC can be in any state. */
4840 	igc_irq_disable(adapter);
4841 
4842 	set_bit(__IGC_DOWN, &adapter->state);
4843 
4844 	return 0;
4845 }
4846 
4847 /**
4848  * igc_up - Open the interface and prepare it to handle traffic
4849  * @adapter: board private structure
4850  */
4851 void igc_up(struct igc_adapter *adapter)
4852 {
4853 	struct igc_hw *hw = &adapter->hw;
4854 	int i = 0;
4855 
4856 	/* hardware has been reset, we need to reload some things */
4857 	igc_configure(adapter);
4858 
4859 	clear_bit(__IGC_DOWN, &adapter->state);
4860 
4861 	for (i = 0; i < adapter->num_q_vectors; i++)
4862 		napi_enable(&adapter->q_vector[i]->napi);
4863 
4864 	if (adapter->msix_entries)
4865 		igc_configure_msix(adapter);
4866 	else
4867 		igc_assign_vector(adapter->q_vector[0], 0);
4868 
4869 	/* Clear any pending interrupts. */
4870 	rd32(IGC_ICR);
4871 	igc_irq_enable(adapter);
4872 
4873 	netif_tx_start_all_queues(adapter->netdev);
4874 
4875 	/* start the watchdog. */
4876 	hw->mac.get_link_status = true;
4877 	schedule_work(&adapter->watchdog_task);
4878 }
4879 
4880 /**
4881  * igc_update_stats - Update the board statistics counters
4882  * @adapter: board private structure
4883  */
4884 void igc_update_stats(struct igc_adapter *adapter)
4885 {
4886 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4887 	struct pci_dev *pdev = adapter->pdev;
4888 	struct igc_hw *hw = &adapter->hw;
4889 	u64 _bytes, _packets;
4890 	u64 bytes, packets;
4891 	unsigned int start;
4892 	u32 mpc;
4893 	int i;
4894 
4895 	/* Prevent stats update while adapter is being reset, or if the pci
4896 	 * connection is down.
4897 	 */
4898 	if (adapter->link_speed == 0)
4899 		return;
4900 	if (pci_channel_offline(pdev))
4901 		return;
4902 
4903 	packets = 0;
4904 	bytes = 0;
4905 
4906 	rcu_read_lock();
4907 	for (i = 0; i < adapter->num_rx_queues; i++) {
4908 		struct igc_ring *ring = adapter->rx_ring[i];
4909 		u32 rqdpc = rd32(IGC_RQDPC(i));
4910 
4911 		if (hw->mac.type >= igc_i225)
4912 			wr32(IGC_RQDPC(i), 0);
4913 
4914 		if (rqdpc) {
4915 			ring->rx_stats.drops += rqdpc;
4916 			net_stats->rx_fifo_errors += rqdpc;
4917 		}
4918 
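		/* Snapshot bytes/packets consistently; retry if a writer
		 * updated the ring counters while they were being read.
		 */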
4919 		do {
4920 			start = u64_stats_fetch_begin(&ring->rx_syncp);
4921 			_bytes = ring->rx_stats.bytes;
4922 			_packets = ring->rx_stats.packets;
4923 		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
4924 		bytes += _bytes;
4925 		packets += _packets;
4926 	}
4927 
4928 	net_stats->rx_bytes = bytes;
4929 	net_stats->rx_packets = packets;
4930 
4931 	packets = 0;
4932 	bytes = 0;
4933 	for (i = 0; i < adapter->num_tx_queues; i++) {
4934 		struct igc_ring *ring = adapter->tx_ring[i];
4935 
4936 		do {
4937 			start = u64_stats_fetch_begin(&ring->tx_syncp);
4938 			_bytes = ring->tx_stats.bytes;
4939 			_packets = ring->tx_stats.packets;
4940 		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
4941 		bytes += _bytes;
4942 		packets += _packets;
4943 	}
4944 	net_stats->tx_bytes = bytes;
4945 	net_stats->tx_packets = packets;
4946 	rcu_read_unlock();
4947 
4948 	/* read stats registers */
4949 	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4950 	adapter->stats.gprc += rd32(IGC_GPRC);
4951 	adapter->stats.gorc += rd32(IGC_GORCL);
4952 	rd32(IGC_GORCH); /* clear GORCL */
4953 	adapter->stats.bprc += rd32(IGC_BPRC);
4954 	adapter->stats.mprc += rd32(IGC_MPRC);
4955 	adapter->stats.roc += rd32(IGC_ROC);
4956 
4957 	adapter->stats.prc64 += rd32(IGC_PRC64);
4958 	adapter->stats.prc127 += rd32(IGC_PRC127);
4959 	adapter->stats.prc255 += rd32(IGC_PRC255);
4960 	adapter->stats.prc511 += rd32(IGC_PRC511);
4961 	adapter->stats.prc1023 += rd32(IGC_PRC1023);
4962 	adapter->stats.prc1522 += rd32(IGC_PRC1522);
4963 	adapter->stats.tlpic += rd32(IGC_TLPIC);
4964 	adapter->stats.rlpic += rd32(IGC_RLPIC);
4965 	adapter->stats.hgptc += rd32(IGC_HGPTC);
4966 
4967 	mpc = rd32(IGC_MPC);
4968 	adapter->stats.mpc += mpc;
4969 	net_stats->rx_fifo_errors += mpc;
4970 	adapter->stats.scc += rd32(IGC_SCC);
4971 	adapter->stats.ecol += rd32(IGC_ECOL);
4972 	adapter->stats.mcc += rd32(IGC_MCC);
4973 	adapter->stats.latecol += rd32(IGC_LATECOL);
4974 	adapter->stats.dc += rd32(IGC_DC);
4975 	adapter->stats.rlec += rd32(IGC_RLEC);
4976 	adapter->stats.xonrxc += rd32(IGC_XONRXC);
4977 	adapter->stats.xontxc += rd32(IGC_XONTXC);
4978 	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4979 	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4980 	adapter->stats.fcruc += rd32(IGC_FCRUC);
4981 	adapter->stats.gptc += rd32(IGC_GPTC);
4982 	adapter->stats.gotc += rd32(IGC_GOTCL);
4983 	rd32(IGC_GOTCH); /* clear GOTCL */
4984 	adapter->stats.rnbc += rd32(IGC_RNBC);
4985 	adapter->stats.ruc += rd32(IGC_RUC);
4986 	adapter->stats.rfc += rd32(IGC_RFC);
4987 	adapter->stats.rjc += rd32(IGC_RJC);
4988 	adapter->stats.tor += rd32(IGC_TORH);
4989 	adapter->stats.tot += rd32(IGC_TOTH);
4990 	adapter->stats.tpr += rd32(IGC_TPR);
4991 
4992 	adapter->stats.ptc64 += rd32(IGC_PTC64);
4993 	adapter->stats.ptc127 += rd32(IGC_PTC127);
4994 	adapter->stats.ptc255 += rd32(IGC_PTC255);
4995 	adapter->stats.ptc511 += rd32(IGC_PTC511);
4996 	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4997 	adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4998 
4999 	adapter->stats.mptc += rd32(IGC_MPTC);
5000 	adapter->stats.bptc += rd32(IGC_BPTC);
5001 
5002 	adapter->stats.tpt += rd32(IGC_TPT);
5003 	adapter->stats.colc += rd32(IGC_COLC);
5004 	adapter->stats.colc += rd32(IGC_RERC);
5005 
5006 	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
5007 
5008 	adapter->stats.tsctc += rd32(IGC_TSCTC);
5009 
5010 	adapter->stats.iac += rd32(IGC_IAC);
5011 
5012 	/* Fill out the OS statistics structure */
5013 	net_stats->multicast = adapter->stats.mprc;
5014 	net_stats->collisions = adapter->stats.colc;
5015 
5016 	/* Rx Errors */
5017 
5018 	/* RLEC on some newer hardware can be incorrect so build
5019 	 * our own version based on RUC and ROC
5020 	 */
5021 	net_stats->rx_errors = adapter->stats.rxerrc +
5022 		adapter->stats.crcerrs + adapter->stats.algnerrc +
5023 		adapter->stats.ruc + adapter->stats.roc +
5024 		adapter->stats.cexterr;
5025 	net_stats->rx_length_errors = adapter->stats.ruc +
5026 				      adapter->stats.roc;
5027 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
5028 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
5029 	net_stats->rx_missed_errors = adapter->stats.mpc;
5030 
5031 	/* Tx Errors */
5032 	net_stats->tx_errors = adapter->stats.ecol +
5033 			       adapter->stats.latecol;
5034 	net_stats->tx_aborted_errors = adapter->stats.ecol;
5035 	net_stats->tx_window_errors = adapter->stats.latecol;
5036 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
5037 
5038 	/* Tx Dropped */
5039 	net_stats->tx_dropped = adapter->stats.txdrop;
5040 
5041 	/* Management Stats */
5042 	adapter->stats.mgptc += rd32(IGC_MGTPTC);
5043 	adapter->stats.mgprc += rd32(IGC_MGTPRC);
5044 	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
5045 }
5046 
5047 /**
5048  * igc_down - Close the interface
5049  * @adapter: board private structure
5050  */
5051 void igc_down(struct igc_adapter *adapter)
5052 {
5053 	struct net_device *netdev = adapter->netdev;
5054 	struct igc_hw *hw = &adapter->hw;
5055 	u32 tctl, rctl;
5056 	int i = 0;
5057 
5058 	set_bit(__IGC_DOWN, &adapter->state);
5059 
5060 	igc_ptp_suspend(adapter);
5061 
5062 	if (pci_device_is_present(adapter->pdev)) {
5063 		/* disable receives in the hardware */
5064 		rctl = rd32(IGC_RCTL);
5065 		wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
5066 		/* flush and sleep below */
5067 	}
5068 	/* set trans_start so we don't get spurious watchdogs during reset */
5069 	netif_trans_update(netdev);
5070 
5071 	netif_carrier_off(netdev);
5072 	netif_tx_stop_all_queues(netdev);
5073 
5074 	if (pci_device_is_present(adapter->pdev)) {
5075 		/* disable transmits in the hardware */
5076 		tctl = rd32(IGC_TCTL);
5077 		tctl &= ~IGC_TCTL_EN;
5078 		wr32(IGC_TCTL, tctl);
5079 		/* flush both disables and wait for them to finish */
5080 		wrfl();
5081 		usleep_range(10000, 20000);
5082 
5083 		igc_irq_disable(adapter);
5084 	}
5085 
5086 	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5087 
5088 	for (i = 0; i < adapter->num_q_vectors; i++) {
5089 		if (adapter->q_vector[i]) {
5090 			napi_synchronize(&adapter->q_vector[i]->napi);
5091 			napi_disable(&adapter->q_vector[i]->napi);
5092 		}
5093 	}
5094 
5095 	del_timer_sync(&adapter->watchdog_timer);
5096 	del_timer_sync(&adapter->phy_info_timer);
5097 
5098 	/* record the stats before reset */
5099 	spin_lock(&adapter->stats64_lock);
5100 	igc_update_stats(adapter);
5101 	spin_unlock(&adapter->stats64_lock);
5102 
5103 	adapter->link_speed = 0;
5104 	adapter->link_duplex = 0;
5105 
5106 	if (!pci_channel_offline(adapter->pdev))
5107 		igc_reset(adapter);
5108 
5109 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
5110 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
5111 
5112 	igc_disable_all_tx_rings_hw(adapter);
5113 	igc_clean_all_tx_rings(adapter);
5114 	igc_clean_all_rx_rings(adapter);
5115 }
5116 
5117 void igc_reinit_locked(struct igc_adapter *adapter)
5118 {
5119 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5120 		usleep_range(1000, 2000);
5121 	igc_down(adapter);
5122 	igc_up(adapter);
5123 	clear_bit(__IGC_RESETTING, &adapter->state);
5124 }
5125 
5126 static void igc_reset_task(struct work_struct *work)
5127 {
5128 	struct igc_adapter *adapter;
5129 
5130 	adapter = container_of(work, struct igc_adapter, reset_task);
5131 
5132 	rtnl_lock();
5133 	/* If we're already down or resetting, just bail */
5134 	if (test_bit(__IGC_DOWN, &adapter->state) ||
5135 	    test_bit(__IGC_RESETTING, &adapter->state)) {
5136 		rtnl_unlock();
5137 		return;
5138 	}
5139 
5140 	igc_rings_dump(adapter);
5141 	igc_regs_dump(adapter);
5142 	netdev_err(adapter->netdev, "Reset adapter\n");
5143 	igc_reinit_locked(adapter);
5144 	rtnl_unlock();
5145 }
5146 
5147 /**
5148  * igc_change_mtu - Change the Maximum Transfer Unit
5149  * @netdev: network interface device structure
5150  * @new_mtu: new value for maximum frame size
5151  *
5152  * Returns 0 on success, negative on failure
5153  */
5154 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
5155 {
5156 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5157 	struct igc_adapter *adapter = netdev_priv(netdev);
5158 
5159 	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
5160 		netdev_dbg(netdev, "Jumbo frames not supported with XDP");
5161 		return -EINVAL;
5162 	}
5163 
5164 	/* adjust max frame to be at least the size of a standard frame */
5165 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5166 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5167 
5168 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5169 		usleep_range(1000, 2000);
5170 
5171 	/* igc_down has a dependency on max_frame_size */
5172 	adapter->max_frame_size = max_frame;
5173 
5174 	if (netif_running(netdev))
5175 		igc_down(adapter);
5176 
5177 	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5178 	netdev->mtu = new_mtu;
5179 
5180 	if (netif_running(netdev))
5181 		igc_up(adapter);
5182 	else
5183 		igc_reset(adapter);
5184 
5185 	clear_bit(__IGC_RESETTING, &adapter->state);
5186 
5187 	return 0;
5188 }
5189 
5190 /**
5191  * igc_tx_timeout - Respond to a Tx Hang
5192  * @netdev: network interface device structure
5193  * @txqueue: queue number that timed out
5194  **/
5195 static void igc_tx_timeout(struct net_device *netdev,
5196 			   unsigned int __always_unused txqueue)
5197 {
5198 	struct igc_adapter *adapter = netdev_priv(netdev);
5199 	struct igc_hw *hw = &adapter->hw;
5200 
5201 	/* Do the reset outside of interrupt context */
5202 	adapter->tx_timeout_count++;
5203 	schedule_work(&adapter->reset_task);
5204 	wr32(IGC_EICS,
5205 	     (adapter->eims_enable_mask & ~adapter->eims_other));
5206 }
5207 
5208 /**
5209  * igc_get_stats64 - Get System Network Statistics
5210  * @netdev: network interface device structure
5211  * @stats: rtnl_link_stats64 pointer
5212  *
5213  * Fills @stats with the device statistics.
5214  * The statistics are updated here and also from the timer callback.
5215  */
5216 static void igc_get_stats64(struct net_device *netdev,
5217 			    struct rtnl_link_stats64 *stats)
5218 {
5219 	struct igc_adapter *adapter = netdev_priv(netdev);
5220 
5221 	spin_lock(&adapter->stats64_lock);
5222 	if (!test_bit(__IGC_RESETTING, &adapter->state))
5223 		igc_update_stats(adapter);
5224 	memcpy(stats, &adapter->stats64, sizeof(*stats));
5225 	spin_unlock(&adapter->stats64_lock);
5226 }
5227 
5228 static netdev_features_t igc_fix_features(struct net_device *netdev,
5229 					  netdev_features_t features)
5230 {
5231 	/* Since there is no support for separate Rx/Tx vlan accel
5232 	 * enable/disable make sure Tx flag is always in same state as Rx.
5233 	 */
5234 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5235 		features |= NETIF_F_HW_VLAN_CTAG_TX;
5236 	else
5237 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
5238 
5239 	return features;
5240 }
5241 
5242 static int igc_set_features(struct net_device *netdev,
5243 			    netdev_features_t features)
5244 {
5245 	netdev_features_t changed = netdev->features ^ features;
5246 	struct igc_adapter *adapter = netdev_priv(netdev);
5247 
5248 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
5249 		igc_vlan_mode(netdev, features);
5250 
5251 	/* Nothing more to do unless RXALL or NTUPLE changed */
5252 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
5253 		return 0;
5254 
5255 	if (!(features & NETIF_F_NTUPLE))
5256 		igc_flush_nfc_rules(adapter);
5257 
5258 	netdev->features = features;
5259 
5260 	if (netif_running(netdev))
5261 		igc_reinit_locked(adapter);
5262 	else
5263 		igc_reset(adapter);
5264 
5265 	return 1;
5266 }
5267 
5268 static netdev_features_t
5269 igc_features_check(struct sk_buff *skb, struct net_device *dev,
5270 		   netdev_features_t features)
5271 {
5272 	unsigned int network_hdr_len, mac_hdr_len;
5273 
5274 	/* Make certain the headers can be described by a context descriptor */
5275 	mac_hdr_len = skb_network_offset(skb);
5276 	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
5277 		return features & ~(NETIF_F_HW_CSUM |
5278 				    NETIF_F_SCTP_CRC |
5279 				    NETIF_F_HW_VLAN_CTAG_TX |
5280 				    NETIF_F_TSO |
5281 				    NETIF_F_TSO6);
5282 
5283 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
5284 	if (unlikely(network_hdr_len >  IGC_MAX_NETWORK_HDR_LEN))
5285 		return features & ~(NETIF_F_HW_CSUM |
5286 				    NETIF_F_SCTP_CRC |
5287 				    NETIF_F_TSO |
5288 				    NETIF_F_TSO6);
5289 
5290 	/* We can only support IPv4 TSO in tunnels if we can mangle the
5291 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
5292 	 */
5293 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
5294 		features &= ~NETIF_F_TSO;
5295 
5296 	return features;
5297 }
5298 
5299 static void igc_tsync_interrupt(struct igc_adapter *adapter)
5300 {
5301 	struct igc_hw *hw = &adapter->hw;
5302 	u32 tsauxc, sec, nsec, tsicr;
5303 	struct ptp_clock_event event;
5304 	struct timespec64 ts;
5305 
5306 	tsicr = rd32(IGC_TSICR);
5307 
5308 	if (tsicr & IGC_TSICR_SYS_WRAP) {
5309 		event.type = PTP_CLOCK_PPS;
5310 		if (adapter->ptp_caps.pps)
5311 			ptp_clock_event(adapter->ptp_clock, &event);
5312 	}
5313 
5314 	if (tsicr & IGC_TSICR_TXTS) {
5315 		/* retrieve hardware timestamp */
5316 		igc_ptp_tx_tstamp_event(adapter);
5317 	}
5318 
5319 	if (tsicr & IGC_TSICR_TT0) {
5320 		spin_lock(&adapter->tmreg_lock);
5321 		ts = timespec64_add(adapter->perout[0].start,
5322 				    adapter->perout[0].period);
5323 		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5324 		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5325 		tsauxc = rd32(IGC_TSAUXC);
5326 		tsauxc |= IGC_TSAUXC_EN_TT0;
5327 		wr32(IGC_TSAUXC, tsauxc);
5328 		adapter->perout[0].start = ts;
5329 		spin_unlock(&adapter->tmreg_lock);
5330 	}
5331 
5332 	if (tsicr & IGC_TSICR_TT1) {
5333 		spin_lock(&adapter->tmreg_lock);
5334 		ts = timespec64_add(adapter->perout[1].start,
5335 				    adapter->perout[1].period);
5336 		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5337 		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5338 		tsauxc = rd32(IGC_TSAUXC);
5339 		tsauxc |= IGC_TSAUXC_EN_TT1;
5340 		wr32(IGC_TSAUXC, tsauxc);
5341 		adapter->perout[1].start = ts;
5342 		spin_unlock(&adapter->tmreg_lock);
5343 	}
5344 
5345 	if (tsicr & IGC_TSICR_AUTT0) {
5346 		nsec = rd32(IGC_AUXSTMPL0);
5347 		sec  = rd32(IGC_AUXSTMPH0);
5348 		event.type = PTP_CLOCK_EXTTS;
5349 		event.index = 0;
5350 		event.timestamp = sec * NSEC_PER_SEC + nsec;
5351 		ptp_clock_event(adapter->ptp_clock, &event);
5352 	}
5353 
5354 	if (tsicr & IGC_TSICR_AUTT1) {
5355 		nsec = rd32(IGC_AUXSTMPL1);
5356 		sec  = rd32(IGC_AUXSTMPH1);
5357 		event.type = PTP_CLOCK_EXTTS;
5358 		event.index = 1;
5359 		event.timestamp = sec * NSEC_PER_SEC + nsec;
5360 		ptp_clock_event(adapter->ptp_clock, &event);
5361 	}
5362 }
5363 
5364 /**
5365  * igc_msix_other - msix other interrupt handler
5366  * @irq: interrupt number
5367  * @data: pointer to the adapter structure
5368  */
5369 static irqreturn_t igc_msix_other(int irq, void *data)
5370 {
5371 	struct igc_adapter *adapter = data;
5372 	struct igc_hw *hw = &adapter->hw;
5373 	u32 icr = rd32(IGC_ICR);
5374 
5375 	/* reading ICR causes bit 31 of EICR to be cleared */
5376 	if (icr & IGC_ICR_DRSTA)
5377 		schedule_work(&adapter->reset_task);
5378 
5379 	if (icr & IGC_ICR_DOUTSYNC) {
5380 		/* HW is reporting DMA is out of sync */
5381 		adapter->stats.doosync++;
5382 	}
5383 
5384 	if (icr & IGC_ICR_LSC) {
5385 		hw->mac.get_link_status = true;
5386 		/* guard against interrupt when we're going down */
5387 		if (!test_bit(__IGC_DOWN, &adapter->state))
5388 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5389 	}
5390 
5391 	if (icr & IGC_ICR_TS)
5392 		igc_tsync_interrupt(adapter);
5393 
5394 	wr32(IGC_EIMS, adapter->eims_other);
5395 
5396 	return IRQ_HANDLED;
5397 }
5398 
5399 static void igc_write_itr(struct igc_q_vector *q_vector)
5400 {
5401 	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
5402 
5403 	if (!q_vector->set_itr)
5404 		return;
5405 
5406 	if (!itr_val)
5407 		itr_val = IGC_ITR_VAL_MASK;
5408 
5409 	itr_val |= IGC_EITR_CNT_IGNR;
5410 
5411 	writel(itr_val, q_vector->itr_register);
5412 	q_vector->set_itr = 0;
5413 }
5414 
5415 static irqreturn_t igc_msix_ring(int irq, void *data)
5416 {
5417 	struct igc_q_vector *q_vector = data;
5418 
5419 	/* Write the ITR value calculated from the previous interrupt. */
5420 	igc_write_itr(q_vector);
5421 
5422 	napi_schedule(&q_vector->napi);
5423 
5424 	return IRQ_HANDLED;
5425 }
5426 
5427 /**
5428  * igc_request_msix - Initialize MSI-X interrupts
5429  * @adapter: Pointer to adapter structure
5430  *
5431  * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5432  * kernel.
5433  */
5434 static int igc_request_msix(struct igc_adapter *adapter)
5435 {
5436 	unsigned int num_q_vectors = adapter->num_q_vectors;
5437 	int i = 0, err = 0, vector = 0, free_vector = 0;
5438 	struct net_device *netdev = adapter->netdev;
5439 
5440 	err = request_irq(adapter->msix_entries[vector].vector,
5441 			  &igc_msix_other, 0, netdev->name, adapter);
5442 	if (err)
5443 		goto err_out;
5444 
5445 	if (num_q_vectors > MAX_Q_VECTORS) {
5446 		num_q_vectors = MAX_Q_VECTORS;
5447 		dev_warn(&adapter->pdev->dev,
5448 			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5449 			 adapter->num_q_vectors, MAX_Q_VECTORS);
5450 	}
5451 	for (i = 0; i < num_q_vectors; i++) {
5452 		struct igc_q_vector *q_vector = adapter->q_vector[i];
5453 
5454 		vector++;
5455 
5456 		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
5457 
5458 		if (q_vector->rx.ring && q_vector->tx.ring)
5459 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5460 				q_vector->rx.ring->queue_index);
5461 		else if (q_vector->tx.ring)
5462 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5463 				q_vector->tx.ring->queue_index);
5464 		else if (q_vector->rx.ring)
5465 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5466 				q_vector->rx.ring->queue_index);
5467 		else
5468 			sprintf(q_vector->name, "%s-unused", netdev->name);
5469 
5470 		err = request_irq(adapter->msix_entries[vector].vector,
5471 				  igc_msix_ring, 0, q_vector->name,
5472 				  q_vector);
5473 		if (err)
5474 			goto err_free;
5475 	}
5476 
5477 	igc_configure_msix(adapter);
5478 	return 0;
5479 
5480 err_free:
5481 	/* free already assigned IRQs */
5482 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
5483 
5484 	vector--;
5485 	for (i = 0; i < vector; i++) {
5486 		free_irq(adapter->msix_entries[free_vector++].vector,
5487 			 adapter->q_vector[i]);
5488 	}
5489 err_out:
5490 	return err;
5491 }
5492 
5493 /**
5494  * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5495  * @adapter: Pointer to adapter structure
5496  *
5497  * This function resets the device so that it has 0 rx queues, tx queues, and
5498  * MSI-X interrupts allocated.
5499  */
5500 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5501 {
5502 	igc_free_q_vectors(adapter);
5503 	igc_reset_interrupt_capability(adapter);
5504 }
5505 
5506 /* Need to wait a few seconds after link up to get diagnostic information from
5507  * the phy
5508  */
5509 static void igc_update_phy_info(struct timer_list *t)
5510 {
5511 	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5512 
5513 	igc_get_phy_info(&adapter->hw);
5514 }
5515 
5516 /**
5517  * igc_has_link - check shared code for link and determine up/down
5518  * @adapter: pointer to driver private info
5519  */
5520 bool igc_has_link(struct igc_adapter *adapter)
5521 {
5522 	struct igc_hw *hw = &adapter->hw;
5523 	bool link_active = false;
5524 
5525 	/* get_link_status is set on an LSC (link status change) or
5526 	 * rx sequence error interrupt. It stays set until
5527 	 * igc_check_for_link() establishes link, for copper
5528 	 * adapters only.
5529 	 */
5530 	if (!hw->mac.get_link_status)
5531 		return true;
5532 	hw->mac.ops.check_for_link(hw);
5533 	link_active = !hw->mac.get_link_status;
5534 
5535 	if (hw->mac.type == igc_i225) {
5536 		if (!netif_carrier_ok(adapter->netdev)) {
5537 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5538 		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5539 			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5540 			adapter->link_check_timeout = jiffies;
5541 		}
5542 	}
5543 
5544 	return link_active;
5545 }
5546 
5547 /**
5548  * igc_watchdog - Timer Call-back
5549  * @t: timer for the watchdog
5550  */
5551 static void igc_watchdog(struct timer_list *t)
5552 {
5553 	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5554 	/* Do the rest outside of interrupt context */
5555 	schedule_work(&adapter->watchdog_task);
5556 }
5557 
5558 static void igc_watchdog_task(struct work_struct *work)
5559 {
5560 	struct igc_adapter *adapter = container_of(work,
5561 						   struct igc_adapter,
5562 						   watchdog_task);
5563 	struct net_device *netdev = adapter->netdev;
5564 	struct igc_hw *hw = &adapter->hw;
5565 	struct igc_phy_info *phy = &hw->phy;
5566 	u16 phy_data, retry_count = 20;
5567 	u32 link;
5568 	int i;
5569 
5570 	link = igc_has_link(adapter);
5571 
5572 	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5573 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5574 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5575 		else
5576 			link = false;
5577 	}
5578 
5579 	if (link) {
5580 		/* Cancel scheduled suspend requests. */
5581 		pm_runtime_resume(netdev->dev.parent);
5582 
5583 		if (!netif_carrier_ok(netdev)) {
5584 			u32 ctrl;
5585 
5586 			hw->mac.ops.get_speed_and_duplex(hw,
5587 							 &adapter->link_speed,
5588 							 &adapter->link_duplex);
5589 
5590 			ctrl = rd32(IGC_CTRL);
5591 			/* Link status message must follow this format */
5592 			netdev_info(netdev,
5593 				    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5594 				    adapter->link_speed,
5595 				    adapter->link_duplex == FULL_DUPLEX ?
5596 				    "Full" : "Half",
5597 				    (ctrl & IGC_CTRL_TFCE) &&
5598 				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5599 				    (ctrl & IGC_CTRL_RFCE) ?  "RX" :
5600 				    (ctrl & IGC_CTRL_TFCE) ?  "TX" : "None");
5601 
5602 			/* disable EEE if enabled */
5603 			if ((adapter->flags & IGC_FLAG_EEE) &&
5604 			    adapter->link_duplex == HALF_DUPLEX) {
5605 				netdev_info(netdev,
5606 					    "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5607 				adapter->hw.dev_spec._base.eee_enable = false;
5608 				adapter->flags &= ~IGC_FLAG_EEE;
5609 			}
5610 
5611 			/* check if SmartSpeed worked */
5612 			igc_check_downshift(hw);
5613 			if (phy->speed_downgraded)
5614 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5615 
5616 			/* adjust timeout factor according to speed/duplex */
5617 			adapter->tx_timeout_factor = 1;
5618 			switch (adapter->link_speed) {
5619 			case SPEED_10:
5620 				adapter->tx_timeout_factor = 14;
5621 				break;
5622 			case SPEED_100:
5623 			case SPEED_1000:
5624 			case SPEED_2500:
5625 				adapter->tx_timeout_factor = 1;
5626 				break;
5627 			}
5628 
5629 			/* Once the launch time has been set on the wire, there
5630 			 * is a delay before the link speed can be determined
5631 			 * based on link-up activity. Write into the register
5632 			 * as soon as we know the correct link speed.
5633 			 */
5634 			igc_tsn_adjust_txtime_offset(adapter);
5635 
5636 			if (adapter->link_speed != SPEED_1000)
5637 				goto no_wait;
5638 
5639 			/* wait for Remote receiver status OK */
5640 retry_read_status:
5641 			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5642 					      &phy_data)) {
5643 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5644 				    retry_count) {
5645 					msleep(100);
5646 					retry_count--;
5647 					goto retry_read_status;
5648 				} else if (!retry_count) {
5649 					netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n");
5650 				}
5651 			} else {
5652 				netdev_err(netdev, "failed to read 1000Base-T Status register\n");
5653 			}
5654 no_wait:
5655 			netif_carrier_on(netdev);
5656 
5657 			/* link state has changed, schedule phy info update */
5658 			if (!test_bit(__IGC_DOWN, &adapter->state))
5659 				mod_timer(&adapter->phy_info_timer,
5660 					  round_jiffies(jiffies + 2 * HZ));
5661 		}
5662 	} else {
5663 		if (netif_carrier_ok(netdev)) {
5664 			adapter->link_speed = 0;
5665 			adapter->link_duplex = 0;
5666 
5667 			/* Link status message must follow this format */
5668 			netdev_info(netdev, "NIC Link is Down\n");
5669 			netif_carrier_off(netdev);
5670 
5671 			/* link state has changed, schedule phy info update */
5672 			if (!test_bit(__IGC_DOWN, &adapter->state))
5673 				mod_timer(&adapter->phy_info_timer,
5674 					  round_jiffies(jiffies + 2 * HZ));
5675 
5676 			pm_schedule_suspend(netdev->dev.parent,
5677 					    MSEC_PER_SEC * 5);
5678 		}
5679 	}
5680 
5681 	spin_lock(&adapter->stats64_lock);
5682 	igc_update_stats(adapter);
5683 	spin_unlock(&adapter->stats64_lock);
5684 
5685 	for (i = 0; i < adapter->num_tx_queues; i++) {
5686 		struct igc_ring *tx_ring = adapter->tx_ring[i];
5687 
5688 		if (!netif_carrier_ok(netdev)) {
5689 			/* We've lost link, so the controller stops DMA,
5690 			 * but we've got queued Tx work that's never going
5691 			 * to get done, so reset controller to flush Tx.
5692 			 * (Do the reset outside of interrupt context).
5693 			 */
5694 			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5695 				adapter->tx_timeout_count++;
5696 				schedule_work(&adapter->reset_task);
5697 				/* return immediately since reset is imminent */
5698 				return;
5699 			}
5700 		}
5701 
5702 		/* Force detection of hung controller every watchdog period */
5703 		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5704 	}
5705 
5706 	/* Cause software interrupt to ensure Rx ring is cleaned */
5707 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5708 		u32 eics = 0;
5709 
5710 		for (i = 0; i < adapter->num_q_vectors; i++)
5711 			eics |= adapter->q_vector[i]->eims_value;
5712 		wr32(IGC_EICS, eics);
5713 	} else {
5714 		wr32(IGC_ICS, IGC_ICS_RXDMT0);
5715 	}
5716 
5717 	igc_ptp_tx_hang(adapter);
5718 
5719 	/* Reset the timer */
5720 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
5721 		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5722 			mod_timer(&adapter->watchdog_timer,
5723 				  round_jiffies(jiffies + HZ));
5724 		else
5725 			mod_timer(&adapter->watchdog_timer,
5726 				  round_jiffies(jiffies + 2 * HZ));
5727 	}
5728 }
5729 
5730 /**
5731  * igc_intr_msi - Interrupt Handler
5732  * @irq: interrupt number
5733  * @data: pointer to a network interface device structure
5734  */
5735 static irqreturn_t igc_intr_msi(int irq, void *data)
5736 {
5737 	struct igc_adapter *adapter = data;
5738 	struct igc_q_vector *q_vector = adapter->q_vector[0];
5739 	struct igc_hw *hw = &adapter->hw;
5740 	/* read ICR disables interrupts using IAM */
5741 	u32 icr = rd32(IGC_ICR);
5742 
5743 	igc_write_itr(q_vector);
5744 
5745 	if (icr & IGC_ICR_DRSTA)
5746 		schedule_work(&adapter->reset_task);
5747 
5748 	if (icr & IGC_ICR_DOUTSYNC) {
5749 		/* HW is reporting DMA is out of sync */
5750 		adapter->stats.doosync++;
5751 	}
5752 
5753 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5754 		hw->mac.get_link_status = true;
5755 		if (!test_bit(__IGC_DOWN, &adapter->state))
5756 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5757 	}
5758 
5759 	if (icr & IGC_ICR_TS)
5760 		igc_tsync_interrupt(adapter);
5761 
5762 	napi_schedule(&q_vector->napi);
5763 
5764 	return IRQ_HANDLED;
5765 }
5766 
5767 /**
5768  * igc_intr - Legacy Interrupt Handler
5769  * @irq: interrupt number
5770  * @data: pointer to a network interface device structure
5771  */
5772 static irqreturn_t igc_intr(int irq, void *data)
5773 {
5774 	struct igc_adapter *adapter = data;
5775 	struct igc_q_vector *q_vector = adapter->q_vector[0];
5776 	struct igc_hw *hw = &adapter->hw;
5777 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
5778 	 * need for the IMC write
5779 	 */
5780 	u32 icr = rd32(IGC_ICR);
5781 
5782 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5783 	 * not set, then the adapter didn't send an interrupt
5784 	 */
5785 	if (!(icr & IGC_ICR_INT_ASSERTED))
5786 		return IRQ_NONE;
5787 
5788 	igc_write_itr(q_vector);
5789 
5790 	if (icr & IGC_ICR_DRSTA)
5791 		schedule_work(&adapter->reset_task);
5792 
5793 	if (icr & IGC_ICR_DOUTSYNC) {
5794 		/* HW is reporting DMA is out of sync */
5795 		adapter->stats.doosync++;
5796 	}
5797 
5798 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5799 		hw->mac.get_link_status = true;
5800 		/* guard against interrupt when we're going down */
5801 		if (!test_bit(__IGC_DOWN, &adapter->state))
5802 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5803 	}
5804 
5805 	if (icr & IGC_ICR_TS)
5806 		igc_tsync_interrupt(adapter);
5807 
5808 	napi_schedule(&q_vector->napi);
5809 
5810 	return IRQ_HANDLED;
5811 }
5812 
5813 static void igc_free_irq(struct igc_adapter *adapter)
5814 {
5815 	if (adapter->msix_entries) {
5816 		int vector = 0, i;
5817 
5818 		free_irq(adapter->msix_entries[vector++].vector, adapter);
5819 
5820 		for (i = 0; i < adapter->num_q_vectors; i++)
5821 			free_irq(adapter->msix_entries[vector++].vector,
5822 				 adapter->q_vector[i]);
5823 	} else {
5824 		free_irq(adapter->pdev->irq, adapter);
5825 	}
5826 }
5827 
5828 /**
5829  * igc_request_irq - initialize interrupts
5830  * @adapter: Pointer to adapter structure
5831  *
5832  * Attempts to configure interrupts using the best available
5833  * capabilities of the hardware and kernel.
5834  */
5835 static int igc_request_irq(struct igc_adapter *adapter)
5836 {
5837 	struct net_device *netdev = adapter->netdev;
5838 	struct pci_dev *pdev = adapter->pdev;
5839 	int err = 0;
5840 
5841 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5842 		err = igc_request_msix(adapter);
5843 		if (!err)
5844 			goto request_done;
5845 		/* fall back to MSI */
5846 		igc_free_all_tx_resources(adapter);
5847 		igc_free_all_rx_resources(adapter);
5848 
5849 		igc_clear_interrupt_scheme(adapter);
5850 		err = igc_init_interrupt_scheme(adapter, false);
5851 		if (err)
5852 			goto request_done;
5853 		igc_setup_all_tx_resources(adapter);
5854 		igc_setup_all_rx_resources(adapter);
5855 		igc_configure(adapter);
5856 	}
5857 
5858 	igc_assign_vector(adapter->q_vector[0], 0);
5859 
5860 	if (adapter->flags & IGC_FLAG_HAS_MSI) {
5861 		err = request_irq(pdev->irq, &igc_intr_msi, 0,
5862 				  netdev->name, adapter);
5863 		if (!err)
5864 			goto request_done;
5865 
5866 		/* fall back to legacy interrupts */
5867 		igc_reset_interrupt_capability(adapter);
5868 		adapter->flags &= ~IGC_FLAG_HAS_MSI;
5869 	}
5870 
5871 	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5872 			  netdev->name, adapter);
5873 
5874 	if (err)
5875 		netdev_err(netdev, "Error %d getting interrupt\n", err);
5876 
5877 request_done:
5878 	return err;
5879 }
5880 
5881 /**
5882  * __igc_open - Called when a network interface is made active
5883  * @netdev: network interface device structure
5884  * @resuming: boolean indicating if the device is resuming
5885  *
5886  * Returns 0 on success, negative value on failure
5887  *
5888  * The open entry point is called when a network interface is made
5889  * active by the system (IFF_UP).  At this point all resources needed
5890  * for transmit and receive operations are allocated, the interrupt
5891  * handler is registered with the OS, the watchdog timer is started,
5892  * and the stack is notified that the interface is ready.
5893  */
5894 static int __igc_open(struct net_device *netdev, bool resuming)
5895 {
5896 	struct igc_adapter *adapter = netdev_priv(netdev);
5897 	struct pci_dev *pdev = adapter->pdev;
5898 	struct igc_hw *hw = &adapter->hw;
5899 	int err = 0;
5900 	int i = 0;
5901 
5902 	/* disallow open during test */
5903 
5904 	if (test_bit(__IGC_TESTING, &adapter->state)) {
5905 		WARN_ON(resuming);
5906 		return -EBUSY;
5907 	}
5908 
5909 	if (!resuming)
5910 		pm_runtime_get_sync(&pdev->dev);
5911 
5912 	netif_carrier_off(netdev);
5913 
5914 	/* allocate transmit descriptors */
5915 	err = igc_setup_all_tx_resources(adapter);
5916 	if (err)
5917 		goto err_setup_tx;
5918 
5919 	/* allocate receive descriptors */
5920 	err = igc_setup_all_rx_resources(adapter);
5921 	if (err)
5922 		goto err_setup_rx;
5923 
5924 	igc_power_up_link(adapter);
5925 
5926 	igc_configure(adapter);
5927 
5928 	err = igc_request_irq(adapter);
5929 	if (err)
5930 		goto err_req_irq;
5931 
5932 	clear_bit(__IGC_DOWN, &adapter->state);
5933 
5934 	for (i = 0; i < adapter->num_q_vectors; i++)
5935 		napi_enable(&adapter->q_vector[i]->napi);
5936 
5937 	/* Clear any pending interrupts. */
5938 	rd32(IGC_ICR);
5939 	igc_irq_enable(adapter);
5940 
5941 	if (!resuming)
5942 		pm_runtime_put(&pdev->dev);
5943 
5944 	netif_tx_start_all_queues(netdev);
5945 
5946 	/* start the watchdog. */
5947 	hw->mac.get_link_status = true;
5948 	schedule_work(&adapter->watchdog_task);
5949 
5950 	return IGC_SUCCESS;
5951 
5952 err_req_irq:
5953 	igc_release_hw_control(adapter);
5954 	igc_power_down_phy_copper_base(&adapter->hw);
5955 	igc_free_all_rx_resources(adapter);
5956 err_setup_rx:
5957 	igc_free_all_tx_resources(adapter);
5958 err_setup_tx:
5959 	igc_reset(adapter);
5960 	if (!resuming)
5961 		pm_runtime_put(&pdev->dev);
5962 
5963 	return err;
5964 }
5965 
5966 int igc_open(struct net_device *netdev)
5967 {
5968 	struct igc_adapter *adapter = netdev_priv(netdev);
5969 	int err;
5970 
5971 	/* Notify the stack of the actual queue counts. */
5972 	err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
5973 					adapter->num_rx_queues);
5974 	if (err) {
5975 		netdev_err(netdev, "error setting real queue count\n");
5976 		return err;
5977 	}
5978 
5979 	return __igc_open(netdev, false);
5980 }
5981 
5982 /**
5983  * __igc_close - Disables a network interface
5984  * @netdev: network interface device structure
5985  * @suspending: boolean indicating the device is suspending
5986  *
5987  * Returns 0, this is not allowed to fail
5988  *
5989  * The close entry point is called when an interface is de-activated
5990  * by the OS.  The hardware is still under the driver's control, but
5991  * needs to be disabled.  A global MAC reset is issued to stop the
5992  * hardware, and all transmit and receive resources are freed.
5993  */
5994 static int __igc_close(struct net_device *netdev, bool suspending)
5995 {
5996 	struct igc_adapter *adapter = netdev_priv(netdev);
5997 	struct pci_dev *pdev = adapter->pdev;
5998 
5999 	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
6000 
6001 	if (!suspending)
6002 		pm_runtime_get_sync(&pdev->dev);
6003 
6004 	igc_down(adapter);
6005 
6006 	igc_release_hw_control(adapter);
6007 
6008 	igc_free_irq(adapter);
6009 
6010 	igc_free_all_tx_resources(adapter);
6011 	igc_free_all_rx_resources(adapter);
6012 
6013 	if (!suspending)
6014 		pm_runtime_put_sync(&pdev->dev);
6015 
6016 	return 0;
6017 }
6018 
6019 int igc_close(struct net_device *netdev)
6020 {
6021 	if (netif_device_present(netdev) || netdev->dismantle)
6022 		return __igc_close(netdev, false);
6023 	return 0;
6024 }
6025 
6026 /**
6027  * igc_ioctl - Access the hwtstamp interface
6028  * @netdev: network interface device structure
6029  * @ifr: interface request data
6030  * @cmd: ioctl command
6031  **/
6032 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6033 {
6034 	switch (cmd) {
6035 	case SIOCGHWTSTAMP:
6036 		return igc_ptp_get_ts_config(netdev, ifr);
6037 	case SIOCSHWTSTAMP:
6038 		return igc_ptp_set_ts_config(netdev, ifr);
6039 	default:
6040 		return -EOPNOTSUPP;
6041 	}
6042 }
6043 
6044 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
6045 				      bool enable)
6046 {
6047 	struct igc_ring *ring;
6048 
6049 	if (queue < 0 || queue >= adapter->num_tx_queues)
6050 		return -EINVAL;
6051 
6052 	ring = adapter->tx_ring[queue];
6053 	ring->launchtime_enable = enable;
6054 
6055 	return 0;
6056 }
6057 
6058 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
6059 {
6060 	struct timespec64 b;
6061 
6062 	b = ktime_to_timespec64(base_time);
6063 
6064 	return timespec64_compare(now, &b) > 0;
6065 }
6066 
6067 static bool validate_schedule(struct igc_adapter *adapter,
6068 			      const struct tc_taprio_qopt_offload *qopt)
6069 {
6070 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
6071 	struct igc_hw *hw = &adapter->hw;
6072 	struct timespec64 now;
6073 	size_t n;
6074 
6075 	if (qopt->cycle_time_extension)
6076 		return false;
6077 
6078 	igc_ptp_read(adapter, &now);
6079 
6080 	/* If we program the controller's BASET registers with a time
6081 	 * in the future, it will hold all packets until that time,
6082 	 * causing a lot of TX hangs. To avoid that, reject schedules
6083 	 * that would start in the future.
6084 	 * Note: this limitation no longer applies to i226.
6085 	 */
6086 	if (!is_base_time_past(qopt->base_time, &now) &&
6087 	    igc_is_device_id_i225(hw))
6088 		return false;
6089 
6090 	for (n = 0; n < qopt->num_entries; n++) {
6091 		const struct tc_taprio_sched_entry *e, *prev;
6092 		int i;
6093 
6094 		prev = n ? &qopt->entries[n - 1] : NULL;
6095 		e = &qopt->entries[n];
6096 
6097 		/* i225 only supports "global" frame preemption
6098 		 * settings.
6099 		 */
6100 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
6101 			return false;
6102 
6103 		for (i = 0; i < adapter->num_tx_queues; i++)
6104 			if (e->gate_mask & BIT(i)) {
6105 				queue_uses[i]++;
6106 
6107 				/* There are limitations: A single queue cannot
6108 				 * be opened and closed multiple times per cycle
6109 				 * unless the gate stays open. Check for it.
6110 				 */
6111 				if (queue_uses[i] > 1 &&
6112 				    !(prev->gate_mask & BIT(i)))
6113 					return false;
6114 			}
6115 	}
6116 
6117 	return true;
6118 }
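
/* Editorial note: the loop above rejects, for example, a cycle in which a
 * queue's gate opens, closes and then opens again, as in this illustrative
 * tc(8) taprio entry list (not driver code):
 *
 *   sched-entry S 0x1 300000     queue 0 open
 *   sched-entry S 0x2 300000     queue 0 closed
 *   sched-entry S 0x1 400000     queue 0 open again -> rejected
 *
 * Consecutive entries that keep the same gate open are accepted.
 */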
6119 
6120 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6121 				     struct tc_etf_qopt_offload *qopt)
6122 {
6123 	struct igc_hw *hw = &adapter->hw;
6124 	int err;
6125 
6126 	if (hw->mac.type != igc_i225)
6127 		return -EOPNOTSUPP;
6128 
6129 	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6130 	if (err)
6131 		return err;
6132 
6133 	return igc_tsn_offload_apply(adapter);
6134 }
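
/* Editorial note: launch time offload is normally requested from user space
 * through the etf qdisc; the "offload" flag is what reaches this handler via
 * ndo_setup_tc(). A minimal, illustrative sequence (interface name, handles
 * and delta value are placeholders):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 mqprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 hw 0
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *       clockid CLOCK_TAI delta 500000 offload
 */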
6135 
6136 static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
6137 {
6138 	unsigned long flags;
6139 	int i;
6140 
6141 	adapter->base_time = 0;
6142 	adapter->cycle_time = NSEC_PER_SEC;
6143 	adapter->taprio_offload_enable = false;
6144 	adapter->qbv_config_change_errors = 0;
6145 	adapter->qbv_count = 0;
6146 
6147 	for (i = 0; i < adapter->num_tx_queues; i++) {
6148 		struct igc_ring *ring = adapter->tx_ring[i];
6149 
6150 		ring->start_time = 0;
6151 		ring->end_time = NSEC_PER_SEC;
6152 		ring->max_sdu = 0;
6153 	}
6154 
6155 	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6156 
6157 	adapter->qbv_transition = false;
6158 
6159 	for (i = 0; i < adapter->num_tx_queues; i++) {
6160 		struct igc_ring *ring = adapter->tx_ring[i];
6161 
6162 		ring->oper_gate_closed = false;
6163 		ring->admin_gate_closed = false;
6164 	}
6165 
6166 	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6167 
6168 	return 0;
6169 }
6170 
6171 static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6172 {
6173 	igc_qbv_clear_schedule(adapter);
6174 
6175 	return 0;
6176 }
6177 
6178 static void igc_taprio_stats(struct net_device *dev,
6179 			     struct tc_taprio_qopt_stats *stats)
6180 {
6181 	/* When Strict_End is enabled, the tx_overruns counter
6182 	 * will always be zero.
6183 	 */
6184 	stats->tx_overruns = 0;
6185 }
6186 
6187 static void igc_taprio_queue_stats(struct net_device *dev,
6188 				   struct tc_taprio_qopt_queue_stats *queue_stats)
6189 {
6190 	struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
6191 
6192 	/* When Strict_End is enabled, the tx_overruns counter
6193 	 * will always be zero.
6194 	 */
6195 	stats->tx_overruns = 0;
6196 }
6197 
6198 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6199 				 struct tc_taprio_qopt_offload *qopt)
6200 {
6201 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6202 	struct igc_hw *hw = &adapter->hw;
6203 	u32 start_time = 0, end_time = 0;
6204 	struct timespec64 now;
6205 	unsigned long flags;
6206 	size_t n;
6207 	int i;
6208 
6209 	switch (qopt->cmd) {
6210 	case TAPRIO_CMD_REPLACE:
6211 		break;
6212 	case TAPRIO_CMD_DESTROY:
6213 		return igc_tsn_clear_schedule(adapter);
6214 	case TAPRIO_CMD_STATS:
6215 		igc_taprio_stats(adapter->netdev, &qopt->stats);
6216 		return 0;
6217 	case TAPRIO_CMD_QUEUE_STATS:
6218 		igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
6219 		return 0;
6220 	default:
6221 		return -EOPNOTSUPP;
6222 	}
6223 
6224 	if (qopt->base_time < 0)
6225 		return -ERANGE;
6226 
6227 	if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
6228 		return -EALREADY;
6229 
6230 	if (!validate_schedule(adapter, qopt))
6231 		return -EINVAL;
6232 
6233 	adapter->cycle_time = qopt->cycle_time;
6234 	adapter->base_time = qopt->base_time;
6235 	adapter->taprio_offload_enable = true;
6236 
6237 	igc_ptp_read(adapter, &now);
6238 
6239 	for (n = 0; n < qopt->num_entries; n++) {
6240 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
6241 
6242 		end_time += e->interval;
6243 
6244 		/* If any of the conditions below are true, we need to manually
6245 		 * control the end time of the cycle.
6246 		 * 1. Qbv users can specify a cycle time that is not equal
6247 		 * to the total GCL intervals. Hence, recalculation is
6248 		 * necessary here to exclude the time interval that
6249 		 * exceeds the cycle time.
6250 		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6251 		 * once the end of the list is reached, it will switch
6252 		 * to the END_OF_CYCLE state and leave the gates in the
6253 		 * same state until the next cycle is started.
6254 		 */
6255 		if (end_time > adapter->cycle_time ||
6256 		    n + 1 == qopt->num_entries)
6257 			end_time = adapter->cycle_time;
6258 
6259 		for (i = 0; i < adapter->num_tx_queues; i++) {
6260 			struct igc_ring *ring = adapter->tx_ring[i];
6261 
6262 			if (!(e->gate_mask & BIT(i)))
6263 				continue;
6264 
6265 			/* Check whether a queue stays open for more than one
6266 			 * entry. If so, keep the start and advance the end
6267 			 * time.
6268 			 */
6269 			if (!queue_configured[i])
6270 				ring->start_time = start_time;
6271 			ring->end_time = end_time;
6272 
6273 			if (ring->start_time >= adapter->cycle_time)
6274 				queue_configured[i] = false;
6275 			else
6276 				queue_configured[i] = true;
6277 		}
6278 
6279 		start_time += e->interval;
6280 	}
6281 
6282 	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6283 
6284 	/* Check whether a queue gets configured.
6285 	 * If not, set both its start and end time to the end time.
6286 	 */
6287 	for (i = 0; i < adapter->num_tx_queues; i++) {
6288 		struct igc_ring *ring = adapter->tx_ring[i];
6289 
6290 		if (!is_base_time_past(qopt->base_time, &now)) {
6291 			ring->admin_gate_closed = false;
6292 		} else {
6293 			ring->oper_gate_closed = false;
6294 			ring->admin_gate_closed = false;
6295 		}
6296 
6297 		if (!queue_configured[i]) {
6298 			if (!is_base_time_past(qopt->base_time, &now))
6299 				ring->admin_gate_closed = true;
6300 			else
6301 				ring->oper_gate_closed = true;
6302 
6303 			ring->start_time = end_time;
6304 			ring->end_time = end_time;
6305 		}
6306 	}
6307 
6308 	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6309 
6310 	for (i = 0; i < adapter->num_tx_queues; i++) {
6311 		struct igc_ring *ring = adapter->tx_ring[i];
6312 		struct net_device *dev = adapter->netdev;
6313 
6314 		if (qopt->max_sdu[i])
6315 			ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
6316 		else
6317 			ring->max_sdu = 0;
6318 	}
6319 
6320 	return 0;
6321 }
6322 
6323 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6324 					 struct tc_taprio_qopt_offload *qopt)
6325 {
6326 	struct igc_hw *hw = &adapter->hw;
6327 	int err;
6328 
6329 	if (hw->mac.type != igc_i225)
6330 		return -EOPNOTSUPP;
6331 
6332 	err = igc_save_qbv_schedule(adapter, qopt);
6333 	if (err)
6334 		return err;
6335 
6336 	return igc_tsn_offload_apply(adapter);
6337 }
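
/* Editorial note: a Qbv schedule is typically installed with the taprio
 * qdisc in full offload mode (flags 0x2); TAPRIO_CMD_REPLACE/DESTROY above
 * correspond to installing and removing such a schedule. Illustrative
 * example only, with placeholder interface, base-time and intervals:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 4 map 3 2 1 0 3 3 3 3 3 3 3 3 3 3 3 3 \
 *       queues 1@0 1@1 1@2 1@3 \
 *       base-time 1000000000 \
 *       sched-entry S 0x1 300000 \
 *       sched-entry S 0x8 700000 \
 *       flags 0x2
 */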
6338 
6339 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6340 			       bool enable, int idleslope, int sendslope,
6341 			       int hicredit, int locredit)
6342 {
6343 	bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6344 	struct net_device *netdev = adapter->netdev;
6345 	struct igc_ring *ring;
6346 	int i;
6347 
6348 	/* i225 has two sets of credit-based shaper logic, so CBS is
6349 	 * supported only on the two highest priority queues.
6350 	 */
6351 	if (queue < 0 || queue > 1)
6352 		return -EINVAL;
6353 
6354 	ring = adapter->tx_ring[queue];
6355 
6356 	for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6357 		if (adapter->tx_ring[i])
6358 			cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6359 
6360 	/* CBS should be enabled on the highest priority queue first in order
6361 	 * for the CBS algorithm to operate as intended.
6362 	 */
6363 	if (enable) {
6364 		if (queue == 1 && !cbs_status[0]) {
6365 			netdev_err(netdev,
6366 				   "Enabling CBS on queue1 before queue0\n");
6367 			return -EINVAL;
6368 		}
6369 	} else {
6370 		if (queue == 0 && cbs_status[1]) {
6371 			netdev_err(netdev,
6372 				   "Disabling CBS on queue0 before queue1\n");
6373 			return -EINVAL;
6374 		}
6375 	}
6376 
6377 	ring->cbs_enable = enable;
6378 	ring->idleslope = idleslope;
6379 	ring->sendslope = sendslope;
6380 	ring->hicredit = hicredit;
6381 	ring->locredit = locredit;
6382 
6383 	return 0;
6384 }
6385 
6386 static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6387 			      struct tc_cbs_qopt_offload *qopt)
6388 {
6389 	struct igc_hw *hw = &adapter->hw;
6390 	int err;
6391 
6392 	if (hw->mac.type != igc_i225)
6393 		return -EOPNOTSUPP;
6394 
6395 	if (qopt->queue < 0 || qopt->queue > 1)
6396 		return -EINVAL;
6397 
6398 	err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6399 				  qopt->idleslope, qopt->sendslope,
6400 				  qopt->hicredit, qopt->locredit);
6401 	if (err)
6402 		return err;
6403 
6404 	return igc_tsn_offload_apply(adapter);
6405 }
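
/* Editorial note: CBS offload is requested with the cbs qdisc. Per the
 * ordering enforced above, queue 0 must be enabled before queue 1 (and
 * disabled after it). Illustrative command with placeholder credits and
 * slopes:
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *       idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *       offload 1
 */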
6406 
6407 static int igc_tc_query_caps(struct igc_adapter *adapter,
6408 			     struct tc_query_caps_base *base)
6409 {
6410 	struct igc_hw *hw = &adapter->hw;
6411 
6412 	switch (base->type) {
6413 	case TC_SETUP_QDISC_TAPRIO: {
6414 		struct tc_taprio_caps *caps = base->caps;
6415 
6416 		caps->broken_mqprio = true;
6417 
6418 		if (hw->mac.type == igc_i225) {
6419 			caps->supports_queue_max_sdu = true;
6420 			caps->gate_mask_per_txq = true;
6421 		}
6422 
6423 		return 0;
6424 	}
6425 	default:
6426 		return -EOPNOTSUPP;
6427 	}
6428 }
6429 
6430 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6431 			void *type_data)
6432 {
6433 	struct igc_adapter *adapter = netdev_priv(dev);
6434 
6435 	adapter->tc_setup_type = type;
6436 
6437 	switch (type) {
6438 	case TC_QUERY_CAPS:
6439 		return igc_tc_query_caps(adapter, type_data);
6440 	case TC_SETUP_QDISC_TAPRIO:
6441 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6442 
6443 	case TC_SETUP_QDISC_ETF:
6444 		return igc_tsn_enable_launchtime(adapter, type_data);
6445 
6446 	case TC_SETUP_QDISC_CBS:
6447 		return igc_tsn_enable_cbs(adapter, type_data);
6448 
6449 	default:
6450 		return -EOPNOTSUPP;
6451 	}
6452 }
6453 
6454 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6455 {
6456 	struct igc_adapter *adapter = netdev_priv(dev);
6457 
6458 	switch (bpf->command) {
6459 	case XDP_SETUP_PROG:
6460 		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6461 	case XDP_SETUP_XSK_POOL:
6462 		return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6463 					  bpf->xsk.queue_id);
6464 	default:
6465 		return -EOPNOTSUPP;
6466 	}
6467 }
6468 
6469 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6470 			struct xdp_frame **frames, u32 flags)
6471 {
6472 	struct igc_adapter *adapter = netdev_priv(dev);
6473 	int cpu = smp_processor_id();
6474 	struct netdev_queue *nq;
6475 	struct igc_ring *ring;
6476 	int i, nxmit;
6477 
6478 	if (unlikely(!netif_carrier_ok(dev)))
6479 		return -ENETDOWN;
6480 
6481 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6482 		return -EINVAL;
6483 
6484 	ring = igc_xdp_get_tx_ring(adapter, cpu);
6485 	nq = txring_txq(ring);
6486 
6487 	__netif_tx_lock(nq, cpu);
6488 
6489 	/* Avoid transmit queue timeout since we share it with the slow path */
6490 	txq_trans_cond_update(nq);
6491 
6492 	nxmit = 0;
6493 	for (i = 0; i < num_frames; i++) {
6494 		int err;
6495 		struct xdp_frame *xdpf = frames[i];
6496 
6497 		err = igc_xdp_init_tx_descriptor(ring, xdpf);
6498 		if (err)
6499 			break;
6500 		nxmit++;
6501 	}
6502 
6503 	if (flags & XDP_XMIT_FLUSH)
6504 		igc_flush_tx_descriptors(ring);
6505 
6506 	__netif_tx_unlock(nq);
6507 
6508 	return nxmit;
6509 }
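
/* Editorial note: as with other ndo_xdp_xmit() implementations, the return
 * value is the number of frames actually queued; the XDP core frees any
 * frames beyond that count. The transmit ring is chosen per-CPU and shared
 * with the regular transmit path, hence the __netif_tx_lock() and
 * txq_trans_cond_update() above.
 */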
6510 
6511 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
6512 					struct igc_q_vector *q_vector)
6513 {
6514 	struct igc_hw *hw = &adapter->hw;
6515 	u32 eics = 0;
6516 
6517 	eics |= q_vector->eims_value;
6518 	wr32(IGC_EICS, eics);
6519 }
6520 
6521 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6522 {
6523 	struct igc_adapter *adapter = netdev_priv(dev);
6524 	struct igc_q_vector *q_vector;
6525 	struct igc_ring *ring;
6526 
6527 	if (test_bit(__IGC_DOWN, &adapter->state))
6528 		return -ENETDOWN;
6529 
6530 	if (!igc_xdp_is_enabled(adapter))
6531 		return -ENXIO;
6532 
6533 	if (queue_id >= adapter->num_rx_queues)
6534 		return -EINVAL;
6535 
6536 	ring = adapter->rx_ring[queue_id];
6537 
6538 	if (!ring->xsk_pool)
6539 		return -ENXIO;
6540 
6541 	q_vector = adapter->q_vector[queue_id];
6542 	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6543 		igc_trigger_rxtxq_interrupt(adapter, q_vector);
6544 
6545 	return 0;
6546 }
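
/* Editorial note: this is the AF_XDP need_wakeup hook. When NAPI is not
 * already scheduled for the queue's vector, a software interrupt is raised
 * through IGC_EICS so pending zero-copy Rx/Tx work gets processed.
 */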
6547 
6548 static ktime_t igc_get_tstamp(struct net_device *dev,
6549 			      const struct skb_shared_hwtstamps *hwtstamps,
6550 			      bool cycles)
6551 {
6552 	struct igc_adapter *adapter = netdev_priv(dev);
6553 	struct igc_inline_rx_tstamps *tstamp;
6554 	ktime_t timestamp;
6555 
6556 	tstamp = hwtstamps->netdev_data;
6557 
6558 	if (cycles)
6559 		timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
6560 	else
6561 		timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
6562 
6563 	return timestamp;
6564 }
6565 
6566 static const struct net_device_ops igc_netdev_ops = {
6567 	.ndo_open		= igc_open,
6568 	.ndo_stop		= igc_close,
6569 	.ndo_start_xmit		= igc_xmit_frame,
6570 	.ndo_set_rx_mode	= igc_set_rx_mode,
6571 	.ndo_set_mac_address	= igc_set_mac,
6572 	.ndo_change_mtu		= igc_change_mtu,
6573 	.ndo_tx_timeout		= igc_tx_timeout,
6574 	.ndo_get_stats64	= igc_get_stats64,
6575 	.ndo_fix_features	= igc_fix_features,
6576 	.ndo_set_features	= igc_set_features,
6577 	.ndo_features_check	= igc_features_check,
6578 	.ndo_eth_ioctl		= igc_ioctl,
6579 	.ndo_setup_tc		= igc_setup_tc,
6580 	.ndo_bpf		= igc_bpf,
6581 	.ndo_xdp_xmit		= igc_xdp_xmit,
6582 	.ndo_xsk_wakeup		= igc_xsk_wakeup,
6583 	.ndo_get_tstamp		= igc_get_tstamp,
6584 };
6585 
6586 /* PCIe configuration access */
6587 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6588 {
6589 	struct igc_adapter *adapter = hw->back;
6590 
6591 	pci_read_config_word(adapter->pdev, reg, value);
6592 }
6593 
6594 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6595 {
6596 	struct igc_adapter *adapter = hw->back;
6597 
6598 	pci_write_config_word(adapter->pdev, reg, *value);
6599 }
6600 
6601 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6602 {
6603 	struct igc_adapter *adapter = hw->back;
6604 
6605 	if (!pci_is_pcie(adapter->pdev))
6606 		return -IGC_ERR_CONFIG;
6607 
6608 	pcie_capability_read_word(adapter->pdev, reg, value);
6609 
6610 	return IGC_SUCCESS;
6611 }
6612 
6613 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6614 {
6615 	struct igc_adapter *adapter = hw->back;
6616 
6617 	if (!pci_is_pcie(adapter->pdev))
6618 		return -IGC_ERR_CONFIG;
6619 
6620 	pcie_capability_write_word(adapter->pdev, reg, *value);
6621 
6622 	return IGC_SUCCESS;
6623 }
6624 
6625 u32 igc_rd32(struct igc_hw *hw, u32 reg)
6626 {
6627 	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6628 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6629 	u32 value = 0;
6630 
6631 	if (IGC_REMOVED(hw_addr))
6632 		return ~value;
6633 
6634 	value = readl(&hw_addr[reg]);
6635 
6636 	/* reads should not return all F's */
6637 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6638 		struct net_device *netdev = igc->netdev;
6639 
6640 		hw->hw_addr = NULL;
6641 		netif_device_detach(netdev);
6642 		netdev_err(netdev, "PCIe link lost, device now detached\n");
6643 		WARN(pci_device_is_present(igc->pdev),
6644 		     "igc: Failed to read reg 0x%x!\n", reg);
6645 	}
6646 
6647 	return value;
6648 }
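
/* Editorial note: an all-ones readback is treated as a sign that the device
 * has dropped off the bus (surprise removal); hw_addr is cleared so later
 * reads bail out early and the netdev is detached from the stack.
 */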
6649 
6650 /* Mapping HW RSS Type to enum xdp_rss_hash_type */
6651 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
6652 	[IGC_RSS_TYPE_NO_HASH]		= XDP_RSS_TYPE_L2,
6653 	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_TCP,
6654 	[IGC_RSS_TYPE_HASH_IPV4]	= XDP_RSS_TYPE_L3_IPV4,
6655 	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_TCP,
6656 	[IGC_RSS_TYPE_HASH_IPV6_EX]	= XDP_RSS_TYPE_L3_IPV6_EX,
6657 	[IGC_RSS_TYPE_HASH_IPV6]	= XDP_RSS_TYPE_L3_IPV6,
6658 	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6659 	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_UDP,
6660 	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_UDP,
6661 	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
6662 	[10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
6663 	[11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask   */
6664 	[12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions      */
6665 	[13] = XDP_RSS_TYPE_NONE,
6666 	[14] = XDP_RSS_TYPE_NONE,
6667 	[15] = XDP_RSS_TYPE_NONE,
6668 };
6669 
6670 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6671 			   enum xdp_rss_hash_type *rss_type)
6672 {
6673 	const struct igc_xdp_buff *ctx = (void *)_ctx;
6674 
6675 	if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
6676 		return -ENODATA;
6677 
6678 	*hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
6679 	*rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
6680 
6681 	return 0;
6682 }
6683 
6684 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
6685 {
6686 	const struct igc_xdp_buff *ctx = (void *)_ctx;
6687 	struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
6688 	struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
6689 
6690 	if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
6691 		*timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
6692 
6693 		return 0;
6694 	}
6695 
6696 	return -ENODATA;
6697 }
6698 
6699 static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
6700 	.xmo_rx_hash			= igc_xdp_rx_hash,
6701 	.xmo_rx_timestamp		= igc_xdp_rx_timestamp,
6702 };
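
/* Editorial note: these callbacks back the XDP Rx metadata kfuncs
 * (bpf_xdp_metadata_rx_hash() and bpf_xdp_metadata_rx_timestamp()), letting
 * XDP and AF_XDP programs query the RSS hash and hardware Rx timestamp of
 * the frame currently being processed.
 */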
6703 
6704 static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
6705 {
6706 	struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
6707 						   hrtimer);
6708 	unsigned long flags;
6709 	unsigned int i;
6710 
6711 	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6712 
6713 	adapter->qbv_transition = true;
6714 	for (i = 0; i < adapter->num_tx_queues; i++) {
6715 		struct igc_ring *tx_ring = adapter->tx_ring[i];
6716 
6717 		if (tx_ring->admin_gate_closed) {
6718 			tx_ring->admin_gate_closed = false;
6719 			tx_ring->oper_gate_closed = true;
6720 		} else {
6721 			tx_ring->oper_gate_closed = false;
6722 		}
6723 	}
6724 	adapter->qbv_transition = false;
6725 
6726 	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6727 
6728 	return HRTIMER_NORESTART;
6729 }
6730 
6731 /**
6732  * igc_probe - Device Initialization Routine
6733  * @pdev: PCI device information struct
6734  * @ent: entry in igc_pci_tbl
6735  *
6736  * Returns 0 on success, negative on failure
6737  *
6738  * igc_probe initializes an adapter identified by a pci_dev structure.
6739  * The OS initialization, configuring the adapter private structure,
6740  * and a hardware reset occur.
6741  */
6742 static int igc_probe(struct pci_dev *pdev,
6743 		     const struct pci_device_id *ent)
6744 {
6745 	struct igc_adapter *adapter;
6746 	struct net_device *netdev;
6747 	struct igc_hw *hw;
6748 	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
6749 	int err;
6750 
6751 	err = pci_enable_device_mem(pdev);
6752 	if (err)
6753 		return err;
6754 
6755 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6756 	if (err) {
6757 		dev_err(&pdev->dev,
6758 			"No usable DMA configuration, aborting\n");
6759 		goto err_dma;
6760 	}
6761 
6762 	err = pci_request_mem_regions(pdev, igc_driver_name);
6763 	if (err)
6764 		goto err_pci_reg;
6765 
6766 	err = pci_enable_ptm(pdev, NULL);
6767 	if (err < 0)
6768 		dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
6769 
6770 	pci_set_master(pdev);
6771 
6772 	err = -ENOMEM;
6773 	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6774 				   IGC_MAX_TX_QUEUES);
6775 
6776 	if (!netdev)
6777 		goto err_alloc_etherdev;
6778 
6779 	SET_NETDEV_DEV(netdev, &pdev->dev);
6780 
6781 	pci_set_drvdata(pdev, netdev);
6782 	adapter = netdev_priv(netdev);
6783 	adapter->netdev = netdev;
6784 	adapter->pdev = pdev;
6785 	hw = &adapter->hw;
6786 	hw->back = adapter;
6787 	adapter->port_num = hw->bus.func;
6788 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6789 
6790 	err = pci_save_state(pdev);
6791 	if (err)
6792 		goto err_ioremap;
6793 
6794 	err = -EIO;
6795 	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6796 				   pci_resource_len(pdev, 0));
6797 	if (!adapter->io_addr)
6798 		goto err_ioremap;
6799 
6800 	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6801 	hw->hw_addr = adapter->io_addr;
6802 
6803 	netdev->netdev_ops = &igc_netdev_ops;
6804 	netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
6805 	igc_ethtool_set_ops(netdev);
6806 	netdev->watchdog_timeo = 5 * HZ;
6807 
6808 	netdev->mem_start = pci_resource_start(pdev, 0);
6809 	netdev->mem_end = pci_resource_end(pdev, 0);
6810 
6811 	/* PCI config space info */
6812 	hw->vendor_id = pdev->vendor;
6813 	hw->device_id = pdev->device;
6814 	hw->revision_id = pdev->revision;
6815 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
6816 	hw->subsystem_device_id = pdev->subsystem_device;
6817 
6818 	/* Copy the default MAC and PHY function pointers */
6819 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6820 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6821 
6822 	/* Initialize skew-specific constants */
6823 	err = ei->get_invariants(hw);
6824 	if (err)
6825 		goto err_sw_init;
6826 
6827 	/* Add supported features to the features list*/
6828 	netdev->features |= NETIF_F_SG;
6829 	netdev->features |= NETIF_F_TSO;
6830 	netdev->features |= NETIF_F_TSO6;
6831 	netdev->features |= NETIF_F_TSO_ECN;
6832 	netdev->features |= NETIF_F_RXHASH;
6833 	netdev->features |= NETIF_F_RXCSUM;
6834 	netdev->features |= NETIF_F_HW_CSUM;
6835 	netdev->features |= NETIF_F_SCTP_CRC;
6836 	netdev->features |= NETIF_F_HW_TC;
6837 
6838 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6839 				  NETIF_F_GSO_GRE_CSUM | \
6840 				  NETIF_F_GSO_IPXIP4 | \
6841 				  NETIF_F_GSO_IPXIP6 | \
6842 				  NETIF_F_GSO_UDP_TUNNEL | \
6843 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
6844 
6845 	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6846 	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6847 
6848 	/* setup the private structure */
6849 	err = igc_sw_init(adapter);
6850 	if (err)
6851 		goto err_sw_init;
6852 
6853 	/* copy netdev features into list of user selectable features */
6854 	netdev->hw_features |= NETIF_F_NTUPLE;
6855 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6856 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6857 	netdev->hw_features |= netdev->features;
6858 
6859 	netdev->features |= NETIF_F_HIGHDMA;
6860 
6861 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
6862 	netdev->mpls_features |= NETIF_F_HW_CSUM;
6863 	netdev->hw_enc_features |= netdev->vlan_features;
6864 
6865 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6866 			       NETDEV_XDP_ACT_XSK_ZEROCOPY;
6867 
6868 	/* MTU range: 68 - 9216 */
6869 	netdev->min_mtu = ETH_MIN_MTU;
6870 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6871 
6872 	/* before reading the NVM, reset the controller to put the device in a
6873 	 * known good starting state
6874 	 */
6875 	hw->mac.ops.reset_hw(hw);
6876 
6877 	if (igc_get_flash_presence_i225(hw)) {
6878 		if (hw->nvm.ops.validate(hw) < 0) {
6879 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6880 			err = -EIO;
6881 			goto err_eeprom;
6882 		}
6883 	}
6884 
6885 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6886 		/* copy the MAC address out of the NVM */
6887 		if (hw->mac.ops.read_mac_addr(hw))
6888 			dev_err(&pdev->dev, "NVM Read Error\n");
6889 	}
6890 
6891 	eth_hw_addr_set(netdev, hw->mac.addr);
6892 
6893 	if (!is_valid_ether_addr(netdev->dev_addr)) {
6894 		dev_err(&pdev->dev, "Invalid MAC Address\n");
6895 		err = -EIO;
6896 		goto err_eeprom;
6897 	}
6898 
6899 	/* configure RXPBSIZE and TXPBSIZE */
6900 	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
6901 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
6902 
6903 	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
6904 	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
6905 
6906 	INIT_WORK(&adapter->reset_task, igc_reset_task);
6907 	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
6908 
6909 	hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6910 	adapter->hrtimer.function = &igc_qbv_scheduling_timer;
6911 
6912 	/* Initialize link properties that are user-changeable */
6913 	adapter->fc_autoneg = true;
6914 	hw->mac.autoneg = true;
6915 	hw->phy.autoneg_advertised = 0xaf;
6916 
6917 	hw->fc.requested_mode = igc_fc_default;
6918 	hw->fc.current_mode = igc_fc_default;
6919 
6920 	/* By default, support wake on port A */
6921 	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6922 
6923 	/* initialize the wol settings based on the eeprom settings */
6924 	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6925 		adapter->wol |= IGC_WUFC_MAG;
6926 
6927 	device_set_wakeup_enable(&adapter->pdev->dev,
6928 				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6929 
6930 	igc_ptp_init(adapter);
6931 
6932 	igc_tsn_clear_schedule(adapter);
6933 
6934 	/* reset the hardware with the new settings */
6935 	igc_reset(adapter);
6936 
6937 	/* let the f/w know that the h/w is now under the control of the
6938 	 * driver.
6939 	 */
6940 	igc_get_hw_control(adapter);
6941 
6942 	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
6943 	err = register_netdev(netdev);
6944 	if (err)
6945 		goto err_register;
6946 
6947 	/* carrier off reporting is important to ethtool even BEFORE open */
6948 	netif_carrier_off(netdev);
6949 
6950 	/* Check if Media Autosense is enabled */
6951 	adapter->ei = *ei;
6952 
6953 	/* print pcie link status and MAC address */
6954 	pcie_print_link_status(pdev);
6955 	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6956 
6957 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6958 	/* Disable EEE for internal PHY devices */
6959 	hw->dev_spec._base.eee_enable = false;
6960 	adapter->flags &= ~IGC_FLAG_EEE;
6961 	igc_set_eee_i225(hw, false, false, false);
6962 
6963 	pm_runtime_put_noidle(&pdev->dev);
6964 
6965 	if (IS_ENABLED(CONFIG_IGC_LEDS)) {
6966 		err = igc_led_setup(adapter);
6967 		if (err)
6968 			goto err_register;
6969 	}
6970 
6971 	return 0;
6972 
6973 err_register:
6974 	igc_release_hw_control(adapter);
6975 err_eeprom:
6976 	if (!igc_check_reset_block(hw))
6977 		igc_reset_phy(hw);
6978 err_sw_init:
6979 	igc_clear_interrupt_scheme(adapter);
6980 	iounmap(adapter->io_addr);
6981 err_ioremap:
6982 	free_netdev(netdev);
6983 err_alloc_etherdev:
6984 	pci_release_mem_regions(pdev);
6985 err_pci_reg:
6986 err_dma:
6987 	pci_disable_device(pdev);
6988 	return err;
6989 }
6990 
6991 /**
6992  * igc_remove - Device Removal Routine
6993  * @pdev: PCI device information struct
6994  *
6995  * igc_remove is called by the PCI subsystem to alert the driver
6996  * that it should release a PCI device.  This could be caused by a
6997  * Hot-Plug event, or because the driver is going to be removed from
6998  * memory.
6999  */
7000 static void igc_remove(struct pci_dev *pdev)
7001 {
7002 	struct net_device *netdev = pci_get_drvdata(pdev);
7003 	struct igc_adapter *adapter = netdev_priv(netdev);
7004 
7005 	pm_runtime_get_noresume(&pdev->dev);
7006 
7007 	igc_flush_nfc_rules(adapter);
7008 
7009 	igc_ptp_stop(adapter);
7010 
7011 	pci_disable_ptm(pdev);
7012 	pci_clear_master(pdev);
7013 
7014 	set_bit(__IGC_DOWN, &adapter->state);
7015 
7016 	del_timer_sync(&adapter->watchdog_timer);
7017 	del_timer_sync(&adapter->phy_info_timer);
7018 
7019 	cancel_work_sync(&adapter->reset_task);
7020 	cancel_work_sync(&adapter->watchdog_task);
7021 	hrtimer_cancel(&adapter->hrtimer);
7022 
7023 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
7024 	 * would have already happened in close and is redundant.
7025 	 */
7026 	igc_release_hw_control(adapter);
7027 	unregister_netdev(netdev);
7028 
7029 	igc_clear_interrupt_scheme(adapter);
7030 	pci_iounmap(pdev, adapter->io_addr);
7031 	pci_release_mem_regions(pdev);
7032 
7033 	free_netdev(netdev);
7034 
7035 	pci_disable_device(pdev);
7036 }
7037 
7038 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
7039 			  bool runtime)
7040 {
7041 	struct net_device *netdev = pci_get_drvdata(pdev);
7042 	struct igc_adapter *adapter = netdev_priv(netdev);
7043 	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
7044 	struct igc_hw *hw = &adapter->hw;
7045 	u32 ctrl, rctl, status;
7046 	bool wake;
7047 
7048 	rtnl_lock();
7049 	netif_device_detach(netdev);
7050 
7051 	if (netif_running(netdev))
7052 		__igc_close(netdev, true);
7053 
7054 	igc_ptp_suspend(adapter);
7055 
7056 	igc_clear_interrupt_scheme(adapter);
7057 	rtnl_unlock();
7058 
7059 	status = rd32(IGC_STATUS);
7060 	if (status & IGC_STATUS_LU)
7061 		wufc &= ~IGC_WUFC_LNKC;
7062 
7063 	if (wufc) {
7064 		igc_setup_rctl(adapter);
7065 		igc_set_rx_mode(netdev);
7066 
7067 		/* turn on all-multi mode if wake on multicast is enabled */
7068 		if (wufc & IGC_WUFC_MC) {
7069 			rctl = rd32(IGC_RCTL);
7070 			rctl |= IGC_RCTL_MPE;
7071 			wr32(IGC_RCTL, rctl);
7072 		}
7073 
7074 		ctrl = rd32(IGC_CTRL);
7075 		ctrl |= IGC_CTRL_ADVD3WUC;
7076 		wr32(IGC_CTRL, ctrl);
7077 
7078 		/* Allow time for pending master requests to run */
7079 		igc_disable_pcie_master(hw);
7080 
7081 		wr32(IGC_WUC, IGC_WUC_PME_EN);
7082 		wr32(IGC_WUFC, wufc);
7083 	} else {
7084 		wr32(IGC_WUC, 0);
7085 		wr32(IGC_WUFC, 0);
7086 	}
7087 
7088 	wake = wufc || adapter->en_mng_pt;
7089 	if (!wake)
7090 		igc_power_down_phy_copper_base(&adapter->hw);
7091 	else
7092 		igc_power_up_link(adapter);
7093 
7094 	if (enable_wake)
7095 		*enable_wake = wake;
7096 
7097 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
7098 	 * would have already happened in close and is redundant.
7099 	 */
7100 	igc_release_hw_control(adapter);
7101 
7102 	pci_disable_device(pdev);
7103 
7104 	return 0;
7105 }
7106 
7107 static int igc_runtime_suspend(struct device *dev)
7108 {
7109 	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
7110 }
7111 
7112 static void igc_deliver_wake_packet(struct net_device *netdev)
7113 {
7114 	struct igc_adapter *adapter = netdev_priv(netdev);
7115 	struct igc_hw *hw = &adapter->hw;
7116 	struct sk_buff *skb;
7117 	u32 wupl;
7118 
7119 	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
7120 
7121 	/* WUPM stores only the first 128 bytes of the wake packet.
7122 	 * Read the packet only if we have the whole thing.
7123 	 */
7124 	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
7125 		return;
7126 
7127 	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
7128 	if (!skb)
7129 		return;
7130 
7131 	skb_put(skb, wupl);
7132 
7133 	/* Ensure reads are 32-bit aligned */
7134 	wupl = roundup(wupl, 4);
7135 
7136 	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
7137 
7138 	skb->protocol = eth_type_trans(skb, netdev);
7139 	netif_rx(skb);
7140 }
7141 
7142 static int igc_resume(struct device *dev)
7143 {
7144 	struct pci_dev *pdev = to_pci_dev(dev);
7145 	struct net_device *netdev = pci_get_drvdata(pdev);
7146 	struct igc_adapter *adapter = netdev_priv(netdev);
7147 	struct igc_hw *hw = &adapter->hw;
7148 	u32 err, val;
7149 
7150 	pci_set_power_state(pdev, PCI_D0);
7151 	pci_restore_state(pdev);
7152 	pci_save_state(pdev);
7153 
7154 	if (!pci_device_is_present(pdev))
7155 		return -ENODEV;
7156 	err = pci_enable_device_mem(pdev);
7157 	if (err) {
7158 		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
7159 		return err;
7160 	}
7161 	pci_set_master(pdev);
7162 
7163 	pci_enable_wake(pdev, PCI_D3hot, 0);
7164 	pci_enable_wake(pdev, PCI_D3cold, 0);
7165 
7166 	if (igc_init_interrupt_scheme(adapter, true)) {
7167 		netdev_err(netdev, "Unable to allocate memory for queues\n");
7168 		return -ENOMEM;
7169 	}
7170 
7171 	igc_reset(adapter);
7172 
7173 	/* let the f/w know that the h/w is now under the control of the
7174 	 * driver.
7175 	 */
7176 	igc_get_hw_control(adapter);
7177 
7178 	val = rd32(IGC_WUS);
7179 	if (val & WAKE_PKT_WUS)
7180 		igc_deliver_wake_packet(netdev);
7181 
7182 	wr32(IGC_WUS, ~0);
7183 
7184 	if (netif_running(netdev)) {
7185 		err = __igc_open(netdev, true);
7186 		if (!err)
7187 			netif_device_attach(netdev);
7188 	}
7189 
7190 	return err;
7191 }
7192 
7193 static int igc_runtime_resume(struct device *dev)
7194 {
7195 	return igc_resume(dev);
7196 }
7197 
7198 static int igc_suspend(struct device *dev)
7199 {
7200 	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
7201 }
7202 
7203 static int __maybe_unused igc_runtime_idle(struct device *dev)
7204 {
7205 	struct net_device *netdev = dev_get_drvdata(dev);
7206 	struct igc_adapter *adapter = netdev_priv(netdev);
7207 
7208 	if (!igc_has_link(adapter))
7209 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7210 
7211 	return -EBUSY;
7212 }
7213 
7214 static void igc_shutdown(struct pci_dev *pdev)
7215 {
7216 	bool wake;
7217 
7218 	__igc_shutdown(pdev, &wake, 0);
7219 
7220 	if (system_state == SYSTEM_POWER_OFF) {
7221 		pci_wake_from_d3(pdev, wake);
7222 		pci_set_power_state(pdev, PCI_D3hot);
7223 	}
7224 }
7225 
7226 /**
7227  *  igc_io_error_detected - called when PCI error is detected
7228  *  @pdev: Pointer to PCI device
7229  *  @state: The current PCI connection state
7230  *
7231  *  This function is called after a PCI bus error affecting
7232  *  this device has been detected.
7233  **/
7234 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
7235 					      pci_channel_state_t state)
7236 {
7237 	struct net_device *netdev = pci_get_drvdata(pdev);
7238 	struct igc_adapter *adapter = netdev_priv(netdev);
7239 
7240 	netif_device_detach(netdev);
7241 
7242 	if (state == pci_channel_io_perm_failure)
7243 		return PCI_ERS_RESULT_DISCONNECT;
7244 
7245 	if (netif_running(netdev))
7246 		igc_down(adapter);
7247 	pci_disable_device(pdev);
7248 
7249 	/* Request a slot reset. */
7250 	return PCI_ERS_RESULT_NEED_RESET;
7251 }
7252 
7253 /**
7254  *  igc_io_slot_reset - called after the PCI bus has been reset.
7255  *  @pdev: Pointer to PCI device
7256  *
7257  *  Restart the card from scratch, as if from a cold-boot. Implementation
7258  *  resembles the first-half of the igc_resume routine.
7259  **/
7260 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
7261 {
7262 	struct net_device *netdev = pci_get_drvdata(pdev);
7263 	struct igc_adapter *adapter = netdev_priv(netdev);
7264 	struct igc_hw *hw = &adapter->hw;
7265 	pci_ers_result_t result;
7266 
7267 	if (pci_enable_device_mem(pdev)) {
7268 		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
7269 		result = PCI_ERS_RESULT_DISCONNECT;
7270 	} else {
7271 		pci_set_master(pdev);
7272 		pci_restore_state(pdev);
7273 		pci_save_state(pdev);
7274 
7275 		pci_enable_wake(pdev, PCI_D3hot, 0);
7276 		pci_enable_wake(pdev, PCI_D3cold, 0);
7277 
7278 		/* In case of PCI error, adapter loses its HW address
7279 		 * so we should re-assign it here.
7280 		 */
7281 		hw->hw_addr = adapter->io_addr;
7282 
7283 		igc_reset(adapter);
7284 		wr32(IGC_WUS, ~0);
7285 		result = PCI_ERS_RESULT_RECOVERED;
7286 	}
7287 
7288 	return result;
7289 }
7290 
7291 /**
7292  *  igc_io_resume - called when traffic can start to flow again.
7293  *  @pdev: Pointer to PCI device
7294  *
7295  *  This callback is called when the error recovery driver tells us that
7296  *  it's OK to resume normal operation. Implementation resembles the
7297  *  second-half of the igc_resume routine.
7298  */
7299 static void igc_io_resume(struct pci_dev *pdev)
7300 {
7301 	struct net_device *netdev = pci_get_drvdata(pdev);
7302 	struct igc_adapter *adapter = netdev_priv(netdev);
7303 
7304 	rtnl_lock();
7305 	if (netif_running(netdev)) {
7306 		if (igc_open(netdev)) {
7307 			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
7308 			return;
7309 		}
7310 	}
7311 
7312 	netif_device_attach(netdev);
7313 
7314 	/* let the f/w know that the h/w is now under the control of the
7315 	 * driver.
7316 	 */
7317 	igc_get_hw_control(adapter);
7318 	rtnl_unlock();
7319 }
7320 
7321 static const struct pci_error_handlers igc_err_handler = {
7322 	.error_detected = igc_io_error_detected,
7323 	.slot_reset = igc_io_slot_reset,
7324 	.resume = igc_io_resume,
7325 };
7326 
7327 static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
7328 			  igc_runtime_suspend, igc_runtime_resume,
7329 			  igc_runtime_idle);
7330 
7331 static struct pci_driver igc_driver = {
7332 	.name     = igc_driver_name,
7333 	.id_table = igc_pci_tbl,
7334 	.probe    = igc_probe,
7335 	.remove   = igc_remove,
7336 	.driver.pm = pm_ptr(&igc_pm_ops),
7337 	.shutdown = igc_shutdown,
7338 	.err_handler = &igc_err_handler,
7339 };
7340 
7341 /**
7342  * igc_reinit_queues - reinitialize the device's queues
7343  * @adapter: pointer to adapter structure
7344  */
7345 int igc_reinit_queues(struct igc_adapter *adapter)
7346 {
7347 	struct net_device *netdev = adapter->netdev;
7348 	int err = 0;
7349 
7350 	if (netif_running(netdev))
7351 		igc_close(netdev);
7352 
7353 	igc_reset_interrupt_capability(adapter);
7354 
7355 	if (igc_init_interrupt_scheme(adapter, true)) {
7356 		netdev_err(netdev, "Unable to allocate memory for queues\n");
7357 		return -ENOMEM;
7358 	}
7359 
7360 	if (netif_running(netdev))
7361 		err = igc_open(netdev);
7362 
7363 	return err;
7364 }
7365 
7366 /**
7367  * igc_get_hw_dev - return device
7368  * @hw: pointer to hardware structure
7369  *
7370  * used by hardware layer to print debugging information
7371  */
7372 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
7373 {
7374 	struct igc_adapter *adapter = hw->back;
7375 
7376 	return adapter->netdev;
7377 }
7378 
7379 static void igc_disable_rx_ring_hw(struct igc_ring *ring)
7380 {
7381 	struct igc_hw *hw = &ring->q_vector->adapter->hw;
7382 	u8 idx = ring->reg_idx;
7383 	u32 rxdctl;
7384 
7385 	rxdctl = rd32(IGC_RXDCTL(idx));
7386 	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
7387 	rxdctl |= IGC_RXDCTL_SWFLUSH;
7388 	wr32(IGC_RXDCTL(idx), rxdctl);
7389 }
7390 
7391 void igc_disable_rx_ring(struct igc_ring *ring)
7392 {
7393 	igc_disable_rx_ring_hw(ring);
7394 	igc_clean_rx_ring(ring);
7395 }
7396 
7397 void igc_enable_rx_ring(struct igc_ring *ring)
7398 {
7399 	struct igc_adapter *adapter = ring->q_vector->adapter;
7400 
7401 	igc_configure_rx_ring(adapter, ring);
7402 
7403 	if (ring->xsk_pool)
7404 		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
7405 	else
7406 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
7407 }
7408 
7409 void igc_disable_tx_ring(struct igc_ring *ring)
7410 {
7411 	igc_disable_tx_ring_hw(ring);
7412 	igc_clean_tx_ring(ring);
7413 }
7414 
7415 void igc_enable_tx_ring(struct igc_ring *ring)
7416 {
7417 	struct igc_adapter *adapter = ring->q_vector->adapter;
7418 
7419 	igc_configure_tx_ring(adapter, ring);
7420 }
7421 
7422 /**
7423  * igc_init_module - Driver Registration Routine
7424  *
7425  * igc_init_module is the first routine called when the driver is
7426  * loaded. All it does is register with the PCI subsystem.
7427  */
7428 static int __init igc_init_module(void)
7429 {
7430 	int ret;
7431 
7432 	pr_info("%s\n", igc_driver_string);
7433 	pr_info("%s\n", igc_copyright);
7434 
7435 	ret = pci_register_driver(&igc_driver);
7436 	return ret;
7437 }
7438 
7439 module_init(igc_init_module);
7440 
7441 /**
7442  * igc_exit_module - Driver Exit Cleanup Routine
7443  *
7444  * igc_exit_module is called just before the driver is removed
7445  * from memory.
7446  */
7447 static void __exit igc_exit_module(void)
7448 {
7449 	pci_unregister_driver(&igc_driver);
7450 }
7451 
7452 module_exit(igc_exit_module);
7453 /* igc_main.c */
7454