1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Intel Corporation */
3
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/if_vlan.h>
7 #include <linux/tcp.h>
8 #include <linux/udp.h>
9 #include <linux/ip.h>
10 #include <linux/pm_runtime.h>
11 #include <net/pkt_sched.h>
12 #include <linux/bpf_trace.h>
13 #include <net/xdp_sock_drv.h>
14 #include <linux/pci.h>
15 #include <linux/mdio.h>
16
17 #include <net/ipv6.h>
18
19 #include "igc.h"
20 #include "igc_hw.h"
21 #include "igc_tsn.h"
22 #include "igc_xdp.h"
23
24 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
25
26 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
27
28 #define IGC_XDP_PASS 0
29 #define IGC_XDP_CONSUMED BIT(0)
30 #define IGC_XDP_TX BIT(1)
31 #define IGC_XDP_REDIRECT BIT(2)
32
33 static int debug = -1;
34
35 MODULE_DESCRIPTION(DRV_SUMMARY);
36 MODULE_LICENSE("GPL v2");
37 module_param(debug, int, 0);
38 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
39
40 char igc_driver_name[] = "igc";
41 static const char igc_driver_string[] = DRV_SUMMARY;
42 static const char igc_copyright[] =
43 "Copyright(c) 2018 Intel Corporation.";
44
45 static const struct igc_info *igc_info_tbl[] = {
46 [board_base] = &igc_base_info,
47 };
48
49 static const struct pci_device_id igc_pci_tbl[] = {
50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
64 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
65 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
66 /* required last entry */
67 {0, }
68 };
69
70 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
71
72 enum latency_range {
73 lowest_latency = 0,
74 low_latency = 1,
75 bulk_latency = 2,
76 latency_invalid = 255
77 };
78
void igc_reset(struct igc_adapter *adapter)
80 {
81 struct net_device *dev = adapter->netdev;
82 struct igc_hw *hw = &adapter->hw;
83 struct igc_fc_info *fc = &hw->fc;
84 u32 pba, hwm;
85
86 /* Repartition PBA for greater than 9k MTU if required */
87 pba = IGC_PBA_34K;
88
89 /* flow control settings
90 * The high water mark must be low enough to fit one full frame
91 * after transmitting the pause frame. As such we must have enough
92 * space to allow for us to complete our current transmit and then
93 * receive the frame that is in progress from the link partner.
94 * Set it to:
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
96 */
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
98
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
100 fc->low_water = fc->high_water - 16;
101 fc->pause_time = 0xFFFF;
102 fc->send_xon = 1;
103 fc->current_mode = fc->requested_mode;
104
105 hw->mac.ops.reset_hw(hw);
106
107 if (hw->mac.ops.init_hw(hw))
108 netdev_err(dev, "Error on hardware initialization\n");
109
110 /* Re-establish EEE setting */
111 igc_set_eee_i225(hw, true, true, true);
112
113 if (!netif_running(adapter->netdev))
114 igc_power_down_phy_copper_base(&adapter->hw);
115
116 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
117 wr32(IGC_VET, ETH_P_8021Q);
118
119 /* Re-enable PTP, where applicable. */
120 igc_ptp_reset(adapter);
121
122 /* Re-enable TSN offloading, where applicable. */
123 igc_tsn_reset(adapter);
124
125 igc_get_phy_info(hw);
126 }
127
128 /**
129 * igc_power_up_link - Power up the phy link
130 * @adapter: address of board private structure
131 */
static void igc_power_up_link(struct igc_adapter *adapter)
133 {
134 igc_reset_phy(&adapter->hw);
135
136 igc_power_up_phy_copper(&adapter->hw);
137
138 igc_setup_link(&adapter->hw);
139 }
140
141 /**
142 * igc_release_hw_control - release control of the h/w to f/w
143 * @adapter: address of board private structure
144 *
145 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
146 * For ASF and Pass Through versions of f/w this means that the
147 * driver is no longer loaded.
148 */
static void igc_release_hw_control(struct igc_adapter *adapter)
150 {
151 struct igc_hw *hw = &adapter->hw;
152 u32 ctrl_ext;
153
154 if (!pci_device_is_present(adapter->pdev))
155 return;
156
157 /* Let firmware take over control of h/w */
158 ctrl_ext = rd32(IGC_CTRL_EXT);
159 wr32(IGC_CTRL_EXT,
160 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
161 }
162
163 /**
164 * igc_get_hw_control - get control of the h/w from f/w
165 * @adapter: address of board private structure
166 *
167 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
168 * For ASF and Pass Through versions of f/w this means that
169 * the driver is loaded.
170 */
static void igc_get_hw_control(struct igc_adapter *adapter)
172 {
173 struct igc_hw *hw = &adapter->hw;
174 u32 ctrl_ext;
175
176 /* Let firmware know the driver has taken over */
177 ctrl_ext = rd32(IGC_CTRL_EXT);
178 wr32(IGC_CTRL_EXT,
179 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
180 }
181
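/**
 * igc_unmap_tx_buffer - Unmap a Tx buffer and clear its recorded length
 * @dev: device used for the DMA mapping
 * @buf: Tx buffer to unmap
 */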
static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
183 {
184 dma_unmap_single(dev, dma_unmap_addr(buf, dma),
185 dma_unmap_len(buf, len), DMA_TO_DEVICE);
186
187 dma_unmap_len_set(buf, len, 0);
188 }
189
190 /**
191 * igc_clean_tx_ring - Free Tx Buffers
192 * @tx_ring: ring to be cleaned
193 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
195 {
196 u16 i = tx_ring->next_to_clean;
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
198 u32 xsk_frames = 0;
199
200 while (i != tx_ring->next_to_use) {
201 union igc_adv_tx_desc *eop_desc, *tx_desc;
202
203 switch (tx_buffer->type) {
204 case IGC_TX_BUFFER_TYPE_XSK:
205 xsk_frames++;
206 break;
207 case IGC_TX_BUFFER_TYPE_XDP:
208 xdp_return_frame(tx_buffer->xdpf);
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
210 break;
211 case IGC_TX_BUFFER_TYPE_SKB:
212 dev_kfree_skb_any(tx_buffer->skb);
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
214 break;
215 default:
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
217 break;
218 }
219
220 /* check for eop_desc to determine the end of the packet */
221 eop_desc = tx_buffer->next_to_watch;
222 tx_desc = IGC_TX_DESC(tx_ring, i);
223
224 /* unmap remaining buffers */
225 while (tx_desc != eop_desc) {
226 tx_buffer++;
227 tx_desc++;
228 i++;
229 if (unlikely(i == tx_ring->count)) {
230 i = 0;
231 tx_buffer = tx_ring->tx_buffer_info;
232 tx_desc = IGC_TX_DESC(tx_ring, 0);
233 }
234
235 /* unmap any remaining paged data */
236 if (dma_unmap_len(tx_buffer, len))
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
238 }
239
240 tx_buffer->next_to_watch = NULL;
241
242 /* move us one more past the eop_desc for start of next pkt */
243 tx_buffer++;
244 i++;
245 if (unlikely(i == tx_ring->count)) {
246 i = 0;
247 tx_buffer = tx_ring->tx_buffer_info;
248 }
249 }
250
251 if (tx_ring->xsk_pool && xsk_frames)
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
253
254 /* reset BQL for queue */
255 netdev_tx_reset_queue(txring_txq(tx_ring));
256
257 /* Zero out the buffer ring */
258 memset(tx_ring->tx_buffer_info, 0,
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
260
261 /* Zero out the descriptor ring */
262 memset(tx_ring->desc, 0, tx_ring->size);
263
264 /* reset next_to_use and next_to_clean */
265 tx_ring->next_to_use = 0;
266 tx_ring->next_to_clean = 0;
267
268 /* Clear any lingering XSK TX timestamp requests */
269 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
270 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
271
272 igc_ptp_clear_xsk_tx_tstamp_queue(adapter, tx_ring->queue_index);
273 }
274 }
275
276 /**
277 * igc_free_tx_resources - Free Tx Resources per Queue
278 * @tx_ring: Tx descriptor ring for a specific queue
279 *
280 * Free all transmit software resources
281 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
283 {
284 igc_disable_tx_ring(tx_ring);
285
286 vfree(tx_ring->tx_buffer_info);
287 tx_ring->tx_buffer_info = NULL;
288
289 /* if not set, then don't free */
290 if (!tx_ring->desc)
291 return;
292
293 dma_free_coherent(tx_ring->dev, tx_ring->size,
294 tx_ring->desc, tx_ring->dma);
295
296 tx_ring->desc = NULL;
297 }
298
299 /**
300 * igc_free_all_tx_resources - Free Tx Resources for All Queues
301 * @adapter: board private structure
302 *
303 * Free all transmit software resources
304 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
306 {
307 int i;
308
309 for (i = 0; i < adapter->num_tx_queues; i++)
310 igc_free_tx_resources(adapter->tx_ring[i]);
311 }
312
313 /**
314 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
315 * @adapter: board private structure
316 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
318 {
319 int i;
320
321 for (i = 0; i < adapter->num_tx_queues; i++)
322 if (adapter->tx_ring[i])
323 igc_clean_tx_ring(adapter->tx_ring[i]);
324 }
325
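/**
 * igc_disable_tx_ring_hw - Disable a transmit ring in hardware
 * @ring: Tx ring to disable
 *
 * Clears the queue enable bit and requests a software flush via TXDCTL.
 */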
static void igc_disable_tx_ring_hw(struct igc_ring *ring)
327 {
328 struct igc_hw *hw = &ring->q_vector->adapter->hw;
329 u8 idx = ring->reg_idx;
330 u32 txdctl;
331
332 txdctl = rd32(IGC_TXDCTL(idx));
333 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
334 txdctl |= IGC_TXDCTL_SWFLUSH;
335 wr32(IGC_TXDCTL(idx), txdctl);
336 }
337
338 /**
339 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
340 * @adapter: board private structure
341 */
static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
343 {
344 int i;
345
346 for (i = 0; i < adapter->num_tx_queues; i++) {
347 struct igc_ring *tx_ring = adapter->tx_ring[i];
348
349 igc_disable_tx_ring_hw(tx_ring);
350 }
351 }
352
353 /**
354 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
355 * @tx_ring: tx descriptor ring (for a specific queue) to setup
356 *
357 * Return 0 on success, negative on failure
358 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
360 {
361 struct net_device *ndev = tx_ring->netdev;
362 struct device *dev = tx_ring->dev;
363 int size = 0;
364
365 size = sizeof(struct igc_tx_buffer) * tx_ring->count;
366 tx_ring->tx_buffer_info = vzalloc(size);
367 if (!tx_ring->tx_buffer_info)
368 goto err;
369
370 /* round up to nearest 4K */
371 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
372 tx_ring->size = ALIGN(tx_ring->size, 4096);
373
374 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
375 &tx_ring->dma, GFP_KERNEL);
376
377 if (!tx_ring->desc)
378 goto err;
379
380 tx_ring->next_to_use = 0;
381 tx_ring->next_to_clean = 0;
382
383 return 0;
384
385 err:
386 vfree(tx_ring->tx_buffer_info);
387 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
388 return -ENOMEM;
389 }
390
391 /**
392 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
393 * @adapter: board private structure
394 *
395 * Return 0 on success, negative on failure
396 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
398 {
399 struct net_device *dev = adapter->netdev;
400 int i, err = 0;
401
402 for (i = 0; i < adapter->num_tx_queues; i++) {
403 err = igc_setup_tx_resources(adapter->tx_ring[i]);
404 if (err) {
405 netdev_err(dev, "Error on Tx queue %u setup\n", i);
406 for (i--; i >= 0; i--)
407 igc_free_tx_resources(adapter->tx_ring[i]);
408 break;
409 }
410 }
411
412 return err;
413 }
414
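/**
 * igc_clean_rx_ring_page_shared - Free page-shared Rx buffers for a ring
 * @rx_ring: Rx ring whose buffers are released
 *
 * Frees any in-progress skb, then syncs, unmaps and drains every allocated
 * page between next_to_clean and next_to_alloc.
 */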
static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
416 {
417 u16 i = rx_ring->next_to_clean;
418
419 dev_kfree_skb(rx_ring->skb);
420 rx_ring->skb = NULL;
421
422 /* Free all the Rx ring sk_buffs */
423 while (i != rx_ring->next_to_alloc) {
424 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
425
426 /* Invalidate cache lines that may have been written to by
427 * device so that we avoid corrupting memory.
428 */
429 dma_sync_single_range_for_cpu(rx_ring->dev,
430 buffer_info->dma,
431 buffer_info->page_offset,
432 igc_rx_bufsz(rx_ring),
433 DMA_FROM_DEVICE);
434
435 /* free resources associated with mapping */
436 dma_unmap_page_attrs(rx_ring->dev,
437 buffer_info->dma,
438 igc_rx_pg_size(rx_ring),
439 DMA_FROM_DEVICE,
440 IGC_RX_DMA_ATTR);
441 __page_frag_cache_drain(buffer_info->page,
442 buffer_info->pagecnt_bias);
443
444 i++;
445 if (i == rx_ring->count)
446 i = 0;
447 }
448 }
449
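/**
 * igc_clean_rx_ring_xsk_pool - Free AF_XDP zero-copy Rx buffers for a ring
 * @ring: Rx ring whose XSK buffers are released
 */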
static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
451 {
452 struct igc_rx_buffer *bi;
453 u16 i;
454
455 for (i = 0; i < ring->count; i++) {
456 bi = &ring->rx_buffer_info[i];
457 if (!bi->xdp)
458 continue;
459
460 xsk_buff_free(bi->xdp);
461 bi->xdp = NULL;
462 }
463 }
464
465 /**
466 * igc_clean_rx_ring - Free Rx Buffers per Queue
467 * @ring: ring to free buffers from
468 */
static void igc_clean_rx_ring(struct igc_ring *ring)
470 {
471 if (ring->xsk_pool)
472 igc_clean_rx_ring_xsk_pool(ring);
473 else
474 igc_clean_rx_ring_page_shared(ring);
475
476 clear_ring_uses_large_buffer(ring);
477
478 ring->next_to_alloc = 0;
479 ring->next_to_clean = 0;
480 ring->next_to_use = 0;
481 }
482
483 /**
484 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
485 * @adapter: board private structure
486 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
488 {
489 int i;
490
491 for (i = 0; i < adapter->num_rx_queues; i++)
492 if (adapter->rx_ring[i])
493 igc_clean_rx_ring(adapter->rx_ring[i]);
494 }
495
496 /**
497 * igc_free_rx_resources - Free Rx Resources
498 * @rx_ring: ring to clean the resources from
499 *
500 * Free all receive software resources
501 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
503 {
504 igc_clean_rx_ring(rx_ring);
505
506 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
507
508 vfree(rx_ring->rx_buffer_info);
509 rx_ring->rx_buffer_info = NULL;
510
511 /* if not set, then don't free */
512 if (!rx_ring->desc)
513 return;
514
515 dma_free_coherent(rx_ring->dev, rx_ring->size,
516 rx_ring->desc, rx_ring->dma);
517
518 rx_ring->desc = NULL;
519 }
520
521 /**
522 * igc_free_all_rx_resources - Free Rx Resources for All Queues
523 * @adapter: board private structure
524 *
525 * Free all receive software resources
526 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
528 {
529 int i;
530
531 for (i = 0; i < adapter->num_rx_queues; i++)
532 igc_free_rx_resources(adapter->rx_ring[i]);
533 }
534
535 /**
536 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
537 * @rx_ring: rx descriptor ring (for a specific queue) to setup
538 *
539 * Returns 0 on success, negative on failure
540 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
542 {
543 struct net_device *ndev = rx_ring->netdev;
544 struct device *dev = rx_ring->dev;
545 u8 index = rx_ring->queue_index;
546 int size, desc_len, res;
547
548 /* XDP RX-queue info */
549 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
550 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
551 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
552 rx_ring->q_vector->napi.napi_id);
553 if (res < 0) {
554 netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
555 index);
556 return res;
557 }
558
559 size = sizeof(struct igc_rx_buffer) * rx_ring->count;
560 rx_ring->rx_buffer_info = vzalloc(size);
561 if (!rx_ring->rx_buffer_info)
562 goto err;
563
564 desc_len = sizeof(union igc_adv_rx_desc);
565
566 /* Round up to nearest 4K */
567 rx_ring->size = rx_ring->count * desc_len;
568 rx_ring->size = ALIGN(rx_ring->size, 4096);
569
570 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
571 &rx_ring->dma, GFP_KERNEL);
572
573 if (!rx_ring->desc)
574 goto err;
575
576 rx_ring->next_to_alloc = 0;
577 rx_ring->next_to_clean = 0;
578 rx_ring->next_to_use = 0;
579
580 return 0;
581
582 err:
583 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
584 vfree(rx_ring->rx_buffer_info);
585 rx_ring->rx_buffer_info = NULL;
586 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
587 return -ENOMEM;
588 }
589
590 /**
591 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 * (Descriptors) for all queues
593 * @adapter: board private structure
594 *
595 * Return 0 on success, negative on failure
596 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
598 {
599 struct net_device *dev = adapter->netdev;
600 int i, err = 0;
601
602 for (i = 0; i < adapter->num_rx_queues; i++) {
603 err = igc_setup_rx_resources(adapter->rx_ring[i]);
604 if (err) {
605 netdev_err(dev, "Error on Rx queue %u setup\n", i);
606 for (i--; i >= 0; i--)
607 igc_free_rx_resources(adapter->rx_ring[i]);
608 break;
609 }
610 }
611
612 return err;
613 }
614
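/**
 * igc_get_xsk_pool - Look up the AF_XDP buffer pool for a ring
 * @adapter: board private structure
 * @ring: ring to look up the pool for
 *
 * Returns the XSK buffer pool registered for the ring's queue when XDP is
 * enabled and the ring uses AF_XDP zero-copy, otherwise NULL.
 */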
static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
616 struct igc_ring *ring)
617 {
618 if (!igc_xdp_is_enabled(adapter) ||
619 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
620 return NULL;
621
622 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
623 }
624
625 /**
626 * igc_configure_rx_ring - Configure a receive ring after Reset
627 * @adapter: board private structure
628 * @ring: receive ring to be configured
629 *
630 * Configure the Rx unit of the MAC after a reset.
631 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
633 struct igc_ring *ring)
634 {
635 struct igc_hw *hw = &adapter->hw;
636 union igc_adv_rx_desc *rx_desc;
637 int reg_idx = ring->reg_idx;
638 u32 srrctl = 0, rxdctl = 0;
639 u64 rdba = ring->dma;
640 u32 buf_size;
641
642 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
643 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
644 if (ring->xsk_pool) {
645 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
646 MEM_TYPE_XSK_BUFF_POOL,
647 NULL));
648 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
649 } else {
650 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
651 MEM_TYPE_PAGE_SHARED,
652 NULL));
653 }
654
655 if (igc_xdp_is_enabled(adapter))
656 set_ring_uses_large_buffer(ring);
657
658 /* disable the queue */
659 wr32(IGC_RXDCTL(reg_idx), 0);
660
661 /* Set DMA base address registers */
662 wr32(IGC_RDBAL(reg_idx),
663 rdba & 0x00000000ffffffffULL);
664 wr32(IGC_RDBAH(reg_idx), rdba >> 32);
665 wr32(IGC_RDLEN(reg_idx),
666 ring->count * sizeof(union igc_adv_rx_desc));
667
668 /* initialize head and tail */
669 ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
670 wr32(IGC_RDH(reg_idx), 0);
671 writel(0, ring->tail);
672
/* reset next-to-use/clean to place SW in sync with hardware */
674 ring->next_to_clean = 0;
675 ring->next_to_use = 0;
676
677 if (ring->xsk_pool)
678 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
679 else if (ring_uses_large_buffer(ring))
680 buf_size = IGC_RXBUFFER_3072;
681 else
682 buf_size = IGC_RXBUFFER_2048;
683
684 srrctl = rd32(IGC_SRRCTL(reg_idx));
685 srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
686 IGC_SRRCTL_DESCTYPE_MASK);
687 srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
688 srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
689 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
690
691 wr32(IGC_SRRCTL(reg_idx), srrctl);
692
693 rxdctl |= IGC_RXDCTL_PTHRESH;
694 rxdctl |= IGC_RXDCTL_HTHRESH << 8;
695 rxdctl |= IGC_RXDCTL_WTHRESH << 16;
696
697 /* initialize rx_buffer_info */
698 memset(ring->rx_buffer_info, 0,
699 sizeof(struct igc_rx_buffer) * ring->count);
700
701 /* initialize Rx descriptor 0 */
702 rx_desc = IGC_RX_DESC(ring, 0);
703 rx_desc->wb.upper.length = 0;
704
705 /* enable receive descriptor fetching */
706 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
707
708 wr32(IGC_RXDCTL(reg_idx), rxdctl);
709 }
710
711 /**
712 * igc_configure_rx - Configure receive Unit after Reset
713 * @adapter: board private structure
714 *
715 * Configure the Rx unit of the MAC after a reset.
716 */
static void igc_configure_rx(struct igc_adapter *adapter)
718 {
719 int i;
720
721 /* Setup the HW Rx Head and Tail Descriptor Pointers and
722 * the Base and Length of the Rx Descriptor Ring
723 */
724 for (i = 0; i < adapter->num_rx_queues; i++)
725 igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
726 }
727
728 /**
729 * igc_configure_tx_ring - Configure transmit ring after Reset
730 * @adapter: board private structure
731 * @ring: tx ring to configure
732 *
733 * Configure a transmit ring after a reset.
734 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
736 struct igc_ring *ring)
737 {
738 struct igc_hw *hw = &adapter->hw;
739 int reg_idx = ring->reg_idx;
740 u64 tdba = ring->dma;
741 u32 txdctl = 0;
742
743 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
744
745 /* disable the queue */
746 wr32(IGC_TXDCTL(reg_idx), 0);
747 wrfl();
748
749 wr32(IGC_TDLEN(reg_idx),
750 ring->count * sizeof(union igc_adv_tx_desc));
751 wr32(IGC_TDBAL(reg_idx),
752 tdba & 0x00000000ffffffffULL);
753 wr32(IGC_TDBAH(reg_idx), tdba >> 32);
754
755 ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
756 wr32(IGC_TDH(reg_idx), 0);
757 writel(0, ring->tail);
758
759 txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
760 IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
761
762 wr32(IGC_TXDCTL(reg_idx), txdctl);
763 }
764
765 /**
766 * igc_configure_tx - Configure transmit Unit after Reset
767 * @adapter: board private structure
768 *
769 * Configure the Tx unit of the MAC after a reset.
770 */
static void igc_configure_tx(struct igc_adapter *adapter)
772 {
773 int i;
774
775 for (i = 0; i < adapter->num_tx_queues; i++)
776 igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
777 }
778
779 /**
780 * igc_setup_mrqc - configure the multiple receive queue control registers
781 * @adapter: Board private structure
782 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
784 {
785 struct igc_hw *hw = &adapter->hw;
786 u32 j, num_rx_queues;
787 u32 mrqc, rxcsum;
788 u32 rss_key[10];
789
790 netdev_rss_key_fill(rss_key, sizeof(rss_key));
791 for (j = 0; j < 10; j++)
792 wr32(IGC_RSSRK(j), rss_key[j]);
793
794 num_rx_queues = adapter->rss_queues;
795
796 if (adapter->rss_indir_tbl_init != num_rx_queues) {
797 for (j = 0; j < IGC_RETA_SIZE; j++)
798 adapter->rss_indir_tbl[j] =
799 (j * num_rx_queues) / IGC_RETA_SIZE;
800 adapter->rss_indir_tbl_init = num_rx_queues;
801 }
802 igc_write_rss_indir_tbl(adapter);
803
804 /* Disable raw packet checksumming so that RSS hash is placed in
805 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
806 * offloads as they are enabled by default
807 */
808 rxcsum = rd32(IGC_RXCSUM);
809 rxcsum |= IGC_RXCSUM_PCSD;
810
811 /* Enable Receive Checksum Offload for SCTP */
812 rxcsum |= IGC_RXCSUM_CRCOFL;
813
814 /* Don't need to set TUOFL or IPOFL, they default to 1 */
815 wr32(IGC_RXCSUM, rxcsum);
816
817 /* Generate RSS hash based on packet types, TCP/UDP
818 * port numbers and/or IPv4/v6 src and dst addresses
819 */
820 mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
821 IGC_MRQC_RSS_FIELD_IPV4_TCP |
822 IGC_MRQC_RSS_FIELD_IPV6 |
823 IGC_MRQC_RSS_FIELD_IPV6_TCP |
824 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
825
826 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
827 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
828 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
829 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
830
831 mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
832
833 wr32(IGC_MRQC, mrqc);
834 }
835
836 /**
837 * igc_setup_rctl - configure the receive control registers
838 * @adapter: Board private structure
839 */
static void igc_setup_rctl(struct igc_adapter *adapter)
841 {
842 struct igc_hw *hw = &adapter->hw;
843 u32 rctl;
844
845 rctl = rd32(IGC_RCTL);
846
847 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
848 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
849
850 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
851 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
852
853 /* enable stripping of CRC. Newer features require
854 * that the HW strips the CRC.
855 */
856 rctl |= IGC_RCTL_SECRC;
857
858 /* disable store bad packets and clear size bits. */
859 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
860
861 /* enable LPE to allow for reception of jumbo frames */
862 rctl |= IGC_RCTL_LPE;
863
864 /* disable queue 0 to prevent tail write w/o re-config */
865 wr32(IGC_RXDCTL(0), 0);
866
867 /* This is useful for sniffing bad packets. */
868 if (adapter->netdev->features & NETIF_F_RXALL) {
869 /* UPE and MPE will be handled by normal PROMISC logic
870 * in set_rx_mode
871 */
872 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
873 IGC_RCTL_BAM | /* RX All Bcast Pkts */
874 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
875
876 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
877 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
878 }
879
880 wr32(IGC_RCTL, rctl);
881 }
882
883 /**
884 * igc_setup_tctl - configure the transmit control registers
885 * @adapter: Board private structure
886 */
static void igc_setup_tctl(struct igc_adapter *adapter)
888 {
889 struct igc_hw *hw = &adapter->hw;
890 u32 tctl;
891
/* disable queue 0 which could be enabled by default */
893 wr32(IGC_TXDCTL(0), 0);
894
895 /* Program the Transmit Control Register */
896 tctl = rd32(IGC_TCTL);
897 tctl &= ~IGC_TCTL_CT;
898 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
899 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
900
901 /* Enable transmits */
902 tctl |= IGC_TCTL_EN;
903
904 wr32(IGC_TCTL, tctl);
905 }
906
907 /**
908 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
909 * @adapter: Pointer to adapter where the filter should be set
910 * @index: Filter index
911 * @type: MAC address filter type (source or destination)
912 * @addr: MAC address
913 * @queue: If non-negative, queue assignment feature is enabled and frames
914 * matching the filter are enqueued onto 'queue'. Otherwise, queue
915 * assignment is disabled.
916 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
918 enum igc_mac_filter_type type,
919 const u8 *addr, int queue)
920 {
921 struct net_device *dev = adapter->netdev;
922 struct igc_hw *hw = &adapter->hw;
923 u32 ral, rah;
924
925 if (WARN_ON(index >= hw->mac.rar_entry_count))
926 return;
927
928 ral = le32_to_cpup((__le32 *)(addr));
929 rah = le16_to_cpup((__le16 *)(addr + 4));
930
931 if (type == IGC_MAC_FILTER_TYPE_SRC) {
932 rah &= ~IGC_RAH_ASEL_MASK;
933 rah |= IGC_RAH_ASEL_SRC_ADDR;
934 }
935
936 if (queue >= 0) {
937 rah &= ~IGC_RAH_QSEL_MASK;
938 rah |= (queue << IGC_RAH_QSEL_SHIFT);
939 rah |= IGC_RAH_QSEL_ENABLE;
940 }
941
942 rah |= IGC_RAH_AV;
943
944 wr32(IGC_RAL(index), ral);
945 wr32(IGC_RAH(index), rah);
946
947 netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
948 }
949
950 /**
951 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
952 * @adapter: Pointer to adapter where the filter should be cleared
953 * @index: Filter index
954 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
956 {
957 struct net_device *dev = adapter->netdev;
958 struct igc_hw *hw = &adapter->hw;
959
960 if (WARN_ON(index >= hw->mac.rar_entry_count))
961 return;
962
963 wr32(IGC_RAL(index), 0);
964 wr32(IGC_RAH(index), 0);
965
966 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
967 }
968
969 /* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
971 {
972 struct net_device *dev = adapter->netdev;
973 u8 *addr = adapter->hw.mac.addr;
974
975 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
976
977 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
978 }
979
980 /**
981 * igc_set_mac - Change the Ethernet Address of the NIC
982 * @netdev: network interface device structure
983 * @p: pointer to an address structure
984 *
985 * Returns 0 on success, negative on failure
986 */
static int igc_set_mac(struct net_device *netdev, void *p)
988 {
989 struct igc_adapter *adapter = netdev_priv(netdev);
990 struct igc_hw *hw = &adapter->hw;
991 struct sockaddr *addr = p;
992
993 if (!is_valid_ether_addr(addr->sa_data))
994 return -EADDRNOTAVAIL;
995
996 eth_hw_addr_set(netdev, addr->sa_data);
997 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
998
999 /* set the correct pool for the new PF MAC address in entry 0 */
1000 igc_set_default_mac_filter(adapter);
1001
1002 return 0;
1003 }
1004
1005 /**
1006 * igc_write_mc_addr_list - write multicast addresses to MTA
1007 * @netdev: network interface device structure
1008 *
1009 * Writes multicast address list to the MTA hash table.
1010 * Returns: -ENOMEM on failure
1011 * 0 on no addresses written
1012 * X on writing X addresses to MTA
1013 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
1015 {
1016 struct igc_adapter *adapter = netdev_priv(netdev);
1017 struct igc_hw *hw = &adapter->hw;
1018 struct netdev_hw_addr *ha;
1019 u8 *mta_list;
1020 int i;
1021
1022 if (netdev_mc_empty(netdev)) {
1023 /* nothing to program, so clear mc list */
1024 igc_update_mc_addr_list(hw, NULL, 0);
1025 return 0;
1026 }
1027
1028 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
1029 if (!mta_list)
1030 return -ENOMEM;
1031
1032 /* The shared function expects a packed array of only addresses. */
1033 i = 0;
1034 netdev_for_each_mc_addr(ha, netdev)
1035 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1036
1037 igc_update_mc_addr_list(hw, mta_list, i);
1038 kfree(mta_list);
1039
1040 return netdev_mc_count(netdev);
1041 }
1042
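/**
 * igc_tx_launchtime - Compute the launch time field for a packet
 * @ring: Tx ring the packet will be placed on
 * @txtime: requested transmit time
 * @first_flag: set when the context descriptor must carry the TSN FIRST flag
 * @insert_empty: set when an empty frame must precede the packet
 *
 * Converts the requested transmit time into an offset from the estimated
 * base time of the current Qbv cycle; packets that fall beyond the current
 * cycle also request the FIRST flag and, if needed, an empty frame.
 */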
static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
1044 bool *first_flag, bool *insert_empty)
1045 {
1046 struct igc_adapter *adapter = netdev_priv(ring->netdev);
1047 ktime_t cycle_time = adapter->cycle_time;
1048 ktime_t base_time = adapter->base_time;
1049 ktime_t now = ktime_get_clocktai();
1050 ktime_t baset_est, end_of_cycle;
1051 s32 launchtime;
1052 s64 n;
1053
1054 n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
1055
1056 baset_est = ktime_add_ns(base_time, cycle_time * (n));
1057 end_of_cycle = ktime_add_ns(baset_est, cycle_time);
1058
1059 if (ktime_compare(txtime, end_of_cycle) >= 0) {
1060 if (baset_est != ring->last_ff_cycle) {
1061 *first_flag = true;
1062 ring->last_ff_cycle = baset_est;
1063
1064 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
1065 *insert_empty = true;
1066 }
1067 }
1068
/* Introduce a window at the end of the cycle in which packets may not
 * honor their launchtime. A 5 us window is chosen to account for the
 * time software needs to update the tail pointer and for the packet to
 * be DMA'ed into the packet buffer.
 */
1074 if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
1075 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
1076 txtime);
1077
1078 ring->last_tx_cycle = end_of_cycle;
1079
1080 launchtime = ktime_sub_ns(txtime, baset_est);
1081 if (launchtime > 0)
1082 div_s64_rem(launchtime, cycle_time, &launchtime);
1083 else
1084 launchtime = 0;
1085
1086 return cpu_to_le32(launchtime);
1087 }
1088
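/**
 * igc_init_empty_frame - Map an empty frame and fill in its Tx buffer info
 * @ring: Tx ring the empty frame will be placed on
 * @buffer: Tx buffer to initialize
 * @skb: empty skb to map
 *
 * Returns 0 on success or -ENOMEM if the DMA mapping fails.
 */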
static int igc_init_empty_frame(struct igc_ring *ring,
1090 struct igc_tx_buffer *buffer,
1091 struct sk_buff *skb)
1092 {
1093 unsigned int size;
1094 dma_addr_t dma;
1095
1096 size = skb_headlen(skb);
1097
1098 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
1099 if (dma_mapping_error(ring->dev, dma)) {
1100 net_err_ratelimited("%s: DMA mapping error for empty frame\n",
1101 netdev_name(ring->netdev));
1102 return -ENOMEM;
1103 }
1104
1105 buffer->type = IGC_TX_BUFFER_TYPE_SKB;
1106 buffer->skb = skb;
1107 buffer->protocol = 0;
1108 buffer->bytecount = skb->len;
1109 buffer->gso_segs = 1;
1110 buffer->time_stamp = jiffies;
1111 dma_unmap_len_set(buffer, len, skb->len);
1112 dma_unmap_addr_set(buffer, dma, dma);
1113
1114 return 0;
1115 }
1116
static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
1118 struct sk_buff *skb,
1119 struct igc_tx_buffer *first)
1120 {
1121 union igc_adv_tx_desc *desc;
1122 u32 cmd_type, olinfo_status;
1123
1124 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
1125 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
1126 first->bytecount;
1127 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
1128
1129 desc = IGC_TX_DESC(ring, ring->next_to_use);
1130 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1131 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1132 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
1133
1134 netdev_tx_sent_queue(txring_txq(ring), skb->len);
1135
1136 first->next_to_watch = desc;
1137
1138 ring->next_to_use++;
1139 if (ring->next_to_use == ring->count)
1140 ring->next_to_use = 0;
1141 }
1142
1143 #define IGC_EMPTY_FRAME_SIZE 60
1144
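/**
 * igc_tx_ctxtdesc - Write an advanced Tx context descriptor
 * @tx_ring: Tx ring to place the descriptor on
 * @launch_time: launch time value, 0 when launch time is unused
 * @first_flag: when true, set the TSN FIRST flag in the context
 * @vlan_macip_lens: VLAN tag and MAC/IP header length fields
 * @type_tucmd: descriptor type and TUCMD bits
 * @mss_l4len_idx: MSS, L4 length and context index fields
 */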
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1146 __le32 launch_time, bool first_flag,
1147 u32 vlan_macip_lens, u32 type_tucmd,
1148 u32 mss_l4len_idx)
1149 {
1150 struct igc_adv_tx_context_desc *context_desc;
1151 u16 i = tx_ring->next_to_use;
1152
1153 context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1154
1155 i++;
1156 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1157
1158 /* set bits to identify this as an advanced context descriptor */
1159 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1160
1161 /* For i225, context index must be unique per ring. */
1162 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1163 mss_l4len_idx |= tx_ring->reg_idx << 4;
1164
1165 if (first_flag)
1166 mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
1167
1168 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1169 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1170 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1171 context_desc->launch_time = launch_time;
1172 }
1173
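/**
 * igc_tx_csum - Configure checksum offload for a packet
 * @tx_ring: Tx ring the packet is placed on
 * @first: first Tx buffer of the packet
 * @launch_time: launch time value passed to the context descriptor
 * @first_flag: first-flag value passed to the context descriptor
 *
 * Writes the context descriptor needed for L4 checksum insertion and falls
 * back to software checksumming for unrecognized checksum offsets.
 */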
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
1175 __le32 launch_time, bool first_flag)
1176 {
1177 struct sk_buff *skb = first->skb;
1178 u32 vlan_macip_lens = 0;
1179 u32 type_tucmd = 0;
1180
1181 if (skb->ip_summed != CHECKSUM_PARTIAL) {
1182 csum_failed:
1183 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1184 !tx_ring->launchtime_enable)
1185 return;
1186 goto no_csum;
1187 }
1188
1189 switch (skb->csum_offset) {
1190 case offsetof(struct tcphdr, check):
1191 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1192 fallthrough;
1193 case offsetof(struct udphdr, check):
1194 break;
1195 case offsetof(struct sctphdr, checksum):
1196 /* validate that this is actually an SCTP request */
1197 if (skb_csum_is_sctp(skb)) {
1198 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1199 break;
1200 }
1201 fallthrough;
1202 default:
1203 skb_checksum_help(skb);
1204 goto csum_failed;
1205 }
1206
1207 /* update TX checksum flag */
1208 first->tx_flags |= IGC_TX_FLAGS_CSUM;
1209 vlan_macip_lens = skb_checksum_start_offset(skb) -
1210 skb_network_offset(skb);
1211 no_csum:
1212 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1213 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1214
1215 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1216 vlan_macip_lens, type_tucmd, 0);
1217 }
1218
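/**
 * __igc_maybe_stop_tx - Stop a Tx queue that is out of descriptors
 * @tx_ring: Tx ring to stop
 * @size: number of descriptors needed
 *
 * Stops the subqueue, then re-checks for free descriptors and wakes the
 * queue again if another CPU has made room in the meantime.
 *
 * Returns 0 if transmission can continue, -EBUSY if the queue stays stopped.
 */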
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1220 {
1221 struct net_device *netdev = tx_ring->netdev;
1222
1223 netif_stop_subqueue(netdev, tx_ring->queue_index);
1224
/* Memory barrier: make the queue stop visible before re-checking for
 * free descriptors below.
 */
1226 smp_mb();
1227
/* We need to check again in case another CPU has just
 * made room available.
 */
1231 if (igc_desc_unused(tx_ring) < size)
1232 return -EBUSY;
1233
1234 /* A reprieve! */
1235 netif_wake_subqueue(netdev, tx_ring->queue_index);
1236
1237 u64_stats_update_begin(&tx_ring->tx_syncp2);
1238 tx_ring->tx_stats.restart_queue2++;
1239 u64_stats_update_end(&tx_ring->tx_syncp2);
1240
1241 return 0;
1242 }
1243
static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1245 {
1246 if (igc_desc_unused(tx_ring) >= size)
1247 return 0;
1248 return __igc_maybe_stop_tx(tx_ring, size);
1249 }
1250
1251 #define IGC_SET_FLAG(_input, _flag, _result) \
1252 (((_flag) <= (_result)) ? \
1253 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
1254 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
1255
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1257 {
1258 /* set type for advanced descriptor with frame checksum insertion */
1259 u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1260 IGC_ADVTXD_DCMD_DEXT |
1261 IGC_ADVTXD_DCMD_IFCS;
1262
1263 /* set HW vlan bit if vlan is present */
1264 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1265 IGC_ADVTXD_DCMD_VLE);
1266
1267 /* set segmentation bits for TSO */
1268 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1269 (IGC_ADVTXD_DCMD_TSE));
1270
1271 /* set timestamp bit if present, will select the register set
1272 * based on the _TSTAMP(_X) bit.
1273 */
1274 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1275 (IGC_ADVTXD_MAC_TSTAMP));
1276
1277 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
1278 (IGC_ADVTXD_TSTAMP_REG_1));
1279
1280 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
1281 (IGC_ADVTXD_TSTAMP_REG_2));
1282
1283 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
1284 (IGC_ADVTXD_TSTAMP_REG_3));
1285
1286 /* insert frame checksum */
1287 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1288
1289 return cmd_type;
1290 }
1291
static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1293 union igc_adv_tx_desc *tx_desc,
1294 u32 tx_flags, unsigned int paylen)
1295 {
1296 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1297
1298 /* insert L4 checksum */
1299 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
1300 (IGC_TXD_POPTS_TXSM << 8));
1301
1302 /* insert IPv4 checksum */
1303 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
1304 (IGC_TXD_POPTS_IXSM << 8));
1305
1306 /* Use the second timer (free running, in general) for the timestamp */
1307 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
1308 IGC_TXD_PTP2_TIMER_1);
1309
1310 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1311 }
1312
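/**
 * igc_tx_map - Map a packet's fragments and write its Tx data descriptors
 * @tx_ring: Tx ring the packet is placed on
 * @first: first Tx buffer of the packet
 * @hdr_len: header length used to compute the payload length
 *
 * Returns 0 on success or -1 on DMA mapping failure, in which case all
 * mappings made so far are undone and the skb is freed.
 */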
static int igc_tx_map(struct igc_ring *tx_ring,
1314 struct igc_tx_buffer *first,
1315 const u8 hdr_len)
1316 {
1317 struct sk_buff *skb = first->skb;
1318 struct igc_tx_buffer *tx_buffer;
1319 union igc_adv_tx_desc *tx_desc;
1320 u32 tx_flags = first->tx_flags;
1321 skb_frag_t *frag;
1322 u16 i = tx_ring->next_to_use;
1323 unsigned int data_len, size;
1324 dma_addr_t dma;
1325 u32 cmd_type;
1326
1327 cmd_type = igc_tx_cmd_type(skb, tx_flags);
1328 tx_desc = IGC_TX_DESC(tx_ring, i);
1329
1330 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1331
1332 size = skb_headlen(skb);
1333 data_len = skb->data_len;
1334
1335 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1336
1337 tx_buffer = first;
1338
1339 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1340 if (dma_mapping_error(tx_ring->dev, dma))
1341 goto dma_error;
1342
1343 /* record length, and DMA address */
1344 dma_unmap_len_set(tx_buffer, len, size);
1345 dma_unmap_addr_set(tx_buffer, dma, dma);
1346
1347 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1348
1349 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1350 tx_desc->read.cmd_type_len =
1351 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1352
1353 i++;
1354 tx_desc++;
1355 if (i == tx_ring->count) {
1356 tx_desc = IGC_TX_DESC(tx_ring, 0);
1357 i = 0;
1358 }
1359 tx_desc->read.olinfo_status = 0;
1360
1361 dma += IGC_MAX_DATA_PER_TXD;
1362 size -= IGC_MAX_DATA_PER_TXD;
1363
1364 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1365 }
1366
1367 if (likely(!data_len))
1368 break;
1369
1370 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1371
1372 i++;
1373 tx_desc++;
1374 if (i == tx_ring->count) {
1375 tx_desc = IGC_TX_DESC(tx_ring, 0);
1376 i = 0;
1377 }
1378 tx_desc->read.olinfo_status = 0;
1379
1380 size = skb_frag_size(frag);
1381 data_len -= size;
1382
1383 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1384 size, DMA_TO_DEVICE);
1385
1386 tx_buffer = &tx_ring->tx_buffer_info[i];
1387 }
1388
1389 /* write last descriptor with RS and EOP bits */
1390 cmd_type |= size | IGC_TXD_DCMD;
1391 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1392
1393 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1394
1395 /* set the timestamp */
1396 first->time_stamp = jiffies;
1397
1398 skb_tx_timestamp(skb);
1399
1400 /* Force memory writes to complete before letting h/w know there
1401 * are new descriptors to fetch. (Only applicable for weak-ordered
1402 * memory model archs, such as IA-64).
1403 *
1404 * We also need this memory barrier to make certain all of the
1405 * status bits have been updated before next_to_watch is written.
1406 */
1407 wmb();
1408
1409 /* set next_to_watch value indicating a packet is present */
1410 first->next_to_watch = tx_desc;
1411
1412 i++;
1413 if (i == tx_ring->count)
1414 i = 0;
1415
1416 tx_ring->next_to_use = i;
1417
1418 /* Make sure there is space in the ring for the next send. */
1419 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1420
1421 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1422 writel(i, tx_ring->tail);
1423 }
1424
1425 return 0;
1426 dma_error:
1427 netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1428 tx_buffer = &tx_ring->tx_buffer_info[i];
1429
1430 /* clear dma mappings for failed tx_buffer_info map */
1431 while (tx_buffer != first) {
1432 if (dma_unmap_len(tx_buffer, len))
1433 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1434
1435 if (i-- == 0)
1436 i += tx_ring->count;
1437 tx_buffer = &tx_ring->tx_buffer_info[i];
1438 }
1439
1440 if (dma_unmap_len(tx_buffer, len))
1441 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1442
1443 dev_kfree_skb_any(tx_buffer->skb);
1444 tx_buffer->skb = NULL;
1445
1446 tx_ring->next_to_use = i;
1447
1448 return -1;
1449 }
1450
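/**
 * igc_tso - Configure TSO offload for a packet
 * @tx_ring: Tx ring the packet is placed on
 * @first: first Tx buffer of the packet
 * @launch_time: launch time value passed to the context descriptor
 * @first_flag: first-flag value passed to the context descriptor
 * @hdr_len: returns the length of the segmentation header
 *
 * Returns 1 when a TSO context descriptor was written, 0 when the packet
 * does not need TSO, or a negative error code from skb_cow_head().
 */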
static int igc_tso(struct igc_ring *tx_ring,
1452 struct igc_tx_buffer *first,
1453 __le32 launch_time, bool first_flag,
1454 u8 *hdr_len)
1455 {
1456 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1457 struct sk_buff *skb = first->skb;
1458 union {
1459 struct iphdr *v4;
1460 struct ipv6hdr *v6;
1461 unsigned char *hdr;
1462 } ip;
1463 union {
1464 struct tcphdr *tcp;
1465 struct udphdr *udp;
1466 unsigned char *hdr;
1467 } l4;
1468 u32 paylen, l4_offset;
1469 int err;
1470
1471 if (skb->ip_summed != CHECKSUM_PARTIAL)
1472 return 0;
1473
1474 if (!skb_is_gso(skb))
1475 return 0;
1476
1477 err = skb_cow_head(skb, 0);
1478 if (err < 0)
1479 return err;
1480
1481 ip.hdr = skb_network_header(skb);
1482 l4.hdr = skb_checksum_start(skb);
1483
1484 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1485 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1486
1487 /* initialize outer IP header fields */
1488 if (ip.v4->version == 4) {
1489 unsigned char *csum_start = skb_checksum_start(skb);
1490 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1491
1492 /* IP header will have to cancel out any data that
1493 * is not a part of the outer IP header
1494 */
1495 ip.v4->check = csum_fold(csum_partial(trans_start,
1496 csum_start - trans_start,
1497 0));
1498 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1499
1500 ip.v4->tot_len = 0;
1501 first->tx_flags |= IGC_TX_FLAGS_TSO |
1502 IGC_TX_FLAGS_CSUM |
1503 IGC_TX_FLAGS_IPV4;
1504 } else {
1505 ip.v6->payload_len = 0;
1506 first->tx_flags |= IGC_TX_FLAGS_TSO |
1507 IGC_TX_FLAGS_CSUM;
1508 }
1509
1510 /* determine offset of inner transport header */
1511 l4_offset = l4.hdr - skb->data;
1512
1513 /* remove payload length from inner checksum */
1514 paylen = skb->len - l4_offset;
1515 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1516 /* compute length of segmentation header */
1517 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1518 csum_replace_by_diff(&l4.tcp->check,
1519 (__force __wsum)htonl(paylen));
1520 } else {
1521 /* compute length of segmentation header */
1522 *hdr_len = sizeof(*l4.udp) + l4_offset;
1523 csum_replace_by_diff(&l4.udp->check,
1524 (__force __wsum)htonl(paylen));
1525 }
1526
1527 /* update gso size and bytecount with header size */
1528 first->gso_segs = skb_shinfo(skb)->gso_segs;
1529 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1530
1531 /* MSS L4LEN IDX */
1532 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1533 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1534
1535 /* VLAN MACLEN IPLEN */
1536 vlan_macip_lens = l4.hdr - ip.hdr;
1537 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1538 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1539
1540 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1541 vlan_macip_lens, type_tucmd, mss_l4len_idx);
1542
1543 return 1;
1544 }
1545
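/**
 * igc_request_tx_tstamp - Reserve a free Tx timestamp register for an skb
 * @adapter: board private structure
 * @skb: packet requesting a hardware timestamp
 * @flags: returns the Tx flags associated with the reserved slot
 *
 * Returns true if a free slot was claimed, false if all slots are busy.
 */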
static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
1547 {
1548 int i;
1549
1550 for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
1551 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
1552
1553 if (tstamp->skb)
1554 continue;
1555
1556 tstamp->skb = skb_get(skb);
1557 tstamp->start = jiffies;
1558 *flags = tstamp->flags;
1559
1560 return true;
1561 }
1562
1563 return false;
1564 }
1565
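/**
 * igc_insert_empty_frame - Queue a minimum-size empty frame
 * @tx_ring: Tx ring to place the empty frame on
 *
 * Allocates, maps and queues an empty frame together with its context and
 * data descriptors, used ahead of a packet scheduled for a new Qbv cycle.
 *
 * Returns 0 on success, negative errno on allocation or mapping failure.
 */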
static int igc_insert_empty_frame(struct igc_ring *tx_ring)
1567 {
1568 struct igc_tx_buffer *empty_info;
1569 struct sk_buff *empty_skb;
1570 void *data;
1571 int ret;
1572
1573 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1574 empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
1575 if (unlikely(!empty_skb)) {
1576 net_err_ratelimited("%s: skb alloc error for empty frame\n",
1577 netdev_name(tx_ring->netdev));
1578 return -ENOMEM;
1579 }
1580
1581 data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
1582 memset(data, 0, IGC_EMPTY_FRAME_SIZE);
1583
1584 /* Prepare DMA mapping and Tx buffer information */
1585 ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
1586 if (unlikely(ret)) {
1587 dev_kfree_skb_any(empty_skb);
1588 return ret;
1589 }
1590
1591 /* Prepare advanced context descriptor for empty packet */
1592 igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
1593
1594 /* Prepare advanced data descriptor for empty packet */
1595 igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
1596
1597 return 0;
1598 }
1599
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1601 struct igc_ring *tx_ring)
1602 {
1603 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1604 bool first_flag = false, insert_empty = false;
1605 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1606 __be16 protocol = vlan_get_protocol(skb);
1607 struct igc_tx_buffer *first;
1608 __le32 launch_time = 0;
1609 u32 tx_flags = 0;
1610 unsigned short f;
1611 ktime_t txtime;
1612 u8 hdr_len = 0;
1613 int tso = 0;
1614
1615 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1616 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1617 * + 2 desc gap to keep tail from touching head,
1618 * + 1 desc for context descriptor,
1619 * + 2 desc for inserting an empty packet for launch time,
1620 * otherwise try next time
1621 */
1622 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1623 count += TXD_USE_COUNT(skb_frag_size(
1624 &skb_shinfo(skb)->frags[f]));
1625
1626 if (igc_maybe_stop_tx(tx_ring, count + 5)) {
1627 /* this is a hard error */
1628 return NETDEV_TX_BUSY;
1629 }
1630
1631 if (!tx_ring->launchtime_enable)
1632 goto done;
1633
1634 txtime = skb->tstamp;
1635 skb->tstamp = ktime_set(0, 0);
1636 launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
1637
1638 if (insert_empty) {
1639 /* Reset the launch time if the required empty frame fails to
1640 * be inserted. However, this packet is not dropped, so it
1641 * "dirties" the current Qbv cycle. This ensures that the
1642 * upcoming packet, which is scheduled in the next Qbv cycle,
1643 * does not require an empty frame. This way, the launch time
1644 * continues to function correctly despite the current failure
1645 * to insert the empty frame.
1646 */
1647 if (igc_insert_empty_frame(tx_ring))
1648 launch_time = 0;
1649 }
1650
1651 done:
1652 /* record the location of the first descriptor for this packet */
1653 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1654 first->type = IGC_TX_BUFFER_TYPE_SKB;
1655 first->skb = skb;
1656 first->bytecount = skb->len;
1657 first->gso_segs = 1;
1658
1659 if (adapter->qbv_transition || tx_ring->oper_gate_closed)
1660 goto out_drop;
1661
1662 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
1663 adapter->stats.txdrop++;
1664 goto out_drop;
1665 }
1666
1667 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
1668 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1669 unsigned long flags;
1670 u32 tstamp_flags;
1671
1672 spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
1673 if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
1674 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1675 tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
1676 if (skb->sk &&
1677 READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
1678 tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
1679 } else {
1680 adapter->tx_hwtstamp_skipped++;
1681 }
1682
1683 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
1684 }
1685
1686 if (skb_vlan_tag_present(skb)) {
1687 tx_flags |= IGC_TX_FLAGS_VLAN;
1688 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1689 }
1690
1691 /* record initial flags and protocol */
1692 first->tx_flags = tx_flags;
1693 first->protocol = protocol;
1694
1695 /* For preemptible queue, manually pad the skb so that HW includes
1696 * padding bytes in mCRC calculation
1697 */
1698 if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
1699 if (skb_padto(skb, ETH_ZLEN))
1700 goto out_drop;
1701 skb_put(skb, ETH_ZLEN - skb->len);
1702 }
1703
1704 tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
1705 if (tso < 0)
1706 goto out_drop;
1707 else if (!tso)
1708 igc_tx_csum(tx_ring, first, launch_time, first_flag);
1709
1710 igc_tx_map(tx_ring, first, hdr_len);
1711
1712 return NETDEV_TX_OK;
1713
1714 out_drop:
1715 dev_kfree_skb_any(first->skb);
1716 first->skb = NULL;
1717
1718 return NETDEV_TX_OK;
1719 }
1720
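/**
 * igc_tx_queue_mapping - Map an skb's queue mapping to a Tx ring
 * @adapter: board private structure
 * @skb: packet being transmitted
 *
 * Wraps out-of-range queue indices back into the valid Tx queue range.
 */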
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1722 struct sk_buff *skb)
1723 {
1724 unsigned int r_idx = skb->queue_mapping;
1725
1726 if (r_idx >= adapter->num_tx_queues)
1727 r_idx = r_idx % adapter->num_tx_queues;
1728
1729 return adapter->tx_ring[r_idx];
1730 }
1731
static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1733 struct net_device *netdev)
1734 {
1735 struct igc_adapter *adapter = netdev_priv(netdev);
1736
1737 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1738 * in order to meet this minimum size requirement.
1739 */
1740 if (skb_put_padto(skb, 17))
1741 return NETDEV_TX_OK;
1742
1743 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1744 }
1745
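/**
 * igc_rx_checksum - Set the skb checksum state from an Rx descriptor
 * @ring: Rx ring the packet was received on
 * @rx_desc: Rx descriptor written back by hardware
 * @skb: packet being populated
 */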
static void igc_rx_checksum(struct igc_ring *ring,
1747 union igc_adv_rx_desc *rx_desc,
1748 struct sk_buff *skb)
1749 {
1750 skb_checksum_none_assert(skb);
1751
1752 /* Ignore Checksum bit is set */
1753 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1754 return;
1755
1756 /* Rx checksum disabled via ethtool */
1757 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1758 return;
1759
1760 /* TCP/UDP checksum error bit is set */
1761 if (igc_test_staterr(rx_desc,
1762 IGC_RXDEXT_STATERR_L4E |
1763 IGC_RXDEXT_STATERR_IPE)) {
1764 /* work around errata with sctp packets where the TCPE aka
1765 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1766 * packets (aka let the stack check the crc32c)
1767 */
1768 if (!(skb->len == 60 &&
1769 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1770 u64_stats_update_begin(&ring->rx_syncp);
1771 ring->rx_stats.csum_err++;
1772 u64_stats_update_end(&ring->rx_syncp);
1773 }
1774 /* let the stack verify checksum errors */
1775 return;
1776 }
1777 /* It must be a TCP or UDP packet with a valid checksum */
1778 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1779 IGC_RXD_STAT_UDPCS))
1780 skb->ip_summed = CHECKSUM_UNNECESSARY;
1781
1782 netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1783 le32_to_cpu(rx_desc->wb.upper.status_error));
1784 }
1785
1786 /* Mapping HW RSS Type to enum pkt_hash_types */
1787 static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
1788 [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2,
1789 [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4,
1790 [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3,
1791 [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4,
1792 [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3,
1793 [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3,
1794 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
1795 [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4,
1796 [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4,
1797 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
1798 [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
1799 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
1801 [13] = PKT_HASH_TYPE_NONE,
1802 [14] = PKT_HASH_TYPE_NONE,
1803 [15] = PKT_HASH_TYPE_NONE,
1804 };
1805
static inline void igc_rx_hash(struct igc_ring *ring,
1807 union igc_adv_rx_desc *rx_desc,
1808 struct sk_buff *skb)
1809 {
1810 if (ring->netdev->features & NETIF_F_RXHASH) {
1811 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1812 u32 rss_type = igc_rss_type(rx_desc);
1813
1814 skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
1815 }
1816 }
1817
1818 static void igc_rx_vlan(struct igc_ring *rx_ring,
1819 union igc_adv_rx_desc *rx_desc,
1820 struct sk_buff *skb)
1821 {
1822 struct net_device *dev = rx_ring->netdev;
1823 u16 vid;
1824
1825 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1826 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1827 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1828 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1829 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1830 else
1831 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1832
1833 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1834 }
1835 }
1836
1837 /**
1838 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1839 * @rx_ring: rx descriptor ring packet is being transacted on
1840 * @rx_desc: pointer to the EOP Rx descriptor
1841 * @skb: pointer to current skb being populated
1842 *
1843 * This function checks the ring, descriptor, and packet information in order
1844 * to populate the hash, checksum, VLAN, protocol, and other fields within the
1845 * skb.
1846 */
1847 static void igc_process_skb_fields(struct igc_ring *rx_ring,
1848 union igc_adv_rx_desc *rx_desc,
1849 struct sk_buff *skb)
1850 {
1851 igc_rx_hash(rx_ring, rx_desc, skb);
1852
1853 igc_rx_checksum(rx_ring, rx_desc, skb);
1854
1855 igc_rx_vlan(rx_ring, rx_desc, skb);
1856
1857 skb_record_rx_queue(skb, rx_ring->queue_index);
1858
1859 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1860 }
1861
1862 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1863 {
1864 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1865 struct igc_adapter *adapter = netdev_priv(netdev);
1866 struct igc_hw *hw = &adapter->hw;
1867 u32 ctrl;
1868
1869 ctrl = rd32(IGC_CTRL);
1870
1871 if (enable) {
1872 /* enable VLAN tag insert/strip */
1873 ctrl |= IGC_CTRL_VME;
1874 } else {
1875 /* disable VLAN tag insert/strip */
1876 ctrl &= ~IGC_CTRL_VME;
1877 }
1878 wr32(IGC_CTRL, ctrl);
1879 }
1880
1881 static void igc_restore_vlan(struct igc_adapter *adapter)
1882 {
1883 igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1884 }
1885
1886 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1887 const unsigned int size,
1888 int *rx_buffer_pgcnt)
1889 {
1890 struct igc_rx_buffer *rx_buffer;
1891
1892 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1893 *rx_buffer_pgcnt =
1894 #if (PAGE_SIZE < 8192)
1895 page_count(rx_buffer->page);
1896 #else
1897 0;
1898 #endif
1899 prefetchw(rx_buffer->page);
1900
1901 /* we are reusing so sync this buffer for CPU use */
1902 dma_sync_single_range_for_cpu(rx_ring->dev,
1903 rx_buffer->dma,
1904 rx_buffer->page_offset,
1905 size,
1906 DMA_FROM_DEVICE);
1907
1908 rx_buffer->pagecnt_bias--;
1909
1910 return rx_buffer;
1911 }
1912
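/* Flip to the unused half of a split Rx page (or advance past the consumed
 * region on larger pages) so the next frame lands in fresh buffer space.
 */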
1913 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1914 unsigned int truesize)
1915 {
1916 #if (PAGE_SIZE < 8192)
1917 buffer->page_offset ^= truesize;
1918 #else
1919 buffer->page_offset += truesize;
1920 #endif
1921 }
1922
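/* Return the truesize charged for a received frame: half an Rx page when
 * PAGE_SIZE < 8192, otherwise the aligned frame size plus headroom and
 * shared-info overhead when build_skb is in use.
 */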
1923 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1924 unsigned int size)
1925 {
1926 unsigned int truesize;
1927
1928 #if (PAGE_SIZE < 8192)
1929 truesize = igc_rx_pg_size(ring) / 2;
1930 #else
1931 truesize = ring_uses_build_skb(ring) ?
1932 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1933 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1934 SKB_DATA_ALIGN(size);
1935 #endif
1936 return truesize;
1937 }
1938
1939 /**
1940 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1941 * @rx_ring: rx descriptor ring to transact packets on
1942 * @rx_buffer: buffer containing page to add
1943 * @skb: sk_buff to place the data into
1944 * @size: size of buffer to be added
1945 *
1946 * This function will add the data contained in rx_buffer->page to the skb.
1947 */
1948 static void igc_add_rx_frag(struct igc_ring *rx_ring,
1949 struct igc_rx_buffer *rx_buffer,
1950 struct sk_buff *skb,
1951 unsigned int size)
1952 {
1953 unsigned int truesize;
1954
1955 #if (PAGE_SIZE < 8192)
1956 truesize = igc_rx_pg_size(rx_ring) / 2;
1957 #else
1958 truesize = ring_uses_build_skb(rx_ring) ?
1959 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1960 SKB_DATA_ALIGN(size);
1961 #endif
1962 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1963 rx_buffer->page_offset, size, truesize);
1964
1965 igc_rx_buffer_flip(rx_buffer, truesize);
1966 }
1967
1968 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1969 struct igc_rx_buffer *rx_buffer,
1970 struct xdp_buff *xdp)
1971 {
1972 unsigned int size = xdp->data_end - xdp->data;
1973 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1974 unsigned int metasize = xdp->data - xdp->data_meta;
1975 struct sk_buff *skb;
1976
1977 /* prefetch first cache line of first page */
1978 net_prefetch(xdp->data_meta);
1979
1980 /* build an skb around the page buffer */
1981 skb = napi_build_skb(xdp->data_hard_start, truesize);
1982 if (unlikely(!skb))
1983 return NULL;
1984
1985 /* update pointers within the skb to store the data */
1986 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1987 __skb_put(skb, size);
1988 if (metasize)
1989 skb_metadata_set(skb, metasize);
1990
1991 igc_rx_buffer_flip(rx_buffer, truesize);
1992 return skb;
1993 }
1994
1995 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1996 struct igc_rx_buffer *rx_buffer,
1997 struct igc_xdp_buff *ctx)
1998 {
1999 struct xdp_buff *xdp = &ctx->xdp;
2000 unsigned int metasize = xdp->data - xdp->data_meta;
2001 unsigned int size = xdp->data_end - xdp->data;
2002 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
2003 void *va = xdp->data;
2004 unsigned int headlen;
2005 struct sk_buff *skb;
2006
2007 /* prefetch first cache line of first page */
2008 net_prefetch(xdp->data_meta);
2009
2010 /* allocate an skb to store the frags */
2011 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
2012 IGC_RX_HDR_LEN + metasize);
2013 if (unlikely(!skb))
2014 return NULL;
2015
2016 if (ctx->rx_ts) {
2017 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
2018 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
2019 }
2020
2021 /* Determine available headroom for copy */
2022 headlen = size;
2023 if (headlen > IGC_RX_HDR_LEN)
2024 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
2025
2026 /* align pull length to size of long to optimize memcpy performance */
2027 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
2028 ALIGN(headlen + metasize, sizeof(long)));
2029
2030 if (metasize) {
2031 skb_metadata_set(skb, metasize);
2032 __skb_pull(skb, metasize);
2033 }
2034
2035 /* update all of the pointers */
2036 size -= headlen;
2037 if (size) {
2038 skb_add_rx_frag(skb, 0, rx_buffer->page,
2039 (va + headlen) - page_address(rx_buffer->page),
2040 size, truesize);
2041 igc_rx_buffer_flip(rx_buffer, truesize);
2042 } else {
2043 rx_buffer->pagecnt_bias++;
2044 }
2045
2046 return skb;
2047 }
2048
2049 /**
2050 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2051 * @rx_ring: rx descriptor ring to store buffers on
2052 * @old_buff: donor buffer to have page reused
2053 *
2054 * Synchronizes page for reuse by the adapter
2055 */
2056 static void igc_reuse_rx_page(struct igc_ring *rx_ring,
2057 struct igc_rx_buffer *old_buff)
2058 {
2059 u16 nta = rx_ring->next_to_alloc;
2060 struct igc_rx_buffer *new_buff;
2061
2062 new_buff = &rx_ring->rx_buffer_info[nta];
2063
2064 /* update, and store next to alloc */
2065 nta++;
2066 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
2067
2068 /* Transfer page from old buffer to new buffer.
2069 * Move each member individually to avoid possible store
2070 * forwarding stalls.
2071 */
2072 new_buff->dma = old_buff->dma;
2073 new_buff->page = old_buff->page;
2074 new_buff->page_offset = old_buff->page_offset;
2075 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
2076 }
2077
2078 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
2079 int rx_buffer_pgcnt)
2080 {
2081 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
2082 struct page *page = rx_buffer->page;
2083
2084 /* avoid re-using remote and pfmemalloc pages */
2085 if (!dev_page_is_reusable(page))
2086 return false;
2087
2088 #if (PAGE_SIZE < 8192)
2089 /* if we are only owner of page we can reuse it */
2090 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
2091 return false;
2092 #else
2093 #define IGC_LAST_OFFSET \
2094 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
2095
2096 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
2097 return false;
2098 #endif
2099
2100 /* If we have drained the page fragment pool we need to update
2101 * the pagecnt_bias and page count so that we fully restock the
2102 * number of references the driver holds.
2103 */
2104 if (unlikely(pagecnt_bias == 1)) {
2105 page_ref_add(page, USHRT_MAX - 1);
2106 rx_buffer->pagecnt_bias = USHRT_MAX;
2107 }
2108
2109 return true;
2110 }
2111
2112 /**
2113 * igc_is_non_eop - process handling of non-EOP buffers
2114 * @rx_ring: Rx ring being processed
2115 * @rx_desc: Rx descriptor for current buffer
2116 *
2117 * This function updates next to clean. If the buffer is an EOP buffer
2118 * this function exits returning false, otherwise it will place the
2119 * sk_buff in the next buffer to be chained and return true indicating
2120 * that this is in fact a non-EOP buffer.
2121 */
2122 static bool igc_is_non_eop(struct igc_ring *rx_ring,
2123 union igc_adv_rx_desc *rx_desc)
2124 {
2125 u32 ntc = rx_ring->next_to_clean + 1;
2126
2127 /* fetch, update, and store next to clean */
2128 ntc = (ntc < rx_ring->count) ? ntc : 0;
2129 rx_ring->next_to_clean = ntc;
2130
2131 prefetch(IGC_RX_DESC(rx_ring, ntc));
2132
2133 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
2134 return false;
2135
2136 return true;
2137 }
2138
2139 /**
2140 * igc_cleanup_headers - Correct corrupted or empty headers
2141 * @rx_ring: rx descriptor ring packet is being transacted on
2142 * @rx_desc: pointer to the EOP Rx descriptor
2143 * @skb: pointer to current skb being fixed
2144 *
2145 * Address the case where we are pulling data in on pages only
2146 * and as such no data is present in the skb header.
2147 *
2148 * In addition if skb is not at least 60 bytes we need to pad it so that
2149 * it is large enough to qualify as a valid Ethernet frame.
2150 *
2151 * Returns true if an error was encountered and skb was freed.
2152 */
2153 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
2154 union igc_adv_rx_desc *rx_desc,
2155 struct sk_buff *skb)
2156 {
2157 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
2158 struct net_device *netdev = rx_ring->netdev;
2159
2160 if (!(netdev->features & NETIF_F_RXALL)) {
2161 dev_kfree_skb_any(skb);
2162 return true;
2163 }
2164 }
2165
2166 /* if eth_skb_pad returns an error the skb was freed */
2167 if (eth_skb_pad(skb))
2168 return true;
2169
2170 return false;
2171 }
2172
2173 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
2174 struct igc_rx_buffer *rx_buffer,
2175 int rx_buffer_pgcnt)
2176 {
2177 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2178 /* hand second half of page back to the ring */
2179 igc_reuse_rx_page(rx_ring, rx_buffer);
2180 } else {
2181 /* We are not reusing the buffer so unmap it and free
2182 * any references we are holding to it
2183 */
2184 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2185 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
2186 IGC_RX_DMA_ATTR);
2187 __page_frag_cache_drain(rx_buffer->page,
2188 rx_buffer->pagecnt_bias);
2189 }
2190
2191 /* clear contents of rx_buffer */
2192 rx_buffer->page = NULL;
2193 }
2194
2195 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
2196 {
2197 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
2198
2199 if (ring_uses_build_skb(rx_ring))
2200 return IGC_SKB_PAD;
2201 if (igc_xdp_is_enabled(adapter))
2202 return XDP_PACKET_HEADROOM;
2203
2204 return 0;
2205 }
2206
2207 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
2208 struct igc_rx_buffer *bi)
2209 {
2210 struct page *page = bi->page;
2211 dma_addr_t dma;
2212
2213 /* since we are recycling buffers we should seldom need to alloc */
2214 if (likely(page))
2215 return true;
2216
2217 /* alloc new page for storage */
2218 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
2219 if (unlikely(!page)) {
2220 rx_ring->rx_stats.alloc_failed++;
2221 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
2222 return false;
2223 }
2224
2225 /* map page for use */
2226 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
2227 igc_rx_pg_size(rx_ring),
2228 DMA_FROM_DEVICE,
2229 IGC_RX_DMA_ATTR);
2230
2231 /* if mapping failed free memory back to system since
2232 * there isn't much point in holding memory we can't use
2233 */
2234 if (dma_mapping_error(rx_ring->dev, dma)) {
2235 __free_page(page);
2236
2237 rx_ring->rx_stats.alloc_failed++;
2238 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
2239 return false;
2240 }
2241
2242 bi->dma = dma;
2243 bi->page = page;
2244 bi->page_offset = igc_rx_offset(rx_ring);
2245 page_ref_add(page, USHRT_MAX - 1);
2246 bi->pagecnt_bias = USHRT_MAX;
2247
2248 return true;
2249 }
2250
2251 /**
2252 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2253 * @rx_ring: rx descriptor ring
2254 * @cleaned_count: number of buffers to clean
2255 */
2256 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
2257 {
2258 union igc_adv_rx_desc *rx_desc;
2259 u16 i = rx_ring->next_to_use;
2260 struct igc_rx_buffer *bi;
2261 u16 bufsz;
2262
2263 /* nothing to do */
2264 if (!cleaned_count)
2265 return;
2266
2267 rx_desc = IGC_RX_DESC(rx_ring, i);
2268 bi = &rx_ring->rx_buffer_info[i];
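/* Bias the index by the ring size so that, with unsigned wraparound, it hits
 * zero exactly at the end of the ring; the loop below can then detect the
 * wrap with a cheap !i test instead of comparing against rx_ring->count.
 */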
2269 i -= rx_ring->count;
2270
2271 bufsz = igc_rx_bufsz(rx_ring);
2272
2273 do {
2274 if (!igc_alloc_mapped_page(rx_ring, bi))
2275 break;
2276
2277 /* sync the buffer for use by the device */
2278 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2279 bi->page_offset, bufsz,
2280 DMA_FROM_DEVICE);
2281
2282 /* Refresh the desc even if buffer_addrs didn't change
2283 * because each write-back erases this info.
2284 */
2285 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2286
2287 rx_desc++;
2288 bi++;
2289 i++;
2290 if (unlikely(!i)) {
2291 rx_desc = IGC_RX_DESC(rx_ring, 0);
2292 bi = rx_ring->rx_buffer_info;
2293 i -= rx_ring->count;
2294 }
2295
2296 /* clear the length for the next_to_use descriptor */
2297 rx_desc->wb.upper.length = 0;
2298
2299 cleaned_count--;
2300 } while (cleaned_count);
2301
2302 i += rx_ring->count;
2303
2304 if (rx_ring->next_to_use != i) {
2305 /* record the next descriptor to use */
2306 rx_ring->next_to_use = i;
2307
2308 /* update next to alloc since we have filled the ring */
2309 rx_ring->next_to_alloc = i;
2310
2311 /* Force memory writes to complete before letting h/w
2312 * know there are new descriptors to fetch. (Only
2313 * applicable for weak-ordered memory model archs,
2314 * such as IA-64).
2315 */
2316 wmb();
2317 writel(i, rx_ring->tail);
2318 }
2319 }
2320
2321 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2322 {
2323 union igc_adv_rx_desc *desc;
2324 u16 i = ring->next_to_use;
2325 struct igc_rx_buffer *bi;
2326 dma_addr_t dma;
2327 bool ok = true;
2328
2329 if (!count)
2330 return ok;
2331
2332 XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
2333
2334 desc = IGC_RX_DESC(ring, i);
2335 bi = &ring->rx_buffer_info[i];
2336 i -= ring->count;
2337
2338 do {
2339 bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2340 if (!bi->xdp) {
2341 ok = false;
2342 break;
2343 }
2344
2345 dma = xsk_buff_xdp_get_dma(bi->xdp);
2346 desc->read.pkt_addr = cpu_to_le64(dma);
2347
2348 desc++;
2349 bi++;
2350 i++;
2351 if (unlikely(!i)) {
2352 desc = IGC_RX_DESC(ring, 0);
2353 bi = ring->rx_buffer_info;
2354 i -= ring->count;
2355 }
2356
2357 /* Clear the length for the next_to_use descriptor. */
2358 desc->wb.upper.length = 0;
2359
2360 count--;
2361 } while (count);
2362
2363 i += ring->count;
2364
2365 if (ring->next_to_use != i) {
2366 ring->next_to_use = i;
2367
2368 /* Force memory writes to complete before letting h/w
2369 * know there are new descriptors to fetch. (Only
2370 * applicable for weak-ordered memory model archs,
2371 * such as IA-64).
2372 */
2373 wmb();
2374 writel(i, ring->tail);
2375 }
2376
2377 return ok;
2378 }
2379
2380 /* This function requires __netif_tx_lock is held by the caller. */
2381 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2382 struct xdp_frame *xdpf)
2383 {
2384 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2385 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2386 u16 count, index = ring->next_to_use;
2387 struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
2388 struct igc_tx_buffer *buffer = head;
2389 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
2390 u32 olinfo_status, len = xdpf->len, cmd_type;
2391 void *data = xdpf->data;
2392 u16 i;
2393
2394 count = TXD_USE_COUNT(len);
2395 for (i = 0; i < nr_frags; i++)
2396 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
2397
2398 if (igc_maybe_stop_tx(ring, count + 3)) {
2399 /* this is a hard error */
2400 return -EBUSY;
2401 }
2402
2403 i = 0;
2404 head->bytecount = xdp_get_frame_len(xdpf);
2405 head->type = IGC_TX_BUFFER_TYPE_XDP;
2406 head->gso_segs = 1;
2407 head->xdpf = xdpf;
2408
2409 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2410 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2411
2412 for (;;) {
2413 dma_addr_t dma;
2414
2415 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
2416 if (dma_mapping_error(ring->dev, dma)) {
2417 netdev_err_once(ring->netdev,
2418 "Failed to map DMA for TX\n");
2419 goto unmap;
2420 }
2421
2422 dma_unmap_len_set(buffer, len, len);
2423 dma_unmap_addr_set(buffer, dma, dma);
2424
2425 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2426 IGC_ADVTXD_DCMD_IFCS | len;
2427
2428 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2429 desc->read.buffer_addr = cpu_to_le64(dma);
2430
2431 buffer->protocol = 0;
2432
2433 if (++index == ring->count)
2434 index = 0;
2435
2436 if (i == nr_frags)
2437 break;
2438
2439 buffer = &ring->tx_buffer_info[index];
2440 desc = IGC_TX_DESC(ring, index);
2441 desc->read.olinfo_status = 0;
2442
2443 data = skb_frag_address(&sinfo->frags[i]);
2444 len = skb_frag_size(&sinfo->frags[i]);
2445 i++;
2446 }
2447 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
2448
2449 netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
2450 /* set the timestamp */
2451 head->time_stamp = jiffies;
2452 /* set next_to_watch value indicating a packet is present */
2453 head->next_to_watch = desc;
2454 ring->next_to_use = index;
2455
2456 return 0;
2457
2458 unmap:
2459 for (;;) {
2460 buffer = &ring->tx_buffer_info[index];
2461 if (dma_unmap_len(buffer, len))
2462 dma_unmap_page(ring->dev,
2463 dma_unmap_addr(buffer, dma),
2464 dma_unmap_len(buffer, len),
2465 DMA_TO_DEVICE);
2466 dma_unmap_len_set(buffer, len, 0);
2467 if (buffer == head)
2468 break;
2469
2470 if (!index)
2471 index += ring->count;
2472 index--;
2473 }
2474
2475 return -ENOMEM;
2476 }
2477
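/* Map a CPU number to one of the adapter's Tx rings, folding the index into
 * the range of available Tx queues.
 */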
2478 struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
2479 {
2480 int index = cpu;
2481
2482 if (unlikely(index < 0))
2483 index = 0;
2484
2485 while (index >= adapter->num_tx_queues)
2486 index -= adapter->num_tx_queues;
2487
2488 return adapter->tx_ring[index];
2489 }
2490
2491 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2492 {
2493 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2494 int cpu = smp_processor_id();
2495 struct netdev_queue *nq;
2496 struct igc_ring *ring;
2497 int res;
2498
2499 if (unlikely(!xdpf))
2500 return -EFAULT;
2501
2502 ring = igc_get_tx_ring(adapter, cpu);
2503 nq = txring_txq(ring);
2504
2505 __netif_tx_lock(nq, cpu);
2506 /* Avoid transmit queue timeout since we share it with the slow path */
2507 txq_trans_cond_update(nq);
2508 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2509 __netif_tx_unlock(nq);
2510 return res;
2511 }
2512
2513 /* This function assumes rcu_read_lock() is held by the caller. */
2514 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2515 struct bpf_prog *prog,
2516 struct xdp_buff *xdp)
2517 {
2518 u32 act = bpf_prog_run_xdp(prog, xdp);
2519
2520 switch (act) {
2521 case XDP_PASS:
2522 return IGC_XDP_PASS;
2523 case XDP_TX:
2524 if (igc_xdp_xmit_back(adapter, xdp) < 0)
2525 goto out_failure;
2526 return IGC_XDP_TX;
2527 case XDP_REDIRECT:
2528 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2529 goto out_failure;
2530 return IGC_XDP_REDIRECT;
2532 default:
2533 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
2534 fallthrough;
2535 case XDP_ABORTED:
2536 out_failure:
2537 trace_xdp_exception(adapter->netdev, prog, act);
2538 fallthrough;
2539 case XDP_DROP:
2540 return IGC_XDP_CONSUMED;
2541 }
2542 }
2543
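/* Run the XDP program attached to the netdev, if any, on this buffer.
 * Returns IGC_XDP_PASS when no program is installed.
 */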
2544 static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
2545 {
2546 struct bpf_prog *prog;
2547 int res;
2548
2549 prog = READ_ONCE(adapter->xdp_prog);
2550 if (!prog) {
2551 res = IGC_XDP_PASS;
2552 goto out;
2553 }
2554
2555 res = __igc_xdp_run_prog(adapter, prog, xdp);
2556
2557 out:
2558 return res;
2559 }
2560
2561 /* This function assumes __netif_tx_lock is held by the caller. */
2562 void igc_flush_tx_descriptors(struct igc_ring *ring)
2563 {
2564 /* Once tail pointer is updated, hardware can fetch the descriptors
2565 * any time so we issue a write membar here to ensure all memory
2566 * writes are complete before the tail pointer is updated.
2567 */
2568 wmb();
2569 writel(ring->next_to_use, ring->tail);
2570 }
2571
2572 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2573 {
2574 int cpu = smp_processor_id();
2575 struct netdev_queue *nq;
2576 struct igc_ring *ring;
2577
2578 if (status & IGC_XDP_TX) {
2579 ring = igc_get_tx_ring(adapter, cpu);
2580 nq = txring_txq(ring);
2581
2582 __netif_tx_lock(nq, cpu);
2583 igc_flush_tx_descriptors(ring);
2584 __netif_tx_unlock(nq);
2585 }
2586
2587 if (status & IGC_XDP_REDIRECT)
2588 xdp_do_flush();
2589 }
2590
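/* Fold this poll's Rx packet and byte counts into the ring statistics (under
 * the u64_stats seqcount) and into the q_vector running totals.
 */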
2591 static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2592 unsigned int packets, unsigned int bytes)
2593 {
2594 struct igc_ring *ring = q_vector->rx.ring;
2595
2596 u64_stats_update_begin(&ring->rx_syncp);
2597 ring->rx_stats.packets += packets;
2598 ring->rx_stats.bytes += bytes;
2599 u64_stats_update_end(&ring->rx_syncp);
2600
2601 q_vector->rx.total_packets += packets;
2602 q_vector->rx.total_bytes += bytes;
2603 }
2604
2605 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2606 {
2607 unsigned int total_bytes = 0, total_packets = 0;
2608 struct igc_adapter *adapter = q_vector->adapter;
2609 struct igc_ring *rx_ring = q_vector->rx.ring;
2610 struct sk_buff *skb = rx_ring->skb;
2611 u16 cleaned_count = igc_desc_unused(rx_ring);
2612 int xdp_status = 0, rx_buffer_pgcnt;
2613 int xdp_res = 0;
2614
2615 while (likely(total_packets < budget)) {
2616 struct igc_xdp_buff ctx = { .rx_ts = NULL };
2617 struct igc_rx_buffer *rx_buffer;
2618 union igc_adv_rx_desc *rx_desc;
2619 unsigned int size, truesize;
2620 int pkt_offset = 0;
2621 void *pktbuf;
2622
2623 /* return some buffers to hardware, one at a time is too slow */
2624 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2625 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2626 cleaned_count = 0;
2627 }
2628
2629 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2630 size = le16_to_cpu(rx_desc->wb.upper.length);
2631 if (!size)
2632 break;
2633
2634 /* This memory barrier is needed to keep us from reading
2635 * any other fields out of the rx_desc until we know the
2636 * descriptor has been written back
2637 */
2638 dma_rmb();
2639
2640 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2641 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2642
2643 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2644
2645 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2646 ctx.rx_ts = pktbuf;
2647 pkt_offset = IGC_TS_HDR_LEN;
2648 size -= IGC_TS_HDR_LEN;
2649 }
2650
2651 if (igc_fpe_is_pmac_enabled(adapter) &&
2652 igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
2653 /* Advance the ring next-to-clean */
2654 igc_is_non_eop(rx_ring, rx_desc);
2655 cleaned_count++;
2656 continue;
2657 }
2658
2659 if (!skb) {
2660 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
2661 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
2662 igc_rx_offset(rx_ring) + pkt_offset,
2663 size, true);
2664 xdp_buff_clear_frags_flag(&ctx.xdp);
2665 ctx.rx_desc = rx_desc;
2666
2667 xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
2668 }
2669
2670 if (xdp_res) {
2671 switch (xdp_res) {
2672 case IGC_XDP_CONSUMED:
2673 rx_buffer->pagecnt_bias++;
2674 break;
2675 case IGC_XDP_TX:
2676 case IGC_XDP_REDIRECT:
2677 igc_rx_buffer_flip(rx_buffer, truesize);
2678 xdp_status |= xdp_res;
2679 break;
2680 }
2681
2682 total_packets++;
2683 total_bytes += size;
2684 } else if (skb)
2685 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2686 else if (ring_uses_build_skb(rx_ring))
2687 skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
2688 else
2689 skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
2690
2691 /* exit if we failed to retrieve a buffer */
2692 if (!xdp_res && !skb) {
2693 rx_ring->rx_stats.alloc_failed++;
2694 rx_buffer->pagecnt_bias++;
2695 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
2696 break;
2697 }
2698
2699 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2700 cleaned_count++;
2701
2702 /* fetch next buffer in frame if non-eop */
2703 if (igc_is_non_eop(rx_ring, rx_desc))
2704 continue;
2705
2706 /* verify the packet layout is correct */
2707 if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2708 skb = NULL;
2709 continue;
2710 }
2711
2712 /* probably a little skewed due to removing CRC */
2713 total_bytes += skb->len;
2714
2715 /* populate checksum, VLAN, and protocol */
2716 igc_process_skb_fields(rx_ring, rx_desc, skb);
2717
2718 napi_gro_receive(&q_vector->napi, skb);
2719
2720 /* reset skb pointer */
2721 skb = NULL;
2722
2723 /* update budget accounting */
2724 total_packets++;
2725 }
2726
2727 if (xdp_status)
2728 igc_finalize_xdp(adapter, xdp_status);
2729
2730 /* place incomplete frames back on ring for completion */
2731 rx_ring->skb = skb;
2732
2733 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2734
2735 if (cleaned_count)
2736 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2737
2738 return total_packets;
2739 }
2740
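/* Build an skb for the XDP zero-copy path by copying the received frame
 * (plus any XDP metadata) out of the XSK buffer into a newly allocated skb.
 */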
2741 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2742 struct igc_xdp_buff *ctx)
2743 {
2744 struct xdp_buff *xdp = &ctx->xdp;
2745 unsigned int totalsize = xdp->data_end - xdp->data_meta;
2746 unsigned int metasize = xdp->data - xdp->data_meta;
2747 struct sk_buff *skb;
2748
2749 net_prefetch(xdp->data_meta);
2750
2751 skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
2752 if (unlikely(!skb))
2753 return NULL;
2754
2755 memcpy(__skb_put(skb, totalsize), xdp->data_meta,
2756 ALIGN(totalsize, sizeof(long)));
2757
2758 if (metasize) {
2759 skb_metadata_set(skb, metasize);
2760 __skb_pull(skb, metasize);
2761 }
2762
2763 if (ctx->rx_ts) {
2764 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
2765 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
2766 }
2767
2768 return skb;
2769 }
2770
2771 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2772 union igc_adv_rx_desc *desc,
2773 struct igc_xdp_buff *ctx)
2774 {
2775 struct igc_ring *ring = q_vector->rx.ring;
2776 struct sk_buff *skb;
2777
2778 skb = igc_construct_skb_zc(ring, ctx);
2779 if (!skb) {
2780 ring->rx_stats.alloc_failed++;
2781 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
2782 return;
2783 }
2784
2785 if (igc_cleanup_headers(ring, desc, skb))
2786 return;
2787
2788 igc_process_skb_fields(ring, desc, skb);
2789 napi_gro_receive(&q_vector->napi, skb);
2790 }
2791
2792 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
2793 {
2794 /* The xdp_buff pointer used by the ZC code path is allocated as an
2795 * xdp_buff_xsk. igc_xdp_buff shares its layout with xdp_buff_xsk, and
2796 * the private igc_xdp_buff fields fall into xdp_buff_xsk->cb.
2797 */
2798 return (struct igc_xdp_buff *)xdp;
2799 }
2800
2801 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2802 {
2803 struct igc_adapter *adapter = q_vector->adapter;
2804 struct igc_ring *ring = q_vector->rx.ring;
2805 u16 cleaned_count = igc_desc_unused(ring);
2806 int total_bytes = 0, total_packets = 0;
2807 u16 ntc = ring->next_to_clean;
2808 struct bpf_prog *prog;
2809 bool failure = false;
2810 int xdp_status = 0;
2811
2812 rcu_read_lock();
2813
2814 prog = READ_ONCE(adapter->xdp_prog);
2815
2816 while (likely(total_packets < budget)) {
2817 union igc_adv_rx_desc *desc;
2818 struct igc_rx_buffer *bi;
2819 struct igc_xdp_buff *ctx;
2820 unsigned int size;
2821 int res;
2822
2823 desc = IGC_RX_DESC(ring, ntc);
2824 size = le16_to_cpu(desc->wb.upper.length);
2825 if (!size)
2826 break;
2827
2828 /* This memory barrier is needed to keep us from reading
2829 * any other fields out of the rx_desc until we know the
2830 * descriptor has been written back
2831 */
2832 dma_rmb();
2833
2834 bi = &ring->rx_buffer_info[ntc];
2835
2836 ctx = xsk_buff_to_igc_ctx(bi->xdp);
2837 ctx->rx_desc = desc;
2838
2839 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2840 ctx->rx_ts = bi->xdp->data;
2841
2842 bi->xdp->data += IGC_TS_HDR_LEN;
2843
2844 /* The HW timestamp pointer has been saved in ctx->rx_ts above. Advance
2845 * data_meta too so the metadata length seen by the XDP program is 0.
2846 */
2847 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2848 size -= IGC_TS_HDR_LEN;
2849 } else {
2850 ctx->rx_ts = NULL;
2851 }
2852
2853 bi->xdp->data_end = bi->xdp->data + size;
2854 xsk_buff_dma_sync_for_cpu(bi->xdp);
2855
2856 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2857 switch (res) {
2858 case IGC_XDP_PASS:
2859 igc_dispatch_skb_zc(q_vector, desc, ctx);
2860 fallthrough;
2861 case IGC_XDP_CONSUMED:
2862 xsk_buff_free(bi->xdp);
2863 break;
2864 case IGC_XDP_TX:
2865 case IGC_XDP_REDIRECT:
2866 xdp_status |= res;
2867 break;
2868 }
2869
2870 bi->xdp = NULL;
2871 total_bytes += size;
2872 total_packets++;
2873 cleaned_count++;
2874 ntc++;
2875 if (ntc == ring->count)
2876 ntc = 0;
2877 }
2878
2879 ring->next_to_clean = ntc;
2880 rcu_read_unlock();
2881
2882 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2883 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2884
2885 if (xdp_status)
2886 igc_finalize_xdp(adapter, xdp_status);
2887
2888 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2889
2890 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2891 if (failure || ring->next_to_clean == ring->next_to_use)
2892 xsk_set_rx_need_wakeup(ring->xsk_pool);
2893 else
2894 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2895 return total_packets;
2896 }
2897
2898 return failure ? budget : total_packets;
2899 }
2900
2901 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2902 unsigned int packets, unsigned int bytes)
2903 {
2904 struct igc_ring *ring = q_vector->tx.ring;
2905
2906 u64_stats_update_begin(&ring->tx_syncp);
2907 ring->tx_stats.bytes += bytes;
2908 ring->tx_stats.packets += packets;
2909 u64_stats_update_end(&ring->tx_syncp);
2910
2911 q_vector->tx.total_bytes += bytes;
2912 q_vector->tx.total_packets += packets;
2913 }
2914
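/* XSK Tx metadata hook: when hardware Tx timestamping is enabled on this
 * ring, claim a free timestamp register, hold back the buffer's completion
 * until the timestamp is ready, and fold the matching timestamp-select bits
 * into the descriptor cmd_type.
 */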
2915 static void igc_xsk_request_timestamp(void *_priv)
2916 {
2917 struct igc_metadata_request *meta_req = _priv;
2918 struct igc_ring *tx_ring = meta_req->tx_ring;
2919 struct igc_tx_timestamp_request *tstamp;
2920 u32 tx_flags = IGC_TX_FLAGS_TSTAMP;
2921 struct igc_adapter *adapter;
2922 unsigned long lock_flags;
2923 bool found = false;
2924 int i;
2925
2926 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
2927 adapter = netdev_priv(tx_ring->netdev);
2928
2929 spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags);
2930
2931 /* Search for available tstamp regs */
2932 for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
2933 tstamp = &adapter->tx_tstamp[i];
2934
2935 /* tstamp->skb and tstamp->xsk_tx_buffer are in a union.
2936 * When tstamp->skb is NULL,
2937 * tstamp->xsk_tx_buffer is NULL as well.
2938 * This condition means that the particular tstamp reg
2939 * is not occupied by another packet.
2940 */
2941 if (!tstamp->skb) {
2942 found = true;
2943 break;
2944 }
2945 }
2946
2947 /* Return if no available tstamp regs */
2948 if (!found) {
2949 adapter->tx_hwtstamp_skipped++;
2950 spin_unlock_irqrestore(&adapter->ptp_tx_lock,
2951 lock_flags);
2952 return;
2953 }
2954
2955 tstamp->start = jiffies;
2956 tstamp->xsk_queue_index = tx_ring->queue_index;
2957 tstamp->xsk_tx_buffer = meta_req->tx_buffer;
2958 tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK;
2959
2960 /* Hold the transmit completion until timestamp is ready */
2961 meta_req->tx_buffer->xsk_pending_ts = true;
2962
2963 /* Keep the pointer to tx_timestamp, which is located in XDP
2964 * metadata area. It is the location to store the value of
2965 * tx hardware timestamp.
2966 */
2967 xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta);
2968
2969 /* Set timestamp bit based on the _TSTAMP(_X) bit. */
2970 tx_flags |= tstamp->flags;
2971 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
2972 IGC_TX_FLAGS_TSTAMP,
2973 (IGC_ADVTXD_MAC_TSTAMP));
2974 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
2975 IGC_TX_FLAGS_TSTAMP_1,
2976 (IGC_ADVTXD_TSTAMP_REG_1));
2977 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
2978 IGC_TX_FLAGS_TSTAMP_2,
2979 (IGC_ADVTXD_TSTAMP_REG_2));
2980 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
2981 IGC_TX_FLAGS_TSTAMP_3,
2982 (IGC_ADVTXD_TSTAMP_REG_3));
2983
2984 spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags);
2985 }
2986 }
2987
2988 static u64 igc_xsk_fill_timestamp(void *_priv)
2989 {
2990 return *(u64 *)_priv;
2991 }
2992
2993 static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
2994 {
2995 struct igc_metadata_request *meta_req = _priv;
2996 struct igc_ring *tx_ring = meta_req->tx_ring;
2997 __le32 launch_time_offset;
2998 bool insert_empty = false;
2999 bool first_flag = false;
3000 u16 used_desc = 0;
3001
3002 if (!tx_ring->launchtime_enable)
3003 return;
3004
3005 launch_time_offset = igc_tx_launchtime(tx_ring,
3006 ns_to_ktime(launch_time),
3007 &first_flag, &insert_empty);
3008 if (insert_empty) {
3009 /* Disregard the launch time request if the required empty frame
3010 * fails to be inserted.
3011 */
3012 if (igc_insert_empty_frame(tx_ring))
3013 return;
3014
3015 meta_req->tx_buffer =
3016 &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3017 /* Inserting an empty packet requires two descriptors:
3018 * one data descriptor and one context descriptor.
3019 */
3020 used_desc += 2;
3021 }
3022
3023 /* Use one context descriptor to specify launch time and first flag. */
3024 igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
3025 used_desc += 1;
3026
3027 /* Update the number of used descriptors in this request */
3028 meta_req->used_desc += used_desc;
3029 }
3030
3031 const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
3032 .tmo_request_timestamp = igc_xsk_request_timestamp,
3033 .tmo_fill_timestamp = igc_xsk_fill_timestamp,
3034 .tmo_request_launch_time = igc_xsk_request_launch_time,
3035 };
3036
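/* Transmit frames pending in the XSK Tx pool: translate each descriptor into
 * an advanced Tx descriptor (handling launch-time and timestamp metadata via
 * igc_xsk_tx_metadata_ops) and kick the hardware tail when done.
 */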
3037 static void igc_xdp_xmit_zc(struct igc_ring *ring)
3038 {
3039 struct xsk_buff_pool *pool = ring->xsk_pool;
3040 struct netdev_queue *nq = txring_txq(ring);
3041 union igc_adv_tx_desc *tx_desc = NULL;
3042 int cpu = smp_processor_id();
3043 struct xdp_desc xdp_desc;
3044 u16 budget, ntu;
3045
3046 if (!netif_carrier_ok(ring->netdev))
3047 return;
3048
3049 __netif_tx_lock(nq, cpu);
3050
3051 /* Avoid transmit queue timeout since we share it with the slow path */
3052 txq_trans_cond_update(nq);
3053
3054 ntu = ring->next_to_use;
3055 budget = igc_desc_unused(ring);
3056
3057 /* Packets with launch time require one data descriptor and one context
3058 * descriptor. When the launch time falls into the next Qbv cycle, we
3059 * may need to insert an empty packet, which requires two more
3060 * descriptors. Therefore, to be safe, we always ensure we have at least
3061 * 4 descriptors available.
3062 */
3063 while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) {
3064 struct igc_metadata_request meta_req;
3065 struct xsk_tx_metadata *meta = NULL;
3066 struct igc_tx_buffer *bi;
3067 u32 olinfo_status;
3068 dma_addr_t dma;
3069
3070 meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA |
3071 IGC_ADVTXD_DCMD_DEXT |
3072 IGC_ADVTXD_DCMD_IFCS |
3073 IGC_TXD_DCMD | xdp_desc.len;
3074 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
3075
3076 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
3077 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
3078 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
3079 bi = &ring->tx_buffer_info[ntu];
3080
3081 meta_req.tx_ring = ring;
3082 meta_req.tx_buffer = bi;
3083 meta_req.meta = meta;
3084 meta_req.used_desc = 0;
3085 xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
3086 &meta_req);
3087
3088 /* xsk_tx_metadata_request() may have updated next_to_use */
3089 ntu = ring->next_to_use;
3090
3091 /* xsk_tx_metadata_request() may have updated Tx buffer info */
3092 bi = meta_req.tx_buffer;
3093
3094 /* xsk_tx_metadata_request() may use a few descriptors */
3095 budget -= meta_req.used_desc;
3096
3097 tx_desc = IGC_TX_DESC(ring, ntu);
3098 tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
3099 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3100 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3101
3102 bi->type = IGC_TX_BUFFER_TYPE_XSK;
3103 bi->protocol = 0;
3104 bi->bytecount = xdp_desc.len;
3105 bi->gso_segs = 1;
3106 bi->time_stamp = jiffies;
3107 bi->next_to_watch = tx_desc;
3108
3109 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
3110
3111 ntu++;
3112 if (ntu == ring->count)
3113 ntu = 0;
3114
3115 ring->next_to_use = ntu;
3116 budget--;
3117 }
3118
3119 if (tx_desc) {
3120 igc_flush_tx_descriptors(ring);
3121 xsk_tx_release(pool);
3122 }
3123
3124 __netif_tx_unlock(nq);
3125 }
3126
3127 /**
3128 * igc_clean_tx_irq - Reclaim resources after transmit completes
3129 * @q_vector: pointer to q_vector containing needed info
3130 * @napi_budget: Used to determine if we are in netpoll
3131 *
3132 * returns true if ring is completely cleaned
3133 */
3134 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
3135 {
3136 struct igc_adapter *adapter = q_vector->adapter;
3137 unsigned int total_bytes = 0, total_packets = 0;
3138 unsigned int budget = q_vector->tx.work_limit;
3139 struct igc_ring *tx_ring = q_vector->tx.ring;
3140 unsigned int i = tx_ring->next_to_clean;
3141 struct igc_tx_buffer *tx_buffer;
3142 union igc_adv_tx_desc *tx_desc;
3143 u32 xsk_frames = 0;
3144
3145 if (test_bit(__IGC_DOWN, &adapter->state))
3146 return true;
3147
3148 tx_buffer = &tx_ring->tx_buffer_info[i];
3149 tx_desc = IGC_TX_DESC(tx_ring, i);
3150 i -= tx_ring->count;
3151
3152 do {
3153 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
3154
3155 /* if next_to_watch is not set then there is no work pending */
3156 if (!eop_desc)
3157 break;
3158
3159 /* prevent any other reads prior to eop_desc */
3160 smp_rmb();
3161
3162 /* if DD is not set pending work has not been completed */
3163 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
3164 break;
3165
3166 if (igc_fpe_is_pmac_enabled(adapter) &&
3167 igc_fpe_transmitted_smd_v(tx_desc))
3168 ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
3169 ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
3170
3171 /* Hold the completions while there's a pending tx hardware
3172 * timestamp request from XDP Tx metadata.
3173 */
3174 if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
3175 tx_buffer->xsk_pending_ts)
3176 break;
3177
3178 /* clear next_to_watch to prevent false hangs */
3179 tx_buffer->next_to_watch = NULL;
3180
3181 /* update the statistics for this packet */
3182 total_bytes += tx_buffer->bytecount;
3183 total_packets += tx_buffer->gso_segs;
3184
3185 switch (tx_buffer->type) {
3186 case IGC_TX_BUFFER_TYPE_XSK:
3187 xsk_frames++;
3188 break;
3189 case IGC_TX_BUFFER_TYPE_XDP:
3190 xdp_return_frame(tx_buffer->xdpf);
3191 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3192 break;
3193 case IGC_TX_BUFFER_TYPE_SKB:
3194 napi_consume_skb(tx_buffer->skb, napi_budget);
3195 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3196 break;
3197 default:
3198 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
3199 break;
3200 }
3201
3202 /* clear last DMA location and unmap remaining buffers */
3203 while (tx_desc != eop_desc) {
3204 tx_buffer++;
3205 tx_desc++;
3206 i++;
3207 if (unlikely(!i)) {
3208 i -= tx_ring->count;
3209 tx_buffer = tx_ring->tx_buffer_info;
3210 tx_desc = IGC_TX_DESC(tx_ring, 0);
3211 }
3212
3213 /* unmap any remaining paged data */
3214 if (dma_unmap_len(tx_buffer, len))
3215 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
3216 }
3217
3218 /* move us one more past the eop_desc for start of next pkt */
3219 tx_buffer++;
3220 tx_desc++;
3221 i++;
3222 if (unlikely(!i)) {
3223 i -= tx_ring->count;
3224 tx_buffer = tx_ring->tx_buffer_info;
3225 tx_desc = IGC_TX_DESC(tx_ring, 0);
3226 }
3227
3228 /* issue prefetch for next Tx descriptor */
3229 prefetch(tx_desc);
3230
3231 /* update budget accounting */
3232 budget--;
3233 } while (likely(budget));
3234
3235 netdev_tx_completed_queue(txring_txq(tx_ring),
3236 total_packets, total_bytes);
3237
3238 i += tx_ring->count;
3239 tx_ring->next_to_clean = i;
3240
3241 igc_update_tx_stats(q_vector, total_packets, total_bytes);
3242
3243 if (tx_ring->xsk_pool) {
3244 if (xsk_frames)
3245 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
3246 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
3247 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
3248 igc_xdp_xmit_zc(tx_ring);
3249 }
3250
3251 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
3252 struct igc_hw *hw = &adapter->hw;
3253
3254 /* Detect a transmit hang in hardware; this serializes the
3255 * check with the clearing of time_stamp and movement of i.
3256 */
3257 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3258 if (tx_buffer->next_to_watch &&
3259 time_after(jiffies, tx_buffer->time_stamp +
3260 (adapter->tx_timeout_factor * HZ)) &&
3261 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
3262 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
3263 !tx_ring->oper_gate_closed) {
3264 /* detected Tx unit hang */
3265 netdev_err(tx_ring->netdev,
3266 "Detected Tx Unit Hang\n"
3267 " Tx Queue <%d>\n"
3268 " TDH <%x>\n"
3269 " TDT <%x>\n"
3270 " next_to_use <%x>\n"
3271 " next_to_clean <%x>\n"
3272 "buffer_info[next_to_clean]\n"
3273 " time_stamp <%lx>\n"
3274 " next_to_watch <%p>\n"
3275 " jiffies <%lx>\n"
3276 " desc.status <%x>\n",
3277 tx_ring->queue_index,
3278 rd32(IGC_TDH(tx_ring->reg_idx)),
3279 readl(tx_ring->tail),
3280 tx_ring->next_to_use,
3281 tx_ring->next_to_clean,
3282 tx_buffer->time_stamp,
3283 tx_buffer->next_to_watch,
3284 jiffies,
3285 tx_buffer->next_to_watch->wb.status);
3286 netif_stop_subqueue(tx_ring->netdev,
3287 tx_ring->queue_index);
3288
3289 /* we are about to reset, no point in enabling stuff */
3290 return true;
3291 }
3292 }
3293
3294 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3295 if (unlikely(total_packets &&
3296 netif_carrier_ok(tx_ring->netdev) &&
3297 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
3298 /* Make sure that anybody stopping the queue after this
3299 * sees the new next_to_clean.
3300 */
3301 smp_mb();
3302 if (__netif_subqueue_stopped(tx_ring->netdev,
3303 tx_ring->queue_index) &&
3304 !(test_bit(__IGC_DOWN, &adapter->state))) {
3305 netif_wake_subqueue(tx_ring->netdev,
3306 tx_ring->queue_index);
3307
3308 u64_stats_update_begin(&tx_ring->tx_syncp);
3309 tx_ring->tx_stats.restart_queue++;
3310 u64_stats_update_end(&tx_ring->tx_syncp);
3311 }
3312 }
3313
3314 return !!budget;
3315 }
3316
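/* Search the RAL/RAH receive address registers for an enabled filter that
 * matches the given type and MAC address. Returns the register index, or -1
 * if no match is found.
 */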
3317 static int igc_find_mac_filter(struct igc_adapter *adapter,
3318 enum igc_mac_filter_type type, const u8 *addr)
3319 {
3320 struct igc_hw *hw = &adapter->hw;
3321 int max_entries = hw->mac.rar_entry_count;
3322 u32 ral, rah;
3323 int i;
3324
3325 for (i = 0; i < max_entries; i++) {
3326 ral = rd32(IGC_RAL(i));
3327 rah = rd32(IGC_RAH(i));
3328
3329 if (!(rah & IGC_RAH_AV))
3330 continue;
3331 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
3332 continue;
3333 if ((rah & IGC_RAH_RAH_MASK) !=
3334 le16_to_cpup((__le16 *)(addr + 4)))
3335 continue;
3336 if (ral != le32_to_cpup((__le32 *)(addr)))
3337 continue;
3338
3339 return i;
3340 }
3341
3342 return -1;
3343 }
3344
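/* Return the index of the first receive address register whose address-valid
 * bit is clear, or -1 when every entry is already in use.
 */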
3345 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
3346 {
3347 struct igc_hw *hw = &adapter->hw;
3348 int max_entries = hw->mac.rar_entry_count;
3349 u32 rah;
3350 int i;
3351
3352 for (i = 0; i < max_entries; i++) {
3353 rah = rd32(IGC_RAH(i));
3354
3355 if (!(rah & IGC_RAH_AV))
3356 return i;
3357 }
3358
3359 return -1;
3360 }
3361
3362 /**
3363 * igc_add_mac_filter() - Add MAC address filter
3364 * @adapter: Pointer to adapter where the filter should be added
3365 * @type: MAC address filter type (source or destination)
3366 * @addr: MAC address
3367 * @queue: If non-negative, queue assignment feature is enabled and frames
3368 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3369 * assignment is disabled.
3370 *
3371 * Return: 0 in case of success, negative errno code otherwise.
3372 */
3373 static int igc_add_mac_filter(struct igc_adapter *adapter,
3374 enum igc_mac_filter_type type, const u8 *addr,
3375 int queue)
3376 {
3377 struct net_device *dev = adapter->netdev;
3378 int index;
3379
3380 index = igc_find_mac_filter(adapter, type, addr);
3381 if (index >= 0)
3382 goto update_filter;
3383
3384 index = igc_get_avail_mac_filter_slot(adapter);
3385 if (index < 0)
3386 return -ENOSPC;
3387
3388 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
3389 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3390 addr, queue);
3391
3392 update_filter:
3393 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
3394 return 0;
3395 }
3396
3397 /**
3398 * igc_del_mac_filter() - Delete MAC address filter
3399 * @adapter: Pointer to adapter where the filter should be deleted from
3400 * @type: MAC address filter type (source or destination)
3401 * @addr: MAC address
3402 */
3403 static void igc_del_mac_filter(struct igc_adapter *adapter,
3404 enum igc_mac_filter_type type, const u8 *addr)
3405 {
3406 struct net_device *dev = adapter->netdev;
3407 int index;
3408
3409 index = igc_find_mac_filter(adapter, type, addr);
3410 if (index < 0)
3411 return;
3412
3413 if (index == 0) {
3414 /* If this is the default filter, we don't actually delete it.
3415 * We just reset it to its default value, i.e. disable queue
3416 * assignment.
3417 */
3418 netdev_dbg(dev, "Disable default MAC filter queue assignment");
3419
3420 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
3421 } else {
3422 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
3423 index,
3424 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3425 addr);
3426
3427 igc_clear_mac_filter_hw(adapter, index);
3428 }
3429 }
3430
3431 /**
3432 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3433 * @adapter: Pointer to adapter where the filter should be added
3434 * @prio: VLAN priority value
3435 * @queue: Queue number which matching frames are assigned to
3436 *
3437 * Return: 0 in case of success, negative errno code otherwise.
3438 */
3439 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
3440 int queue)
3441 {
3442 struct net_device *dev = adapter->netdev;
3443 struct igc_hw *hw = &adapter->hw;
3444 u32 vlanpqf;
3445
3446 vlanpqf = rd32(IGC_VLANPQF);
3447
3448 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
3449 netdev_dbg(dev, "VLAN priority filter already in use\n");
3450 return -EEXIST;
3451 }
3452
3453 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
3454 vlanpqf |= IGC_VLANPQF_VALID(prio);
3455
3456 wr32(IGC_VLANPQF, vlanpqf);
3457
3458 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
3459 prio, queue);
3460 return 0;
3461 }
3462
3463 /**
3464 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3465 * @adapter: Pointer to adapter where the filter should be deleted from
3466 * @prio: VLAN priority value
3467 */
3468 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3469 {
3470 struct igc_hw *hw = &adapter->hw;
3471 u32 vlanpqf;
3472
3473 vlanpqf = rd32(IGC_VLANPQF);
3474
3475 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3476 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3477
3478 wr32(IGC_VLANPQF, vlanpqf);
3479
3480 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
3481 prio);
3482 }
3483
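/* Return the index of the first ethertype filter register (ETQF) that is not
 * yet enabled, or -1 when all of them are in use.
 */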
3484 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
3485 {
3486 struct igc_hw *hw = &adapter->hw;
3487 int i;
3488
3489 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3490 u32 etqf = rd32(IGC_ETQF(i));
3491
3492 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
3493 return i;
3494 }
3495
3496 return -1;
3497 }
3498
3499 /**
3500 * igc_add_etype_filter() - Add ethertype filter
3501 * @adapter: Pointer to adapter where the filter should be added
3502 * @etype: Ethertype value
3503 * @queue: If non-negative, queue assignment feature is enabled and frames
3504 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3505 * assignment is disabled.
3506 *
3507 * Return: 0 in case of success, negative errno code otherwise.
3508 */
3509 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3510 int queue)
3511 {
3512 struct igc_hw *hw = &adapter->hw;
3513 int index;
3514 u32 etqf;
3515
3516 index = igc_get_avail_etype_filter_slot(adapter);
3517 if (index < 0)
3518 return -ENOSPC;
3519
3520 etqf = rd32(IGC_ETQF(index));
3521
3522 etqf &= ~IGC_ETQF_ETYPE_MASK;
3523 etqf |= etype;
3524
3525 if (queue >= 0) {
3526 etqf &= ~IGC_ETQF_QUEUE_MASK;
3527 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3528 etqf |= IGC_ETQF_QUEUE_ENABLE;
3529 }
3530
3531 etqf |= IGC_ETQF_FILTER_ENABLE;
3532
3533 wr32(IGC_ETQF(index), etqf);
3534
3535 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3536 etype, queue);
3537 return 0;
3538 }
3539
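/* Return the index of the ETQF register already programmed with the given
 * ethertype, or -1 if none matches.
 */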
3540 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3541 {
3542 struct igc_hw *hw = &adapter->hw;
3543 int i;
3544
3545 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3546 u32 etqf = rd32(IGC_ETQF(i));
3547
3548 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3549 return i;
3550 }
3551
3552 return -1;
3553 }
3554
3555 /**
3556 * igc_del_etype_filter() - Delete ethertype filter
3557 * @adapter: Pointer to adapter where the filter should be deleted from
3558 * @etype: Ethertype value
3559 */
3560 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3561 {
3562 struct igc_hw *hw = &adapter->hw;
3563 int index;
3564
3565 index = igc_find_etype_filter(adapter, etype);
3566 if (index < 0)
3567 return;
3568
3569 wr32(IGC_ETQF(index), 0);
3570
3571 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3572 etype);
3573 }
3574
3575 static int igc_flex_filter_select(struct igc_adapter *adapter,
3576 struct igc_flex_filter *input,
3577 u32 *fhft)
3578 {
3579 struct igc_hw *hw = &adapter->hw;
3580 u8 fhft_index;
3581 u32 fhftsl;
3582
3583 if (input->index >= MAX_FLEX_FILTER) {
3584 netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
3585 return -EINVAL;
3586 }
3587
3588 /* Indirect table select register */
3589 fhftsl = rd32(IGC_FHFTSL);
3590 fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
3591 switch (input->index) {
3592 case 0 ... 7:
3593 fhftsl |= 0x00;
3594 break;
3595 case 8 ... 15:
3596 fhftsl |= 0x01;
3597 break;
3598 case 16 ... 23:
3599 fhftsl |= 0x02;
3600 break;
3601 case 24 ... 31:
3602 fhftsl |= 0x03;
3603 break;
3604 }
3605 wr32(IGC_FHFTSL, fhftsl);
3606
3607 /* Normalize index down to host table register */
3608 fhft_index = input->index % 8;
3609
3610 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
3611 IGC_FHFT_EXT(fhft_index - 4);
3612
3613 return 0;
3614 }
3615
3616 static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3617 struct igc_flex_filter *input)
3618 {
3619 struct igc_hw *hw = &adapter->hw;
3620 u8 *data = input->data;
3621 u8 *mask = input->mask;
3622 u32 queuing;
3623 u32 fhft;
3624 u32 wufc;
3625 int ret;
3626 int i;
3627
3628 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3629 * out early to avoid surprises later.
3630 */
3631 if (input->length % 8 != 0) {
3632 netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
3633 return -EINVAL;
3634 }
3635
3636 /* Select corresponding flex filter register and get base for host table. */
3637 ret = igc_flex_filter_select(adapter, input, &fhft);
3638 if (ret)
3639 return ret;
3640
3641 /* When adding a filter, globally disable the flex filter feature, as
3642 * recommended by the datasheet.
3643 */
3644 wufc = rd32(IGC_WUFC);
3645 wufc &= ~IGC_WUFC_FLEX_HQ;
3646 wr32(IGC_WUFC, wufc);
3647
3648 /* Configure filter */
3649 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3650 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
3651 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
3652
3653 if (input->immediate_irq)
3654 queuing |= IGC_FHFT_IMM_INT;
3655
3656 if (input->drop)
3657 queuing |= IGC_FHFT_DROP;
3658
3659 wr32(fhft + 0xFC, queuing);
3660
3661 /* Write data (128 byte) and mask (128 bit) */
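/* Each of the 16 rows carries 8 bytes of pattern data (two dwords) plus one
 * mask byte; every mask bit enables matching of the corresponding data byte
 * in that row.
 */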
3662 for (i = 0; i < 16; ++i) {
3663 const size_t data_idx = i * 8;
3664 const size_t row_idx = i * 16;
3665 u32 dw0 =
3666 (data[data_idx + 0] << 0) |
3667 (data[data_idx + 1] << 8) |
3668 (data[data_idx + 2] << 16) |
3669 (data[data_idx + 3] << 24);
3670 u32 dw1 =
3671 (data[data_idx + 4] << 0) |
3672 (data[data_idx + 5] << 8) |
3673 (data[data_idx + 6] << 16) |
3674 (data[data_idx + 7] << 24);
3675 u32 tmp;
3676
3677 /* Write row: dw0, dw1 and mask */
3678 wr32(fhft + row_idx, dw0);
3679 wr32(fhft + row_idx + 4, dw1);
3680
3681 /* mask is only valid for MASK(7, 0) */
3682 tmp = rd32(fhft + row_idx + 8);
3683 tmp &= ~GENMASK(7, 0);
3684 tmp |= mask[i];
3685 wr32(fhft + row_idx + 8, tmp);
3686 }
3687
3688 /* Enable filter. */
3689 wufc |= IGC_WUFC_FLEX_HQ;
3690 if (input->index > 8) {
3691 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */
3692 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3693
3694 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
3695
3696 wr32(IGC_WUFC_EXT, wufc_ext);
3697 } else {
3698 wufc |= (IGC_WUFC_FLX0 << input->index);
3699 }
3700 wr32(IGC_WUFC, wufc);
3701
3702 netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
3703 input->index);
3704
3705 return 0;
3706 }
3707
3708 static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3709 const void *src, unsigned int offset,
3710 size_t len, const void *mask)
3711 {
3712 int i;
3713
3714 /* data */
3715 memcpy(&flex->data[offset], src, len);
3716
3717 /* mask */
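/* Set one bit in the 128-bit match mask for every byte of this field. With a
 * caller-supplied mask only bytes flagged as non-zero take part in the match;
 * without one, the whole field is matched.
 */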
3718 for (i = 0; i < len; ++i) {
3719 const unsigned int idx = i + offset;
3720 const u8 *ptr = mask;
3721
3722 if (mask) {
3723 if (ptr[i] & 0xff)
3724 flex->mask[idx / 8] |= BIT(idx % 8);
3725
3726 continue;
3727 }
3728
3729 flex->mask[idx / 8] |= BIT(idx % 8);
3730 }
3731 }
3732
3733 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3734 {
3735 struct igc_hw *hw = &adapter->hw;
3736 u32 wufc, wufc_ext;
3737 int i;
3738
3739 wufc = rd32(IGC_WUFC);
3740 wufc_ext = rd32(IGC_WUFC_EXT);
3741
3742 for (i = 0; i < MAX_FLEX_FILTER; i++) {
3743 if (i < 8) {
3744 if (!(wufc & (IGC_WUFC_FLX0 << i)))
3745 return i;
3746 } else {
3747 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
3748 return i;
3749 }
3750 }
3751
3752 return -ENOSPC;
3753 }
3754
3755 static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3756 {
3757 struct igc_hw *hw = &adapter->hw;
3758 u32 wufc, wufc_ext;
3759
3760 wufc = rd32(IGC_WUFC);
3761 wufc_ext = rd32(IGC_WUFC_EXT);
3762
3763 if (wufc & IGC_WUFC_FILTER_MASK)
3764 return true;
3765
3766 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
3767 return true;
3768
3769 return false;
3770 }
3771
3772 static int igc_add_flex_filter(struct igc_adapter *adapter,
3773 struct igc_nfc_rule *rule)
3774 {
3775 struct igc_nfc_filter *filter = &rule->filter;
3776 unsigned int eth_offset, user_offset;
3777 struct igc_flex_filter flex = { };
3778 int ret, index;
3779 bool vlan;
3780
3781 index = igc_find_avail_flex_filter_slot(adapter);
3782 if (index < 0)
3783 return -ENOSPC;
3784
3785 /* Construct the flex filter:
3786 * -> dest_mac [6]
3787 * -> src_mac [6]
3788 * -> tpid [2]
3789 * -> vlan tci [2]
3790 * -> ether type [2]
3791 * -> user data [8]
3792 * -> = 26 bytes used => rounded up to a filter length of 32
3793 */
3794 flex.index = index;
3795 flex.length = 32;
3796 flex.rx_queue = rule->action;
3797
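/* When a VLAN tag is present the EtherType and user data sit four bytes
 * further into the frame, hence the two offset variants below.
 */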
3798 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3799 eth_offset = vlan ? 16 : 12;
3800 user_offset = vlan ? 18 : 14;
3801
3802 /* Add destination MAC */
3803 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3804 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
3805 ETH_ALEN, NULL);
3806
3807 /* Add source MAC */
3808 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3809 igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
3810 ETH_ALEN, NULL);
3811
3812 /* Add VLAN etype */
3813 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
3814 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
3815
3816 igc_flex_filter_add_field(&flex, &vlan_etype, 12,
3817 sizeof(vlan_etype), NULL);
3818 }
3819
3820 /* Add VLAN TCI */
3821 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3822 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3823 sizeof(filter->vlan_tci), NULL);
3824
3825 /* Add Ether type */
3826 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3827 __be16 etype = cpu_to_be16(filter->etype);
3828
3829 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3830 sizeof(etype), NULL);
3831 }
3832
3833 /* Add user data */
3834 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3835 igc_flex_filter_add_field(&flex, &filter->user_data,
3836 user_offset,
3837 sizeof(filter->user_data),
3838 filter->user_mask);
3839
3840 /* Add it down to the hardware and enable it. */
3841 ret = igc_write_flex_filter_ll(adapter, &flex);
3842 if (ret)
3843 return ret;
3844
3845 filter->flex_index = index;
3846
3847 return 0;
3848 }
3849
3850 static void igc_del_flex_filter(struct igc_adapter *adapter,
3851 u16 reg_index)
3852 {
3853 struct igc_hw *hw = &adapter->hw;
3854 u32 wufc;
3855
3856 /* Just disable the filter. The filter table itself is kept
3857 * intact; a subsequent flex filter add will simply overwrite the
3858 * stale entry.
3859 */
3860 if (reg_index > 8) {
3861 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3862
3863 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
3864 wr32(IGC_WUFC_EXT, wufc_ext);
3865 } else {
3866 wufc = rd32(IGC_WUFC);
3867
3868 wufc &= ~(IGC_WUFC_FLX0 << reg_index);
3869 wr32(IGC_WUFC, wufc);
3870 }
3871
3872 if (igc_flex_filter_in_use(adapter))
3873 return;
3874
3875 /* No filters are in use, we may disable flex filters */
3876 wufc = rd32(IGC_WUFC);
3877 wufc &= ~IGC_WUFC_FLEX_HQ;
3878 wr32(IGC_WUFC, wufc);
3879 }
3880
3881 static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
3882 {
3883 struct igc_hw *hw = &adapter->hw;
3884 u32 mrqc = rd32(IGC_MRQC);
3885
3886 mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK;
3887 mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue);
3888 wr32(IGC_MRQC, mrqc);
3889 }
3890
3891 static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
3892 {
3893 /* Reset the default queue to its default value which is Queue 0 */
3894 igc_set_default_queue_filter(adapter, 0);
3895 }
3896
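/* Program a single NFC rule into hardware. Rules typically originate from
 * ethtool ntuple configuration; for example (interface name hypothetical),
 * a command along the lines of
 *   ethtool -N eth0 flow-type ether proto 0x88f7 action 1
 * is expected to end up here as an ethertype filter steering matching frames
 * to Rx queue 1. Flex rules take a dedicated path; every other match flag
 * maps onto one of the individual filter types below.
 */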
3897 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3898 struct igc_nfc_rule *rule)
3899 {
3900 int err;
3901
3902 if (rule->flex)
3903 return igc_add_flex_filter(adapter, rule);
3905
3906 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3907 err = igc_add_etype_filter(adapter, rule->filter.etype,
3908 rule->action);
3909 if (err)
3910 return err;
3911 }
3912
3913 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3914 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3915 rule->filter.src_addr, rule->action);
3916 if (err)
3917 return err;
3918 }
3919
3920 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3921 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3922 rule->filter.dst_addr, rule->action);
3923 if (err)
3924 return err;
3925 }
3926
3927 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3928 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
3929
3930 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3931 if (err)
3932 return err;
3933 }
3934
3935 if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
3936 igc_set_default_queue_filter(adapter, rule->action);
3937
3938 return 0;
3939 }
3940
3941 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3942 const struct igc_nfc_rule *rule)
3943 {
3944 if (rule->flex) {
3945 igc_del_flex_filter(adapter, rule->filter.flex_index);
3946 return;
3947 }
3948
3949 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3950 igc_del_etype_filter(adapter, rule->filter.etype);
3951
3952 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3953 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
3954
3955 igc_del_vlan_prio_filter(adapter, prio);
3956 }
3957
3958 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3959 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3960 rule->filter.src_addr);
3961
3962 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3963 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3964 rule->filter.dst_addr);
3965
3966 if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
3967 igc_reset_default_queue_filter(adapter);
3968 }
3969
3970 /**
3971 * igc_get_nfc_rule() - Get NFC rule
3972 * @adapter: Pointer to adapter
3973 * @location: Rule location
3974 *
3975 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3976 *
3977 * Return: Pointer to NFC rule at @location. If not found, NULL.
3978 */
3979 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3980 u32 location)
3981 {
3982 struct igc_nfc_rule *rule;
3983
3984 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3985 if (rule->location == location)
3986 return rule;
3987 if (rule->location > location)
3988 break;
3989 }
3990
3991 return NULL;
3992 }
3993
3994 /**
3995 * igc_del_nfc_rule() - Delete NFC rule
3996 * @adapter: Pointer to adapter
3997 * @rule: Pointer to rule to be deleted
3998 *
3999 * Disable NFC rule in hardware and delete it from adapter.
4000 *
4001 * Context: Expects adapter->nfc_rule_lock to be held by caller.
4002 */
4003 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
4004 {
4005 igc_disable_nfc_rule(adapter, rule);
4006
4007 list_del(&rule->list);
4008 adapter->nfc_rule_count--;
4009
4010 kfree(rule);
4011 }
4012
4013 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
4014 {
4015 struct igc_nfc_rule *rule, *tmp;
4016
4017 mutex_lock(&adapter->nfc_rule_lock);
4018
4019 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
4020 igc_del_nfc_rule(adapter, rule);
4021
4022 mutex_unlock(&adapter->nfc_rule_lock);
4023 }
4024
4025 /**
4026 * igc_add_nfc_rule() - Add NFC rule
4027 * @adapter: Pointer to adapter
4028 * @rule: Pointer to rule to be added
4029 *
4030 * Enable NFC rule in hardware and add it to adapter.
4031 *
4032 * Context: Expects adapter->nfc_rule_lock to be held by caller.
4033 *
4034 * Return: 0 on success, negative errno on failure.
4035 */
4036 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
4037 {
4038 struct igc_nfc_rule *pred, *cur;
4039 int err;
4040
4041 err = igc_enable_nfc_rule(adapter, rule);
4042 if (err)
4043 return err;
4044
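/* Keep the rule list sorted by location; igc_get_nfc_rule() relies on the
 * ordering to stop scanning early.
 */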
4045 pred = NULL;
4046 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
4047 if (cur->location >= rule->location)
4048 break;
4049 pred = cur;
4050 }
4051
4052 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
4053 adapter->nfc_rule_count++;
4054 return 0;
4055 }
4056
4057 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
4058 {
4059 struct igc_nfc_rule *rule;
4060
4061 mutex_lock(&adapter->nfc_rule_lock);
4062
4063 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
4064 igc_enable_nfc_rule(adapter, rule);
4065
4066 mutex_unlock(&adapter->nfc_rule_lock);
4067 }
4068
4069 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
4070 {
4071 struct igc_adapter *adapter = netdev_priv(netdev);
4072
4073 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
4074 }
4075
4076 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4077 {
4078 struct igc_adapter *adapter = netdev_priv(netdev);
4079
4080 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
4081 return 0;
4082 }
4083
4084 /**
4085 * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
4086 * @adapter: Pointer to the igc_adapter structure.
4087 *
4088 * Frame preemption verification requires that packets with the all-zeroes
4089 * MAC address are allowed to be received by the driver. This function adds the
4090 * all-zeroes destination address to the list of acceptable addresses.
4091 *
4092 * Return: 0 on success, negative value otherwise.
4093 */
4094 int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
4095 {
4096 u8 empty[ETH_ALEN] = {};
4097
4098 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
4099 }
4100
4101 void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
4102 {
4103 u8 empty[ETH_ALEN] = {};
4104
4105 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
4106 }
4107
4108 /**
4109 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4110 * @netdev: network interface device structure
4111 *
4112 * The set_rx_mode entry point is called whenever the unicast or multicast
4113 * address lists or the network interface flags are updated. This routine is
4114 * responsible for configuring the hardware for proper unicast, multicast,
4115 * promiscuous mode, and all-multi behavior.
4116 */
4117 static void igc_set_rx_mode(struct net_device *netdev)
4118 {
4119 struct igc_adapter *adapter = netdev_priv(netdev);
4120 struct igc_hw *hw = &adapter->hw;
4121 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4122 int count;
4123
4124 /* Check for Promiscuous and All Multicast modes */
4125 if (netdev->flags & IFF_PROMISC) {
4126 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
4127 } else {
4128 if (netdev->flags & IFF_ALLMULTI) {
4129 rctl |= IGC_RCTL_MPE;
4130 } else {
4131 /* Write addresses to the MTA, if the attempt fails
4132 * then we should just turn on promiscuous mode so
4133 * that we can at least receive multicast traffic
4134 */
4135 count = igc_write_mc_addr_list(netdev);
4136 if (count < 0)
4137 rctl |= IGC_RCTL_MPE;
4138 }
4139 }
4140
4141 /* Write addresses to available RAR registers, if there is not
4142 * sufficient space to store all the addresses then enable
4143 * unicast promiscuous mode
4144 */
4145 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
4146 rctl |= IGC_RCTL_UPE;
4147
4148 /* update state of unicast and multicast */
4149 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
4150 wr32(IGC_RCTL, rctl);
4151
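/* With 4K pages the Rx path uses half-page buffers; when the configured
 * maximum frame still fits, limit the long packet maximum length to the
 * build_skb frame size instead of the jumbo maximum.
 */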
4152 #if (PAGE_SIZE < 8192)
4153 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
4154 rlpml = IGC_MAX_FRAME_BUILD_SKB;
4155 #endif
4156 wr32(IGC_RLPML, rlpml);
4157 }
4158
4159 /**
4160 * igc_configure - configure the hardware for RX and TX
4161 * @adapter: private board structure
4162 */
4163 static void igc_configure(struct igc_adapter *adapter)
4164 {
4165 struct net_device *netdev = adapter->netdev;
4166 int i = 0;
4167
4168 igc_get_hw_control(adapter);
4169 igc_set_rx_mode(netdev);
4170
4171 igc_restore_vlan(adapter);
4172
4173 igc_setup_tctl(adapter);
4174 igc_setup_mrqc(adapter);
4175 igc_setup_rctl(adapter);
4176
4177 igc_set_default_mac_filter(adapter);
4178 igc_restore_nfc_rules(adapter);
4179
4180 igc_configure_tx(adapter);
4181 igc_configure_rx(adapter);
4182
4183 igc_rx_fifo_flush_base(&adapter->hw);
4184
4185 /* call igc_desc_unused which always leaves
4186 * at least 1 descriptor unused to make sure
4187 * next_to_use != next_to_clean
4188 */
4189 for (i = 0; i < adapter->num_rx_queues; i++) {
4190 struct igc_ring *ring = adapter->rx_ring[i];
4191
4192 if (ring->xsk_pool)
4193 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
4194 else
4195 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
4196 }
4197 }
4198
4199 /**
4200 * igc_write_ivar - configure ivar for given MSI-X vector
4201 * @hw: pointer to the HW structure
4202 * @msix_vector: vector number we are allocating to a given ring
4203 * @index: row index of IVAR register to write within IVAR table
4204 * @offset: column offset in IVAR, should be a multiple of 8
4205 *
4206 * The IVAR table consists of 2 columns,
4207 * each containing a cause allocation for an Rx and Tx ring, and a
4208 * variable number of rows depending on the number of queues supported.
4209 */
4210 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
4211 int index, int offset)
4212 {
4213 u32 ivar = array_rd32(IGC_IVAR0, index);
4214
4215 /* clear any bits that are currently set */
4216 ivar &= ~((u32)0xFF << offset);
4217
4218 /* write vector and valid bit */
4219 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
4220
4221 array_wr32(IGC_IVAR0, index, ivar);
4222 }
4223
4224 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
4225 {
4226 struct igc_adapter *adapter = q_vector->adapter;
4227 struct igc_hw *hw = &adapter->hw;
4228 int rx_queue = IGC_N0_QUEUE;
4229 int tx_queue = IGC_N0_QUEUE;
4230
4231 if (q_vector->rx.ring)
4232 rx_queue = q_vector->rx.ring->reg_idx;
4233 if (q_vector->tx.ring)
4234 tx_queue = q_vector->tx.ring->reg_idx;
4235
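/* Each IVAR register covers a pair of queues: bit 4 of the byte offset picks
 * the odd queue of the pair, and the Tx entry sits one byte above the Rx
 * entry for the same queue.
 */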
4236 switch (hw->mac.type) {
4237 case igc_i225:
4238 if (rx_queue > IGC_N0_QUEUE)
4239 igc_write_ivar(hw, msix_vector,
4240 rx_queue >> 1,
4241 (rx_queue & 0x1) << 4);
4242 if (tx_queue > IGC_N0_QUEUE)
4243 igc_write_ivar(hw, msix_vector,
4244 tx_queue >> 1,
4245 ((tx_queue & 0x1) << 4) + 8);
4246 q_vector->eims_value = BIT(msix_vector);
4247 break;
4248 default:
4249 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
4250 break;
4251 }
4252
4253 /* add q_vector eims value to global eims_enable_mask */
4254 adapter->eims_enable_mask |= q_vector->eims_value;
4255
4256 /* configure q_vector to set itr on first interrupt */
4257 q_vector->set_itr = 1;
4258 }
4259
4260 /**
4261 * igc_configure_msix - Configure MSI-X hardware
4262 * @adapter: Pointer to adapter structure
4263 *
4264 * igc_configure_msix sets up the hardware to properly
4265 * generate MSI-X interrupts.
4266 */
4267 static void igc_configure_msix(struct igc_adapter *adapter)
4268 {
4269 struct igc_hw *hw = &adapter->hw;
4270 int i, vector = 0;
4271 u32 tmp;
4272
4273 adapter->eims_enable_mask = 0;
4274
4275 /* set vector for other causes, i.e. link changes */
4276 switch (hw->mac.type) {
4277 case igc_i225:
4278 /* Turn on MSI-X capability first, or our settings
4279 * won't stick. And it will take days to debug.
4280 */
4281 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
4282 IGC_GPIE_PBA | IGC_GPIE_EIAME |
4283 IGC_GPIE_NSICR);
4284
4285 /* enable msix_other interrupt */
4286 adapter->eims_other = BIT(vector);
4287 tmp = (vector++ | IGC_IVAR_VALID) << 8;
4288
4289 wr32(IGC_IVAR_MISC, tmp);
4290 break;
4291 default:
4292 /* do nothing, since nothing else supports MSI-X */
4293 break;
4294 } /* switch (hw->mac.type) */
4295
4296 adapter->eims_enable_mask |= adapter->eims_other;
4297
4298 for (i = 0; i < adapter->num_q_vectors; i++)
4299 igc_assign_vector(adapter->q_vector[i], vector++);
4300
4301 wrfl();
4302 }
4303
4304 /**
4305 * igc_irq_enable - Enable default interrupt generation settings
4306 * @adapter: board private structure
4307 */
4308 static void igc_irq_enable(struct igc_adapter *adapter)
4309 {
4310 struct igc_hw *hw = &adapter->hw;
4311
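/* In MSI-X mode, arm auto-clear (EIAC) and auto-mask (EIAM) for the queue
 * vectors and unmask them via EIMS, while link/reset causes stay on the
 * legacy IMS path. Without MSI-X everything shares the single IMS mask.
 */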
4312 if (adapter->msix_entries) {
4313 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
4314 u32 regval = rd32(IGC_EIAC);
4315
4316 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
4317 regval = rd32(IGC_EIAM);
4318 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
4319 wr32(IGC_EIMS, adapter->eims_enable_mask);
4320 wr32(IGC_IMS, ims);
4321 } else {
4322 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4323 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4324 }
4325 }
4326
4327 /**
4328 * igc_irq_disable - Mask off interrupt generation on the NIC
4329 * @adapter: board private structure
4330 */
4331 static void igc_irq_disable(struct igc_adapter *adapter)
4332 {
4333 struct igc_hw *hw = &adapter->hw;
4334
4335 if (adapter->msix_entries) {
4336 u32 regval = rd32(IGC_EIAM);
4337
4338 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
4339 wr32(IGC_EIMC, adapter->eims_enable_mask);
4340 regval = rd32(IGC_EIAC);
4341 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
4342 }
4343
4344 wr32(IGC_IAM, 0);
4345 wr32(IGC_IMC, ~0);
4346 wrfl();
4347
4348 if (adapter->msix_entries) {
4349 int vector = 0, i;
4350
4351 synchronize_irq(adapter->msix_entries[vector++].vector);
4352
4353 for (i = 0; i < adapter->num_q_vectors; i++)
4354 synchronize_irq(adapter->msix_entries[vector++].vector);
4355 } else {
4356 synchronize_irq(adapter->pdev->irq);
4357 }
4358 }
4359
4360 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4361 const u32 max_rss_queues)
4362 {
4363 /* Determine if we need to pair queues. */
4364 /* If rss_queues > half of max_rss_queues, pair the queues in
4365 * order to conserve interrupts due to limited supply.
4366 */
4367 if (adapter->rss_queues > (max_rss_queues / 2))
4368 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4369 else
4370 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
4371 }
4372
4373 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
4374 {
4375 return IGC_MAX_RX_QUEUES;
4376 }
4377
4378 static void igc_init_queue_configuration(struct igc_adapter *adapter)
4379 {
4380 u32 max_rss_queues;
4381
4382 max_rss_queues = igc_get_max_rss_queues(adapter);
4383 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
4384
4385 igc_set_flag_queue_pairs(adapter, max_rss_queues);
4386 }
4387
4388 /**
4389 * igc_reset_q_vector - Reset config for interrupt vector
4390 * @adapter: board private structure to initialize
4391 * @v_idx: Index of vector to be reset
4392 *
4393 * If NAPI is enabled it will delete any references to the
4394 * NAPI struct. This is preparation for igc_free_q_vector.
4395 */
4396 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
4397 {
4398 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4399
4400 /* if we're coming from igc_set_interrupt_capability, the vectors are
4401 * not yet allocated
4402 */
4403 if (!q_vector)
4404 return;
4405
4406 if (q_vector->tx.ring)
4407 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4408
4409 if (q_vector->rx.ring)
4410 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
4411
4412 netif_napi_del(&q_vector->napi);
4413 }
4414
4415 /**
4416 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4417 * @adapter: board private structure to initialize
4418 * @v_idx: Index of vector to be freed
4419 *
4420 * This function frees the memory allocated to the q_vector.
4421 */
4422 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
4423 {
4424 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4425
4426 adapter->q_vector[v_idx] = NULL;
4427
4428 /* igc_get_stats64() might access the rings on this vector,
4429 * we must wait a grace period before freeing it.
4430 */
4431 if (q_vector)
4432 kfree_rcu(q_vector, rcu);
4433 }
4434
4435 /**
4436 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4437 * @adapter: board private structure to initialize
4438 *
4439 * This function frees the memory allocated to the q_vectors. In addition if
4440 * NAPI is enabled it will delete any references to the NAPI struct prior
4441 * to freeing the q_vector.
4442 */
4443 static void igc_free_q_vectors(struct igc_adapter *adapter)
4444 {
4445 int v_idx = adapter->num_q_vectors;
4446
4447 adapter->num_tx_queues = 0;
4448 adapter->num_rx_queues = 0;
4449 adapter->num_q_vectors = 0;
4450
4451 while (v_idx--) {
4452 igc_reset_q_vector(adapter, v_idx);
4453 igc_free_q_vector(adapter, v_idx);
4454 }
4455 }
4456
4457 /**
4458 * igc_update_itr - update the dynamic ITR value based on statistics
4459 * @q_vector: pointer to q_vector
4460 * @ring_container: ring info to update the itr for
4461 *
4462 * Stores a new ITR value based on packets and byte
4463 * counts during the last interrupt. The advantage of per interrupt
4464 * computation is faster updates and more accurate ITR for the current
4465 * traffic pattern. Constants in this function were computed
4466 * based on theoretical maximum wire speed and thresholds were set based
4467 * on testing data as well as attempting to minimize response time
4468 * while increasing bulk throughput.
4469 * NOTE: These calculations are only valid when operating in a single-
4470 * queue environment.
4471 */
4472 static void igc_update_itr(struct igc_q_vector *q_vector,
4473 struct igc_ring_container *ring_container)
4474 {
4475 unsigned int packets = ring_container->total_packets;
4476 unsigned int bytes = ring_container->total_bytes;
4477 u8 itrval = ring_container->itr;
4478
4479 /* no packets, exit with status unchanged */
4480 if (packets == 0)
4481 return;
4482
4483 switch (itrval) {
4484 case lowest_latency:
4485 /* handle TSO and jumbo frames */
4486 if (bytes / packets > 8000)
4487 itrval = bulk_latency;
4488 else if ((packets < 5) && (bytes > 512))
4489 itrval = low_latency;
4490 break;
4491 case low_latency: /* 50 usec aka 20000 ints/s */
4492 if (bytes > 10000) {
4493 /* this if handles the TSO accounting */
4494 if (bytes / packets > 8000)
4495 itrval = bulk_latency;
4496 else if ((packets < 10) || ((bytes / packets) > 1200))
4497 itrval = bulk_latency;
4498 else if ((packets > 35))
4499 itrval = lowest_latency;
4500 } else if (bytes / packets > 2000) {
4501 itrval = bulk_latency;
4502 } else if (packets <= 2 && bytes < 512) {
4503 itrval = lowest_latency;
4504 }
4505 break;
4506 case bulk_latency: /* 250 usec aka 4000 ints/s */
4507 if (bytes > 25000) {
4508 if (packets > 35)
4509 itrval = low_latency;
4510 } else if (bytes < 1500) {
4511 itrval = low_latency;
4512 }
4513 break;
4514 }
4515
4516 /* clear work counters since we have the values we need */
4517 ring_container->total_bytes = 0;
4518 ring_container->total_packets = 0;
4519
4520 /* write updated itr to ring container */
4521 ring_container->itr = itrval;
4522 }
4523
4524 static void igc_set_itr(struct igc_q_vector *q_vector)
4525 {
4526 struct igc_adapter *adapter = q_vector->adapter;
4527 u32 new_itr = q_vector->itr_val;
4528 u8 current_itr = 0;
4529
4530 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4531 switch (adapter->link_speed) {
4532 case SPEED_10:
4533 case SPEED_100:
4534 current_itr = 0;
4535 new_itr = IGC_4K_ITR;
4536 goto set_itr_now;
4537 default:
4538 break;
4539 }
4540
4541 igc_update_itr(q_vector, &q_vector->tx);
4542 igc_update_itr(q_vector, &q_vector->rx);
4543
4544 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4545
4546 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4547 if (current_itr == lowest_latency &&
4548 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4549 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4550 current_itr = low_latency;
4551
4552 switch (current_itr) {
4553 /* counts and packets in update_itr are dependent on these numbers */
4554 case lowest_latency:
4555 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
4556 break;
4557 case low_latency:
4558 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
4559 break;
4560 case bulk_latency:
4561 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
4562 break;
4563 default:
4564 break;
4565 }
4566
4567 set_itr_now:
4568 if (new_itr != q_vector->itr_val) {
4569 /* this attempts to bias the interrupt rate towards Bulk
4570 * by adding intermediate steps when interrupt rate is
4571 * increasing
4572 */
4573 new_itr = new_itr > q_vector->itr_val ?
4574 max((new_itr * q_vector->itr_val) /
4575 (new_itr + (q_vector->itr_val >> 2)),
4576 new_itr) : new_itr;
4577 /* Don't write the value here; it resets the adapter's
4578 * internal timer, and causes us to delay far longer than
4579 * we should between interrupts. Instead, we write the ITR
4580 * value at the beginning of the next interrupt so the timing
4581 * ends up being correct.
4582 */
4583 q_vector->itr_val = new_itr;
4584 q_vector->set_itr = 1;
4585 }
4586 }
4587
4588 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
4589 {
4590 int v_idx = adapter->num_q_vectors;
4591
4592 if (adapter->msix_entries) {
4593 pci_disable_msix(adapter->pdev);
4594 kfree(adapter->msix_entries);
4595 adapter->msix_entries = NULL;
4596 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4597 pci_disable_msi(adapter->pdev);
4598 }
4599
4600 while (v_idx--)
4601 igc_reset_q_vector(adapter, v_idx);
4602 }
4603
4604 /**
4605 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4606 * @adapter: Pointer to adapter structure
4607 * @msix: boolean value for MSI-X capability
4608 *
4609 * Attempt to configure interrupts using the best available
4610 * capabilities of the hardware and kernel.
4611 */
4612 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
4613 bool msix)
4614 {
4615 int numvecs, i;
4616 int err;
4617
4618 if (!msix)
4619 goto msi_only;
4620 adapter->flags |= IGC_FLAG_HAS_MSIX;
4621
4622 /* Number of supported queues. */
4623 adapter->num_rx_queues = adapter->rss_queues;
4624
4625 adapter->num_tx_queues = adapter->rss_queues;
4626
4627 /* start with one vector for every Rx queue */
4628 numvecs = adapter->num_rx_queues;
4629
4630 /* if Tx handler is separate add 1 for every Tx queue */
4631 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4632 numvecs += adapter->num_tx_queues;
4633
4634 /* store the number of vectors reserved for queues */
4635 adapter->num_q_vectors = numvecs;
4636
4637 /* add 1 vector for link status interrupts */
4638 numvecs++;
4639
4640 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), GFP_KERNEL);
4641
4642 if (!adapter->msix_entries)
4643 return;
4644
4645 /* populate entry values */
4646 for (i = 0; i < numvecs; i++)
4647 adapter->msix_entries[i].entry = i;
4648
4649 err = pci_enable_msix_range(adapter->pdev,
4650 adapter->msix_entries,
4651 numvecs,
4652 numvecs);
4653 if (err > 0)
4654 return;
4655
4656 kfree(adapter->msix_entries);
4657 adapter->msix_entries = NULL;
4658
4659 igc_reset_interrupt_capability(adapter);
4660
4661 msi_only:
4662 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
4663
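/* Without MSI-X all interrupt sources share one vector, so collapse to a
 * single paired Rx/Tx queue and a single q_vector before trying plain MSI.
 */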
4664 adapter->rss_queues = 1;
4665 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4666 adapter->num_rx_queues = 1;
4667 adapter->num_tx_queues = 1;
4668 adapter->num_q_vectors = 1;
4669 if (!pci_enable_msi(adapter->pdev))
4670 adapter->flags |= IGC_FLAG_HAS_MSI;
4671 }
4672
4673 /**
4674 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4675 * @q_vector: pointer to q_vector
4676 *
4677 * Stores a new ITR value based strictly on packet size. This
4678 * algorithm is less sophisticated than that used in igc_update_itr,
4679 * due to the difficulty of synchronizing statistics across multiple
4680 * receive rings. The divisors and thresholds used by this function
4681 * were determined based on theoretical maximum wire speed and testing
4682 * data, in order to minimize response time while increasing bulk
4683 * throughput.
4684 * NOTE: This function is called only when operating in a multiqueue
4685 * receive environment.
4686 */
4687 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
4688 {
4689 struct igc_adapter *adapter = q_vector->adapter;
4690 int new_val = q_vector->itr_val;
4691 int avg_wire_size = 0;
4692 unsigned int packets;
4693
4694 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4695 * ints/sec - ITR timer value of 120 ticks.
4696 */
4697 switch (adapter->link_speed) {
4698 case SPEED_10:
4699 case SPEED_100:
4700 new_val = IGC_4K_ITR;
4701 goto set_itr_val;
4702 default:
4703 break;
4704 }
4705
4706 packets = q_vector->rx.total_packets;
4707 if (packets)
4708 avg_wire_size = q_vector->rx.total_bytes / packets;
4709
4710 packets = q_vector->tx.total_packets;
4711 if (packets)
4712 avg_wire_size = max_t(u32, avg_wire_size,
4713 q_vector->tx.total_bytes / packets);
4714
4715 /* if avg_wire_size isn't set no work was done */
4716 if (!avg_wire_size)
4717 goto clear_counts;
4718
4719 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4720 avg_wire_size += 24;
4721
4722 /* Don't starve jumbo frames */
4723 avg_wire_size = min(avg_wire_size, 3000);
4724
4725 /* Give a little boost to mid-size frames */
4726 if (avg_wire_size > 300 && avg_wire_size < 1200)
4727 new_val = avg_wire_size / 3;
4728 else
4729 new_val = avg_wire_size / 2;
4730
4731 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4732 if (new_val < IGC_20K_ITR &&
4733 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4734 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4735 new_val = IGC_20K_ITR;
4736
4737 set_itr_val:
4738 if (new_val != q_vector->itr_val) {
4739 q_vector->itr_val = new_val;
4740 q_vector->set_itr = 1;
4741 }
4742 clear_counts:
4743 q_vector->rx.total_bytes = 0;
4744 q_vector->rx.total_packets = 0;
4745 q_vector->tx.total_bytes = 0;
4746 q_vector->tx.total_packets = 0;
4747 }
4748
4749 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
4750 {
4751 struct igc_adapter *adapter = q_vector->adapter;
4752 struct igc_hw *hw = &adapter->hw;
4753
4754 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4755 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4756 if (adapter->num_q_vectors == 1)
4757 igc_set_itr(q_vector);
4758 else
4759 igc_update_ring_itr(q_vector);
4760 }
4761
4762 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4763 if (adapter->msix_entries)
4764 wr32(IGC_EIMS, q_vector->eims_value);
4765 else
4766 igc_irq_enable(adapter);
4767 }
4768 }
4769
4770 static void igc_add_ring(struct igc_ring *ring,
4771 struct igc_ring_container *head)
4772 {
4773 head->ring = ring;
4774 head->count++;
4775 }
4776
4777 /**
4778 * igc_cache_ring_register - Descriptor ring to register mapping
4779 * @adapter: board private structure to initialize
4780 *
4781 * Once we know the feature-set enabled for the device, we'll cache
4782 * the register offset the descriptor ring is assigned to.
4783 */
4784 static void igc_cache_ring_register(struct igc_adapter *adapter)
4785 {
4786 int i = 0, j = 0;
4787
4788 switch (adapter->hw.mac.type) {
4789 case igc_i225:
4790 default:
4791 for (; i < adapter->num_rx_queues; i++)
4792 adapter->rx_ring[i]->reg_idx = i;
4793 for (; j < adapter->num_tx_queues; j++)
4794 adapter->tx_ring[j]->reg_idx = j;
4795 break;
4796 }
4797 }
4798
4799 /**
4800 * igc_poll - NAPI Rx polling callback
4801 * @napi: napi polling structure
4802 * @budget: count of how many packets we should handle
4803 */
4804 static int igc_poll(struct napi_struct *napi, int budget)
4805 {
4806 struct igc_q_vector *q_vector = container_of(napi,
4807 struct igc_q_vector,
4808 napi);
4809 struct igc_ring *rx_ring = q_vector->rx.ring;
4810 bool clean_complete = true;
4811 int work_done = 0;
4812
4813 if (q_vector->tx.ring)
4814 clean_complete = igc_clean_tx_irq(q_vector, budget);
4815
4816 if (rx_ring) {
4817 int cleaned = rx_ring->xsk_pool ?
4818 igc_clean_rx_irq_zc(q_vector, budget) :
4819 igc_clean_rx_irq(q_vector, budget);
4820
4821 work_done += cleaned;
4822 if (cleaned >= budget)
4823 clean_complete = false;
4824 }
4825
4826 /* If all work not completed, return budget and keep polling */
4827 if (!clean_complete)
4828 return budget;
4829
4830 /* Exit the polling mode, but don't re-enable interrupts if stack might
4831 * poll us due to busy-polling
4832 */
4833 if (likely(napi_complete_done(napi, work_done)))
4834 igc_ring_irq_enable(q_vector);
4835
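/* A completed poll must report strictly less than the full budget back to
 * the NAPI core, hence the clamp below.
 */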
4836 return min(work_done, budget - 1);
4837 }
4838
4839 /**
4840 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4841 * @adapter: board private structure to initialize
4842 * @v_count: q_vectors allocated on adapter, used for ring interleaving
4843 * @v_idx: index of vector in adapter struct
4844 * @txr_count: total number of Tx rings to allocate
4845 * @txr_idx: index of first Tx ring to allocate
4846 * @rxr_count: total number of Rx rings to allocate
4847 * @rxr_idx: index of first Rx ring to allocate
4848 *
4849 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4850 */
4851 static int igc_alloc_q_vector(struct igc_adapter *adapter,
4852 unsigned int v_count, unsigned int v_idx,
4853 unsigned int txr_count, unsigned int txr_idx,
4854 unsigned int rxr_count, unsigned int rxr_idx)
4855 {
4856 struct igc_q_vector *q_vector;
4857 struct igc_ring *ring;
4858 int ring_count;
4859
4860 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4861 if (txr_count > 1 || rxr_count > 1)
4862 return -ENOMEM;
4863
4864 ring_count = txr_count + rxr_count;
4865
4866 /* allocate q_vector and rings */
4867 q_vector = adapter->q_vector[v_idx];
4868 if (!q_vector)
4869 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
4870 else
4871 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4872 if (!q_vector)
4873 return -ENOMEM;
4874
4875 /* initialize NAPI */
4876 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
4877
4878 /* tie q_vector and adapter together */
4879 adapter->q_vector[v_idx] = q_vector;
4880 q_vector->adapter = adapter;
4881
4882 /* initialize work limits */
4883 q_vector->tx.work_limit = adapter->tx_work_limit;
4884
4885 /* initialize ITR configuration */
4886 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4887 q_vector->itr_val = IGC_START_ITR;
4888
4889 /* initialize pointer to rings */
4890 ring = q_vector->ring;
4891
4892 /* initialize ITR */
4893 if (rxr_count) {
4894 /* rx or rx/tx vector */
4895 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4896 q_vector->itr_val = adapter->rx_itr_setting;
4897 } else {
4898 /* tx only vector */
4899 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4900 q_vector->itr_val = adapter->tx_itr_setting;
4901 }
4902
4903 if (txr_count) {
4904 /* assign generic ring traits */
4905 ring->dev = &adapter->pdev->dev;
4906 ring->netdev = adapter->netdev;
4907
4908 /* configure backlink on ring */
4909 ring->q_vector = q_vector;
4910
4911 /* update q_vector Tx values */
4912 igc_add_ring(ring, &q_vector->tx);
4913
4914 /* apply Tx specific ring traits */
4915 ring->count = adapter->tx_ring_count;
4916 ring->queue_index = txr_idx;
4917
4918 /* assign ring to adapter */
4919 adapter->tx_ring[txr_idx] = ring;
4920
4921 /* push pointer to next ring */
4922 ring++;
4923 }
4924
4925 if (rxr_count) {
4926 /* assign generic ring traits */
4927 ring->dev = &adapter->pdev->dev;
4928 ring->netdev = adapter->netdev;
4929
4930 /* configure backlink on ring */
4931 ring->q_vector = q_vector;
4932
4933 /* update q_vector Rx values */
4934 igc_add_ring(ring, &q_vector->rx);
4935
4936 /* apply Rx specific ring traits */
4937 ring->count = adapter->rx_ring_count;
4938 ring->queue_index = rxr_idx;
4939
4940 /* assign ring to adapter */
4941 adapter->rx_ring[rxr_idx] = ring;
4942 }
4943
4944 return 0;
4945 }
4946
4947 /**
4948 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4949 * @adapter: board private structure to initialize
4950 *
4951 * We allocate one q_vector per queue interrupt. If allocation fails we
4952 * return -ENOMEM.
4953 */
4954 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4955 {
4956 int rxr_remaining = adapter->num_rx_queues;
4957 int txr_remaining = adapter->num_tx_queues;
4958 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4959 int q_vectors = adapter->num_q_vectors;
4960 int err;
4961
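/* If there are enough vectors for every queue, give each Rx queue its own
 * vector first; the loop below then spreads the Tx queues (and any remaining
 * Rx queues) across the vectors that are left.
 */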
4962 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4963 for (; rxr_remaining; v_idx++) {
4964 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4965 0, 0, 1, rxr_idx);
4966
4967 if (err)
4968 goto err_out;
4969
4970 /* update counts and index */
4971 rxr_remaining--;
4972 rxr_idx++;
4973 }
4974 }
4975
4976 for (; v_idx < q_vectors; v_idx++) {
4977 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4978 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4979
4980 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4981 tqpv, txr_idx, rqpv, rxr_idx);
4982
4983 if (err)
4984 goto err_out;
4985
4986 /* update counts and index */
4987 rxr_remaining -= rqpv;
4988 txr_remaining -= tqpv;
4989 rxr_idx++;
4990 txr_idx++;
4991 }
4992
4993 return 0;
4994
4995 err_out:
4996 adapter->num_tx_queues = 0;
4997 adapter->num_rx_queues = 0;
4998 adapter->num_q_vectors = 0;
4999
5000 while (v_idx--)
5001 igc_free_q_vector(adapter, v_idx);
5002
5003 return -ENOMEM;
5004 }
5005
5006 /**
5007 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
5008 * @adapter: Pointer to adapter structure
5009 * @msix: boolean for MSI-X capability
5010 *
5011 * This function initializes the interrupts and allocates all of the queues.
5012 */
5013 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
5014 {
5015 struct net_device *dev = adapter->netdev;
5016 int err = 0;
5017
5018 igc_set_interrupt_capability(adapter, msix);
5019
5020 err = igc_alloc_q_vectors(adapter);
5021 if (err) {
5022 netdev_err(dev, "Unable to allocate memory for vectors\n");
5023 goto err_alloc_q_vectors;
5024 }
5025
5026 igc_cache_ring_register(adapter);
5027
5028 return 0;
5029
5030 err_alloc_q_vectors:
5031 igc_reset_interrupt_capability(adapter);
5032 return err;
5033 }
5034
5035 /**
5036 * igc_sw_init - Initialize general software structures (struct igc_adapter)
5037 * @adapter: board private structure to initialize
5038 *
5039 * igc_sw_init initializes the Adapter private data structure.
5040 * Fields are initialized based on PCI device information and
5041 * OS network device settings (MTU size).
5042 */
5043 static int igc_sw_init(struct igc_adapter *adapter)
5044 {
5045 struct net_device *netdev = adapter->netdev;
5046 struct pci_dev *pdev = adapter->pdev;
5047 struct igc_hw *hw = &adapter->hw;
5048
5049 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
5050
5051 /* set default ring sizes */
5052 adapter->tx_ring_count = IGC_DEFAULT_TXD;
5053 adapter->rx_ring_count = IGC_DEFAULT_RXD;
5054
5055 /* set default ITR values */
5056 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
5057 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
5058
5059 /* set default work limits */
5060 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
5061
5062 /* adjust max frame to be at least the size of a standard frame */
5063 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
5064 VLAN_HLEN;
5065 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
5066
5067 mutex_init(&adapter->nfc_rule_lock);
5068 INIT_LIST_HEAD(&adapter->nfc_rule_list);
5069 adapter->nfc_rule_count = 0;
5070
5071 spin_lock_init(&adapter->stats64_lock);
5072 spin_lock_init(&adapter->qbv_tx_lock);
5073 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
5074 adapter->flags |= IGC_FLAG_HAS_MSIX;
5075
5076 igc_init_queue_configuration(adapter);
5077
5078 /* This call may decrease the number of queues */
5079 if (igc_init_interrupt_scheme(adapter, true)) {
5080 netdev_err(netdev, "Unable to allocate memory for queues\n");
5081 return -ENOMEM;
5082 }
5083
5084 /* Explicitly disable IRQ since the NIC can be in any state. */
5085 igc_irq_disable(adapter);
5086
5087 set_bit(__IGC_DOWN, &adapter->state);
5088
5089 return 0;
5090 }
5091
5092 static void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
5093 struct napi_struct *napi)
5094 {
5095 struct igc_q_vector *q_vector = adapter->q_vector[vector];
5096
5097 if (q_vector->rx.ring)
5098 netif_queue_set_napi(adapter->netdev,
5099 q_vector->rx.ring->queue_index,
5100 NETDEV_QUEUE_TYPE_RX, napi);
5101
5102 if (q_vector->tx.ring)
5103 netif_queue_set_napi(adapter->netdev,
5104 q_vector->tx.ring->queue_index,
5105 NETDEV_QUEUE_TYPE_TX, napi);
5106 }
5107
5108 /**
5109 * igc_up - Open the interface and prepare it to handle traffic
5110 * @adapter: board private structure
5111 */
5112 void igc_up(struct igc_adapter *adapter)
5113 {
5114 struct igc_hw *hw = &adapter->hw;
5115 struct napi_struct *napi;
5116 int i = 0;
5117
5118 /* hardware has been reset, we need to reload some things */
5119 igc_configure(adapter);
5120
5121 clear_bit(__IGC_DOWN, &adapter->state);
5122
5123 for (i = 0; i < adapter->num_q_vectors; i++) {
5124 napi = &adapter->q_vector[i]->napi;
5125 napi_enable(napi);
5126 igc_set_queue_napi(adapter, i, napi);
5127 }
5128
5129 if (adapter->msix_entries)
5130 igc_configure_msix(adapter);
5131 else
5132 igc_assign_vector(adapter->q_vector[0], 0);
5133
5134 /* Clear any pending interrupts. */
5135 rd32(IGC_ICR);
5136 igc_irq_enable(adapter);
5137
5138 netif_tx_start_all_queues(adapter->netdev);
5139
5140 /* start the watchdog. */
5141 hw->mac.get_link_status = true;
5142 schedule_work(&adapter->watchdog_task);
5143 }
5144
5145 /**
5146 * igc_update_stats - Update the board statistics counters
5147 * @adapter: board private structure
5148 */
5149 void igc_update_stats(struct igc_adapter *adapter)
5150 {
5151 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
5152 struct pci_dev *pdev = adapter->pdev;
5153 struct igc_hw *hw = &adapter->hw;
5154 u64 _bytes, _packets;
5155 u64 bytes, packets;
5156 unsigned int start;
5157 u32 mpc;
5158 int i;
5159
5160 /* Prevent stats update while adapter is being reset, or if the pci
5161 * connection is down.
5162 */
5163 if (adapter->link_speed == 0)
5164 return;
5165 if (pci_channel_offline(pdev))
5166 return;
5167
5168 packets = 0;
5169 bytes = 0;
5170
5171 rcu_read_lock();
5172 for (i = 0; i < adapter->num_rx_queues; i++) {
5173 struct igc_ring *ring = adapter->rx_ring[i];
5174 u32 rqdpc = rd32(IGC_RQDPC(i));
5175
5176 if (hw->mac.type >= igc_i225)
5177 wr32(IGC_RQDPC(i), 0);
5178
5179 if (rqdpc) {
5180 ring->rx_stats.drops += rqdpc;
5181 net_stats->rx_fifo_errors += rqdpc;
5182 }
5183
5184 do {
5185 start = u64_stats_fetch_begin(&ring->rx_syncp);
5186 _bytes = ring->rx_stats.bytes;
5187 _packets = ring->rx_stats.packets;
5188 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
5189 bytes += _bytes;
5190 packets += _packets;
5191 }
5192
5193 net_stats->rx_bytes = bytes;
5194 net_stats->rx_packets = packets;
5195
5196 packets = 0;
5197 bytes = 0;
5198 for (i = 0; i < adapter->num_tx_queues; i++) {
5199 struct igc_ring *ring = adapter->tx_ring[i];
5200
5201 do {
5202 start = u64_stats_fetch_begin(&ring->tx_syncp);
5203 _bytes = ring->tx_stats.bytes;
5204 _packets = ring->tx_stats.packets;
5205 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
5206 bytes += _bytes;
5207 packets += _packets;
5208 }
5209 net_stats->tx_bytes = bytes;
5210 net_stats->tx_packets = packets;
5211 rcu_read_unlock();
5212
5213 /* read stats registers */
5214 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
5215 adapter->stats.gprc += rd32(IGC_GPRC);
5216 adapter->stats.gorc += rd32(IGC_GORCL);
5217 rd32(IGC_GORCH); /* clear GORCL */
5218 adapter->stats.bprc += rd32(IGC_BPRC);
5219 adapter->stats.mprc += rd32(IGC_MPRC);
5220 adapter->stats.roc += rd32(IGC_ROC);
5221
5222 adapter->stats.prc64 += rd32(IGC_PRC64);
5223 adapter->stats.prc127 += rd32(IGC_PRC127);
5224 adapter->stats.prc255 += rd32(IGC_PRC255);
5225 adapter->stats.prc511 += rd32(IGC_PRC511);
5226 adapter->stats.prc1023 += rd32(IGC_PRC1023);
5227 adapter->stats.prc1522 += rd32(IGC_PRC1522);
5228 adapter->stats.tlpic += rd32(IGC_TLPIC);
5229 adapter->stats.rlpic += rd32(IGC_RLPIC);
5230 adapter->stats.hgptc += rd32(IGC_HGPTC);
5231
5232 mpc = rd32(IGC_MPC);
5233 adapter->stats.mpc += mpc;
5234 net_stats->rx_fifo_errors += mpc;
5235 adapter->stats.scc += rd32(IGC_SCC);
5236 adapter->stats.ecol += rd32(IGC_ECOL);
5237 adapter->stats.mcc += rd32(IGC_MCC);
5238 adapter->stats.latecol += rd32(IGC_LATECOL);
5239 adapter->stats.dc += rd32(IGC_DC);
5240 adapter->stats.rlec += rd32(IGC_RLEC);
5241 adapter->stats.xonrxc += rd32(IGC_XONRXC);
5242 adapter->stats.xontxc += rd32(IGC_XONTXC);
5243 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
5244 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
5245 adapter->stats.fcruc += rd32(IGC_FCRUC);
5246 adapter->stats.gptc += rd32(IGC_GPTC);
5247 adapter->stats.gotc += rd32(IGC_GOTCL);
5248 rd32(IGC_GOTCH); /* clear GOTCL */
5249 adapter->stats.rnbc += rd32(IGC_RNBC);
5250 adapter->stats.ruc += rd32(IGC_RUC);
5251 adapter->stats.rfc += rd32(IGC_RFC);
5252 adapter->stats.rjc += rd32(IGC_RJC);
5253 adapter->stats.tor += rd32(IGC_TORH);
5254 adapter->stats.tot += rd32(IGC_TOTH);
5255 adapter->stats.tpr += rd32(IGC_TPR);
5256
5257 adapter->stats.ptc64 += rd32(IGC_PTC64);
5258 adapter->stats.ptc127 += rd32(IGC_PTC127);
5259 adapter->stats.ptc255 += rd32(IGC_PTC255);
5260 adapter->stats.ptc511 += rd32(IGC_PTC511);
5261 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
5262 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
5263
5264 adapter->stats.mptc += rd32(IGC_MPTC);
5265 adapter->stats.bptc += rd32(IGC_BPTC);
5266
5267 adapter->stats.tpt += rd32(IGC_TPT);
5268 adapter->stats.colc += rd32(IGC_COLC);
5269 adapter->stats.colc += rd32(IGC_RERC);
5270
5271 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
5272
5273 adapter->stats.tsctc += rd32(IGC_TSCTC);
5274
5275 adapter->stats.iac += rd32(IGC_IAC);
5276
5277 /* Fill out the OS statistics structure */
5278 net_stats->multicast = adapter->stats.mprc;
5279 net_stats->collisions = adapter->stats.colc;
5280
5281 /* Rx Errors */
5282
5283 /* RLEC on some newer hardware can be incorrect so build
5284 * our own version based on RUC and ROC
5285 */
5286 net_stats->rx_errors = adapter->stats.rxerrc +
5287 adapter->stats.crcerrs + adapter->stats.algnerrc +
5288 adapter->stats.ruc + adapter->stats.roc +
5289 adapter->stats.cexterr;
5290 net_stats->rx_length_errors = adapter->stats.ruc +
5291 adapter->stats.roc;
5292 net_stats->rx_crc_errors = adapter->stats.crcerrs;
5293 net_stats->rx_frame_errors = adapter->stats.algnerrc;
5294 net_stats->rx_missed_errors = adapter->stats.mpc;
5295
5296 /* Tx Errors */
5297 net_stats->tx_errors = adapter->stats.ecol +
5298 adapter->stats.latecol;
5299 net_stats->tx_aborted_errors = adapter->stats.ecol;
5300 net_stats->tx_window_errors = adapter->stats.latecol;
5301 net_stats->tx_carrier_errors = adapter->stats.tncrs;
5302
5303 /* Tx Dropped */
5304 net_stats->tx_dropped = adapter->stats.txdrop;
5305
5306 /* Management Stats */
5307 adapter->stats.mgptc += rd32(IGC_MGTPTC);
5308 adapter->stats.mgprc += rd32(IGC_MGTPRC);
5309 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
5310 }
5311
5312 /**
5313 * igc_down - Close the interface
5314 * @adapter: board private structure
5315 */
5316 void igc_down(struct igc_adapter *adapter)
5317 {
5318 struct net_device *netdev = adapter->netdev;
5319 struct igc_hw *hw = &adapter->hw;
5320 u32 tctl, rctl;
5321 int i = 0;
5322
5323 set_bit(__IGC_DOWN, &adapter->state);
5324
5325 igc_ptp_suspend(adapter);
5326
5327 if (pci_device_is_present(adapter->pdev)) {
5328 /* disable receives in the hardware */
5329 rctl = rd32(IGC_RCTL);
5330 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
5331 /* flush and sleep below */
5332 }
5333 /* set trans_start so we don't get spurious watchdogs during reset */
5334 netif_trans_update(netdev);
5335
5336 netif_carrier_off(netdev);
5337 netif_tx_stop_all_queues(netdev);
5338
5339 if (pci_device_is_present(adapter->pdev)) {
5340 /* disable transmits in the hardware */
5341 tctl = rd32(IGC_TCTL);
5342 tctl &= ~IGC_TCTL_EN;
5343 wr32(IGC_TCTL, tctl);
5344 /* flush both disables and wait for them to finish */
5345 wrfl();
5346 usleep_range(10000, 20000);
5347
5348 igc_irq_disable(adapter);
5349 }
5350
5351 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5352
5353 for (i = 0; i < adapter->num_q_vectors; i++) {
5354 if (adapter->q_vector[i]) {
5355 napi_synchronize(&adapter->q_vector[i]->napi);
5356 igc_set_queue_napi(adapter, i, NULL);
5357 napi_disable(&adapter->q_vector[i]->napi);
5358 }
5359 }
5360
5361 timer_delete_sync(&adapter->watchdog_timer);
5362 timer_delete_sync(&adapter->phy_info_timer);
5363
5364 /* record the stats before reset */
5365 spin_lock(&adapter->stats64_lock);
5366 igc_update_stats(adapter);
5367 spin_unlock(&adapter->stats64_lock);
5368
5369 adapter->link_speed = 0;
5370 adapter->link_duplex = 0;
5371
5372 if (!pci_channel_offline(adapter->pdev))
5373 igc_reset(adapter);
5374
5375 /* clear VLAN promisc flag so VFTA will be updated if necessary */
5376 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
5377
5378 igc_disable_all_tx_rings_hw(adapter);
5379 igc_clean_all_tx_rings(adapter);
5380 igc_clean_all_rx_rings(adapter);
5381
5382 if (adapter->fpe.mmsv.pmac_enabled)
5383 ethtool_mmsv_stop(&adapter->fpe.mmsv);
5384 }
5385
5386 void igc_reinit_locked(struct igc_adapter *adapter)
5387 {
5388 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5389 usleep_range(1000, 2000);
5390 igc_down(adapter);
5391 igc_up(adapter);
5392 clear_bit(__IGC_RESETTING, &adapter->state);
5393 }
5394
5395 static void igc_reset_task(struct work_struct *work)
5396 {
5397 struct igc_adapter *adapter;
5398
5399 adapter = container_of(work, struct igc_adapter, reset_task);
5400
5401 rtnl_lock();
5402 /* If we're already down or resetting, just bail */
5403 if (test_bit(__IGC_DOWN, &adapter->state) ||
5404 test_bit(__IGC_RESETTING, &adapter->state)) {
5405 rtnl_unlock();
5406 return;
5407 }
5408
5409 igc_rings_dump(adapter);
5410 igc_regs_dump(adapter);
5411 netdev_err(adapter->netdev, "Reset adapter\n");
5412 igc_reinit_locked(adapter);
5413 rtnl_unlock();
5414 }
5415
5416 /**
5417 * igc_change_mtu - Change the Maximum Transfer Unit
5418 * @netdev: network interface device structure
5419 * @new_mtu: new value for maximum frame size
5420 *
5421 * Returns 0 on success, negative on failure
5422 */
5423 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
5424 {
5425 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5426 struct igc_adapter *adapter = netdev_priv(netdev);
5427
5428 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
5429 netdev_dbg(netdev, "Jumbo frames not supported with XDP");
5430 return -EINVAL;
5431 }
5432
5433 /* adjust max frame to be at least the size of a standard frame */
5434 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5435 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5436
5437 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5438 usleep_range(1000, 2000);
5439
5440 /* igc_down has a dependency on max_frame_size */
5441 adapter->max_frame_size = max_frame;
5442
5443 if (netif_running(netdev))
5444 igc_down(adapter);
5445
5446 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5447 WRITE_ONCE(netdev->mtu, new_mtu);
5448
5449 if (netif_running(netdev))
5450 igc_up(adapter);
5451 else
5452 igc_reset(adapter);
5453
5454 clear_bit(__IGC_RESETTING, &adapter->state);
5455
5456 return 0;
5457 }
5458
5459 /**
5460 * igc_tx_timeout - Respond to a Tx Hang
5461 * @netdev: network interface device structure
5462 * @txqueue: queue number that timed out
5463 **/
5464 static void igc_tx_timeout(struct net_device *netdev,
5465 unsigned int __always_unused txqueue)
5466 {
5467 struct igc_adapter *adapter = netdev_priv(netdev);
5468 struct igc_hw *hw = &adapter->hw;
5469
5470 /* Do the reset outside of interrupt context */
5471 adapter->tx_timeout_count++;
5472 schedule_work(&adapter->reset_task);
5473 wr32(IGC_EICS,
5474 (adapter->eims_enable_mask & ~adapter->eims_other));
5475 }
5476
5477 /**
5478 * igc_get_stats64 - Get System Network Statistics
5479 * @netdev: network interface device structure
5480 * @stats: rtnl_link_stats64 pointer
5481 *
5482  * Copies the current device statistics into @stats. The statistics
5483  * are updated here and also from the watchdog timer callback.
5484 */
5485 static void igc_get_stats64(struct net_device *netdev,
5486 struct rtnl_link_stats64 *stats)
5487 {
5488 struct igc_adapter *adapter = netdev_priv(netdev);
5489
5490 spin_lock(&adapter->stats64_lock);
5491 if (!test_bit(__IGC_RESETTING, &adapter->state))
5492 igc_update_stats(adapter);
5493 memcpy(stats, &adapter->stats64, sizeof(*stats));
5494 spin_unlock(&adapter->stats64_lock);
5495 }
5496
5497 static netdev_features_t igc_fix_features(struct net_device *netdev,
5498 netdev_features_t features)
5499 {
5500 	/* Since there is no support for separate Rx/Tx VLAN accel
5501 	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
5502 	 */
5503 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5504 features |= NETIF_F_HW_VLAN_CTAG_TX;
5505 else
5506 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
5507
5508 return features;
5509 }
5510
5511 static int igc_set_features(struct net_device *netdev,
5512 netdev_features_t features)
5513 {
5514 netdev_features_t changed = netdev->features ^ features;
5515 struct igc_adapter *adapter = netdev_priv(netdev);
5516
5517 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
5518 igc_vlan_mode(netdev, features);
5519
5520 	/* Only RXALL and NTUPLE changes require the reinit below */
5521 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
5522 return 0;
5523
5524 if (!(features & NETIF_F_NTUPLE))
5525 igc_flush_nfc_rules(adapter);
5526
5527 netdev->features = features;
5528
5529 if (netif_running(netdev))
5530 igc_reinit_locked(adapter);
5531 else
5532 igc_reset(adapter);
5533
5534 return 1;
5535 }
5536
5537 static netdev_features_t
5538 igc_features_check(struct sk_buff *skb, struct net_device *dev,
5539 netdev_features_t features)
5540 {
5541 unsigned int network_hdr_len, mac_hdr_len;
5542
5543 /* Make certain the headers can be described by a context descriptor */
5544 mac_hdr_len = skb_network_offset(skb);
5545 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
5546 return features & ~(NETIF_F_HW_CSUM |
5547 NETIF_F_SCTP_CRC |
5548 NETIF_F_HW_VLAN_CTAG_TX |
5549 NETIF_F_TSO |
5550 NETIF_F_TSO6);
5551
5552 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
5553 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
5554 return features & ~(NETIF_F_HW_CSUM |
5555 NETIF_F_SCTP_CRC |
5556 NETIF_F_TSO |
5557 NETIF_F_TSO6);
5558
5559 /* We can only support IPv4 TSO in tunnels if we can mangle the
5560 * inner IP ID field, so strip TSO if MANGLEID is not supported.
5561 */
5562 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
5563 features &= ~NETIF_F_TSO;
5564
5565 return features;
5566 }
5567
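/**
 * igc_tsync_interrupt - handle time-sync (TSICR) interrupt causes
 * @adapter: board private structure
 *
 * Reads TSICR and, depending on the causes latched there, reports a PPS
 * event, retrieves a Tx hardware timestamp, re-arms target time 0/1 for the
 * next periodic-output edge, or reports auxiliary timestamps 0/1 as EXTTS
 * events to the PTP clock subsystem.
 */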
5568 static void igc_tsync_interrupt(struct igc_adapter *adapter)
5569 {
5570 struct igc_hw *hw = &adapter->hw;
5571 u32 tsauxc, sec, nsec, tsicr;
5572 struct ptp_clock_event event;
5573 struct timespec64 ts;
5574
5575 tsicr = rd32(IGC_TSICR);
5576
5577 if (tsicr & IGC_TSICR_SYS_WRAP) {
5578 event.type = PTP_CLOCK_PPS;
5579 if (adapter->ptp_caps.pps)
5580 ptp_clock_event(adapter->ptp_clock, &event);
5581 }
5582
5583 if (tsicr & IGC_TSICR_TXTS) {
5584 /* retrieve hardware timestamp */
5585 igc_ptp_tx_tstamp_event(adapter);
5586 }
5587
5588 if (tsicr & IGC_TSICR_TT0) {
5589 spin_lock(&adapter->tmreg_lock);
5590 ts = timespec64_add(adapter->perout[0].start,
5591 adapter->perout[0].period);
5592 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5593 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5594 tsauxc = rd32(IGC_TSAUXC);
5595 tsauxc |= IGC_TSAUXC_EN_TT0;
5596 wr32(IGC_TSAUXC, tsauxc);
5597 adapter->perout[0].start = ts;
5598 spin_unlock(&adapter->tmreg_lock);
5599 }
5600
5601 if (tsicr & IGC_TSICR_TT1) {
5602 spin_lock(&adapter->tmreg_lock);
5603 ts = timespec64_add(adapter->perout[1].start,
5604 adapter->perout[1].period);
5605 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5606 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5607 tsauxc = rd32(IGC_TSAUXC);
5608 tsauxc |= IGC_TSAUXC_EN_TT1;
5609 wr32(IGC_TSAUXC, tsauxc);
5610 adapter->perout[1].start = ts;
5611 spin_unlock(&adapter->tmreg_lock);
5612 }
5613
5614 if (tsicr & IGC_TSICR_AUTT0) {
5615 nsec = rd32(IGC_AUXSTMPL0);
5616 sec = rd32(IGC_AUXSTMPH0);
5617 event.type = PTP_CLOCK_EXTTS;
5618 event.index = 0;
5619 event.timestamp = sec * NSEC_PER_SEC + nsec;
5620 ptp_clock_event(adapter->ptp_clock, &event);
5621 }
5622
5623 if (tsicr & IGC_TSICR_AUTT1) {
5624 nsec = rd32(IGC_AUXSTMPL1);
5625 sec = rd32(IGC_AUXSTMPH1);
5626 event.type = PTP_CLOCK_EXTTS;
5627 event.index = 1;
5628 event.timestamp = sec * NSEC_PER_SEC + nsec;
5629 ptp_clock_event(adapter->ptp_clock, &event);
5630 }
5631 }
5632
5633 /**
5634 * igc_msix_other - msix other interrupt handler
5635 * @irq: interrupt number
5636  * @data: pointer to the igc_adapter structure
5637 */
5638 static irqreturn_t igc_msix_other(int irq, void *data)
5639 {
5640 struct igc_adapter *adapter = data;
5641 struct igc_hw *hw = &adapter->hw;
5642 u32 icr = rd32(IGC_ICR);
5643
5644 /* reading ICR causes bit 31 of EICR to be cleared */
5645 if (icr & IGC_ICR_DRSTA)
5646 schedule_work(&adapter->reset_task);
5647
5648 if (icr & IGC_ICR_DOUTSYNC) {
5649 /* HW is reporting DMA is out of sync */
5650 adapter->stats.doosync++;
5651 }
5652
5653 if (icr & IGC_ICR_LSC) {
5654 hw->mac.get_link_status = true;
5655 /* guard against interrupt when we're going down */
5656 if (!test_bit(__IGC_DOWN, &adapter->state))
5657 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5658 }
5659
5660 if (icr & IGC_ICR_TS)
5661 igc_tsync_interrupt(adapter);
5662
5663 wr32(IGC_EIMS, adapter->eims_other);
5664
5665 return IRQ_HANDLED;
5666 }
5667
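/* igc_write_itr - push the ITR value computed after the previous interrupt
 * into the queue vector's EITR register. Setting IGC_EITR_CNT_IGNR is
 * assumed to tell the hardware not to reset its interrupt counters on this
 * write.
 */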
5668 static void igc_write_itr(struct igc_q_vector *q_vector)
5669 {
5670 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
5671
5672 if (!q_vector->set_itr)
5673 return;
5674
5675 if (!itr_val)
5676 itr_val = IGC_ITR_VAL_MASK;
5677
5678 itr_val |= IGC_EITR_CNT_IGNR;
5679
5680 writel(itr_val, q_vector->itr_register);
5681 q_vector->set_itr = 0;
5682 }
5683
5684 static irqreturn_t igc_msix_ring(int irq, void *data)
5685 {
5686 struct igc_q_vector *q_vector = data;
5687
5688 /* Write the ITR value calculated from the previous interrupt. */
5689 igc_write_itr(q_vector);
5690
5691 napi_schedule(&q_vector->napi);
5692
5693 return IRQ_HANDLED;
5694 }
5695
5696 /**
5697 * igc_request_msix - Initialize MSI-X interrupts
5698 * @adapter: Pointer to adapter structure
5699 *
5700 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5701 * kernel.
5702 */
5703 static int igc_request_msix(struct igc_adapter *adapter)
5704 {
5705 unsigned int num_q_vectors = adapter->num_q_vectors;
5706 int i = 0, err = 0, vector = 0, free_vector = 0;
5707 struct net_device *netdev = adapter->netdev;
5708
5709 err = request_irq(adapter->msix_entries[vector].vector,
5710 &igc_msix_other, 0, netdev->name, adapter);
5711 if (err)
5712 goto err_out;
5713
5714 if (num_q_vectors > MAX_Q_VECTORS) {
5715 num_q_vectors = MAX_Q_VECTORS;
5716 dev_warn(&adapter->pdev->dev,
5717 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5718 adapter->num_q_vectors, MAX_Q_VECTORS);
5719 }
5720 for (i = 0; i < num_q_vectors; i++) {
5721 struct igc_q_vector *q_vector = adapter->q_vector[i];
5722
5723 vector++;
5724
5725 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
5726
5727 if (q_vector->rx.ring && q_vector->tx.ring)
5728 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5729 q_vector->rx.ring->queue_index);
5730 else if (q_vector->tx.ring)
5731 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5732 q_vector->tx.ring->queue_index);
5733 else if (q_vector->rx.ring)
5734 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5735 q_vector->rx.ring->queue_index);
5736 else
5737 sprintf(q_vector->name, "%s-unused", netdev->name);
5738
5739 err = request_irq(adapter->msix_entries[vector].vector,
5740 igc_msix_ring, 0, q_vector->name,
5741 q_vector);
5742 if (err)
5743 goto err_free;
5744
5745 netif_napi_set_irq(&q_vector->napi,
5746 adapter->msix_entries[vector].vector);
5747 }
5748
5749 igc_configure_msix(adapter);
5750 return 0;
5751
5752 err_free:
5753 /* free already assigned IRQs */
5754 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
5755
5756 vector--;
5757 for (i = 0; i < vector; i++) {
5758 free_irq(adapter->msix_entries[free_vector++].vector,
5759 adapter->q_vector[i]);
5760 }
5761 err_out:
5762 return err;
5763 }
5764
5765 /**
5766 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5767 * @adapter: Pointer to adapter structure
5768 *
5769  * This function resets the device so that no Rx queues, Tx queues, or
5770  * MSI-X interrupts remain allocated.
5771 */
5772 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5773 {
5774 igc_free_q_vectors(adapter);
5775 igc_reset_interrupt_capability(adapter);
5776 }
5777
5778 /* Need to wait a few seconds after link up to get diagnostic information from
5779 * the phy
5780 */
5781 static void igc_update_phy_info(struct timer_list *t)
5782 {
5783 struct igc_adapter *adapter = timer_container_of(adapter, t,
5784 phy_info_timer);
5785
5786 igc_get_phy_info(&adapter->hw);
5787 }
5788
5789 /**
5790 * igc_has_link - check shared code for link and determine up/down
5791 * @adapter: pointer to driver private info
5792 */
5793 bool igc_has_link(struct igc_adapter *adapter)
5794 {
5795 struct igc_hw *hw = &adapter->hw;
5796 bool link_active = false;
5797
5798 	/* get_link_status is set on LSC (link status) interrupt or
5799 	 * rx sequence error interrupt. It stays true until
5800 	 * igc_check_for_link establishes link, and that only
5801 	 * happens for copper adapters.
5802 	 */
5803 if (!hw->mac.get_link_status)
5804 return true;
5805 hw->mac.ops.check_for_link(hw);
5806 link_active = !hw->mac.get_link_status;
5807
5808 if (hw->mac.type == igc_i225) {
5809 if (!netif_carrier_ok(adapter->netdev)) {
5810 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5811 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5812 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5813 adapter->link_check_timeout = jiffies;
5814 }
5815 }
5816
5817 return link_active;
5818 }
5819
5820 /**
5821 * igc_watchdog - Timer Call-back
5822 * @t: timer for the watchdog
5823 */
5824 static void igc_watchdog(struct timer_list *t)
5825 {
5826 struct igc_adapter *adapter = timer_container_of(adapter, t,
5827 watchdog_timer);
5828 /* Do the rest outside of interrupt context */
5829 schedule_work(&adapter->watchdog_task);
5830 }
5831
5832 static void igc_watchdog_task(struct work_struct *work)
5833 {
5834 struct igc_adapter *adapter = container_of(work,
5835 struct igc_adapter,
5836 watchdog_task);
5837 struct net_device *netdev = adapter->netdev;
5838 struct igc_hw *hw = &adapter->hw;
5839 struct igc_phy_info *phy = &hw->phy;
5840 u16 phy_data, retry_count = 20;
5841 u32 link;
5842 int i;
5843
5844 link = igc_has_link(adapter);
5845
5846 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5847 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5848 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5849 else
5850 link = false;
5851 }
5852
5853 if (link) {
5854 /* Cancel scheduled suspend requests. */
5855 pm_runtime_resume(netdev->dev.parent);
5856
5857 if (!netif_carrier_ok(netdev)) {
5858 u32 ctrl;
5859
5860 hw->mac.ops.get_speed_and_duplex(hw,
5861 &adapter->link_speed,
5862 &adapter->link_duplex);
5863
5864 ctrl = rd32(IGC_CTRL);
5865 /* Link status message must follow this format */
5866 netdev_info(netdev,
5867 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5868 adapter->link_speed,
5869 adapter->link_duplex == FULL_DUPLEX ?
5870 "Full" : "Half",
5871 (ctrl & IGC_CTRL_TFCE) &&
5872 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5873 (ctrl & IGC_CTRL_RFCE) ? "RX" :
5874 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
5875
5876 /* disable EEE if enabled */
5877 if ((adapter->flags & IGC_FLAG_EEE) &&
5878 adapter->link_duplex == HALF_DUPLEX) {
5879 netdev_info(netdev,
5880 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5881 adapter->hw.dev_spec._base.eee_enable = false;
5882 adapter->flags &= ~IGC_FLAG_EEE;
5883 }
5884
5885 /* check if SmartSpeed worked */
5886 igc_check_downshift(hw);
5887 if (phy->speed_downgraded)
5888 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5889
5890 /* adjust timeout factor according to speed/duplex */
5891 adapter->tx_timeout_factor = 1;
5892 switch (adapter->link_speed) {
5893 case SPEED_10:
5894 adapter->tx_timeout_factor = 14;
5895 break;
5896 case SPEED_100:
5897 case SPEED_1000:
5898 case SPEED_2500:
5899 adapter->tx_timeout_factor = 1;
5900 break;
5901 }
5902
5903 /* Once the launch time has been set on the wire, there
5904 * is a delay before the link speed can be determined
5905 * based on link-up activity. Write into the register
5906 * as soon as we know the correct link speed.
5907 */
5908 igc_tsn_adjust_txtime_offset(adapter);
5909
5910 if (adapter->fpe.mmsv.pmac_enabled)
5911 ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
5912 true);
5913
5914 if (adapter->link_speed != SPEED_1000)
5915 goto no_wait;
5916
5917 /* wait for Remote receiver status OK */
5918 retry_read_status:
5919 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5920 &phy_data)) {
5921 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5922 retry_count) {
5923 msleep(100);
5924 retry_count--;
5925 goto retry_read_status;
5926 } else if (!retry_count) {
5927 netdev_err(netdev, "exceed max 2 second\n");
5928 }
5929 } else {
5930 netdev_err(netdev, "read 1000Base-T Status Reg\n");
5931 }
5932 no_wait:
5933 netif_carrier_on(netdev);
5934
5935 /* link state has changed, schedule phy info update */
5936 if (!test_bit(__IGC_DOWN, &adapter->state))
5937 mod_timer(&adapter->phy_info_timer,
5938 round_jiffies(jiffies + 2 * HZ));
5939 }
5940 } else {
5941 if (netif_carrier_ok(netdev)) {
5942 adapter->link_speed = 0;
5943 adapter->link_duplex = 0;
5944
5945 			/* Link status message must follow this format */
5946 netdev_info(netdev, "NIC Link is Down\n");
5947 netif_carrier_off(netdev);
5948
5949 if (adapter->fpe.mmsv.pmac_enabled)
5950 ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
5951 false);
5952
5953 /* link state has changed, schedule phy info update */
5954 if (!test_bit(__IGC_DOWN, &adapter->state))
5955 mod_timer(&adapter->phy_info_timer,
5956 round_jiffies(jiffies + 2 * HZ));
5957
5958 pm_schedule_suspend(netdev->dev.parent,
5959 MSEC_PER_SEC * 5);
5960 }
5961 }
5962
5963 spin_lock(&adapter->stats64_lock);
5964 igc_update_stats(adapter);
5965 spin_unlock(&adapter->stats64_lock);
5966
5967 for (i = 0; i < adapter->num_tx_queues; i++) {
5968 struct igc_ring *tx_ring = adapter->tx_ring[i];
5969
5970 if (!netif_carrier_ok(netdev)) {
5971 /* We've lost link, so the controller stops DMA,
5972 * but we've got queued Tx work that's never going
5973 * to get done, so reset controller to flush Tx.
5974 * (Do the reset outside of interrupt context).
5975 */
5976 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5977 adapter->tx_timeout_count++;
5978 schedule_work(&adapter->reset_task);
5979 /* return immediately since reset is imminent */
5980 return;
5981 }
5982 }
5983
5984 /* Force detection of hung controller every watchdog period */
5985 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5986 }
5987
5988 /* Cause software interrupt to ensure Rx ring is cleaned */
5989 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5990 u32 eics = 0;
5991
5992 for (i = 0; i < adapter->num_q_vectors; i++) {
5993 struct igc_q_vector *q_vector = adapter->q_vector[i];
5994 struct igc_ring *rx_ring;
5995
5996 if (!q_vector->rx.ring)
5997 continue;
5998
5999 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
6000
6001 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
6002 eics |= q_vector->eims_value;
6003 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
6004 }
6005 }
6006 if (eics)
6007 wr32(IGC_EICS, eics);
6008 } else {
6009 struct igc_ring *rx_ring = adapter->rx_ring[0];
6010
6011 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
6012 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
6013 wr32(IGC_ICS, IGC_ICS_RXDMT0);
6014 }
6015 }
6016
6017 igc_ptp_tx_hang(adapter);
6018
6019 /* Reset the timer */
6020 if (!test_bit(__IGC_DOWN, &adapter->state)) {
6021 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
6022 mod_timer(&adapter->watchdog_timer,
6023 round_jiffies(jiffies + HZ));
6024 else
6025 mod_timer(&adapter->watchdog_timer,
6026 round_jiffies(jiffies + 2 * HZ));
6027 }
6028 }
6029
6030 /**
6031 * igc_intr_msi - Interrupt Handler
6032 * @irq: interrupt number
6033  * @data: pointer to the igc_adapter structure
6034 */
6035 static irqreturn_t igc_intr_msi(int irq, void *data)
6036 {
6037 struct igc_adapter *adapter = data;
6038 struct igc_q_vector *q_vector = adapter->q_vector[0];
6039 struct igc_hw *hw = &adapter->hw;
6040 /* read ICR disables interrupts using IAM */
6041 u32 icr = rd32(IGC_ICR);
6042
6043 igc_write_itr(q_vector);
6044
6045 if (icr & IGC_ICR_DRSTA)
6046 schedule_work(&adapter->reset_task);
6047
6048 if (icr & IGC_ICR_DOUTSYNC) {
6049 /* HW is reporting DMA is out of sync */
6050 adapter->stats.doosync++;
6051 }
6052
6053 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
6054 hw->mac.get_link_status = true;
6055 if (!test_bit(__IGC_DOWN, &adapter->state))
6056 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6057 }
6058
6059 if (icr & IGC_ICR_TS)
6060 igc_tsync_interrupt(adapter);
6061
6062 napi_schedule(&q_vector->napi);
6063
6064 return IRQ_HANDLED;
6065 }
6066
6067 /**
6068 * igc_intr - Legacy Interrupt Handler
6069 * @irq: interrupt number
6070  * @data: pointer to the igc_adapter structure
6071 */
6072 static irqreturn_t igc_intr(int irq, void *data)
6073 {
6074 struct igc_adapter *adapter = data;
6075 struct igc_q_vector *q_vector = adapter->q_vector[0];
6076 struct igc_hw *hw = &adapter->hw;
6077 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
6078 * need for the IMC write
6079 */
6080 u32 icr = rd32(IGC_ICR);
6081
6082 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
6083 * not set, then the adapter didn't send an interrupt
6084 */
6085 if (!(icr & IGC_ICR_INT_ASSERTED))
6086 return IRQ_NONE;
6087
6088 igc_write_itr(q_vector);
6089
6090 if (icr & IGC_ICR_DRSTA)
6091 schedule_work(&adapter->reset_task);
6092
6093 if (icr & IGC_ICR_DOUTSYNC) {
6094 /* HW is reporting DMA is out of sync */
6095 adapter->stats.doosync++;
6096 }
6097
6098 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
6099 hw->mac.get_link_status = true;
6100 /* guard against interrupt when we're going down */
6101 if (!test_bit(__IGC_DOWN, &adapter->state))
6102 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6103 }
6104
6105 if (icr & IGC_ICR_TS)
6106 igc_tsync_interrupt(adapter);
6107
6108 napi_schedule(&q_vector->napi);
6109
6110 return IRQ_HANDLED;
6111 }
6112
6113 static void igc_free_irq(struct igc_adapter *adapter)
6114 {
6115 if (adapter->msix_entries) {
6116 int vector = 0, i;
6117
6118 free_irq(adapter->msix_entries[vector++].vector, adapter);
6119
6120 for (i = 0; i < adapter->num_q_vectors; i++)
6121 free_irq(adapter->msix_entries[vector++].vector,
6122 adapter->q_vector[i]);
6123 } else {
6124 free_irq(adapter->pdev->irq, adapter);
6125 }
6126 }
6127
6128 /**
6129 * igc_request_irq - initialize interrupts
6130 * @adapter: Pointer to adapter structure
6131 *
6132 * Attempts to configure interrupts using the best available
6133 * capabilities of the hardware and kernel.
6134 */
6135 static int igc_request_irq(struct igc_adapter *adapter)
6136 {
6137 struct net_device *netdev = adapter->netdev;
6138 struct pci_dev *pdev = adapter->pdev;
6139 int err = 0;
6140
6141 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
6142 err = igc_request_msix(adapter);
6143 if (!err)
6144 goto request_done;
6145 /* fall back to MSI */
6146 igc_free_all_tx_resources(adapter);
6147 igc_free_all_rx_resources(adapter);
6148
6149 igc_clear_interrupt_scheme(adapter);
6150 err = igc_init_interrupt_scheme(adapter, false);
6151 if (err)
6152 goto request_done;
6153 igc_setup_all_tx_resources(adapter);
6154 igc_setup_all_rx_resources(adapter);
6155 igc_configure(adapter);
6156 }
6157
6158 igc_assign_vector(adapter->q_vector[0], 0);
6159
6160 if (adapter->flags & IGC_FLAG_HAS_MSI) {
6161 err = request_irq(pdev->irq, &igc_intr_msi, 0,
6162 netdev->name, adapter);
6163 if (!err)
6164 goto request_done;
6165
6166 /* fall back to legacy interrupts */
6167 igc_reset_interrupt_capability(adapter);
6168 adapter->flags &= ~IGC_FLAG_HAS_MSI;
6169 }
6170
6171 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
6172 netdev->name, adapter);
6173
6174 if (err)
6175 netdev_err(netdev, "Error %d getting interrupt\n", err);
6176
6177 request_done:
6178 return err;
6179 }
6180
6181 /**
6182 * __igc_open - Called when a network interface is made active
6183 * @netdev: network interface device structure
6184 * @resuming: boolean indicating if the device is resuming
6185 *
6186 * Returns 0 on success, negative value on failure
6187 *
6188 * The open entry point is called when a network interface is made
6189 * active by the system (IFF_UP). At this point all resources needed
6190 * for transmit and receive operations are allocated, the interrupt
6191 * handler is registered with the OS, the watchdog timer is started,
6192 * and the stack is notified that the interface is ready.
6193 */
6194 static int __igc_open(struct net_device *netdev, bool resuming)
6195 {
6196 struct igc_adapter *adapter = netdev_priv(netdev);
6197 struct pci_dev *pdev = adapter->pdev;
6198 struct igc_hw *hw = &adapter->hw;
6199 struct napi_struct *napi;
6200 int err = 0;
6201 int i = 0;
6202
6203 /* disallow open during test */
6204
6205 if (test_bit(__IGC_TESTING, &adapter->state)) {
6206 WARN_ON(resuming);
6207 return -EBUSY;
6208 }
6209
6210 if (!resuming)
6211 pm_runtime_get_sync(&pdev->dev);
6212
6213 netif_carrier_off(netdev);
6214
6215 /* allocate transmit descriptors */
6216 err = igc_setup_all_tx_resources(adapter);
6217 if (err)
6218 goto err_setup_tx;
6219
6220 /* allocate receive descriptors */
6221 err = igc_setup_all_rx_resources(adapter);
6222 if (err)
6223 goto err_setup_rx;
6224
6225 igc_power_up_link(adapter);
6226
6227 igc_configure(adapter);
6228
6229 err = igc_request_irq(adapter);
6230 if (err)
6231 goto err_req_irq;
6232
6233 clear_bit(__IGC_DOWN, &adapter->state);
6234
6235 for (i = 0; i < adapter->num_q_vectors; i++) {
6236 napi = &adapter->q_vector[i]->napi;
6237 napi_enable(napi);
6238 igc_set_queue_napi(adapter, i, napi);
6239 }
6240
6241 /* Clear any pending interrupts. */
6242 rd32(IGC_ICR);
6243 igc_irq_enable(adapter);
6244
6245 if (!resuming)
6246 pm_runtime_put(&pdev->dev);
6247
6248 netif_tx_start_all_queues(netdev);
6249
6250 /* start the watchdog. */
6251 hw->mac.get_link_status = true;
6252 schedule_work(&adapter->watchdog_task);
6253
6254 return IGC_SUCCESS;
6255
6256 err_req_irq:
6257 igc_release_hw_control(adapter);
6258 igc_power_down_phy_copper_base(&adapter->hw);
6259 igc_free_all_rx_resources(adapter);
6260 err_setup_rx:
6261 igc_free_all_tx_resources(adapter);
6262 err_setup_tx:
6263 igc_reset(adapter);
6264 if (!resuming)
6265 pm_runtime_put(&pdev->dev);
6266
6267 return err;
6268 }
6269
6270 int igc_open(struct net_device *netdev)
6271 {
6272 struct igc_adapter *adapter = netdev_priv(netdev);
6273 int err;
6274
6275 /* Notify the stack of the actual queue counts. */
6276 err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
6277 adapter->num_rx_queues);
6278 if (err) {
6279 netdev_err(netdev, "error setting real queue count\n");
6280 return err;
6281 }
6282
6283 return __igc_open(netdev, false);
6284 }
6285
6286 /**
6287 * __igc_close - Disables a network interface
6288 * @netdev: network interface device structure
6289 * @suspending: boolean indicating the device is suspending
6290 *
6291 * Returns 0, this is not allowed to fail
6292 *
6293 * The close entry point is called when an interface is de-activated
6294 * by the OS. The hardware is still under the driver's control, but
6295 * needs to be disabled. A global MAC reset is issued to stop the
6296 * hardware, and all transmit and receive resources are freed.
6297 */
6298 static int __igc_close(struct net_device *netdev, bool suspending)
6299 {
6300 struct igc_adapter *adapter = netdev_priv(netdev);
6301 struct pci_dev *pdev = adapter->pdev;
6302
6303 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
6304
6305 if (!suspending)
6306 pm_runtime_get_sync(&pdev->dev);
6307
6308 igc_down(adapter);
6309
6310 igc_release_hw_control(adapter);
6311
6312 igc_free_irq(adapter);
6313
6314 igc_free_all_tx_resources(adapter);
6315 igc_free_all_rx_resources(adapter);
6316
6317 if (!suspending)
6318 pm_runtime_put_sync(&pdev->dev);
6319
6320 return 0;
6321 }
6322
6323 int igc_close(struct net_device *netdev)
6324 {
6325 if (netif_device_present(netdev) || netdev->dismantle)
6326 return __igc_close(netdev, false);
6327 return 0;
6328 }
6329
6330 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
6331 bool enable)
6332 {
6333 struct igc_ring *ring;
6334
6335 if (queue < 0 || queue >= adapter->num_tx_queues)
6336 return -EINVAL;
6337
6338 ring = adapter->tx_ring[queue];
6339 ring->launchtime_enable = enable;
6340
6341 return 0;
6342 }
6343
6344 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
6345 {
6346 struct timespec64 b;
6347
6348 b = ktime_to_timespec64(base_time);
6349
6350 return timespec64_compare(now, &b) > 0;
6351 }
6352
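/**
 * validate_schedule - sanity-check a taprio schedule before offloading it
 * @adapter: board private structure
 * @qopt: taprio offload request from the stack
 *
 * Rejects schedules that use cycle-time extension, that would start in the
 * future on i225 (which would hold packets until the base time and trigger Tx
 * hangs), that use commands other than set-gates, or that open and close the
 * same queue more than once per cycle without keeping its gate open.
 */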
6353 static bool validate_schedule(struct igc_adapter *adapter,
6354 const struct tc_taprio_qopt_offload *qopt)
6355 {
6356 int queue_uses[IGC_MAX_TX_QUEUES] = { };
6357 struct igc_hw *hw = &adapter->hw;
6358 struct timespec64 now;
6359 size_t n;
6360
6361 if (qopt->cycle_time_extension)
6362 return false;
6363
6364 igc_ptp_read(adapter, &now);
6365
6366 /* If we program the controller's BASET registers with a time
6367 * in the future, it will hold all the packets until that
6368 * time, causing a lot of TX Hangs, so to avoid that, we
6369 * reject schedules that would start in the future.
6370 * Note: Limitation above is no longer in i226.
6371 */
6372 if (!is_base_time_past(qopt->base_time, &now) &&
6373 igc_is_device_id_i225(hw))
6374 return false;
6375
6376 for (n = 0; n < qopt->num_entries; n++) {
6377 const struct tc_taprio_sched_entry *e, *prev;
6378 int i;
6379
6380 prev = n ? &qopt->entries[n - 1] : NULL;
6381 e = &qopt->entries[n];
6382
6383 /* i225 only supports "global" frame preemption
6384 * settings.
6385 */
6386 if (e->command != TC_TAPRIO_CMD_SET_GATES)
6387 return false;
6388
6389 for (i = 0; i < adapter->num_tx_queues; i++)
6390 if (e->gate_mask & BIT(i)) {
6391 queue_uses[i]++;
6392
6393 /* There are limitations: A single queue cannot
6394 * be opened and closed multiple times per cycle
6395 * unless the gate stays open. Check for it.
6396 */
6397 if (queue_uses[i] > 1 &&
6398 !(prev->gate_mask & BIT(i)))
6399 return false;
6400 }
6401 }
6402
6403 return true;
6404 }
6405
6406 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6407 struct tc_etf_qopt_offload *qopt)
6408 {
6409 struct igc_hw *hw = &adapter->hw;
6410 int err;
6411
6412 if (hw->mac.type != igc_i225)
6413 return -EOPNOTSUPP;
6414
6415 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6416 if (err)
6417 return err;
6418
6419 return igc_tsn_offload_apply(adapter);
6420 }
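
/* Illustrative only: launch-time offload is normally requested from user
 * space through the etf qdisc; the interface name, parent handle and delta
 * below are placeholders, not values mandated by this driver:
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *           clockid CLOCK_TAI delta 500000 offload
 */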
6421
6422 static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
6423 {
6424 unsigned long flags;
6425 int i;
6426
6427 adapter->base_time = 0;
6428 adapter->cycle_time = NSEC_PER_SEC;
6429 adapter->taprio_offload_enable = false;
6430 adapter->qbv_config_change_errors = 0;
6431 adapter->qbv_count = 0;
6432
6433 for (i = 0; i < adapter->num_tx_queues; i++) {
6434 struct igc_ring *ring = adapter->tx_ring[i];
6435
6436 ring->start_time = 0;
6437 ring->end_time = NSEC_PER_SEC;
6438 ring->max_sdu = 0;
6439 ring->preemptible = false;
6440 }
6441
6442 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6443
6444 adapter->qbv_transition = false;
6445
6446 for (i = 0; i < adapter->num_tx_queues; i++) {
6447 struct igc_ring *ring = adapter->tx_ring[i];
6448
6449 ring->oper_gate_closed = false;
6450 ring->admin_gate_closed = false;
6451 }
6452
6453 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6454
6455 return 0;
6456 }
6457
6458 static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6459 {
6460 igc_qbv_clear_schedule(adapter);
6461
6462 return 0;
6463 }
6464
6465 static void igc_taprio_stats(struct net_device *dev,
6466 struct tc_taprio_qopt_stats *stats)
6467 {
6468 /* When Strict_End is enabled, the tx_overruns counter
6469 * will always be zero.
6470 */
6471 stats->tx_overruns = 0;
6472 }
6473
6474 static void igc_taprio_queue_stats(struct net_device *dev,
6475 struct tc_taprio_qopt_queue_stats *queue_stats)
6476 {
6477 struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
6478
6479 /* When Strict_End is enabled, the tx_overruns counter
6480 * will always be zero.
6481 */
6482 stats->tx_overruns = 0;
6483 }
6484
6485 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6486 struct tc_taprio_qopt_offload *qopt)
6487 {
6488 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6489 struct igc_hw *hw = &adapter->hw;
6490 u32 start_time = 0, end_time = 0;
6491 struct timespec64 now;
6492 unsigned long flags;
6493 size_t n;
6494 int i;
6495
6496 if (qopt->base_time < 0)
6497 return -ERANGE;
6498
6499 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
6500 return -EALREADY;
6501
6502 if (!validate_schedule(adapter, qopt))
6503 return -EINVAL;
6504
6505 if (qopt->mqprio.preemptible_tcs &&
6506 !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
6507 NL_SET_ERR_MSG_MOD(qopt->extack,
6508 "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
6509 return -ENODEV;
6510 }
6511
6512 igc_ptp_read(adapter, &now);
6513
6514 if (igc_tsn_is_taprio_activated_by_user(adapter) &&
6515 is_base_time_past(qopt->base_time, &now))
6516 adapter->qbv_config_change_errors++;
6517
6518 adapter->cycle_time = qopt->cycle_time;
6519 adapter->base_time = qopt->base_time;
6520 adapter->taprio_offload_enable = true;
6521
6522 for (n = 0; n < qopt->num_entries; n++) {
6523 struct tc_taprio_sched_entry *e = &qopt->entries[n];
6524
6525 end_time += e->interval;
6526
6527 /* If any of the conditions below are true, we need to manually
6528 * control the end time of the cycle.
6529 * 1. Qbv users can specify a cycle time that is not equal
6530 * to the total GCL intervals. Hence, recalculation is
6531 * necessary here to exclude the time interval that
6532 * exceeds the cycle time.
6533 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6534 * once the end of the list is reached, it will switch
6535 * to the END_OF_CYCLE state and leave the gates in the
6536 * same state until the next cycle is started.
6537 */
6538 if (end_time > adapter->cycle_time ||
6539 n + 1 == qopt->num_entries)
6540 end_time = adapter->cycle_time;
6541
6542 for (i = 0; i < adapter->num_tx_queues; i++) {
6543 struct igc_ring *ring = adapter->tx_ring[i];
6544
6545 if (!(e->gate_mask & BIT(i)))
6546 continue;
6547
6548 /* Check whether a queue stays open for more than one
6549 * entry. If so, keep the start and advance the end
6550 * time.
6551 */
6552 if (!queue_configured[i])
6553 ring->start_time = start_time;
6554 ring->end_time = end_time;
6555
6556 if (ring->start_time >= adapter->cycle_time)
6557 queue_configured[i] = false;
6558 else
6559 queue_configured[i] = true;
6560 }
6561
6562 start_time += e->interval;
6563 }
6564
6565 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6566
6567 /* Check whether a queue gets configured.
6568 * If not, set the start and end time to be end time.
6569 */
6570 for (i = 0; i < adapter->num_tx_queues; i++) {
6571 struct igc_ring *ring = adapter->tx_ring[i];
6572
6573 if (!is_base_time_past(qopt->base_time, &now)) {
6574 ring->admin_gate_closed = false;
6575 } else {
6576 ring->oper_gate_closed = false;
6577 ring->admin_gate_closed = false;
6578 }
6579
6580 if (!queue_configured[i]) {
6581 if (!is_base_time_past(qopt->base_time, &now))
6582 ring->admin_gate_closed = true;
6583 else
6584 ring->oper_gate_closed = true;
6585
6586 ring->start_time = end_time;
6587 ring->end_time = end_time;
6588 }
6589 }
6590
6591 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6592
6593 for (i = 0; i < adapter->num_tx_queues; i++) {
6594 struct igc_ring *ring = adapter->tx_ring[i];
6595 struct net_device *dev = adapter->netdev;
6596
6597 if (qopt->max_sdu[i])
6598 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
6599 else
6600 ring->max_sdu = 0;
6601 }
6602
6603 igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
6604
6605 return 0;
6606 }
6607
6608 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6609 struct tc_taprio_qopt_offload *qopt)
6610 {
6611 struct igc_hw *hw = &adapter->hw;
6612 int err;
6613
6614 if (hw->mac.type != igc_i225)
6615 return -EOPNOTSUPP;
6616
6617 switch (qopt->cmd) {
6618 case TAPRIO_CMD_REPLACE:
6619 err = igc_save_qbv_schedule(adapter, qopt);
6620 break;
6621 case TAPRIO_CMD_DESTROY:
6622 err = igc_tsn_clear_schedule(adapter);
6623 break;
6624 case TAPRIO_CMD_STATS:
6625 igc_taprio_stats(adapter->netdev, &qopt->stats);
6626 return 0;
6627 case TAPRIO_CMD_QUEUE_STATS:
6628 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
6629 return 0;
6630 default:
6631 return -EOPNOTSUPP;
6632 }
6633
6634 if (err)
6635 return err;
6636
6637 return igc_tsn_offload_apply(adapter);
6638 }
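
/* Illustrative only: a full-offload taprio schedule is normally installed
 * from user space along these lines; the interface name, priority map,
 * base-time and gate intervals are placeholders (flags 0x2 requests full
 * offload):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 4 map 0 1 2 3 3 3 3 3 queues 1@0 1@1 1@2 1@3 \
 *           base-time 1000000000 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 0E 700000 \
 *           flags 0x2
 */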
6639
6640 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6641 bool enable, int idleslope, int sendslope,
6642 int hicredit, int locredit)
6643 {
6644 bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6645 struct net_device *netdev = adapter->netdev;
6646 struct igc_ring *ring;
6647 int i;
6648
6649 	/* i225 has two sets of credit-based shaper logic, so CBS is
6650 	 * supported only on the two highest priority queues.
6651 	 */
6652 if (queue < 0 || queue > 1)
6653 return -EINVAL;
6654
6655 ring = adapter->tx_ring[queue];
6656
6657 for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6658 if (adapter->tx_ring[i])
6659 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6660
6661 /* CBS should be enabled on the highest priority queue first in order
6662 * for the CBS algorithm to operate as intended.
6663 */
6664 if (enable) {
6665 if (queue == 1 && !cbs_status[0]) {
6666 netdev_err(netdev,
6667 "Enabling CBS on queue1 before queue0\n");
6668 return -EINVAL;
6669 }
6670 } else {
6671 if (queue == 0 && cbs_status[1]) {
6672 netdev_err(netdev,
6673 "Disabling CBS on queue0 before queue1\n");
6674 return -EINVAL;
6675 }
6676 }
6677
6678 ring->cbs_enable = enable;
6679 ring->idleslope = idleslope;
6680 ring->sendslope = sendslope;
6681 ring->hicredit = hicredit;
6682 ring->locredit = locredit;
6683
6684 return 0;
6685 }
6686
6687 static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6688 struct tc_cbs_qopt_offload *qopt)
6689 {
6690 struct igc_hw *hw = &adapter->hw;
6691 int err;
6692
6693 if (hw->mac.type != igc_i225)
6694 return -EOPNOTSUPP;
6695
6696 if (qopt->queue < 0 || qopt->queue > 1)
6697 return -EINVAL;
6698
6699 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6700 qopt->idleslope, qopt->sendslope,
6701 qopt->hicredit, qopt->locredit);
6702 if (err)
6703 return err;
6704
6705 return igc_tsn_offload_apply(adapter);
6706 }
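
/* Illustrative only: CBS offload for one of the two supported queues is
 * normally configured from user space roughly as follows; the parent handle
 * and slope/credit values are placeholders:
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *           idleslope 20000 sendslope -980000 \
 *           hicredit 30 locredit -1470 offload 1
 */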
6707
6708 static int igc_tc_query_caps(struct igc_adapter *adapter,
6709 struct tc_query_caps_base *base)
6710 {
6711 struct igc_hw *hw = &adapter->hw;
6712
6713 switch (base->type) {
6714 case TC_SETUP_QDISC_MQPRIO: {
6715 struct tc_mqprio_caps *caps = base->caps;
6716
6717 caps->validate_queue_counts = true;
6718
6719 return 0;
6720 }
6721 case TC_SETUP_QDISC_TAPRIO: {
6722 struct tc_taprio_caps *caps = base->caps;
6723
6724 if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO))
6725 caps->broken_mqprio = true;
6726
6727 if (hw->mac.type == igc_i225) {
6728 caps->supports_queue_max_sdu = true;
6729 caps->gate_mask_per_txq = true;
6730 }
6731
6732 return 0;
6733 }
6734 default:
6735 return -EOPNOTSUPP;
6736 }
6737 }
6738
6739 static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
6740 u16 *offset)
6741 {
6742 int i;
6743
6744 adapter->strict_priority_enable = true;
6745 adapter->num_tc = num_tc;
6746
6747 for (i = 0; i < num_tc; i++)
6748 adapter->queue_per_tc[i] = offset[i];
6749 }
6750
6751 static bool
6752 igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio)
6753 {
6754 int num_tc = mqprio->qopt.num_tc;
6755 int i;
6756
6757 for (i = 1; i < num_tc; i++) {
6758 if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i])
6759 return false;
6760 }
6761
6762 return true;
6763 }
6764
6765 static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
6766 struct tc_mqprio_qopt_offload *mqprio)
6767 {
6768 struct igc_hw *hw = &adapter->hw;
6769 int err, i;
6770
6771 if (hw->mac.type != igc_i225)
6772 return -EOPNOTSUPP;
6773
6774 if (!mqprio->qopt.num_tc) {
6775 adapter->strict_priority_enable = false;
6776 igc_fpe_clear_preempt_queue(adapter);
6777 netdev_reset_tc(adapter->netdev);
6778 goto apply;
6779 }
6780
6781 /* There are as many TCs as Tx queues. */
6782 if (mqprio->qopt.num_tc != adapter->num_tx_queues) {
6783 NL_SET_ERR_MSG_FMT_MOD(mqprio->extack,
6784 "Only %d traffic classes supported",
6785 adapter->num_tx_queues);
6786 return -EOPNOTSUPP;
6787 }
6788
6789 /* Only one queue per TC is supported. */
6790 for (i = 0; i < mqprio->qopt.num_tc; i++) {
6791 if (mqprio->qopt.count[i] != 1) {
6792 NL_SET_ERR_MSG_MOD(mqprio->extack,
6793 "Only one queue per TC supported");
6794 return -EOPNOTSUPP;
6795 }
6796 }
6797
6798 if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) {
6799 NL_SET_ERR_MSG_MOD(mqprio->extack,
6800 "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)");
6801 return -EOPNOTSUPP;
6802 }
6803
6804 igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
6805 mqprio->qopt.offset);
6806
6807 err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
6808 if (err)
6809 return err;
6810
6811 for (i = 0; i < adapter->num_tc; i++) {
6812 err = netdev_set_tc_queue(adapter->netdev, i, 1,
6813 adapter->queue_per_tc[i]);
6814 if (err)
6815 return err;
6816 }
6817
6818 	/* In case the card is configured with fewer than four queues. */
6819 for (; i < IGC_MAX_TX_QUEUES; i++)
6820 adapter->queue_per_tc[i] = i;
6821
6822 mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
6823 igc_fpe_save_preempt_queue(adapter, mqprio);
6824
6825 apply:
6826 return igc_tsn_offload_apply(adapter);
6827 }
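
/* Illustrative only: the mqprio offload handled above expects exactly one
 * queue per traffic class with increasing queue offsets, e.g. (interface
 * name and priority map are placeholders):
 *
 *   tc qdisc add dev eth0 parent root handle 100 mqprio \
 *           num_tc 4 map 0 1 2 3 3 3 3 3 queues 1@0 1@1 1@2 1@3 hw 1
 */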
6828
6829 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6830 void *type_data)
6831 {
6832 struct igc_adapter *adapter = netdev_priv(dev);
6833
6834 adapter->tc_setup_type = type;
6835
6836 switch (type) {
6837 case TC_QUERY_CAPS:
6838 return igc_tc_query_caps(adapter, type_data);
6839 case TC_SETUP_QDISC_TAPRIO:
6840 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6841
6842 case TC_SETUP_QDISC_ETF:
6843 return igc_tsn_enable_launchtime(adapter, type_data);
6844
6845 case TC_SETUP_QDISC_CBS:
6846 return igc_tsn_enable_cbs(adapter, type_data);
6847
6848 case TC_SETUP_QDISC_MQPRIO:
6849 return igc_tsn_enable_mqprio(adapter, type_data);
6850
6851 default:
6852 return -EOPNOTSUPP;
6853 }
6854 }
6855
6856 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6857 {
6858 struct igc_adapter *adapter = netdev_priv(dev);
6859
6860 switch (bpf->command) {
6861 case XDP_SETUP_PROG:
6862 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6863 case XDP_SETUP_XSK_POOL:
6864 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6865 bpf->xsk.queue_id);
6866 default:
6867 return -EOPNOTSUPP;
6868 }
6869 }
6870
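/**
 * igc_xdp_xmit - transmit a batch of XDP frames (.ndo_xdp_xmit)
 * @dev: network interface device structure
 * @num_frames: number of frames in @frames
 * @frames: XDP frames to transmit
 * @flags: XDP_XMIT_* flags; XDP_XMIT_FLUSH kicks the Tx tail
 *
 * Returns the number of frames placed on the ring; the caller remains
 * responsible for any frames that were not consumed.
 */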
6871 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6872 struct xdp_frame **frames, u32 flags)
6873 {
6874 struct igc_adapter *adapter = netdev_priv(dev);
6875 int cpu = smp_processor_id();
6876 struct netdev_queue *nq;
6877 struct igc_ring *ring;
6878 int i, nxmit;
6879
6880 if (unlikely(!netif_carrier_ok(dev)))
6881 return -ENETDOWN;
6882
6883 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6884 return -EINVAL;
6885
6886 ring = igc_get_tx_ring(adapter, cpu);
6887 nq = txring_txq(ring);
6888
6889 __netif_tx_lock(nq, cpu);
6890
6891 /* Avoid transmit queue timeout since we share it with the slow path */
6892 txq_trans_cond_update(nq);
6893
6894 nxmit = 0;
6895 for (i = 0; i < num_frames; i++) {
6896 int err;
6897 struct xdp_frame *xdpf = frames[i];
6898
6899 err = igc_xdp_init_tx_descriptor(ring, xdpf);
6900 if (err)
6901 break;
6902 nxmit++;
6903 }
6904
6905 if (flags & XDP_XMIT_FLUSH)
6906 igc_flush_tx_descriptors(ring);
6907
6908 __netif_tx_unlock(nq);
6909
6910 return nxmit;
6911 }
6912
6913 static u32 igc_sw_irq_prep(struct igc_q_vector *q_vector)
6914 {
6915 u32 eics = 0;
6916
6917 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6918 eics = q_vector->eims_value;
6919
6920 return eics;
6921 }
6922
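/**
 * igc_xsk_wakeup - kick NAPI for an AF_XDP socket (.ndo_xsk_wakeup)
 * @dev: network interface device structure
 * @queue_id: Rx/Tx queue pair to wake
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX
 *
 * Raises a software interrupt through IGC_EICS for the queue's vector so the
 * corresponding NAPI context runs and services the XSK rings, unless NAPI is
 * already scheduled (in which case it is only marked as missed).
 */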
6923 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6924 {
6925 struct igc_adapter *adapter = netdev_priv(dev);
6926 struct igc_hw *hw = &adapter->hw;
6927 struct igc_ring *ring;
6928 u32 eics = 0;
6929
6930 if (test_bit(__IGC_DOWN, &adapter->state))
6931 return -ENETDOWN;
6932
6933 if (!igc_xdp_is_enabled(adapter))
6934 return -ENXIO;
6935 	/* Check if queue_id is valid. Tx and Rx queue counts are always the same */
6936 if (queue_id >= adapter->num_rx_queues)
6937 return -EINVAL;
6938
6939 ring = adapter->rx_ring[queue_id];
6940
6941 if (!ring->xsk_pool)
6942 return -ENXIO;
6943
6944 if (flags & XDP_WAKEUP_RX)
6945 eics |= igc_sw_irq_prep(ring->q_vector);
6946
6947 if (flags & XDP_WAKEUP_TX) {
6948 		/* If IGC_FLAG_QUEUE_PAIRS is active, the q_vector
6949 		 * and NAPI are shared between Rx and Tx.
6950 		 * If NAPI is already running it will have been marked as
6951 		 * missed from the Rx path, making this Tx call a no-op.
6952 		 */
6953 ring = adapter->tx_ring[queue_id];
6954 eics |= igc_sw_irq_prep(ring->q_vector);
6955 }
6956
6957 if (eics)
6958 /* Cause software interrupt */
6959 wr32(IGC_EICS, eics);
6960
6961 return 0;
6962 }
6963
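/**
 * igc_get_tstamp - resolve an inline Rx timestamp (.ndo_get_tstamp)
 * @dev: network interface device structure
 * @hwtstamps: stashed hardware timestamps carrying the inline descriptor data
 * @cycles: when true return the timer1 stamp, otherwise the timer0 stamp
 */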
6964 static ktime_t igc_get_tstamp(struct net_device *dev,
6965 const struct skb_shared_hwtstamps *hwtstamps,
6966 bool cycles)
6967 {
6968 struct igc_adapter *adapter = netdev_priv(dev);
6969 struct igc_inline_rx_tstamps *tstamp;
6970 ktime_t timestamp;
6971
6972 tstamp = hwtstamps->netdev_data;
6973
6974 if (cycles)
6975 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
6976 else
6977 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
6978
6979 return timestamp;
6980 }
6981
6982 static const struct net_device_ops igc_netdev_ops = {
6983 .ndo_open = igc_open,
6984 .ndo_stop = igc_close,
6985 .ndo_start_xmit = igc_xmit_frame,
6986 .ndo_set_rx_mode = igc_set_rx_mode,
6987 .ndo_set_mac_address = igc_set_mac,
6988 .ndo_change_mtu = igc_change_mtu,
6989 .ndo_tx_timeout = igc_tx_timeout,
6990 .ndo_get_stats64 = igc_get_stats64,
6991 .ndo_fix_features = igc_fix_features,
6992 .ndo_set_features = igc_set_features,
6993 .ndo_features_check = igc_features_check,
6994 .ndo_setup_tc = igc_setup_tc,
6995 .ndo_bpf = igc_bpf,
6996 .ndo_xdp_xmit = igc_xdp_xmit,
6997 .ndo_xsk_wakeup = igc_xsk_wakeup,
6998 .ndo_get_tstamp = igc_get_tstamp,
6999 .ndo_hwtstamp_get = igc_ptp_hwtstamp_get,
7000 .ndo_hwtstamp_set = igc_ptp_hwtstamp_set,
7001 };
7002
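/**
 * igc_rd32 - read a device register, detecting surprise removal
 * @hw: pointer to the HW structure
 * @reg: register offset
 *
 * A read that returns all ones from a register that cannot legitimately do
 * so is treated as a lost PCIe link: the register mapping is dropped and the
 * netdev is detached so subsequent accesses fail fast.
 */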
7003 u32 igc_rd32(struct igc_hw *hw, u32 reg)
7004 {
7005 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
7006 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
7007 u32 value = 0;
7008
7009 if (IGC_REMOVED(hw_addr))
7010 return ~value;
7011
7012 value = readl(&hw_addr[reg]);
7013
7014 /* reads should not return all F's */
7015 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
7016 struct net_device *netdev = igc->netdev;
7017
7018 hw->hw_addr = NULL;
7019 netif_device_detach(netdev);
7020 netdev_err(netdev, "PCIe link lost, device now detached\n");
7021 WARN(pci_device_is_present(igc->pdev),
7022 "igc: Failed to read reg 0x%x!\n", reg);
7023 }
7024
7025 return value;
7026 }
7027
7028 /* Mapping HW RSS Type to enum xdp_rss_hash_type */
7029 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
7030 [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
7031 [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
7032 [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
7033 [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
7034 [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
7035 [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
7036 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
7037 [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
7038 [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
7039 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
7040 [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
7041 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
7042 	[12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
7043 [13] = XDP_RSS_TYPE_NONE,
7044 [14] = XDP_RSS_TYPE_NONE,
7045 [15] = XDP_RSS_TYPE_NONE,
7046 };
7047
7048 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
7049 enum xdp_rss_hash_type *rss_type)
7050 {
7051 const struct igc_xdp_buff *ctx = (void *)_ctx;
7052
7053 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
7054 return -ENODATA;
7055
7056 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
7057 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
7058
7059 return 0;
7060 }
7061
7062 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7063 {
7064 const struct igc_xdp_buff *ctx = (void *)_ctx;
7065 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
7066 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
7067
7068 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
7069 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
7070
7071 return 0;
7072 }
7073
7074 return -ENODATA;
7075 }
7076
7077 static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
7078 .xmo_rx_hash = igc_xdp_rx_hash,
7079 .xmo_rx_timestamp = igc_xdp_rx_timestamp,
7080 };
7081
7082 static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
7083 {
7084 struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
7085 hrtimer);
7086 unsigned long flags;
7087 unsigned int i;
7088
7089 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
7090
7091 adapter->qbv_transition = true;
7092 for (i = 0; i < adapter->num_tx_queues; i++) {
7093 struct igc_ring *tx_ring = adapter->tx_ring[i];
7094
7095 if (tx_ring->admin_gate_closed) {
7096 tx_ring->admin_gate_closed = false;
7097 tx_ring->oper_gate_closed = true;
7098 } else {
7099 tx_ring->oper_gate_closed = false;
7100 }
7101 }
7102 adapter->qbv_transition = false;
7103
7104 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
7105
7106 return HRTIMER_NORESTART;
7107 }
7108
7109 /**
7110 * igc_probe - Device Initialization Routine
7111 * @pdev: PCI device information struct
7112 * @ent: entry in igc_pci_tbl
7113 *
7114 * Returns 0 on success, negative on failure
7115 *
7116 * igc_probe initializes an adapter identified by a pci_dev structure.
7117 * The OS initialization, configuring the adapter private structure,
7118 * and a hardware reset occur.
7119 */
7120 static int igc_probe(struct pci_dev *pdev,
7121 const struct pci_device_id *ent)
7122 {
7123 struct igc_adapter *adapter;
7124 struct net_device *netdev;
7125 struct igc_hw *hw;
7126 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
7127 int err;
7128
7129 err = pci_enable_device_mem(pdev);
7130 if (err)
7131 return err;
7132
7133 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7134 if (err) {
7135 dev_err(&pdev->dev,
7136 "No usable DMA configuration, aborting\n");
7137 goto err_dma;
7138 }
7139
7140 err = pci_request_mem_regions(pdev, igc_driver_name);
7141 if (err)
7142 goto err_pci_reg;
7143
7144 err = pci_enable_ptm(pdev, NULL);
7145 if (err < 0)
7146 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
7147
7148 pci_set_master(pdev);
7149
7150 err = -ENOMEM;
7151 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
7152 IGC_MAX_TX_QUEUES);
7153
7154 if (!netdev)
7155 goto err_alloc_etherdev;
7156
7157 SET_NETDEV_DEV(netdev, &pdev->dev);
7158
7159 pci_set_drvdata(pdev, netdev);
7160 adapter = netdev_priv(netdev);
7161 adapter->netdev = netdev;
7162 adapter->pdev = pdev;
7163 hw = &adapter->hw;
7164 hw->back = adapter;
7165 adapter->port_num = hw->bus.func;
7166 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
7167
7168 /* PCI config space info */
7169 hw->vendor_id = pdev->vendor;
7170 hw->device_id = pdev->device;
7171 hw->revision_id = pdev->revision;
7172 hw->subsystem_vendor_id = pdev->subsystem_vendor;
7173 hw->subsystem_device_id = pdev->subsystem_device;
7174
7175 /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
7176 if (igc_is_device_id_i226(hw))
7177 pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
7178
7179 err = pci_save_state(pdev);
7180 if (err)
7181 goto err_ioremap;
7182
7183 err = -EIO;
7184 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
7185 pci_resource_len(pdev, 0));
7186 if (!adapter->io_addr)
7187 goto err_ioremap;
7188
7189 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
7190 hw->hw_addr = adapter->io_addr;
7191
7192 netdev->netdev_ops = &igc_netdev_ops;
7193 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
7194 netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops;
7195 igc_ethtool_set_ops(netdev);
7196 netdev->watchdog_timeo = 5 * HZ;
7197
7198 netdev->mem_start = pci_resource_start(pdev, 0);
7199 netdev->mem_end = pci_resource_end(pdev, 0);
7200
7201 /* Copy the default MAC and PHY function pointers */
7202 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
7203 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
7204
7205 /* Initialize skew-specific constants */
7206 err = ei->get_invariants(hw);
7207 if (err)
7208 goto err_sw_init;
7209
7210 	/* Add supported features to the features list */
7211 netdev->features |= NETIF_F_SG;
7212 netdev->features |= NETIF_F_TSO;
7213 netdev->features |= NETIF_F_TSO6;
7214 netdev->features |= NETIF_F_TSO_ECN;
7215 netdev->features |= NETIF_F_RXHASH;
7216 netdev->features |= NETIF_F_RXCSUM;
7217 netdev->features |= NETIF_F_HW_CSUM;
7218 netdev->features |= NETIF_F_SCTP_CRC;
7219 netdev->features |= NETIF_F_HW_TC;
7220
7221 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
7222 NETIF_F_GSO_GRE_CSUM | \
7223 NETIF_F_GSO_IPXIP4 | \
7224 NETIF_F_GSO_IPXIP6 | \
7225 NETIF_F_GSO_UDP_TUNNEL | \
7226 NETIF_F_GSO_UDP_TUNNEL_CSUM)
7227
7228 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
7229 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
7230
7231 /* setup the private structure */
7232 err = igc_sw_init(adapter);
7233 if (err)
7234 goto err_sw_init;
7235
7236 /* copy netdev features into list of user selectable features */
7237 netdev->hw_features |= NETIF_F_NTUPLE;
7238 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
7239 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7240 netdev->hw_features |= netdev->features;
7241
7242 netdev->features |= NETIF_F_HIGHDMA;
7243
7244 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
7245 netdev->mpls_features |= NETIF_F_HW_CSUM;
7246 netdev->hw_enc_features |= netdev->vlan_features;
7247
7248 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7249 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7250
7251 /* enable HW vlan tag insertion/stripping by default */
7252 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7253
7254 /* MTU range: 68 - 9216 */
7255 netdev->min_mtu = ETH_MIN_MTU;
7256 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
7257
7258 /* before reading the NVM, reset the controller to put the device in a
7259 * known good starting state
7260 */
7261 hw->mac.ops.reset_hw(hw);
7262
7263 if (igc_get_flash_presence_i225(hw)) {
7264 if (hw->nvm.ops.validate(hw) < 0) {
7265 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
7266 err = -EIO;
7267 goto err_eeprom;
7268 }
7269 }
7270
7271 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
7272 /* copy the MAC address out of the NVM */
7273 if (hw->mac.ops.read_mac_addr(hw))
7274 dev_err(&pdev->dev, "NVM Read Error\n");
7275 }
7276
7277 eth_hw_addr_set(netdev, hw->mac.addr);
7278
7279 if (!is_valid_ether_addr(netdev->dev_addr)) {
7280 dev_err(&pdev->dev, "Invalid MAC Address\n");
7281 err = -EIO;
7282 goto err_eeprom;
7283 }
7284
7285 /* configure RXPBSIZE and TXPBSIZE */
7286 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
7287 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
7288
7289 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
7290 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
7291
7292 INIT_WORK(&adapter->reset_task, igc_reset_task);
7293 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
7294
7295 hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
7296 HRTIMER_MODE_REL);
7297
7298 /* Initialize link properties that are user-changeable */
7299 adapter->fc_autoneg = true;
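/* 0xaf advertises every supported link mode: 10/100 Mbps half and full
 * duplex plus 1000 Mbps and 2500 Mbps full duplex.
 */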
7300 hw->phy.autoneg_advertised = 0xaf;
7301
7302 hw->fc.requested_mode = igc_fc_default;
7303 hw->fc.current_mode = igc_fc_default;
7304
7305 /* By default, support wake on port A */
7306 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
7307
7308 /* initialize the wol settings based on the eeprom settings */
7309 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
7310 adapter->wol |= IGC_WUFC_MAG;
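/* The default wake filter is magic packet only; user space can adjust it
 * at runtime, e.g. "ethtool -s <iface> wol d" to disable wake-up.
 */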
7311
7312 device_set_wakeup_enable(&adapter->pdev->dev,
7313 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
7314
7315 igc_ptp_init(adapter);
7316
7317 igc_tsn_clear_schedule(adapter);
7318
7319 igc_fpe_init(adapter);
7320
7321 /* reset the hardware with the new settings */
7322 igc_reset(adapter);
7323
7324 /* let the f/w know that the h/w is now under the control of the
7325 * driver.
7326 */
7327 igc_get_hw_control(adapter);
7328
7329 strscpy(netdev->name, "eth%d", sizeof(netdev->name));
7330 err = register_netdev(netdev);
7331 if (err)
7332 goto err_register;
7333
7334 /* carrier off reporting is important to ethtool even BEFORE open */
7335 netif_carrier_off(netdev);
7336
7337 /* Keep a copy of the board-specific info for this device */
7338 adapter->ei = *ei;
7339
7340 /* print pcie link status and MAC address */
7341 pcie_print_link_status(pdev);
7342 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
7343
7344 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
7345 /* Disable EEE for internal PHY devices */
7346 hw->dev_spec._base.eee_enable = false;
7347 adapter->flags &= ~IGC_FLAG_EEE;
7348 igc_set_eee_i225(hw, false, false, false);
7349
7350 pm_runtime_put_noidle(&pdev->dev);
7351
7352 if (IS_ENABLED(CONFIG_IGC_LEDS)) {
7353 err = igc_led_setup(adapter);
7354 if (err) {
7355 netdev_warn_once(netdev,
7356 "LED init failed (%d); continuing without LED support\n",
7357 err);
7358 adapter->leds_available = false;
7359 } else {
7360 adapter->leds_available = true;
7361 }
7362 }
7363
7364 return 0;
7365
7366 err_register:
7367 igc_release_hw_control(adapter);
7368 igc_ptp_stop(adapter);
7369 err_eeprom:
7370 if (!igc_check_reset_block(hw))
7371 igc_reset_phy(hw);
7372 err_sw_init:
7373 igc_clear_interrupt_scheme(adapter);
7374 iounmap(adapter->io_addr);
7375 err_ioremap:
7376 free_netdev(netdev);
7377 err_alloc_etherdev:
7378 pci_release_mem_regions(pdev);
7379 err_pci_reg:
7380 err_dma:
7381 pci_disable_device(pdev);
7382 return err;
7383 }
7384
7385 /**
7386 * igc_remove - Device Removal Routine
7387 * @pdev: PCI device information struct
7388 *
7389 * igc_remove is called by the PCI subsystem to alert the driver
7390 * that it should release a PCI device. This could be caused by a
7391 * Hot-Plug event, or because the driver is going to be removed from
7392 * memory.
7393 */
7394 static void igc_remove(struct pci_dev *pdev)
7395 {
7396 struct net_device *netdev = pci_get_drvdata(pdev);
7397 struct igc_adapter *adapter = netdev_priv(netdev);
7398
7399 pm_runtime_get_noresume(&pdev->dev);
7400
7401 igc_flush_nfc_rules(adapter);
7402
7403 igc_ptp_stop(adapter);
7404
7405 pci_disable_ptm(pdev);
7406 pci_clear_master(pdev);
7407
7408 set_bit(__IGC_DOWN, &adapter->state);
7409
7410 timer_delete_sync(&adapter->watchdog_timer);
7411 timer_delete_sync(&adapter->phy_info_timer);
7412
7413 cancel_work_sync(&adapter->reset_task);
7414 cancel_work_sync(&adapter->watchdog_task);
7415 hrtimer_cancel(&adapter->hrtimer);
7416
7417 if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
7418 igc_led_free(adapter);
7419
7420 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7421 * would have already happened in close and is redundant.
7422 */
7423 igc_release_hw_control(adapter);
7424 unregister_netdev(netdev);
7425
7426 igc_clear_interrupt_scheme(adapter);
7427 pci_iounmap(pdev, adapter->io_addr);
7428 pci_release_mem_regions(pdev);
7429
7430 free_netdev(netdev);
7431
7432 pci_disable_device(pdev);
7433 }
7434
7435 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
7436 bool runtime)
7437 {
7438 struct net_device *netdev = pci_get_drvdata(pdev);
7439 struct igc_adapter *adapter = netdev_priv(netdev);
7440 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
7441 struct igc_hw *hw = &adapter->hw;
7442 u32 ctrl, rctl, status;
7443 bool wake;
7444
7445 rtnl_lock();
7446 netif_device_detach(netdev);
7447
7448 if (netif_running(netdev))
7449 __igc_close(netdev, true);
7450
7451 igc_ptp_suspend(adapter);
7452
7453 igc_clear_interrupt_scheme(adapter);
7454 rtnl_unlock();
7455
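/* If the link is already up, don't arm wake on link-status change */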
7456 status = rd32(IGC_STATUS);
7457 if (status & IGC_STATUS_LU)
7458 wufc &= ~IGC_WUFC_LNKC;
7459
7460 if (wufc) {
7461 igc_setup_rctl(adapter);
7462 igc_set_rx_mode(netdev);
7463
7464 /* turn on all-multi mode if wake on multicast is enabled */
7465 if (wufc & IGC_WUFC_MC) {
7466 rctl = rd32(IGC_RCTL);
7467 rctl |= IGC_RCTL_MPE;
7468 wr32(IGC_RCTL, rctl);
7469 }
7470
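/* Advertise wake-up capability while the device is in D3 */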
7471 ctrl = rd32(IGC_CTRL);
7472 ctrl |= IGC_CTRL_ADVD3WUC;
7473 wr32(IGC_CTRL, ctrl);
7474
7475 /* Allow time for pending master requests to run */
7476 igc_disable_pcie_master(hw);
7477
7478 wr32(IGC_WUC, IGC_WUC_PME_EN);
7479 wr32(IGC_WUFC, wufc);
7480 } else {
7481 wr32(IGC_WUC, 0);
7482 wr32(IGC_WUFC, 0);
7483 }
7484
7485 wake = wufc || adapter->en_mng_pt;
7486 if (!wake)
7487 igc_power_down_phy_copper_base(&adapter->hw);
7488 else
7489 igc_power_up_link(adapter);
7490
7491 if (enable_wake)
7492 *enable_wake = wake;
7493
7494 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7495 * would have already happened in close and is redundant.
7496 */
7497 igc_release_hw_control(adapter);
7498
7499 pci_disable_device(pdev);
7500
7501 return 0;
7502 }
7503
7504 static int igc_runtime_suspend(struct device *dev)
7505 {
7506 return __igc_shutdown(to_pci_dev(dev), NULL, 1);
7507 }
7508
7509 static void igc_deliver_wake_packet(struct net_device *netdev)
7510 {
7511 struct igc_adapter *adapter = netdev_priv(netdev);
7512 struct igc_hw *hw = &adapter->hw;
7513 struct sk_buff *skb;
7514 u32 wupl;
7515
7516 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
7517
7518 /* WUPM stores only the first 128 bytes of the wake packet.
7519 * Read the packet only if we have the whole thing.
7520 */
7521 if (wupl == 0 || wupl > IGC_WUPM_BYTES)
7522 return;
7523
7524 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
7525 if (!skb)
7526 return;
7527
7528 skb_put(skb, wupl);
7529
7530 /* Ensure reads are 32-bit aligned */
7531 wupl = roundup(wupl, 4);
7532
7533 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
7534
7535 skb->protocol = eth_type_trans(skb, netdev);
7536 netif_rx(skb);
7537 }
7538
7539 static int __igc_resume(struct device *dev, bool rpm)
7540 {
7541 struct pci_dev *pdev = to_pci_dev(dev);
7542 struct net_device *netdev = pci_get_drvdata(pdev);
7543 struct igc_adapter *adapter = netdev_priv(netdev);
7544 struct igc_hw *hw = &adapter->hw;
7545 u32 err, val;
7546
7547 pci_set_power_state(pdev, PCI_D0);
7548 pci_restore_state(pdev);
7549
7550 if (!pci_device_is_present(pdev))
7551 return -ENODEV;
7552 err = pci_enable_device_mem(pdev);
7553 if (err) {
7554 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
7555 return err;
7556 }
7557 pci_set_master(pdev);
7558
7559 pci_enable_wake(pdev, PCI_D3hot, 0);
7560 pci_enable_wake(pdev, PCI_D3cold, 0);
7561
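/* Re-apply the I226 workaround after the config space restore; ASPM L1.2
 * must stay disabled to avoid packet loss (see igc_probe()).
 */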
7562 if (igc_is_device_id_i226(hw))
7563 pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
7564
7565 if (igc_init_interrupt_scheme(adapter, true)) {
7566 netdev_err(netdev, "Unable to allocate memory for queues\n");
7567 return -ENOMEM;
7568 }
7569
7570 igc_reset(adapter);
7571
7572 /* let the f/w know that the h/w is now under the control of the
7573 * driver.
7574 */
7575 igc_get_hw_control(adapter);
7576
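/* If a received packet triggered the wake, deliver the stored wake packet
 * to the stack, then clear all wake-up status bits (WUS is write-1-to-clear).
 */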
7577 val = rd32(IGC_WUS);
7578 if (val & WAKE_PKT_WUS)
7579 igc_deliver_wake_packet(netdev);
7580
7581 wr32(IGC_WUS, ~0);
7582
7583 if (netif_running(netdev)) {
7584 if (!rpm)
7585 rtnl_lock();
7586 err = __igc_open(netdev, true);
7587 if (!rpm)
7588 rtnl_unlock();
7589 if (!err)
7590 netif_device_attach(netdev);
7591 }
7592
7593 return err;
7594 }
7595
7596 static int igc_resume(struct device *dev)
7597 {
7598 return __igc_resume(dev, false);
7599 }
7600
7601 static int igc_runtime_resume(struct device *dev)
7602 {
7603 return __igc_resume(dev, true);
7604 }
7605
7606 static int igc_suspend(struct device *dev)
7607 {
7608 return __igc_shutdown(to_pci_dev(dev), NULL, 0);
7609 }
7610
7611 static int __maybe_unused igc_runtime_idle(struct device *dev)
7612 {
7613 struct net_device *netdev = dev_get_drvdata(dev);
7614 struct igc_adapter *adapter = netdev_priv(netdev);
7615
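/* Never runtime-suspend immediately: return -EBUSY and, when there is no
 * link, schedule a delayed suspend for the PM core to act on later.
 */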
7616 if (!igc_has_link(adapter))
7617 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7618
7619 return -EBUSY;
7620 }
7621
7622 static void igc_shutdown(struct pci_dev *pdev)
7623 {
7624 bool wake;
7625
7626 __igc_shutdown(pdev, &wake, 0);
7627
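/* Only when the system is powering off (not rebooting), arm PME according
 * to the wake result and put the device into D3hot.
 */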
7628 if (system_state == SYSTEM_POWER_OFF) {
7629 pci_wake_from_d3(pdev, wake);
7630 pci_set_power_state(pdev, PCI_D3hot);
7631 }
7632 }
7633
7634 /**
7635 * igc_io_error_detected - called when PCI error is detected
7636 * @pdev: Pointer to PCI device
7637 * @state: The current PCI connection state
7638 *
7639 * This function is called after a PCI bus error affecting
7640 * this device has been detected.
7641 **/
7642 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
7643 pci_channel_state_t state)
7644 {
7645 struct net_device *netdev = pci_get_drvdata(pdev);
7646 struct igc_adapter *adapter = netdev_priv(netdev);
7647
7648 rtnl_lock();
7649 netif_device_detach(netdev);
7650
7651 if (state == pci_channel_io_perm_failure) {
7652 rtnl_unlock();
7653 return PCI_ERS_RESULT_DISCONNECT;
7654 }
7655
7656 if (netif_running(netdev))
7657 igc_down(adapter);
7658 pci_disable_device(pdev);
7659 rtnl_unlock();
7660
7661 /* Request a slot reset. */
7662 return PCI_ERS_RESULT_NEED_RESET;
7663 }
7664
7665 /**
7666 * igc_io_slot_reset - called after the PCI bus has been reset.
7667 * @pdev: Pointer to PCI device
7668 *
7669 * Restart the card from scratch, as if from a cold-boot. Implementation
7670 * resembles the first-half of the __igc_resume routine.
7671 **/
7672 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
7673 {
7674 struct net_device *netdev = pci_get_drvdata(pdev);
7675 struct igc_adapter *adapter = netdev_priv(netdev);
7676 struct igc_hw *hw = &adapter->hw;
7677 pci_ers_result_t result;
7678
7679 if (pci_enable_device_mem(pdev)) {
7680 netdev_err(netdev, "Could not re-enable PCI device after reset\n");
7681 result = PCI_ERS_RESULT_DISCONNECT;
7682 } else {
7683 pci_set_master(pdev);
7684 pci_restore_state(pdev);
7685
7686 pci_enable_wake(pdev, PCI_D3hot, 0);
7687 pci_enable_wake(pdev, PCI_D3cold, 0);
7688
7689 if (igc_is_device_id_i226(hw))
7690 pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
7691
7692 /* In case of PCI error, adapter loses its HW address
7693 * so we should re-assign it here.
7694 */
7695 hw->hw_addr = adapter->io_addr;
7696
7697 igc_reset(adapter);
7698 wr32(IGC_WUS, ~0);
7699 result = PCI_ERS_RESULT_RECOVERED;
7700 }
7701
7702 return result;
7703 }
7704
7705 /**
7706 * igc_io_resume - called when traffic can start to flow again.
7707 * @pdev: Pointer to PCI device
7708 *
7709 * This callback is called when the error recovery driver tells us that
7710 * it's OK to resume normal operation. Implementation resembles the
7711 * second-half of the __igc_resume routine.
7712 */
7713 static void igc_io_resume(struct pci_dev *pdev)
7714 {
7715 struct net_device *netdev = pci_get_drvdata(pdev);
7716 struct igc_adapter *adapter = netdev_priv(netdev);
7717
7718 rtnl_lock();
7719 if (netif_running(netdev)) {
7720 if (igc_open(netdev)) {
7721 rtnl_unlock();
7722 netdev_err(netdev, "igc_open failed after reset\n");
7723 return;
7724 }
7725 }
7726
7727 netif_device_attach(netdev);
7728
7729 /* let the f/w know that the h/w is now under the control of the
7730 * driver.
7731 */
7732 igc_get_hw_control(adapter);
7733 rtnl_unlock();
7734 }
7735
7736 static const struct pci_error_handlers igc_err_handler = {
7737 .error_detected = igc_io_error_detected,
7738 .slot_reset = igc_io_slot_reset,
7739 .resume = igc_io_resume,
7740 };
7741
7742 static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
7743 igc_runtime_suspend, igc_runtime_resume,
7744 igc_runtime_idle);
7745
7746 static struct pci_driver igc_driver = {
7747 .name = igc_driver_name,
7748 .id_table = igc_pci_tbl,
7749 .probe = igc_probe,
7750 .remove = igc_remove,
7751 .driver.pm = pm_ptr(&igc_pm_ops),
7752 .shutdown = igc_shutdown,
7753 .err_handler = &igc_err_handler,
7754 };
7755
7756 /**
7757 * igc_reinit_queues - reinitialize the adapter's interrupt scheme and queues
7758 * @adapter: pointer to adapter structure
7759 */
7760 int igc_reinit_queues(struct igc_adapter *adapter)
7761 {
7762 struct net_device *netdev = adapter->netdev;
7763 int err = 0;
7764
7765 if (netif_running(netdev))
7766 igc_close(netdev);
7767
7768 igc_reset_interrupt_capability(adapter);
7769
7770 if (igc_init_interrupt_scheme(adapter, true)) {
7771 netdev_err(netdev, "Unable to allocate memory for queues\n");
7772 return -ENOMEM;
7773 }
7774
7775 if (netif_running(netdev))
7776 err = igc_open(netdev);
7777
7778 if (!err) {
7779 /* Restore default IEEE 802.1Qbv schedule after queue reinit */
7780 igc_tsn_clear_schedule(adapter);
7781 }
7782
7783 return err;
7784 }
7785
7786 /**
7787 * igc_get_hw_dev - return device
7788 * @hw: pointer to hardware structure
7789 *
7790 * used by hardware layer to print debugging information
7791 */
7792 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
7793 {
7794 struct igc_adapter *adapter = hw->back;
7795
7796 return adapter->netdev;
7797 }
7798
7799 static void igc_disable_rx_ring_hw(struct igc_ring *ring)
7800 {
7801 struct igc_hw *hw = &ring->q_vector->adapter->hw;
7802 u8 idx = ring->reg_idx;
7803 u32 rxdctl;
7804
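/* Clear the queue-enable bit and set the software-flush bit so the
 * hardware flushes any descriptors still pending on this ring.
 */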
7805 rxdctl = rd32(IGC_RXDCTL(idx));
7806 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
7807 rxdctl |= IGC_RXDCTL_SWFLUSH;
7808 wr32(IGC_RXDCTL(idx), rxdctl);
7809 }
7810
7811 void igc_disable_rx_ring(struct igc_ring *ring)
7812 {
7813 igc_disable_rx_ring_hw(ring);
7814 igc_clean_rx_ring(ring);
7815 }
7816
7817 void igc_enable_rx_ring(struct igc_ring *ring)
7818 {
7819 struct igc_adapter *adapter = ring->q_vector->adapter;
7820
7821 igc_configure_rx_ring(adapter, ring);
7822
7823 if (ring->xsk_pool)
7824 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
7825 else
7826 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
7827 }
7828
7829 void igc_disable_tx_ring(struct igc_ring *ring)
7830 {
7831 igc_disable_tx_ring_hw(ring);
7832 igc_clean_tx_ring(ring);
7833 }
7834
7835 void igc_enable_tx_ring(struct igc_ring *ring)
7836 {
7837 struct igc_adapter *adapter = ring->q_vector->adapter;
7838
7839 igc_configure_tx_ring(adapter, ring);
7840 }
7841
7842 /**
7843 * igc_init_module - Driver Registration Routine
7844 *
7845 * igc_init_module is the first routine called when the driver is
7846 * loaded. All it does is register with the PCI subsystem.
7847 */
7848 static int __init igc_init_module(void)
7849 {
7850 int ret;
7851
7852 pr_info("%s\n", igc_driver_string);
7853 pr_info("%s\n", igc_copyright);
7854
7855 ret = pci_register_driver(&igc_driver);
7856 return ret;
7857 }
7858
7859 module_init(igc_init_module);
7860
7861 /**
7862 * igc_exit_module - Driver Exit Cleanup Routine
7863 *
7864 * igc_exit_module is called just before the driver is removed
7865 * from memory.
7866 */
7867 static void __exit igc_exit_module(void)
7868 {
7869 pci_unregister_driver(&igc_driver);
7870 }
7871
7872 module_exit(igc_exit_module);
7873 /* igc_main.c */
7874