Lines matching "no-hw-checksum"

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2009 - 2018 Intel Corporation. */
23 #include <net/checksum.h>
31 "Copyright (c) 2009 - 2012 Intel Corporation.";
34 static int debug = -1;
63 * igbvf_desc_unused - calculate if we have unused descriptors
68 if (ring->next_to_clean > ring->next_to_use) in igbvf_desc_unused()
69 return ring->next_to_clean - ring->next_to_use - 1; in igbvf_desc_unused()
71 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in igbvf_desc_unused()
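A brief aside: the unused-descriptor math above keeps one slot permanently empty so that next_to_use == next_to_clean can only mean an empty ring. A minimal userspace sketch of the same arithmetic, with hypothetical index values:

    #include <stdio.h>

    /* Same arithmetic as igbvf_desc_unused(): one slot is always left
     * unused so a full ring never looks identical to an empty one.
     */
    static int desc_unused(int next_to_clean, int next_to_use, int count)
    {
            if (next_to_clean > next_to_use)
                    return next_to_clean - next_to_use - 1;
            return count + next_to_clean - next_to_use - 1;
    }

    int main(void)
    {
            /* hypothetical 256-entry ring */
            printf("%d\n", desc_unused(0, 0, 256));    /* empty ring: 255 free */
            printf("%d\n", desc_unused(10, 200, 256)); /* wrapped: 65 free */
            printf("%d\n", desc_unused(200, 10, 256)); /* not wrapped: 189 free */
            return 0;
    }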
75 * igbvf_receive_skb - helper function to handle Rx indications
80 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
91 if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) && in igbvf_receive_skb()
96 if (test_bit(vid, adapter->active_vlans)) in igbvf_receive_skb()
100 napi_gro_receive(&adapter->rx_ring->napi, skb); in igbvf_receive_skb()
108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ in igbvf_rx_checksum_adv()
110 (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) in igbvf_rx_checksum_adv()
113 /* TCP/UDP checksum error bit is set */ in igbvf_rx_checksum_adv()
116 /* let the stack verify checksum errors */ in igbvf_rx_checksum_adv()
117 adapter->hw_csum_err++; in igbvf_rx_checksum_adv()
121 /* It must be a TCP or UDP packet with a valid checksum */ in igbvf_rx_checksum_adv()
123 skb->ip_summed = CHECKSUM_UNNECESSARY; in igbvf_rx_checksum_adv()
125 adapter->hw_csum_good++; in igbvf_rx_checksum_adv()
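For clarity, the Rx checksum-offload logic above reduces to three outcomes. The sketch below re-states that decision tree in plain C with invented status-bit values (the driver itself tests the E1000_RXD_*/E1000_RXDEXT_* descriptor bits); it is illustrative only, not the driver's code.

    #include <stdio.h>

    /* Placeholder status bits -- values invented for this sketch, not the
     * real e1000 descriptor bit definitions.
     */
    #define RX_STAT_IXSM   0x01    /* "ignore checksum indication" */
    #define RX_ERR_L4E     0x02    /* TCP/UDP checksum error */
    #define RX_ERR_IPE     0x04    /* IP checksum error */
    #define RX_STAT_L4CS   0x08    /* L4 checksum was checked by hardware */

    enum csum_verdict { CSUM_LET_STACK_VERIFY, CSUM_HW_ERROR, CSUM_HW_GOOD };

    static enum csum_verdict rx_checksum_verdict(unsigned int staterr, int csum_disabled)
    {
            if ((staterr & RX_STAT_IXSM) || csum_disabled)
                    return CSUM_LET_STACK_VERIFY;  /* skb stays CHECKSUM_NONE */
            if (staterr & (RX_ERR_L4E | RX_ERR_IPE))
                    return CSUM_HW_ERROR;          /* hw_csum_err++, stack re-verifies */
            if (staterr & RX_STAT_L4CS)
                    return CSUM_HW_GOOD;           /* CHECKSUM_UNNECESSARY, hw_csum_good++ */
            return CSUM_LET_STACK_VERIFY;
    }

    int main(void)
    {
            printf("%d\n", rx_checksum_verdict(RX_STAT_L4CS, 0));              /* 2: good */
            printf("%d\n", rx_checksum_verdict(RX_STAT_L4CS | RX_ERR_L4E, 0)); /* 1: error */
            return 0;
    }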
129 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
136 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_alloc_rx_buffers()
137 struct net_device *netdev = adapter->netdev; in igbvf_alloc_rx_buffers()
138 struct pci_dev *pdev = adapter->pdev; in igbvf_alloc_rx_buffers()
145 i = rx_ring->next_to_use; in igbvf_alloc_rx_buffers()
146 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
148 if (adapter->rx_ps_hdr_size) in igbvf_alloc_rx_buffers()
149 bufsz = adapter->rx_ps_hdr_size; in igbvf_alloc_rx_buffers()
151 bufsz = adapter->rx_buffer_len; in igbvf_alloc_rx_buffers()
153 while (cleaned_count--) { in igbvf_alloc_rx_buffers()
156 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { in igbvf_alloc_rx_buffers()
157 if (!buffer_info->page) { in igbvf_alloc_rx_buffers()
158 buffer_info->page = alloc_page(GFP_ATOMIC); in igbvf_alloc_rx_buffers()
159 if (!buffer_info->page) { in igbvf_alloc_rx_buffers()
160 adapter->alloc_rx_buff_failed++; in igbvf_alloc_rx_buffers()
163 buffer_info->page_offset = 0; in igbvf_alloc_rx_buffers()
165 buffer_info->page_offset ^= PAGE_SIZE / 2; in igbvf_alloc_rx_buffers()
167 buffer_info->page_dma = in igbvf_alloc_rx_buffers()
168 dma_map_page(&pdev->dev, buffer_info->page, in igbvf_alloc_rx_buffers()
169 buffer_info->page_offset, in igbvf_alloc_rx_buffers()
172 if (dma_mapping_error(&pdev->dev, in igbvf_alloc_rx_buffers()
173 buffer_info->page_dma)) { in igbvf_alloc_rx_buffers()
174 __free_page(buffer_info->page); in igbvf_alloc_rx_buffers()
175 buffer_info->page = NULL; in igbvf_alloc_rx_buffers()
176 dev_err(&pdev->dev, "RX DMA map failed\n"); in igbvf_alloc_rx_buffers()
181 if (!buffer_info->skb) { in igbvf_alloc_rx_buffers()
184 adapter->alloc_rx_buff_failed++; in igbvf_alloc_rx_buffers()
188 buffer_info->skb = skb; in igbvf_alloc_rx_buffers()
189 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in igbvf_alloc_rx_buffers()
192 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in igbvf_alloc_rx_buffers()
193 dev_kfree_skb(buffer_info->skb); in igbvf_alloc_rx_buffers()
194 buffer_info->skb = NULL; in igbvf_alloc_rx_buffers()
195 dev_err(&pdev->dev, "RX DMA map failed\n"); in igbvf_alloc_rx_buffers()
200 * each write-back erases this info. in igbvf_alloc_rx_buffers()
202 if (adapter->rx_ps_hdr_size) { in igbvf_alloc_rx_buffers()
203 rx_desc->read.pkt_addr = in igbvf_alloc_rx_buffers()
204 cpu_to_le64(buffer_info->page_dma); in igbvf_alloc_rx_buffers()
205 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); in igbvf_alloc_rx_buffers()
207 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); in igbvf_alloc_rx_buffers()
208 rx_desc->read.hdr_addr = 0; in igbvf_alloc_rx_buffers()
212 if (i == rx_ring->count) in igbvf_alloc_rx_buffers()
214 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
218 if (rx_ring->next_to_use != i) { in igbvf_alloc_rx_buffers()
219 rx_ring->next_to_use = i; in igbvf_alloc_rx_buffers()
221 i = (rx_ring->count - 1); in igbvf_alloc_rx_buffers()
223 i--; in igbvf_alloc_rx_buffers()
227 * applicable for weak-ordered memory model archs, in igbvf_alloc_rx_buffers()
228 * such as IA-64). in igbvf_alloc_rx_buffers()
231 writel(i, adapter->hw.hw_addr + rx_ring->tail); in igbvf_alloc_rx_buffers()
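Note that the tail register is written with the index of the last initialized descriptor, i.e. one before the new next_to_use, wrapping at the ring size; the wmb() just before the writel() makes the descriptor writes visible to the device before the tail moves. A small sketch of only the index math, assuming a hypothetical ring size:

    #include <stdio.h>

    /* Tail value the refill path programs: one entry before the new
     * next_to_use, wrapping back to count - 1.
     */
    static unsigned int tail_after_refill(unsigned int next_to_use, unsigned int count)
    {
            return next_to_use == 0 ? count - 1 : next_to_use - 1;
    }

    int main(void)
    {
            printf("%u\n", tail_after_refill(16, 1024));  /* 15 */
            printf("%u\n", tail_after_refill(0, 1024));   /* 1023: wrapped */
            return 0;
    }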
236 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
242 * is no guarantee that everything was cleaned
247 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_clean_rx_irq()
248 struct net_device *netdev = adapter->netdev; in igbvf_clean_rx_irq()
249 struct pci_dev *pdev = adapter->pdev; in igbvf_clean_rx_irq()
259 i = rx_ring->next_to_clean; in igbvf_clean_rx_irq()
261 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igbvf_clean_rx_irq()
269 buffer_info = &rx_ring->buffer_info[i]; in igbvf_clean_rx_irq()
271 /* HW will not DMA in data larger than the given buffer, even in igbvf_clean_rx_irq()
276 hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info, in igbvf_clean_rx_irq()
278 if (hlen > adapter->rx_ps_hdr_size) in igbvf_clean_rx_irq()
279 hlen = adapter->rx_ps_hdr_size; in igbvf_clean_rx_irq()
281 length = le16_to_cpu(rx_desc->wb.upper.length); in igbvf_clean_rx_irq()
285 skb = buffer_info->skb; in igbvf_clean_rx_irq()
286 prefetch(skb->data - NET_IP_ALIGN); in igbvf_clean_rx_irq()
287 buffer_info->skb = NULL; in igbvf_clean_rx_irq()
288 if (!adapter->rx_ps_hdr_size) { in igbvf_clean_rx_irq()
289 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_irq()
290 adapter->rx_buffer_len, in igbvf_clean_rx_irq()
292 buffer_info->dma = 0; in igbvf_clean_rx_irq()
297 if (!skb_shinfo(skb)->nr_frags) { in igbvf_clean_rx_irq()
298 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_irq()
299 adapter->rx_ps_hdr_size, in igbvf_clean_rx_irq()
301 buffer_info->dma = 0; in igbvf_clean_rx_irq()
306 dma_unmap_page(&pdev->dev, buffer_info->page_dma, in igbvf_clean_rx_irq()
309 buffer_info->page_dma = 0; in igbvf_clean_rx_irq()
311 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in igbvf_clean_rx_irq()
312 buffer_info->page, in igbvf_clean_rx_irq()
313 buffer_info->page_offset, in igbvf_clean_rx_irq()
316 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || in igbvf_clean_rx_irq()
317 (page_count(buffer_info->page) != 1)) in igbvf_clean_rx_irq()
318 buffer_info->page = NULL; in igbvf_clean_rx_irq()
320 get_page(buffer_info->page); in igbvf_clean_rx_irq()
322 skb->len += length; in igbvf_clean_rx_irq()
323 skb->data_len += length; in igbvf_clean_rx_irq()
324 skb->truesize += PAGE_SIZE / 2; in igbvf_clean_rx_irq()
328 if (i == rx_ring->count) in igbvf_clean_rx_irq()
332 next_buffer = &rx_ring->buffer_info[i]; in igbvf_clean_rx_irq()
335 buffer_info->skb = next_buffer->skb; in igbvf_clean_rx_irq()
336 buffer_info->dma = next_buffer->dma; in igbvf_clean_rx_irq()
337 next_buffer->skb = skb; in igbvf_clean_rx_irq()
338 next_buffer->dma = 0; in igbvf_clean_rx_irq()
347 total_bytes += skb->len; in igbvf_clean_rx_irq()
352 skb->protocol = eth_type_trans(skb, netdev); in igbvf_clean_rx_irq()
355 rx_desc->wb.upper.vlan); in igbvf_clean_rx_irq()
358 rx_desc->wb.upper.status_error = 0; in igbvf_clean_rx_irq()
370 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igbvf_clean_rx_irq()
373 rx_ring->next_to_clean = i; in igbvf_clean_rx_irq()
379 adapter->total_rx_packets += total_packets; in igbvf_clean_rx_irq()
380 adapter->total_rx_bytes += total_bytes; in igbvf_clean_rx_irq()
381 netdev->stats.rx_bytes += total_bytes; in igbvf_clean_rx_irq()
382 netdev->stats.rx_packets += total_packets; in igbvf_clean_rx_irq()
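When packet split is enabled, each receive page serves as two half-page buffers: page_offset toggles between 0 and PAGE_SIZE/2, and the page is only kept for reuse while nobody else holds a reference and the buffer fits in half a page. A standalone sketch of that toggle and reuse test, assuming 4 KiB pages:

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096u  /* assumption: 4 KiB pages */

    int main(void)
    {
            unsigned int page_offset = 0;
            int i;

            /* Alternate between the two halves of the same page, as the refill
             * path does with buffer_info->page_offset ^= PAGE_SIZE / 2.
             */
            for (i = 0; i < 4; i++) {
                    printf("refill %d uses offset %u\n", i, page_offset);
                    page_offset ^= SKETCH_PAGE_SIZE / 2;  /* 0, 2048, 0, 2048, ... */
            }

            /* Reuse test mirrored from the cleanup path: give the page up if the
             * buffer no longer fits in half a page or someone else references it.
             */
            unsigned int rx_buffer_len = 2048;
            int page_refcount = 1;
            int reuse = !(rx_buffer_len > SKETCH_PAGE_SIZE / 2 || page_refcount != 1);
            printf("page reusable: %s\n", reuse ? "yes" : "no");
            return 0;
    }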
389 if (buffer_info->dma) { in igbvf_put_txbuf()
390 if (buffer_info->mapped_as_page) in igbvf_put_txbuf()
391 dma_unmap_page(&adapter->pdev->dev, in igbvf_put_txbuf()
392 buffer_info->dma, in igbvf_put_txbuf()
393 buffer_info->length, in igbvf_put_txbuf()
396 dma_unmap_single(&adapter->pdev->dev, in igbvf_put_txbuf()
397 buffer_info->dma, in igbvf_put_txbuf()
398 buffer_info->length, in igbvf_put_txbuf()
400 buffer_info->dma = 0; in igbvf_put_txbuf()
402 if (buffer_info->skb) { in igbvf_put_txbuf()
403 dev_kfree_skb_any(buffer_info->skb); in igbvf_put_txbuf()
404 buffer_info->skb = NULL; in igbvf_put_txbuf()
406 buffer_info->time_stamp = 0; in igbvf_put_txbuf()
410 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
419 struct pci_dev *pdev = adapter->pdev; in igbvf_setup_tx_resources()
422 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_setup_tx_resources()
423 tx_ring->buffer_info = vzalloc(size); in igbvf_setup_tx_resources()
424 if (!tx_ring->buffer_info) in igbvf_setup_tx_resources()
428 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igbvf_setup_tx_resources()
429 tx_ring->size = ALIGN(tx_ring->size, 4096); in igbvf_setup_tx_resources()
431 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, in igbvf_setup_tx_resources()
432 &tx_ring->dma, GFP_KERNEL); in igbvf_setup_tx_resources()
433 if (!tx_ring->desc) in igbvf_setup_tx_resources()
436 tx_ring->adapter = adapter; in igbvf_setup_tx_resources()
437 tx_ring->next_to_use = 0; in igbvf_setup_tx_resources()
438 tx_ring->next_to_clean = 0; in igbvf_setup_tx_resources()
442 vfree(tx_ring->buffer_info); in igbvf_setup_tx_resources()
443 dev_err(&adapter->pdev->dev, in igbvf_setup_tx_resources()
445 return -ENOMEM; in igbvf_setup_tx_resources()
449 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
458 struct pci_dev *pdev = adapter->pdev; in igbvf_setup_rx_resources()
461 size = sizeof(struct igbvf_buffer) * rx_ring->count; in igbvf_setup_rx_resources()
462 rx_ring->buffer_info = vzalloc(size); in igbvf_setup_rx_resources()
463 if (!rx_ring->buffer_info) in igbvf_setup_rx_resources()
469 rx_ring->size = rx_ring->count * desc_len; in igbvf_setup_rx_resources()
470 rx_ring->size = ALIGN(rx_ring->size, 4096); in igbvf_setup_rx_resources()
472 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, in igbvf_setup_rx_resources()
473 &rx_ring->dma, GFP_KERNEL); in igbvf_setup_rx_resources()
474 if (!rx_ring->desc) in igbvf_setup_rx_resources()
477 rx_ring->next_to_clean = 0; in igbvf_setup_rx_resources()
478 rx_ring->next_to_use = 0; in igbvf_setup_rx_resources()
480 rx_ring->adapter = adapter; in igbvf_setup_rx_resources()
485 vfree(rx_ring->buffer_info); in igbvf_setup_rx_resources()
486 rx_ring->buffer_info = NULL; in igbvf_setup_rx_resources()
487 dev_err(&adapter->pdev->dev, in igbvf_setup_rx_resources()
489 return -ENOMEM; in igbvf_setup_rx_resources()
493 * igbvf_clean_tx_ring - Free Tx Buffers
498 struct igbvf_adapter *adapter = tx_ring->adapter; in igbvf_clean_tx_ring()
503 if (!tx_ring->buffer_info) in igbvf_clean_tx_ring()
507 for (i = 0; i < tx_ring->count; i++) { in igbvf_clean_tx_ring()
508 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_ring()
512 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_clean_tx_ring()
513 memset(tx_ring->buffer_info, 0, size); in igbvf_clean_tx_ring()
516 memset(tx_ring->desc, 0, tx_ring->size); in igbvf_clean_tx_ring()
518 tx_ring->next_to_use = 0; in igbvf_clean_tx_ring()
519 tx_ring->next_to_clean = 0; in igbvf_clean_tx_ring()
521 writel(0, adapter->hw.hw_addr + tx_ring->head); in igbvf_clean_tx_ring()
522 writel(0, adapter->hw.hw_addr + tx_ring->tail); in igbvf_clean_tx_ring()
526 * igbvf_free_tx_resources - Free Tx Resources per Queue
533 struct pci_dev *pdev = tx_ring->adapter->pdev; in igbvf_free_tx_resources()
537 vfree(tx_ring->buffer_info); in igbvf_free_tx_resources()
538 tx_ring->buffer_info = NULL; in igbvf_free_tx_resources()
540 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in igbvf_free_tx_resources()
541 tx_ring->dma); in igbvf_free_tx_resources()
543 tx_ring->desc = NULL; in igbvf_free_tx_resources()
547 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
552 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_clean_rx_ring()
554 struct pci_dev *pdev = adapter->pdev; in igbvf_clean_rx_ring()
558 if (!rx_ring->buffer_info) in igbvf_clean_rx_ring()
562 for (i = 0; i < rx_ring->count; i++) { in igbvf_clean_rx_ring()
563 buffer_info = &rx_ring->buffer_info[i]; in igbvf_clean_rx_ring()
564 if (buffer_info->dma) { in igbvf_clean_rx_ring()
565 if (adapter->rx_ps_hdr_size) { in igbvf_clean_rx_ring()
566 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_ring()
567 adapter->rx_ps_hdr_size, in igbvf_clean_rx_ring()
570 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_ring()
571 adapter->rx_buffer_len, in igbvf_clean_rx_ring()
574 buffer_info->dma = 0; in igbvf_clean_rx_ring()
577 if (buffer_info->skb) { in igbvf_clean_rx_ring()
578 dev_kfree_skb(buffer_info->skb); in igbvf_clean_rx_ring()
579 buffer_info->skb = NULL; in igbvf_clean_rx_ring()
582 if (buffer_info->page) { in igbvf_clean_rx_ring()
583 if (buffer_info->page_dma) in igbvf_clean_rx_ring()
584 dma_unmap_page(&pdev->dev, in igbvf_clean_rx_ring()
585 buffer_info->page_dma, in igbvf_clean_rx_ring()
588 put_page(buffer_info->page); in igbvf_clean_rx_ring()
589 buffer_info->page = NULL; in igbvf_clean_rx_ring()
590 buffer_info->page_dma = 0; in igbvf_clean_rx_ring()
591 buffer_info->page_offset = 0; in igbvf_clean_rx_ring()
595 size = sizeof(struct igbvf_buffer) * rx_ring->count; in igbvf_clean_rx_ring()
596 memset(rx_ring->buffer_info, 0, size); in igbvf_clean_rx_ring()
599 memset(rx_ring->desc, 0, rx_ring->size); in igbvf_clean_rx_ring()
601 rx_ring->next_to_clean = 0; in igbvf_clean_rx_ring()
602 rx_ring->next_to_use = 0; in igbvf_clean_rx_ring()
604 writel(0, adapter->hw.hw_addr + rx_ring->head); in igbvf_clean_rx_ring()
605 writel(0, adapter->hw.hw_addr + rx_ring->tail); in igbvf_clean_rx_ring()
609 * igbvf_free_rx_resources - Free Rx Resources
617 struct pci_dev *pdev = rx_ring->adapter->pdev; in igbvf_free_rx_resources()
621 vfree(rx_ring->buffer_info); in igbvf_free_rx_resources()
622 rx_ring->buffer_info = NULL; in igbvf_free_rx_resources()
624 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in igbvf_free_rx_resources()
625 rx_ring->dma); in igbvf_free_rx_resources()
626 rx_ring->desc = NULL; in igbvf_free_rx_resources()
630 * igbvf_update_itr - update the dynamic ITR value based on statistics
632 * @itr_setting: current adapter->itr
717 adapter->tx_ring->itr_range = in igbvf_set_itr()
719 adapter->tx_ring->itr_val, in igbvf_set_itr()
720 adapter->total_tx_packets, in igbvf_set_itr()
721 adapter->total_tx_bytes); in igbvf_set_itr()
724 if (adapter->requested_itr == 3 && in igbvf_set_itr()
725 adapter->tx_ring->itr_range == lowest_latency) in igbvf_set_itr()
726 adapter->tx_ring->itr_range = low_latency; in igbvf_set_itr()
728 new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); in igbvf_set_itr()
730 if (new_itr != adapter->tx_ring->itr_val) { in igbvf_set_itr()
731 u32 current_itr = adapter->tx_ring->itr_val; in igbvf_set_itr()
739 adapter->tx_ring->itr_val = new_itr; in igbvf_set_itr()
741 adapter->tx_ring->set_itr = 1; in igbvf_set_itr()
744 adapter->rx_ring->itr_range = in igbvf_set_itr()
745 igbvf_update_itr(adapter, adapter->rx_ring->itr_val, in igbvf_set_itr()
746 adapter->total_rx_packets, in igbvf_set_itr()
747 adapter->total_rx_bytes); in igbvf_set_itr()
748 if (adapter->requested_itr == 3 && in igbvf_set_itr()
749 adapter->rx_ring->itr_range == lowest_latency) in igbvf_set_itr()
750 adapter->rx_ring->itr_range = low_latency; in igbvf_set_itr()
752 new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range); in igbvf_set_itr()
754 if (new_itr != adapter->rx_ring->itr_val) { in igbvf_set_itr()
755 u32 current_itr = adapter->rx_ring->itr_val; in igbvf_set_itr()
760 adapter->rx_ring->itr_val = new_itr; in igbvf_set_itr()
762 adapter->rx_ring->set_itr = 1; in igbvf_set_itr()
767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
774 struct igbvf_adapter *adapter = tx_ring->adapter; in igbvf_clean_tx_irq()
775 struct net_device *netdev = adapter->netdev; in igbvf_clean_tx_irq()
783 i = tx_ring->next_to_clean; in igbvf_clean_tx_irq()
784 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_irq()
785 eop_desc = buffer_info->next_to_watch; in igbvf_clean_tx_irq()
788 /* if next_to_watch is not set then there is no work pending */ in igbvf_clean_tx_irq()
796 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) in igbvf_clean_tx_irq()
800 buffer_info->next_to_watch = NULL; in igbvf_clean_tx_irq()
805 skb = buffer_info->skb; in igbvf_clean_tx_irq()
811 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
813 bytecount = ((segs - 1) * skb_headlen(skb)) + in igbvf_clean_tx_irq()
814 skb->len; in igbvf_clean_tx_irq()
820 tx_desc->wb.status = 0; in igbvf_clean_tx_irq()
823 if (i == tx_ring->count) in igbvf_clean_tx_irq()
826 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_irq()
829 eop_desc = buffer_info->next_to_watch; in igbvf_clean_tx_irq()
830 } while (count < tx_ring->count); in igbvf_clean_tx_irq()
832 tx_ring->next_to_clean = i; in igbvf_clean_tx_irq()
841 !(test_bit(__IGBVF_DOWN, &adapter->state))) { in igbvf_clean_tx_irq()
843 ++adapter->restart_queue; in igbvf_clean_tx_irq()
847 netdev->stats.tx_bytes += total_bytes; in igbvf_clean_tx_irq()
848 netdev->stats.tx_packets += total_packets; in igbvf_clean_tx_irq()
849 return count < tx_ring->count; in igbvf_clean_tx_irq()
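The bytecount accounting in the clean path credits one extra copy of the linear header per additional TSO segment on top of the skb length, so the stats roughly reflect bytes on the wire. A worked userspace example of the same formula, with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical TSO frame: 66-byte headers, 45 segments of MSS 1448 */
            unsigned int headlen = 66;               /* skb_headlen(): linear (header) part */
            unsigned int skb_len = 66 + 45 * 1448;   /* total skb length */
            unsigned int segs = 45;                  /* skb_shinfo(skb)->gso_segs ?: 1 */

            /* same formula as the Tx clean path */
            unsigned int bytecount = (segs - 1) * headlen + skb_len;

            printf("segments=%u bytecount=%u\n", segs, bytecount);
            return 0;
    }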
856 struct e1000_hw *hw = &adapter->hw; in igbvf_msix_other() local
858 hw->mac.get_link_status = 1; in igbvf_msix_other()
859 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_msix_other()
860 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_msix_other()
862 ew32(EIMS, adapter->eims_other); in igbvf_msix_other()
871 struct e1000_hw *hw = &adapter->hw; in igbvf_intr_msix_tx() local
872 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_intr_msix_tx()
874 if (tx_ring->set_itr) { in igbvf_intr_msix_tx()
875 writel(tx_ring->itr_val, in igbvf_intr_msix_tx()
876 adapter->hw.hw_addr + tx_ring->itr_register); in igbvf_intr_msix_tx()
877 adapter->tx_ring->set_itr = 0; in igbvf_intr_msix_tx()
880 adapter->total_tx_bytes = 0; in igbvf_intr_msix_tx()
881 adapter->total_tx_packets = 0; in igbvf_intr_msix_tx()
883 /* auto mask will automatically re-enable the interrupt when we write in igbvf_intr_msix_tx()
888 ew32(EICS, tx_ring->eims_value); in igbvf_intr_msix_tx()
890 ew32(EIMS, tx_ring->eims_value); in igbvf_intr_msix_tx()
903 if (adapter->rx_ring->set_itr) { in igbvf_intr_msix_rx()
904 writel(adapter->rx_ring->itr_val, in igbvf_intr_msix_rx()
905 adapter->hw.hw_addr + adapter->rx_ring->itr_register); in igbvf_intr_msix_rx()
906 adapter->rx_ring->set_itr = 0; in igbvf_intr_msix_rx()
909 if (napi_schedule_prep(&adapter->rx_ring->napi)) { in igbvf_intr_msix_rx()
910 adapter->total_rx_bytes = 0; in igbvf_intr_msix_rx()
911 adapter->total_rx_packets = 0; in igbvf_intr_msix_rx()
912 __napi_schedule(&adapter->rx_ring->napi); in igbvf_intr_msix_rx()
918 #define IGBVF_NO_QUEUE -1
923 struct e1000_hw *hw = &adapter->hw; in igbvf_assign_vector() local
926 /* 82576 uses a table-based method for assigning vectors. in igbvf_assign_vector()
943 adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); in igbvf_assign_vector()
958 adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); in igbvf_assign_vector()
964 * igbvf_configure_msix - Configure MSI-X hardware
968 * generate MSI-X interrupts.
973 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_msix() local
974 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_configure_msix()
975 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_configure_msix()
978 adapter->eims_enable_mask = 0; in igbvf_configure_msix()
981 adapter->eims_enable_mask |= tx_ring->eims_value; in igbvf_configure_msix()
982 writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); in igbvf_configure_msix()
984 adapter->eims_enable_mask |= rx_ring->eims_value; in igbvf_configure_msix()
985 writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); in igbvf_configure_msix()
993 adapter->eims_enable_mask = GENMASK(vector - 1, 0); in igbvf_configure_msix()
994 adapter->eims_other = BIT(vector - 1); in igbvf_configure_msix()
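With the usual three MSI-X vectors (Tx, Rx, other/mailbox), eims_enable_mask ends up covering all allocated vectors and eims_other selects only the last one. A quick standalone check of those bit manipulations, using local stand-ins for the kernel's BIT()/GENMASK() helpers:

    #include <stdio.h>

    /* local stand-ins for the kernel macros of the same name (32-bit only) */
    #define BIT(n)          (1u << (n))
    #define GENMASK(h, l)   (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))

    int main(void)
    {
            unsigned int vector = 3;  /* tx, rx, other */

            unsigned int eims_enable_mask = GENMASK(vector - 1, 0);  /* 0x7 */
            unsigned int eims_other = BIT(vector - 1);                /* 0x4 */

            printf("enable mask = 0x%x, other = 0x%x\n", eims_enable_mask, eims_other);
            return 0;
    }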
1000 if (adapter->msix_entries) { in igbvf_reset_interrupt_capability()
1001 pci_disable_msix(adapter->pdev); in igbvf_reset_interrupt_capability()
1002 kfree(adapter->msix_entries); in igbvf_reset_interrupt_capability()
1003 adapter->msix_entries = NULL; in igbvf_reset_interrupt_capability()
1008 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1016 int err = -ENOMEM; in igbvf_set_interrupt_capability()
1020 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), in igbvf_set_interrupt_capability()
1022 if (adapter->msix_entries) { in igbvf_set_interrupt_capability()
1024 adapter->msix_entries[i].entry = i; in igbvf_set_interrupt_capability()
1026 err = pci_enable_msix_range(adapter->pdev, in igbvf_set_interrupt_capability()
1027 adapter->msix_entries, 3, 3); in igbvf_set_interrupt_capability()
1031 /* MSI-X failed */ in igbvf_set_interrupt_capability()
1032 dev_err(&adapter->pdev->dev, in igbvf_set_interrupt_capability()
1033 "Failed to initialize MSI-X interrupts.\n"); in igbvf_set_interrupt_capability()
1039 * igbvf_request_msix - Initialize MSI-X interrupts
1042 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1047 struct net_device *netdev = adapter->netdev; in igbvf_request_msix()
1050 if (strlen(netdev->name) < (IFNAMSIZ - 5)) { in igbvf_request_msix()
1051 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); in igbvf_request_msix()
1052 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); in igbvf_request_msix()
1054 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in igbvf_request_msix()
1055 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in igbvf_request_msix()
1058 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1059 igbvf_intr_msix_tx, 0, adapter->tx_ring->name, in igbvf_request_msix()
1064 adapter->tx_ring->itr_register = E1000_EITR(vector); in igbvf_request_msix()
1065 adapter->tx_ring->itr_val = adapter->current_itr; in igbvf_request_msix()
1068 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1069 igbvf_intr_msix_rx, 0, adapter->rx_ring->name, in igbvf_request_msix()
1074 adapter->rx_ring->itr_register = E1000_EITR(vector); in igbvf_request_msix()
1075 adapter->rx_ring->itr_val = adapter->current_itr; in igbvf_request_msix()
1078 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1079 igbvf_msix_other, 0, netdev->name, netdev); in igbvf_request_msix()
1086 free_irq(adapter->msix_entries[--vector].vector, netdev); in igbvf_request_msix()
1088 free_irq(adapter->msix_entries[--vector].vector, netdev); in igbvf_request_msix()
1094 * igbvf_alloc_queues - Allocate memory for all rings
1099 struct net_device *netdev = adapter->netdev; in igbvf_alloc_queues()
1101 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); in igbvf_alloc_queues()
1102 if (!adapter->tx_ring) in igbvf_alloc_queues()
1103 return -ENOMEM; in igbvf_alloc_queues()
1105 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); in igbvf_alloc_queues()
1106 if (!adapter->rx_ring) { in igbvf_alloc_queues()
1107 kfree(adapter->tx_ring); in igbvf_alloc_queues()
1108 return -ENOMEM; in igbvf_alloc_queues()
1111 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll); in igbvf_alloc_queues()
1117 * igbvf_request_irq - initialize interrupts
1125 int err = -1; in igbvf_request_irq()
1127 /* igbvf supports msi-x only */ in igbvf_request_irq()
1128 if (adapter->msix_entries) in igbvf_request_irq()
1134 dev_err(&adapter->pdev->dev, in igbvf_request_irq()
1142 struct net_device *netdev = adapter->netdev; in igbvf_free_irq()
1145 if (adapter->msix_entries) { in igbvf_free_irq()
1147 free_irq(adapter->msix_entries[vector].vector, netdev); in igbvf_free_irq()
1152 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1157 struct e1000_hw *hw = &adapter->hw; in igbvf_irq_disable() local
1161 if (adapter->msix_entries) in igbvf_irq_disable()
1166 * igbvf_irq_enable - Enable default interrupt generation settings
1171 struct e1000_hw *hw = &adapter->hw; in igbvf_irq_enable() local
1173 ew32(EIAC, adapter->eims_enable_mask); in igbvf_irq_enable()
1174 ew32(EIAM, adapter->eims_enable_mask); in igbvf_irq_enable()
1175 ew32(EIMS, adapter->eims_enable_mask); in igbvf_irq_enable()
1179 * igbvf_poll - NAPI Rx polling callback
1186 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_poll()
1187 struct e1000_hw *hw = &adapter->hw; in igbvf_poll() local
1195 /* Exit the polling mode, but don't re-enable interrupts if stack might in igbvf_poll()
1196 * poll us due to busy-polling in igbvf_poll()
1199 if (adapter->requested_itr & 3) in igbvf_poll()
1202 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_poll()
1203 ew32(EIMS, adapter->rx_ring->eims_value); in igbvf_poll()
1210 * igbvf_set_rlpml - set receive large packet maximum length
1218 struct e1000_hw *hw = &adapter->hw; in igbvf_set_rlpml() local
1220 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; in igbvf_set_rlpml()
1222 spin_lock_bh(&hw->mbx_lock); in igbvf_set_rlpml()
1224 e1000_rlpml_set_vf(hw, max_frame_size); in igbvf_set_rlpml()
1226 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_rlpml()
1233 struct e1000_hw *hw = &adapter->hw; in igbvf_vlan_rx_add_vid() local
1235 spin_lock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1237 if (hw->mac.ops.set_vfta(hw, vid, true)) { in igbvf_vlan_rx_add_vid()
1238 dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); in igbvf_vlan_rx_add_vid()
1239 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1240 return -EINVAL; in igbvf_vlan_rx_add_vid()
1243 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1245 set_bit(vid, adapter->active_vlans); in igbvf_vlan_rx_add_vid()
1253 struct e1000_hw *hw = &adapter->hw; in igbvf_vlan_rx_kill_vid() local
1255 spin_lock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1257 if (hw->mac.ops.set_vfta(hw, vid, false)) { in igbvf_vlan_rx_kill_vid()
1258 dev_err(&adapter->pdev->dev, in igbvf_vlan_rx_kill_vid()
1260 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1261 return -EINVAL; in igbvf_vlan_rx_kill_vid()
1264 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1266 clear_bit(vid, adapter->active_vlans); in igbvf_vlan_rx_kill_vid()
1274 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in igbvf_restore_vlan()
1275 igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igbvf_restore_vlan()
1279 * igbvf_configure_tx - Configure Transmit Unit after Reset
1286 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_tx() local
1287 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_configure_tx()
1297 /* Setup the HW Tx Head and Tail descriptor pointers */ in igbvf_configure_tx()
1298 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); in igbvf_configure_tx()
1299 tdba = tx_ring->dma; in igbvf_configure_tx()
1304 tx_ring->head = E1000_TDH(0); in igbvf_configure_tx()
1305 tx_ring->tail = E1000_TDT(0); in igbvf_configure_tx()
1307 /* Turn off Relaxed Ordering on head write-backs. The writebacks in igbvf_configure_tx()
1320 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; in igbvf_configure_tx()
1323 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; in igbvf_configure_tx()
1327 * igbvf_setup_srrctl - configure the receive control registers
1332 struct e1000_hw *hw = &adapter->hw; in igbvf_setup_srrctl() local
1343 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> in igbvf_setup_srrctl()
1346 if (adapter->rx_buffer_len < 2048) { in igbvf_setup_srrctl()
1347 adapter->rx_ps_hdr_size = 0; in igbvf_setup_srrctl()
1350 adapter->rx_ps_hdr_size = 128; in igbvf_setup_srrctl()
1351 srrctl |= adapter->rx_ps_hdr_size << in igbvf_setup_srrctl()
1360 * igbvf_configure_rx - Configure Receive Unit after Reset
1367 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_rx() local
1368 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_configure_rx()
1378 /* Setup the HW Rx Head and Tail Descriptor Pointers and in igbvf_configure_rx()
1381 rdba = rx_ring->dma; in igbvf_configure_rx()
1384 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); in igbvf_configure_rx()
1385 rx_ring->head = E1000_RDH(0); in igbvf_configure_rx()
1386 rx_ring->tail = E1000_RDT(0); in igbvf_configure_rx()
1403 * igbvf_set_multi - Multicast and Promiscuous mode set
1409 * promiscuous mode, and all-multi behavior.
1414 struct e1000_hw *hw = &adapter->hw; in igbvf_set_multi() local
1429 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igbvf_set_multi()
1431 spin_lock_bh(&hw->mbx_lock); in igbvf_set_multi()
1433 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); in igbvf_set_multi()
1435 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_multi()
1440 * igbvf_set_uni - Configure unicast MAC filters
1449 struct e1000_hw *hw = &adapter->hw; in igbvf_set_uni() local
1452 pr_err("Too many unicast filters - No Space\n"); in igbvf_set_uni()
1453 return -ENOSPC; in igbvf_set_uni()
1456 spin_lock_bh(&hw->mbx_lock); in igbvf_set_uni()
1459 hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); in igbvf_set_uni()
1461 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_uni()
1468 spin_lock_bh(&hw->mbx_lock); in igbvf_set_uni()
1470 hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, in igbvf_set_uni()
1471 ha->addr); in igbvf_set_uni()
1473 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_uni()
1488 * igbvf_configure - configure the hardware for Rx and Tx
1493 igbvf_set_rx_mode(adapter->netdev); in igbvf_configure()
1500 igbvf_alloc_rx_buffers(adapter->rx_ring, in igbvf_configure()
1501 igbvf_desc_unused(adapter->rx_ring)); in igbvf_configure()
1504 /* igbvf_reset - bring the hardware into a known good state
1508 * require a configuration cycle of the hardware - those cannot be
1514 struct e1000_mac_info *mac = &adapter->hw.mac; in igbvf_reset()
1515 struct net_device *netdev = adapter->netdev; in igbvf_reset()
1516 struct e1000_hw *hw = &adapter->hw; in igbvf_reset() local
1518 spin_lock_bh(&hw->mbx_lock); in igbvf_reset()
1521 if (mac->ops.reset_hw(hw)) in igbvf_reset()
1522 dev_info(&adapter->pdev->dev, "PF still resetting\n"); in igbvf_reset()
1524 mac->ops.init_hw(hw); in igbvf_reset()
1526 spin_unlock_bh(&hw->mbx_lock); in igbvf_reset()
1528 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in igbvf_reset()
1529 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in igbvf_reset()
1530 memcpy(netdev->perm_addr, adapter->hw.mac.addr, in igbvf_reset()
1531 netdev->addr_len); in igbvf_reset()
1534 adapter->last_reset = jiffies; in igbvf_reset()
1539 struct e1000_hw *hw = &adapter->hw; in igbvf_up() local
1544 clear_bit(__IGBVF_DOWN, &adapter->state); in igbvf_up()
1546 napi_enable(&adapter->rx_ring->napi); in igbvf_up()
1547 if (adapter->msix_entries) in igbvf_up()
1555 hw->mac.get_link_status = 1; in igbvf_up()
1556 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_up()
1563 struct net_device *netdev = adapter->netdev; in igbvf_down()
1564 struct e1000_hw *hw = &adapter->hw; in igbvf_down() local
1570 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_down()
1587 napi_disable(&adapter->rx_ring->napi); in igbvf_down()
1591 timer_delete_sync(&adapter->watchdog_timer); in igbvf_down()
1596 adapter->link_speed = 0; in igbvf_down()
1597 adapter->link_duplex = 0; in igbvf_down()
1600 igbvf_clean_tx_ring(adapter->tx_ring); in igbvf_down()
1601 igbvf_clean_rx_ring(adapter->rx_ring); in igbvf_down()
1607 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_reinit_locked()
1611 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_reinit_locked()
1615 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1624 struct net_device *netdev = adapter->netdev; in igbvf_sw_init()
1627 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; in igbvf_sw_init()
1628 adapter->rx_ps_hdr_size = 0; in igbvf_sw_init()
1629 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in igbvf_sw_init()
1630 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igbvf_sw_init()
1632 adapter->requested_itr = 3; in igbvf_sw_init()
1633 adapter->current_itr = IGBVF_START_ITR; in igbvf_sw_init()
1636 adapter->ei->init_ops(&adapter->hw); in igbvf_sw_init()
1638 rc = adapter->hw.mac.ops.init_params(&adapter->hw); in igbvf_sw_init()
1642 rc = adapter->hw.mbx.ops.init_params(&adapter->hw); in igbvf_sw_init()
1649 return -ENOMEM; in igbvf_sw_init()
1654 spin_lock_init(&adapter->hw.mbx_lock); in igbvf_sw_init()
1656 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_sw_init()
1662 struct e1000_hw *hw = &adapter->hw; in igbvf_initialize_last_counter_stats() local
1664 adapter->stats.last_gprc = er32(VFGPRC); in igbvf_initialize_last_counter_stats()
1665 adapter->stats.last_gorc = er32(VFGORC); in igbvf_initialize_last_counter_stats()
1666 adapter->stats.last_gptc = er32(VFGPTC); in igbvf_initialize_last_counter_stats()
1667 adapter->stats.last_gotc = er32(VFGOTC); in igbvf_initialize_last_counter_stats()
1668 adapter->stats.last_mprc = er32(VFMPRC); in igbvf_initialize_last_counter_stats()
1669 adapter->stats.last_gotlbc = er32(VFGOTLBC); in igbvf_initialize_last_counter_stats()
1670 adapter->stats.last_gptlbc = er32(VFGPTLBC); in igbvf_initialize_last_counter_stats()
1671 adapter->stats.last_gorlbc = er32(VFGORLBC); in igbvf_initialize_last_counter_stats()
1672 adapter->stats.last_gprlbc = er32(VFGPRLBC); in igbvf_initialize_last_counter_stats()
1674 adapter->stats.base_gprc = er32(VFGPRC); in igbvf_initialize_last_counter_stats()
1675 adapter->stats.base_gorc = er32(VFGORC); in igbvf_initialize_last_counter_stats()
1676 adapter->stats.base_gptc = er32(VFGPTC); in igbvf_initialize_last_counter_stats()
1677 adapter->stats.base_gotc = er32(VFGOTC); in igbvf_initialize_last_counter_stats()
1678 adapter->stats.base_mprc = er32(VFMPRC); in igbvf_initialize_last_counter_stats()
1679 adapter->stats.base_gotlbc = er32(VFGOTLBC); in igbvf_initialize_last_counter_stats()
1680 adapter->stats.base_gptlbc = er32(VFGPTLBC); in igbvf_initialize_last_counter_stats()
1681 adapter->stats.base_gorlbc = er32(VFGORLBC); in igbvf_initialize_last_counter_stats()
1682 adapter->stats.base_gprlbc = er32(VFGPRLBC); in igbvf_initialize_last_counter_stats()
1686 * igbvf_open - Called when a network interface is made active
1700 struct e1000_hw *hw = &adapter->hw; in igbvf_open() local
1704 if (test_bit(__IGBVF_TESTING, &adapter->state)) in igbvf_open()
1705 return -EBUSY; in igbvf_open()
1708 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); in igbvf_open()
1713 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); in igbvf_open()
1729 clear_bit(__IGBVF_DOWN, &adapter->state); in igbvf_open()
1731 napi_enable(&adapter->rx_ring->napi); in igbvf_open()
1739 hw->mac.get_link_status = 1; in igbvf_open()
1740 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_open()
1745 igbvf_free_rx_resources(adapter->rx_ring); in igbvf_open()
1747 igbvf_free_tx_resources(adapter->tx_ring); in igbvf_open()
1755 * igbvf_close - Disables a network interface
1760 * The close entry point is called when an interface is de-activated
1769 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); in igbvf_close()
1774 igbvf_free_tx_resources(adapter->tx_ring); in igbvf_close()
1775 igbvf_free_rx_resources(adapter->rx_ring); in igbvf_close()
1781 * igbvf_set_mac - Change the Ethernet Address of the NIC
1790 struct e1000_hw *hw = &adapter->hw; in igbvf_set_mac() local
1793 if (!is_valid_ether_addr(addr->sa_data)) in igbvf_set_mac()
1794 return -EADDRNOTAVAIL; in igbvf_set_mac()
1796 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igbvf_set_mac()
1798 spin_lock_bh(&hw->mbx_lock); in igbvf_set_mac()
1800 hw->mac.ops.rar_set(hw, hw->mac.addr, 0); in igbvf_set_mac()
1802 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_mac()
1804 if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) in igbvf_set_mac()
1805 return -EADDRNOTAVAIL; in igbvf_set_mac()
1807 eth_hw_addr_set(netdev, addr->sa_data); in igbvf_set_mac()
1815 if (current_counter < adapter->stats.last_##name) \
1816 adapter->stats.name += 0x100000000LL; \
1817 adapter->stats.last_##name = current_counter; \
1818 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1819 adapter->stats.name |= current_counter; \
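The UPDATE_VF_COUNTER macro above extends the 32-bit hardware counters to 64 bits in software: when the current reading is smaller than the previous one the counter has wrapped, so the upper 32 bits are bumped and the lower 32 bits are replaced. A minimal sketch of the same logic outside the macro, with a simulated wrap:

    #include <stdio.h>
    #include <stdint.h>

    /* same rollover handling as UPDATE_VF_COUNTER, written as a function */
    static void update_counter(uint64_t *stat, uint32_t *last, uint32_t current_counter)
    {
            if (current_counter < *last)
                    *stat += 0x100000000ULL;        /* 32-bit counter wrapped */
            *last = current_counter;
            *stat &= 0xFFFFFFFF00000000ULL;         /* keep accumulated upper half */
            *stat |= current_counter;               /* splice in the new lower half */
    }

    int main(void)
    {
            uint64_t gprc = 0;
            uint32_t last = 0;

            update_counter(&gprc, &last, 0xFFFFFFF0u);  /* near the 32-bit limit */
            update_counter(&gprc, &last, 0x00000010u);  /* wrapped around */
            printf("64-bit counter: 0x%llx\n", (unsigned long long)gprc);  /* 0x100000010 */
            return 0;
    }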
1823 * igbvf_update_stats - Update the board statistics counters
1828 struct e1000_hw *hw = &adapter->hw; in igbvf_update_stats() local
1829 struct pci_dev *pdev = adapter->pdev; in igbvf_update_stats()
1834 if (adapter->link_speed == 0) in igbvf_update_stats()
1837 if (test_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_update_stats()
1854 adapter->netdev->stats.multicast = adapter->stats.mprc; in igbvf_update_stats()
1859 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", in igbvf_print_link_info()
1860 adapter->link_speed, in igbvf_print_link_info()
1861 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); in igbvf_print_link_info()
1866 struct e1000_hw *hw = &adapter->hw; in igbvf_has_link() local
1871 if (test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_has_link()
1874 spin_lock_bh(&hw->mbx_lock); in igbvf_has_link()
1876 ret_val = hw->mac.ops.check_for_link(hw); in igbvf_has_link()
1878 spin_unlock_bh(&hw->mbx_lock); in igbvf_has_link()
1880 link_active = !hw->mac.get_link_status; in igbvf_has_link()
1883 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) in igbvf_has_link()
1884 schedule_work(&adapter->reset_task); in igbvf_has_link()
1890 * igbvf_watchdog - Timer Call-back
1899 schedule_work(&adapter->watchdog_task); in igbvf_watchdog()
1907 struct net_device *netdev = adapter->netdev; in igbvf_watchdog_task()
1908 struct e1000_mac_info *mac = &adapter->hw.mac; in igbvf_watchdog_task()
1909 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_watchdog_task()
1910 struct e1000_hw *hw = &adapter->hw; in igbvf_watchdog_task() local
1918 mac->ops.get_link_up_info(&adapter->hw, in igbvf_watchdog_task()
1919 &adapter->link_speed, in igbvf_watchdog_task()
1920 &adapter->link_duplex); in igbvf_watchdog_task()
1928 adapter->link_speed = 0; in igbvf_watchdog_task()
1929 adapter->link_duplex = 0; in igbvf_watchdog_task()
1930 dev_info(&adapter->pdev->dev, "Link is Down\n"); in igbvf_watchdog_task()
1940 tx_ring->count); in igbvf_watchdog_task()
1947 adapter->tx_timeout_count++; in igbvf_watchdog_task()
1948 schedule_work(&adapter->reset_task); in igbvf_watchdog_task()
1953 ew32(EICS, adapter->rx_ring->eims_value); in igbvf_watchdog_task()
1956 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_watchdog_task()
1957 mod_timer(&adapter->watchdog_timer, in igbvf_watchdog_task()
1973 u16 i = tx_ring->next_to_use; in igbvf_tx_ctxtdesc()
1976 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_ctxtdesc()
1979 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igbvf_tx_ctxtdesc()
1984 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igbvf_tx_ctxtdesc()
1985 context_desc->seqnum_seed = 0; in igbvf_tx_ctxtdesc()
1986 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igbvf_tx_ctxtdesc()
1987 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igbvf_tx_ctxtdesc()
1989 buffer_info->time_stamp = jiffies; in igbvf_tx_ctxtdesc()
1990 buffer_info->dma = 0; in igbvf_tx_ctxtdesc()
2009 if (skb->ip_summed != CHECKSUM_PARTIAL) in igbvf_tso()
2026 if (ip.v4->version == 4) { in igbvf_tso()
2028 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igbvf_tso()
2033 ip.v4->check = csum_fold(csum_partial(trans_start, in igbvf_tso()
2034 csum_start - trans_start, in igbvf_tso()
2038 ip.v4->tot_len = 0; in igbvf_tso()
2040 ip.v6->payload_len = 0; in igbvf_tso()
2044 l4_offset = l4.hdr - skb->data; in igbvf_tso()
2047 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igbvf_tso()
2049 /* remove payload length from inner checksum */ in igbvf_tso()
2050 paylen = skb->len - l4_offset; in igbvf_tso()
2051 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in igbvf_tso()
2054 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; in igbvf_tso()
2055 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; in igbvf_tso()
2058 vlan_macip_lens = l4.hdr - ip.hdr; in igbvf_tso()
2059 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; in igbvf_tso()
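igbvf_tso() packs the header lengths and MSS into two 32-bit context-descriptor fields. Below is a worked example of that packing for a plain TCP/IPv4 frame; the shift positions are defined locally and assumed to match the driver's E1000_ADVTXD_* definitions, so treat the exact values as illustrative.

    #include <stdio.h>

    /* Shift positions assumed from the e1000/igbvf advanced descriptor layout. */
    #define ADVTXD_MACLEN_SHIFT     9   /* MAC header length */
    #define ADVTXD_L4LEN_SHIFT      8   /* L4 (TCP) header length */
    #define ADVTXD_MSS_SHIFT        16  /* MSS */

    int main(void)
    {
            /* hypothetical TCP/IPv4 frame: 14B Ethernet, 20B IP, 20B TCP, MSS 1460 */
            unsigned int maclen = 14, iplen = 20, l4len = 20, mss = 1460;

            /* mss_l4len_idx: L4 header length plus MSS, as in igbvf_tso() */
            unsigned int mss_l4len_idx = (l4len << ADVTXD_L4LEN_SHIFT) |
                                         (mss << ADVTXD_MSS_SHIFT);

            /* vlan_macip_lens: IP header length in the low bits, MAC length shifted up */
            unsigned int vlan_macip_lens = iplen | (maclen << ADVTXD_MACLEN_SHIFT);

            printf("mss_l4len_idx=0x%08x vlan_macip_lens=0x%08x\n",
                   mss_l4len_idx, vlan_macip_lens);
            return 0;
    }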
2073 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igbvf_tx_csum()
2080 switch (skb->csum_offset) { in igbvf_tx_csum()
2086 case offsetof(struct sctphdr, checksum): in igbvf_tx_csum()
2098 vlan_macip_lens = skb_checksum_start_offset(skb) - in igbvf_tx_csum()
2113 if (igbvf_desc_unused(adapter->tx_ring) >= size) in igbvf_maybe_stop_tx()
2125 if (igbvf_desc_unused(adapter->tx_ring) < size) in igbvf_maybe_stop_tx()
2126 return -EBUSY; in igbvf_maybe_stop_tx()
2130 ++adapter->restart_queue; in igbvf_maybe_stop_tx()
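Around these lines sits the usual stop/re-check/wake flow before queuing a frame that needs a given number of descriptors: stop the queue when the ring lacks room, issue a memory barrier, then check the free count once more in case a clean pass just made space. A stripped-down sketch of that control flow with stubbed queue helpers (not the driver's API):

    #include <stdio.h>

    /* Stand-ins for the ring and queue primitives -- purely illustrative. */
    static int tx_free_descriptors = 2;  /* pretend the ring is nearly full */
    static int queue_stopped;

    static void stop_queue(void)  { queue_stopped = 1; }
    static void start_queue(void) { queue_stopped = 0; }

    static int maybe_stop_tx(int size)
    {
            if (tx_free_descriptors >= size)
                    return 0;

            stop_queue();
            /* the real driver places smp_mb() here so a concurrent Tx-clean
             * pass cannot be missed between the stop and the re-check
             */
            if (tx_free_descriptors < size)
                    return -1;  /* -EBUSY: caller backs off to the stack */

            start_queue();
            return 0;
    }

    int main(void)
    {
            printf("need 1: %d\n", maybe_stop_tx(1));  /* 0: enough room */
            printf("need 8: %d\n", maybe_stop_tx(8));  /* -1: queue stays stopped */
            return 0;
    }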
2142 struct pci_dev *pdev = adapter->pdev; in igbvf_tx_map_adv()
2147 i = tx_ring->next_to_use; in igbvf_tx_map_adv()
2149 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2151 buffer_info->length = len; in igbvf_tx_map_adv()
2153 buffer_info->time_stamp = jiffies; in igbvf_tx_map_adv()
2154 buffer_info->mapped_as_page = false; in igbvf_tx_map_adv()
2155 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, in igbvf_tx_map_adv()
2157 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in igbvf_tx_map_adv()
2160 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in igbvf_tx_map_adv()
2165 if (i == tx_ring->count) in igbvf_tx_map_adv()
2168 frag = &skb_shinfo(skb)->frags[f]; in igbvf_tx_map_adv()
2171 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2173 buffer_info->length = len; in igbvf_tx_map_adv()
2174 buffer_info->time_stamp = jiffies; in igbvf_tx_map_adv()
2175 buffer_info->mapped_as_page = true; in igbvf_tx_map_adv()
2176 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, in igbvf_tx_map_adv()
2178 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in igbvf_tx_map_adv()
2182 tx_ring->buffer_info[i].skb = skb; in igbvf_tx_map_adv()
2187 dev_err(&pdev->dev, "TX DMA map failed\n"); in igbvf_tx_map_adv()
2190 buffer_info->dma = 0; in igbvf_tx_map_adv()
2191 buffer_info->time_stamp = 0; in igbvf_tx_map_adv()
2192 buffer_info->length = 0; in igbvf_tx_map_adv()
2193 buffer_info->mapped_as_page = false; in igbvf_tx_map_adv()
2195 count--; in igbvf_tx_map_adv()
2198 while (count--) { in igbvf_tx_map_adv()
2200 i += tx_ring->count; in igbvf_tx_map_adv()
2201 i--; in igbvf_tx_map_adv()
2202 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2229 /* insert tcp checksum */ in igbvf_tx_queue_adv()
2232 /* insert ip checksum */ in igbvf_tx_queue_adv()
2240 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); in igbvf_tx_queue_adv()
2242 i = tx_ring->next_to_use; in igbvf_tx_queue_adv()
2243 while (count--) { in igbvf_tx_queue_adv()
2244 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_queue_adv()
2246 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in igbvf_tx_queue_adv()
2247 tx_desc->read.cmd_type_len = in igbvf_tx_queue_adv()
2248 cpu_to_le32(cmd_type_len | buffer_info->length); in igbvf_tx_queue_adv()
2249 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igbvf_tx_queue_adv()
2251 if (i == tx_ring->count) in igbvf_tx_queue_adv()
2255 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); in igbvf_tx_queue_adv()
2258 * applicable for weak-ordered memory model archs, in igbvf_tx_queue_adv()
2259 * such as IA-64). in igbvf_tx_queue_adv()
2263 tx_ring->buffer_info[first].next_to_watch = tx_desc; in igbvf_tx_queue_adv()
2264 tx_ring->next_to_use = i; in igbvf_tx_queue_adv()
2265 writel(i, adapter->hw.hw_addr + tx_ring->tail); in igbvf_tx_queue_adv()
2279 if (test_bit(__IGBVF_DOWN, &adapter->state)) { in igbvf_xmit_frame_ring_adv()
2284 if (skb->len <= 0) { in igbvf_xmit_frame_ring_adv()
2291 * + 1 desc for skb->data, in igbvf_xmit_frame_ring_adv()
2295 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { in igbvf_xmit_frame_ring_adv()
2309 first = tx_ring->next_to_use; in igbvf_xmit_frame_ring_adv()
2320 (skb->ip_summed == CHECKSUM_PARTIAL)) in igbvf_xmit_frame_ring_adv()
2330 first, skb->len, hdr_len); in igbvf_xmit_frame_ring_adv()
2335 tx_ring->buffer_info[first].time_stamp = 0; in igbvf_xmit_frame_ring_adv()
2336 tx_ring->next_to_use = first; in igbvf_xmit_frame_ring_adv()
2348 if (test_bit(__IGBVF_DOWN, &adapter->state)) { in igbvf_xmit_frame()
2353 tx_ring = &adapter->tx_ring[0]; in igbvf_xmit_frame()
2359 * igbvf_tx_timeout - Respond to a Tx Hang
2368 adapter->tx_timeout_count++; in igbvf_tx_timeout()
2369 schedule_work(&adapter->reset_task); in igbvf_tx_timeout()
2382 * igbvf_change_mtu - Change the Maximum Transfer Unit
2393 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_change_mtu()
2396 adapter->max_frame_size = max_frame; in igbvf_change_mtu()
2403 * i.e. RXBUFFER_2048 --> size-4096 slab in igbvf_change_mtu()
2409 adapter->rx_buffer_len = 1024; in igbvf_change_mtu()
2411 adapter->rx_buffer_len = 2048; in igbvf_change_mtu()
2414 adapter->rx_buffer_len = 16384; in igbvf_change_mtu()
2416 adapter->rx_buffer_len = PAGE_SIZE / 2; in igbvf_change_mtu()
2422 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + in igbvf_change_mtu()
2426 netdev->mtu, new_mtu); in igbvf_change_mtu()
2427 WRITE_ONCE(netdev->mtu, new_mtu); in igbvf_change_mtu()
2434 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_change_mtu()
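The MTU path selects rx_buffer_len from max_frame in coarse steps (1024, 2048, then half a page), then falls back to a standard-frame buffer for ordinary MTUs so allocations do not spill into the next slab size. The sketch below is a rough re-statement of that bucketing under the assumption of 4 KiB pages; the exact conditions in the elided branches may differ slightly.

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096u  /* assumption: 4 KiB pages */
    #define ETH_HLEN        14u
    #define ETH_FRAME_LEN   1514u
    #define ETH_FCS_LEN     4u
    #define VLAN_HLEN       4u

    /* Rough re-statement of the Rx buffer sizing in igbvf_change_mtu(). */
    static unsigned int rx_buffer_len_for(unsigned int mtu)
    {
            unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;
            unsigned int len;

            if (max_frame <= 1024)
                    len = 1024;
            else if (max_frame <= 2048)
                    len = 2048;
            else
                    len = SKETCH_PAGE_SIZE / 2;

            /* standard-MTU frames get a standard-frame buffer instead */
            if (max_frame <= ETH_FRAME_LEN + ETH_FCS_LEN ||
                max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
                    len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;

            return len;
    }

    int main(void)
    {
            printf("mtu 1500 -> %u byte buffers\n", rx_buffer_len_for(1500));
            printf("mtu 9000 -> %u byte buffers\n", rx_buffer_len_for(9000));
            return 0;
    }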
2443 return -EOPNOTSUPP; in igbvf_ioctl()
2455 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); in igbvf_suspend()
2490 igbvf_suspend(&pdev->dev); in igbvf_shutdown()
2494 /* Polling 'interrupt' - used by things like netconsole to send skbs
2495 * without having to re-enable interrupts. It's not called while
2502 disable_irq(adapter->pdev->irq); in igbvf_netpoll()
2504 igbvf_clean_tx_irq(adapter->tx_ring); in igbvf_netpoll()
2506 enable_irq(adapter->pdev->irq); in igbvf_netpoll()
2511 * igbvf_io_error_detected - called when PCI error is detected
2538 * igbvf_io_slot_reset - called after the pci bus has been reset.
2541 * Restart the card from scratch, as if from a cold-boot. Implementation
2542 * resembles the first-half of the igbvf_resume routine.
2550 dev_err(&pdev->dev, in igbvf_io_slot_reset()
2551 "Cannot re-enable PCI device after reset.\n"); in igbvf_io_slot_reset()
2562 * igbvf_io_resume - called when traffic can start flowing again.
2567 * second-half of the igbvf_resume routine.
2576 dev_err(&pdev->dev, in igbvf_io_resume()
2586 * igbvf_io_prepare - prepare device driver for PCI reset
2594 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_io_prepare()
2600 * igbvf_io_reset_done - PCI reset done, device driver reset can begin
2609 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_io_reset_done()
2614 struct e1000_hw *hw = &adapter->hw; in igbvf_print_device_info() local
2615 struct net_device *netdev = adapter->netdev; in igbvf_print_device_info()
2616 struct pci_dev *pdev = adapter->pdev; in igbvf_print_device_info()
2618 if (hw->mac.type == e1000_vfadapt_i350) in igbvf_print_device_info()
2619 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); in igbvf_print_device_info()
2621 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); in igbvf_print_device_info()
2622 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); in igbvf_print_device_info()
2631 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; in igbvf_set_features()
2633 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; in igbvf_set_features()
2656 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igbvf_features_check()
2666 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igbvf_features_check()
2691 * igbvf_probe - Device Initialization Routine
2705 struct e1000_hw *hw; in igbvf_probe() local
2706 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; in igbvf_probe()
2713 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igbvf_probe()
2715 dev_err(&pdev->dev, in igbvf_probe()
2716 "No usable DMA configuration, aborting\n"); in igbvf_probe()
2726 err = -ENOMEM; in igbvf_probe()
2731 SET_NETDEV_DEV(netdev, &pdev->dev); in igbvf_probe()
2735 hw = &adapter->hw; in igbvf_probe()
2736 adapter->netdev = netdev; in igbvf_probe()
2737 adapter->pdev = pdev; in igbvf_probe()
2738 adapter->ei = ei; in igbvf_probe()
2739 adapter->pba = ei->pba; in igbvf_probe()
2740 adapter->flags = ei->flags; in igbvf_probe()
2741 adapter->hw.back = adapter; in igbvf_probe()
2742 adapter->hw.mac.type = ei->mac; in igbvf_probe()
2743 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igbvf_probe()
2747 hw->vendor_id = pdev->vendor; in igbvf_probe()
2748 hw->device_id = pdev->device; in igbvf_probe()
2749 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igbvf_probe()
2750 hw->subsystem_device_id = pdev->subsystem_device; in igbvf_probe()
2751 hw->revision_id = pdev->revision; in igbvf_probe()
2753 err = -EIO; in igbvf_probe()
2754 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), in igbvf_probe()
2757 if (!adapter->hw.hw_addr) in igbvf_probe()
2760 if (ei->get_variants) { in igbvf_probe()
2761 err = ei->get_variants(adapter); in igbvf_probe()
2772 netdev->netdev_ops = &igbvf_netdev_ops; in igbvf_probe()
2775 netdev->watchdog_timeo = 5 * HZ; in igbvf_probe()
2776 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in igbvf_probe()
2778 netdev->hw_features = NETIF_F_SG | in igbvf_probe()
2792 netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; in igbvf_probe()
2793 netdev->hw_features |= NETIF_F_GSO_PARTIAL | in igbvf_probe()
2796 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in igbvf_probe()
2798 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igbvf_probe()
2799 netdev->mpls_features |= NETIF_F_HW_CSUM; in igbvf_probe()
2800 netdev->hw_enc_features |= netdev->vlan_features; in igbvf_probe()
2803 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in igbvf_probe()
2807 /* MTU range: 68 - 9216 */ in igbvf_probe()
2808 netdev->min_mtu = ETH_MIN_MTU; in igbvf_probe()
2809 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igbvf_probe()
2811 spin_lock_bh(&hw->mbx_lock); in igbvf_probe()
2814 err = hw->mac.ops.reset_hw(hw); in igbvf_probe()
2816 dev_info(&pdev->dev, in igbvf_probe()
2819 err = hw->mac.ops.read_mac_addr(hw); in igbvf_probe()
2821 dev_info(&pdev->dev, "Error reading MAC address.\n"); in igbvf_probe()
2822 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in igbvf_probe()
2823 dev_info(&pdev->dev, in igbvf_probe()
2825 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in igbvf_probe()
2828 spin_unlock_bh(&hw->mbx_lock); in igbvf_probe()
2830 if (!is_valid_ether_addr(netdev->dev_addr)) { in igbvf_probe()
2831 dev_info(&pdev->dev, "Assigning random MAC address.\n"); in igbvf_probe()
2833 memcpy(adapter->hw.mac.addr, netdev->dev_addr, in igbvf_probe()
2834 netdev->addr_len); in igbvf_probe()
2837 timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0); in igbvf_probe()
2839 INIT_WORK(&adapter->reset_task, igbvf_reset_task); in igbvf_probe()
2840 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); in igbvf_probe()
2843 adapter->rx_ring->count = 1024; in igbvf_probe()
2844 adapter->tx_ring->count = 1024; in igbvf_probe()
2849 /* set hardware-specific flags */ in igbvf_probe()
2850 if (adapter->hw.mac.type == e1000_vfadapt_i350) in igbvf_probe()
2851 adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP; in igbvf_probe()
2853 strcpy(netdev->name, "eth%d"); in igbvf_probe()
2869 netif_napi_del(&adapter->rx_ring->napi); in igbvf_probe()
2870 kfree(adapter->tx_ring); in igbvf_probe()
2871 kfree(adapter->rx_ring); in igbvf_probe()
2875 iounmap(adapter->hw.hw_addr); in igbvf_probe()
2887 * igbvf_remove - Device Removal Routine
2892 * Hot-Plug event, or because the driver is going to be removed from
2899 struct e1000_hw *hw = &adapter->hw; in igbvf_remove() local
2904 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_remove()
2905 timer_delete_sync(&adapter->watchdog_timer); in igbvf_remove()
2907 cancel_work_sync(&adapter->reset_task); in igbvf_remove()
2908 cancel_work_sync(&adapter->watchdog_task); in igbvf_remove()
2917 netif_napi_del(&adapter->rx_ring->napi); in igbvf_remove()
2918 kfree(adapter->tx_ring); in igbvf_remove()
2919 kfree(adapter->rx_ring); in igbvf_remove()
2921 iounmap(hw->hw_addr); in igbvf_remove()
2922 if (hw->flash_address) in igbvf_remove()
2923 iounmap(hw->flash_address); in igbvf_remove()
2961 * igbvf_init_module - Driver Registration Routine
2980 * igbvf_exit_module - Driver Exit Cleanup Routine