Lines Matching +full:disable +full:- +full:eop
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
36 static int debug = -1;
112 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
127 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) in __ew32_prepare()
133 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in __ew32()
136 writel(val, hw->hw_addr + reg); in __ew32()
140 * e1000_regdump - register printout routine
150 switch (reginfo->ofs) { in e1000_regdump()
164 pr_info("%-15s %08x\n", in e1000_regdump()
165 reginfo->name, __er32(hw, reginfo->ofs)); in e1000_regdump()
169 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
170 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
179 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
180 ps_page = &bi->ps_pages[i]; in e1000e_dump_ps_pages()
182 if (ps_page->page) { in e1000e_dump_ps_pages()
185 16, 1, page_address(ps_page->page), in e1000e_dump_ps_pages()
192 * e1000e_dump - Print registers, Tx-ring and Rx-ring
197 struct net_device *netdev = adapter->netdev; in e1000e_dump()
198 struct e1000_hw *hw = &adapter->hw; in e1000e_dump()
200 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump()
207 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump()
224 dev_info(&adapter->pdev->dev, "Net device Info\n"); in e1000e_dump()
226 pr_info("%-15s %016lX %016lX\n", netdev->name, in e1000e_dump()
227 netdev->state, dev_trans_start(netdev)); in e1000e_dump()
231 dev_info(&adapter->pdev->dev, "Register Dump\n"); in e1000e_dump()
234 reginfo->name; reginfo++) { in e1000e_dump()
242 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); in e1000e_dump()
243 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in e1000e_dump()
244 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
246 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
247 (unsigned long long)buffer_info->dma, in e1000e_dump()
248 buffer_info->length, in e1000e_dump()
249 buffer_info->next_to_watch, in e1000e_dump()
250 (unsigned long long)buffer_info->time_stamp); in e1000e_dump()
256 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); in e1000e_dump()
258 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) in e1000e_dump()
261 * +--------------------------------------------------------------+ in e1000e_dump()
263 * +--------------------------------------------------------------+ in e1000e_dump()
265 * +--------------------------------------------------------------+ in e1000e_dump()
270 * +----------------------------------------------------------------+ in e1000e_dump()
272 * +----------------------------------------------------------------+ in e1000e_dump()
274 * +----------------------------------------------------------------+ in e1000e_dump()
278 * +----------------------------------------------------------------+ in e1000e_dump()
280 * +----------------------------------------------------------------+ in e1000e_dump()
282 * +----------------------------------------------------------------+ in e1000e_dump()
285 …Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
286 …Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
287 …Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
288 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
291 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
293 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
295 else if (i == tx_ring->next_to_use) in e1000e_dump()
297 else if (i == tx_ring->next_to_clean) in e1000e_dump()
302 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : in e1000e_dump()
303 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), in e1000e_dump()
305 (unsigned long long)le64_to_cpu(u0->a), in e1000e_dump()
306 (unsigned long long)le64_to_cpu(u0->b), in e1000e_dump()
307 (unsigned long long)buffer_info->dma, in e1000e_dump()
308 buffer_info->length, buffer_info->next_to_watch, in e1000e_dump()
309 (unsigned long long)buffer_info->time_stamp, in e1000e_dump()
310 buffer_info->skb, next_desc); in e1000e_dump()
312 if (netif_msg_pktdata(adapter) && buffer_info->skb) in e1000e_dump()
314 16, 1, buffer_info->skb->data, in e1000e_dump()
315 buffer_info->skb->len, true); in e1000e_dump()
320 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); in e1000e_dump()
323 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
329 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); in e1000e_dump()
330 switch (adapter->rx_ps_pages) { in e1000e_dump()
336 * +-----------------------------------------------------+ in e1000e_dump()
338 * +-----------------------------------------------------+ in e1000e_dump()
340 * +-----------------------------------------------------+ in e1000e_dump()
342 * +-----------------------------------------------------+ in e1000e_dump()
344 * +-----------------------------------------------------+ in e1000e_dump()
346 …0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt… in e1000e_dump()
347 /* [Extended] Receive Descriptor (Write-Back) Format in e1000e_dump()
350 * +------------------------------------------------------+ in e1000e_dump()
353 * +------------------------------------------------------+ in e1000e_dump()
355 * +------------------------------------------------------+ in e1000e_dump()
358 …h] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-… in e1000e_dump()
359 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
361 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
365 le32_to_cpu(rx_desc_ps->wb.middle.status_error); in e1000e_dump()
367 if (i == rx_ring->next_to_use) in e1000e_dump()
369 else if (i == rx_ring->next_to_clean) in e1000e_dump()
376 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
378 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
379 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
380 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
381 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
382 buffer_info->skb, next_desc); in e1000e_dump()
386 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
387 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
388 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
389 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
390 (unsigned long long)buffer_info->dma, in e1000e_dump()
391 buffer_info->skb, next_desc); in e1000e_dump()
403 * +-----------------------------------------------------+ in e1000e_dump()
405 * +-----------------------------------------------------+ in e1000e_dump()
407 * +-----------------------------------------------------+ in e1000e_dump()
409 …pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read… in e1000e_dump()
410 /* Extended Receive Descriptor (Write-Back) Format in e1000e_dump()
413 * +------------------------------------------------------+ in e1000e_dump()
415 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | in e1000e_dump()
418 * +------------------------------------------------------+ in e1000e_dump()
420 * +------------------------------------------------------+ in e1000e_dump()
423 …pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"… in e1000e_dump()
425 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
428 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
431 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000e_dump()
433 if (i == rx_ring->next_to_use) in e1000e_dump()
435 else if (i == rx_ring->next_to_clean) in e1000e_dump()
442 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
444 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
445 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
446 buffer_info->skb, next_desc); in e1000e_dump()
450 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
451 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
452 (unsigned long long)buffer_info->dma, in e1000e_dump()
453 buffer_info->skb, next_desc); in e1000e_dump()
456 buffer_info->skb) in e1000e_dump()
460 buffer_info->skb->data, in e1000e_dump()
461 adapter->rx_buffer_len, in e1000e_dump()
469 * e1000_desc_unused - calculate if we have unused descriptors
474 if (ring->next_to_clean > ring->next_to_use) in e1000_desc_unused()
475 return ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
477 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
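
The arithmetic above is standard circular-buffer bookkeeping: one slot is always left empty so that next_to_use == next_to_clean can only mean an empty ring. A minimal user-space model of the same calculation (names and ring size are illustrative, not from the driver):

#include <assert.h>

/* Same math as e1000_desc_unused(): the "- 1" keeps one slot
 * permanently unused so a full ring is distinguishable from an
 * empty one.
 */
static unsigned int desc_unused(unsigned int count,
                                unsigned int next_to_clean,
                                unsigned int next_to_use)
{
        if (next_to_clean > next_to_use)
                return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
        assert(desc_unused(256, 0, 0) == 255);    /* empty ring */
        assert(desc_unused(256, 10, 9) == 0);     /* completely full */
        assert(desc_unused(256, 100, 40) == 59);  /* clean index ahead */
        return 0;
}
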
481 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
501 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
502 ns = timecounter_cyc2time(&adapter->tc, systim); in e1000e_systim_to_hwtstamp()
503 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
506 hwtstamps->hwtstamp = ns_to_ktime(ns); in e1000e_systim_to_hwtstamp()
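
Stitching the matched lines back together: the raw SYSTIM cycle count is converted to nanoseconds by the timecounter under systim_lock, then stored as a ktime in the skb timestamp structure. A hedged reconstruction (kernel context; the memset of the output structure is an assumption based on common practice):

static void systim_to_hwtstamp(struct e1000_adapter *adapter,
                               struct skb_shared_hwtstamps *hwtstamps,
                               u64 systim)
{
        unsigned long flags;
        u64 ns;

        /* timecounter state is shared with the PTP clock operations */
        spin_lock_irqsave(&adapter->systim_lock, flags);
        ns = timecounter_cyc2time(&adapter->tc, systim);
        spin_unlock_irqrestore(&adapter->systim_lock, flags);

        memset(hwtstamps, 0, sizeof(*hwtstamps));
        hwtstamps->hwtstamp = ns_to_ktime(ns);
}
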
510 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
522 struct e1000_hw *hw = &adapter->hw; in e1000e_rx_hwtstamp()
525 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || in e1000e_rx_hwtstamp()
541 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; in e1000e_rx_hwtstamp()
545 * e1000_receive_skb - helper function to handle Rx indications
560 skb->protocol = eth_type_trans(skb, netdev); in e1000_receive_skb()
565 napi_gro_receive(&adapter->napi, skb); in e1000_receive_skb()
569 * e1000_rx_checksum - Receive Checksum Offload
583 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) in e1000_rx_checksum()
593 adapter->hw_csum_err++; in e1000_rx_checksum()
602 skb->ip_summed = CHECKSUM_UNNECESSARY; in e1000_rx_checksum()
603 adapter->hw_csum_good++; in e1000_rx_checksum()
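
The checksum-offload fragments above reduce to a short decision chain; a hedged reconstruction follows (kernel context; the error-bit names come from the e1000 descriptor definitions, and the real function also handles the "ignore checksum" and "checksum not calculated" status bits):

static void rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                        struct sk_buff *skb)
{
        u8 errors = (u8)(status_err >> 24);

        skb_checksum_none_assert(skb);

        /* hardware checksum offload disabled by the stack */
        if (!(adapter->netdev->features & NETIF_F_RXCSUM))
                return;

        /* TCP/UDP or IP checksum error reported by hardware: leave
         * ip_summed as CHECKSUM_NONE so the stack verifies it.
         */
        if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                adapter->hw_csum_err++;
                return;
        }

        /* good checksum; the stack can skip software verification */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
}
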
608 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_update_rdt_wa()
609 struct e1000_hw *hw = &adapter->hw; in e1000e_update_rdt_wa()
612 writel(i, rx_ring->tail); in e1000e_update_rdt_wa()
614 if (unlikely(i != readl(rx_ring->tail))) { in e1000e_update_rdt_wa()
618 e_err("ME firmware caused invalid RDT - resetting\n"); in e1000e_update_rdt_wa()
619 schedule_work(&adapter->reset_task); in e1000e_update_rdt_wa()
625 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_update_tdt_wa()
626 struct e1000_hw *hw = &adapter->hw; in e1000e_update_tdt_wa()
629 writel(i, tx_ring->tail); in e1000e_update_tdt_wa()
631 if (unlikely(i != readl(tx_ring->tail))) { in e1000e_update_tdt_wa()
635 e_err("ME firmware caused invalid TDT - resetting\n"); in e1000e_update_tdt_wa()
636 schedule_work(&adapter->reset_task); in e1000e_update_tdt_wa()
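
Both *_update_*_wa() helpers implement the same ME-firmware workaround: wait for the PCIm-to-PCI arbiter, write the tail register, then read it back; a mismatch means the Manageability Engine corrupted the write, so DMA is stopped and a full reset is scheduled. A hedged reconstruction of the Tx side from the matched lines:

static void update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;

        __ew32_prepare(hw);             /* poll FWSM until ME releases */
        writel(i, tx_ring->tail);

        if (unlikely(i != readl(tx_ring->tail))) {
                u32 tctl = er32(TCTL);

                ew32(TCTL, tctl & ~E1000_TCTL_EN);      /* stop Tx DMA */
                e_err("ME firmware caused invalid TDT - resetting\n");
                schedule_work(&adapter->reset_task);
        }
}
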
641 * e1000_alloc_rx_buffers - Replace used receive buffers
649 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers()
650 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers()
651 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers()
656 unsigned int bufsz = adapter->rx_buffer_len; in e1000_alloc_rx_buffers()
658 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers()
659 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
661 while (cleaned_count--) { in e1000_alloc_rx_buffers()
662 skb = buffer_info->skb; in e1000_alloc_rx_buffers()
671 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers()
675 buffer_info->skb = skb; in e1000_alloc_rx_buffers()
677 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers()
678 adapter->rx_buffer_len, in e1000_alloc_rx_buffers()
680 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers()
681 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers()
682 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers()
687 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers()
689 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers()
692 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers()
693 * such as IA-64). in e1000_alloc_rx_buffers()
696 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers()
699 writel(i, rx_ring->tail); in e1000_alloc_rx_buffers()
702 if (i == rx_ring->count) in e1000_alloc_rx_buffers()
704 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
707 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers()
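
This refill path, and the packet-split and jumbo variants below, all publish descriptors with the same batching idiom visible in the fragments: the tail register is touched only when the index crosses an E1000_RX_BUFFER_WRITE boundary (a power of two, so the modulo is a mask), after a write barrier for weakly ordered CPUs. The recurring fragment, annotated (the packet-split variant writes i << 1, apparently because its descriptors are twice the legacy size):

/* inside the refill loop; i is the next descriptor to hand to hardware */
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
        /* Force descriptor memory writes to be visible before the
         * tail write reaches the device (matters on weak-ordered
         * memory models such as IA-64).
         */
        wmb();
        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                e1000e_update_rdt_wa(rx_ring, i);   /* verified write */
        else
                writel(i, rx_ring->tail);
}
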
711 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
719 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers_ps()
720 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers_ps()
721 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers_ps()
728 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers_ps()
729 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
731 while (cleaned_count--) { in e1000_alloc_rx_buffers_ps()
735 ps_page = &buffer_info->ps_pages[j]; in e1000_alloc_rx_buffers_ps()
736 if (j >= adapter->rx_ps_pages) { in e1000_alloc_rx_buffers_ps()
738 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
742 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
743 ps_page->page = alloc_page(gfp); in e1000_alloc_rx_buffers_ps()
744 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
745 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
748 ps_page->dma = dma_map_page(&pdev->dev, in e1000_alloc_rx_buffers_ps()
749 ps_page->page, in e1000_alloc_rx_buffers_ps()
752 if (dma_mapping_error(&pdev->dev, in e1000_alloc_rx_buffers_ps()
753 ps_page->dma)) { in e1000_alloc_rx_buffers_ps()
754 dev_err(&adapter->pdev->dev, in e1000_alloc_rx_buffers_ps()
756 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
761 * didn't change because each write-back in e1000_alloc_rx_buffers_ps()
764 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
765 cpu_to_le64(ps_page->dma); in e1000_alloc_rx_buffers_ps()
768 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
772 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
776 buffer_info->skb = skb; in e1000_alloc_rx_buffers_ps()
777 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers_ps()
778 adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
780 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers_ps()
781 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers_ps()
782 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
785 buffer_info->skb = NULL; in e1000_alloc_rx_buffers_ps()
789 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
791 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers_ps()
794 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers_ps()
795 * such as IA-64). in e1000_alloc_rx_buffers_ps()
798 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers_ps()
801 writel(i << 1, rx_ring->tail); in e1000_alloc_rx_buffers_ps()
805 if (i == rx_ring->count) in e1000_alloc_rx_buffers_ps()
807 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
811 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers_ps()
815 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
824 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_jumbo_rx_buffers()
825 struct net_device *netdev = adapter->netdev; in e1000_alloc_jumbo_rx_buffers()
826 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_jumbo_rx_buffers()
831 unsigned int bufsz = 256 - 16; /* for skb_reserve */ in e1000_alloc_jumbo_rx_buffers()
833 i = rx_ring->next_to_use; in e1000_alloc_jumbo_rx_buffers()
834 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
836 while (cleaned_count--) { in e1000_alloc_jumbo_rx_buffers()
837 skb = buffer_info->skb; in e1000_alloc_jumbo_rx_buffers()
846 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
850 buffer_info->skb = skb; in e1000_alloc_jumbo_rx_buffers()
853 if (!buffer_info->page) { in e1000_alloc_jumbo_rx_buffers()
854 buffer_info->page = alloc_page(gfp); in e1000_alloc_jumbo_rx_buffers()
855 if (unlikely(!buffer_info->page)) { in e1000_alloc_jumbo_rx_buffers()
856 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
861 if (!buffer_info->dma) { in e1000_alloc_jumbo_rx_buffers()
862 buffer_info->dma = dma_map_page(&pdev->dev, in e1000_alloc_jumbo_rx_buffers()
863 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
866 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_jumbo_rx_buffers()
867 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
873 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_jumbo_rx_buffers()
875 if (unlikely(++i == rx_ring->count)) in e1000_alloc_jumbo_rx_buffers()
877 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
880 if (likely(rx_ring->next_to_use != i)) { in e1000_alloc_jumbo_rx_buffers()
881 rx_ring->next_to_use = i; in e1000_alloc_jumbo_rx_buffers()
882 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
883 i = (rx_ring->count - 1); in e1000_alloc_jumbo_rx_buffers()
887 * applicable for weak-ordered memory model archs, in e1000_alloc_jumbo_rx_buffers()
888 * such as IA-64). in e1000_alloc_jumbo_rx_buffers()
891 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_jumbo_rx_buffers()
894 writel(i, rx_ring->tail); in e1000_alloc_jumbo_rx_buffers()
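
One subtlety in the jumbo allocator's tail update: unlike the other refill paths it writes the index of the last *filled* slot, hence the decrement-with-wrap just before the writel(). A small user-space check of that wrap (ring size illustrative):

#include <assert.h>

/* Mirrors "if (unlikely(i-- == 0)) i = (rx_ring->count - 1);" above:
 * step back one slot, wrapping from 0 to the end of the ring.
 */
static unsigned int last_filled(unsigned int next_free, unsigned int count)
{
        return next_free ? next_free - 1 : count - 1;
}

int main(void)
{
        assert(last_filled(5, 256) == 4);
        assert(last_filled(0, 256) == 255);  /* wraps to the last slot */
        return 0;
}
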
901 if (netdev->features & NETIF_F_RXHASH) in e1000_rx_hash()
906 * e1000_clean_rx_irq - Send received data up the network stack
917 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq()
918 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq()
919 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq()
920 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq()
929 i = rx_ring->next_to_clean; in e1000_clean_rx_irq()
931 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
932 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
942 skb = buffer_info->skb; in e1000_clean_rx_irq()
943 buffer_info->skb = NULL; in e1000_clean_rx_irq()
945 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq()
948 if (i == rx_ring->count) in e1000_clean_rx_irq()
953 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
957 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq()
958 adapter->rx_buffer_len, DMA_FROM_DEVICE); in e1000_clean_rx_irq()
959 buffer_info->dma = 0; in e1000_clean_rx_irq()
961 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_rx_irq()
963 /* !EOP means multiple descriptors were used to store a single in e1000_clean_rx_irq()
965 * need to toss every packet with the EOP bit clear and the in e1000_clean_rx_irq()
966 * next frame that _does_ have the EOP bit set, as it is by in e1000_clean_rx_irq()
970 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
972 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq()
976 buffer_info->skb = skb; in e1000_clean_rx_irq()
978 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
983 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq()
985 buffer_info->skb = skb; in e1000_clean_rx_irq()
990 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq()
995 if (netdev->features & NETIF_F_RXFCS) in e1000_clean_rx_irq()
996 total_rx_bytes -= 4; in e1000_clean_rx_irq()
998 length -= 4; in e1000_clean_rx_irq()
1010 napi_alloc_skb(&adapter->napi, length); in e1000_clean_rx_irq()
1013 -NET_IP_ALIGN, in e1000_clean_rx_irq()
1014 (skb->data - in e1000_clean_rx_irq()
1019 buffer_info->skb = skb; in e1000_clean_rx_irq()
1030 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq()
1033 rx_desc->wb.upper.vlan); in e1000_clean_rx_irq()
1036 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1040 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq()
1049 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
1051 rx_ring->next_to_clean = i; in e1000_clean_rx_irq()
1055 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq()
1057 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq()
1058 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq()
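
The "!EOP" comment above describes a small state machine: this receive path assumes every packet fits one buffer, so a descriptor without End Of Packet set poisons the stream until the next EOP arrives. A hedged reconstruction of that logic as it sits inside the cleaning loop (staterr, buffer_info and skb are the loop locals seen in the fragments):

if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
        adapter->flags2 |= FLAG2_IS_DISCARDING;

if (adapter->flags2 & FLAG2_IS_DISCARDING) {
        /* all receives must fit into a single buffer; recycle this
         * one and keep dropping until the descriptor carrying EOP
         */
        buffer_info->skb = skb;
        if (staterr & E1000_RXD_STAT_EOP)
                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
        /* continue with the next descriptor, delivering nothing */
}
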
1066 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_put_txbuf()
1068 if (buffer_info->dma) { in e1000_put_txbuf()
1069 if (buffer_info->mapped_as_page) in e1000_put_txbuf()
1070 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1071 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1073 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1074 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1075 buffer_info->dma = 0; in e1000_put_txbuf()
1077 if (buffer_info->skb) { in e1000_put_txbuf()
1079 dev_kfree_skb_any(buffer_info->skb); in e1000_put_txbuf()
1081 dev_consume_skb_any(buffer_info->skb); in e1000_put_txbuf()
1082 buffer_info->skb = NULL; in e1000_put_txbuf()
1084 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1092 struct net_device *netdev = adapter->netdev; in e1000_print_hw_hang()
1093 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_print_hw_hang()
1094 unsigned int i = tx_ring->next_to_clean; in e1000_print_hw_hang()
1095 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; in e1000_print_hw_hang() local
1096 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_print_hw_hang()
1097 struct e1000_hw *hw = &adapter->hw; in e1000_print_hw_hang()
1101 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_print_hw_hang()
1104 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { in e1000_print_hw_hang()
1105 /* May be blocked on write-back; flush and detect again in e1000_print_hw_hang()
1108 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1114 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1117 adapter->tx_hang_recheck = true; in e1000_print_hw_hang()
1120 adapter->tx_hang_recheck = false; in e1000_print_hw_hang()
1134 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); in e1000_print_hw_hang()
1149 "PHY 1000BASE-T Status <%x>\n" in e1000_print_hw_hang()
1152 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, in e1000_print_hw_hang()
1153 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, in e1000_print_hw_hang()
1154 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), in e1000_print_hw_hang()
1160 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) in e1000_print_hw_hang()
1165 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1176 struct e1000_hw *hw = &adapter->hw; in e1000e_tx_hwtstamp_work()
1179 struct sk_buff *skb = adapter->tx_hwtstamp_skb; in e1000e_tx_hwtstamp_work()
1191 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1196 } else if (time_after(jiffies, adapter->tx_hwtstamp_start in e1000e_tx_hwtstamp_work()
1197 + adapter->tx_timeout_factor * HZ)) { in e1000e_tx_hwtstamp_work()
1198 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); in e1000e_tx_hwtstamp_work()
1199 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1200 adapter->tx_hwtstamp_timeouts++; in e1000e_tx_hwtstamp_work()
1204 schedule_work(&adapter->tx_hwtstamp_work); in e1000e_tx_hwtstamp_work()
1209 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1217 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_irq()
1218 struct net_device *netdev = adapter->netdev; in e1000_clean_tx_irq()
1219 struct e1000_hw *hw = &adapter->hw; in e1000_clean_tx_irq()
1222 unsigned int i, eop; in e1000_clean_tx_irq() local
1227 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
1228 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1229 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
1231 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && in e1000_clean_tx_irq()
1232 (count < tx_ring->count)) { in e1000_clean_tx_irq()
1238 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
1239 cleaned = (i == eop); in e1000_clean_tx_irq()
1242 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
1243 total_tx_bytes += buffer_info->bytecount; in e1000_clean_tx_irq()
1244 if (buffer_info->skb) { in e1000_clean_tx_irq()
1245 bytes_compl += buffer_info->skb->len; in e1000_clean_tx_irq()
1251 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1254 if (i == tx_ring->count) in e1000_clean_tx_irq()
1258 if (i == tx_ring->next_to_use) in e1000_clean_tx_irq()
1260 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1261 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
1264 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
1277 !(test_bit(__E1000_DOWN, &adapter->state))) { in e1000_clean_tx_irq()
1279 ++adapter->restart_queue; in e1000_clean_tx_irq()
1283 if (adapter->detect_tx_hung) { in e1000_clean_tx_irq()
1287 adapter->detect_tx_hung = false; in e1000_clean_tx_irq()
1288 if (tx_ring->buffer_info[i].time_stamp && in e1000_clean_tx_irq()
1289 time_after(jiffies, tx_ring->buffer_info[i].time_stamp in e1000_clean_tx_irq()
1290 + (adapter->tx_timeout_factor * HZ)) && in e1000_clean_tx_irq()
1292 schedule_work(&adapter->print_hang_task); in e1000_clean_tx_irq()
1294 adapter->tx_hang_recheck = false; in e1000_clean_tx_irq()
1296 adapter->total_tx_bytes += total_tx_bytes; in e1000_clean_tx_irq()
1297 adapter->total_tx_packets += total_tx_packets; in e1000_clean_tx_irq()
1298 return count < tx_ring->count; in e1000_clean_tx_irq()
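
The Tx cleanup fragments fit together as follows: each transmitted skb occupies a run of descriptors whose last index was recorded in next_to_watch ("eop") at transmit time, and a run may be reclaimed only once hardware sets Descriptor Done on that final descriptor. A hedged reconstruction of the walk (buffer unmapping and statistics elided):

i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);

while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
       (count < tx_ring->count)) {
        bool cleaned = false;

        for (; !cleaned; count++) {
                cleaned = (i == eop);   /* last descriptor of this skb */
                /* e1000_put_txbuf() unmaps and frees the buffer here */
                if (++i == tx_ring->count)
                        i = 0;
        }
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);
}
tx_ring->next_to_clean = i;
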
1302 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1313 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq_ps()
1314 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq_ps()
1316 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq_ps()
1317 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq_ps()
1327 i = rx_ring->next_to_clean; in e1000_clean_rx_irq_ps()
1329 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1330 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1336 skb = buffer_info->skb; in e1000_clean_rx_irq_ps()
1340 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq_ps()
1343 if (i == rx_ring->count) in e1000_clean_rx_irq_ps()
1348 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1352 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq_ps()
1353 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); in e1000_clean_rx_irq_ps()
1354 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1356 /* see !EOP comment in other Rx routine */ in e1000_clean_rx_irq_ps()
1358 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1360 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq_ps()
1364 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1369 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq_ps()
1374 length = le16_to_cpu(rx_desc->wb.middle.length0); in e1000_clean_rx_irq_ps()
1389 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1396 ((length + l1) <= adapter->rx_ps_bsize0)) { in e1000_clean_rx_irq_ps()
1397 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1399 dma_sync_single_for_cpu(&pdev->dev, in e1000_clean_rx_irq_ps()
1400 ps_page->dma, in e1000_clean_rx_irq_ps()
1404 page_address(ps_page->page), l1); in e1000_clean_rx_irq_ps()
1405 dma_sync_single_for_device(&pdev->dev, in e1000_clean_rx_irq_ps()
1406 ps_page->dma, in e1000_clean_rx_irq_ps()
1411 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1412 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1413 l1 -= 4; in e1000_clean_rx_irq_ps()
1422 length = le16_to_cpu(rx_desc->wb.upper.length[j]); in e1000_clean_rx_irq_ps()
1426 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_irq_ps()
1427 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_irq_ps()
1429 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1430 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1431 ps_page->page = NULL; in e1000_clean_rx_irq_ps()
1432 skb->len += length; in e1000_clean_rx_irq_ps()
1433 skb->data_len += length; in e1000_clean_rx_irq_ps()
1434 skb->truesize += PAGE_SIZE; in e1000_clean_rx_irq_ps()
1440 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1441 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1442 pskb_trim(skb, skb->len - 4); in e1000_clean_rx_irq_ps()
1446 total_rx_bytes += skb->len; in e1000_clean_rx_irq_ps()
1451 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq_ps()
1453 if (rx_desc->wb.upper.header_status & in e1000_clean_rx_irq_ps()
1455 adapter->rx_hdr_split++; in e1000_clean_rx_irq_ps()
1458 rx_desc->wb.middle.vlan); in e1000_clean_rx_irq_ps()
1461 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1462 buffer_info->skb = NULL; in e1000_clean_rx_irq_ps()
1466 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq_ps()
1475 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1477 rx_ring->next_to_clean = i; in e1000_clean_rx_irq_ps()
1481 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq_ps()
1483 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq_ps()
1484 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq_ps()
1491 bi->page = NULL; in e1000_consume_page()
1492 skb->len += length; in e1000_consume_page()
1493 skb->data_len += length; in e1000_consume_page()
1494 skb->truesize += PAGE_SIZE; in e1000_consume_page()
1498 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1509 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_jumbo_rx_irq()
1510 struct net_device *netdev = adapter->netdev; in e1000_clean_jumbo_rx_irq()
1511 struct pci_dev *pdev = adapter->pdev; in e1000_clean_jumbo_rx_irq()
1521 i = rx_ring->next_to_clean; in e1000_clean_jumbo_rx_irq()
1523 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1524 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1534 skb = buffer_info->skb; in e1000_clean_jumbo_rx_irq()
1535 buffer_info->skb = NULL; in e1000_clean_jumbo_rx_irq()
1538 if (i == rx_ring->count) in e1000_clean_jumbo_rx_irq()
1543 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1547 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, in e1000_clean_jumbo_rx_irq()
1549 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1551 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_jumbo_rx_irq()
1553 /* errors is only valid for DD + EOP descriptors */ in e1000_clean_jumbo_rx_irq()
1556 !(netdev->features & NETIF_F_RXALL)))) { in e1000_clean_jumbo_rx_irq()
1558 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1560 if (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1561 dev_kfree_skb_irq(rx_ring->rx_skb_top); in e1000_clean_jumbo_rx_irq()
1562 rx_ring->rx_skb_top = NULL; in e1000_clean_jumbo_rx_irq()
1565 #define rxtop (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1571 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1576 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1577 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1579 /* re-use the skb, only consumed the page */ in e1000_clean_jumbo_rx_irq()
1580 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1588 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1589 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1591 /* re-use the current skb, we only consumed the in e1000_clean_jumbo_rx_irq()
1594 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1599 /* no chain, got EOP, this buf is the packet in e1000_clean_jumbo_rx_irq()
1605 page_address(buffer_info->page), in e1000_clean_jumbo_rx_irq()
1607 /* re-use the page, so don't erase in e1000_clean_jumbo_rx_irq()
1608 * buffer_info->page in e1000_clean_jumbo_rx_irq()
1613 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1624 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_jumbo_rx_irq()
1627 total_rx_bytes += skb->len; in e1000_clean_jumbo_rx_irq()
1630 /* eth type trans needs skb->data to point to something */ in e1000_clean_jumbo_rx_irq()
1638 rx_desc->wb.upper.vlan); in e1000_clean_jumbo_rx_irq()
1641 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1645 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_jumbo_rx_irq()
1654 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1656 rx_ring->next_to_clean = i; in e1000_clean_jumbo_rx_irq()
1660 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_jumbo_rx_irq()
1662 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_jumbo_rx_irq()
1663 adapter->total_rx_packets += total_rx_packets; in e1000_clean_jumbo_rx_irq()
1668 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1673 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_ring()
1676 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_ring()
1680 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1681 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_ring()
1682 if (buffer_info->dma) { in e1000_clean_rx_ring()
1683 if (adapter->clean_rx == e1000_clean_rx_irq) in e1000_clean_rx_ring()
1684 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1685 adapter->rx_buffer_len, in e1000_clean_rx_ring()
1687 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) in e1000_clean_rx_ring()
1688 dma_unmap_page(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1690 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) in e1000_clean_rx_ring()
1691 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1692 adapter->rx_ps_bsize0, in e1000_clean_rx_ring()
1694 buffer_info->dma = 0; in e1000_clean_rx_ring()
1697 if (buffer_info->page) { in e1000_clean_rx_ring()
1698 put_page(buffer_info->page); in e1000_clean_rx_ring()
1699 buffer_info->page = NULL; in e1000_clean_rx_ring()
1702 if (buffer_info->skb) { in e1000_clean_rx_ring()
1703 dev_kfree_skb(buffer_info->skb); in e1000_clean_rx_ring()
1704 buffer_info->skb = NULL; in e1000_clean_rx_ring()
1708 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_ring()
1709 if (!ps_page->page) in e1000_clean_rx_ring()
1711 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_ring()
1713 ps_page->dma = 0; in e1000_clean_rx_ring()
1714 put_page(ps_page->page); in e1000_clean_rx_ring()
1715 ps_page->page = NULL; in e1000_clean_rx_ring()
1720 if (rx_ring->rx_skb_top) { in e1000_clean_rx_ring()
1721 dev_kfree_skb(rx_ring->rx_skb_top); in e1000_clean_rx_ring()
1722 rx_ring->rx_skb_top = NULL; in e1000_clean_rx_ring()
1726 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1728 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1729 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1730 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_ring()
1739 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_downshift_workaround()
1742 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); in e1000e_downshift_workaround()
1746 * e1000_intr_msi - Interrupt Handler
1754 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi()
1759 hw->mac.get_link_status = true; in e1000_intr_msi()
1760 /* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr_msi()
1763 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr_msi()
1765 schedule_work(&adapter->downshift_task); in e1000_intr_msi()
1767 /* 80003ES2LAN workaround-- For packet buffer work-around on in e1000_intr_msi()
1768 * link down event; disable receives here in the ISR and reset in e1000_intr_msi()
1772 adapter->flags & FLAG_RX_NEEDS_RESTART) { in e1000_intr_msi()
1773 /* disable receives */ in e1000_intr_msi()
1777 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr_msi()
1780 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msi()
1781 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr_msi()
1785 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr_msi()
1788 adapter->corr_errors += in e1000_intr_msi()
1790 adapter->uncorr_errors += in e1000_intr_msi()
1794 schedule_work(&adapter->reset_task); in e1000_intr_msi()
1800 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msi()
1801 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1802 adapter->total_tx_packets = 0; in e1000_intr_msi()
1803 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1804 adapter->total_rx_packets = 0; in e1000_intr_msi()
1805 __napi_schedule(&adapter->napi); in e1000_intr_msi()
1812 * e1000_intr - Interrupt Handler
1820 struct e1000_hw *hw = &adapter->hw; in e1000_intr()
1823 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1826 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in e1000_intr()
1832 /* Interrupt Auto-Mask...upon reading ICR, in e1000_intr()
1838 hw->mac.get_link_status = true; in e1000_intr()
1839 /* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr()
1842 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr()
1844 schedule_work(&adapter->downshift_task); in e1000_intr()
1846 /* 80003ES2LAN workaround-- in e1000_intr()
1847 * For packet buffer work-around on link down event; in e1000_intr()
1848 * disable receives here in the ISR and in e1000_intr()
1852 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { in e1000_intr()
1853 /* disable receives */ in e1000_intr()
1856 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr()
1859 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1860 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr()
1864 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr()
1867 adapter->corr_errors += in e1000_intr()
1869 adapter->uncorr_errors += in e1000_intr()
1873 schedule_work(&adapter->reset_task); in e1000_intr()
1879 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr()
1880 adapter->total_tx_bytes = 0; in e1000_intr()
1881 adapter->total_tx_packets = 0; in e1000_intr()
1882 adapter->total_rx_bytes = 0; in e1000_intr()
1883 adapter->total_rx_packets = 0; in e1000_intr()
1884 __napi_schedule(&adapter->napi); in e1000_intr()
1894 struct e1000_hw *hw = &adapter->hw; in e1000_msix_other()
1897 if (icr & adapter->eiac_mask) in e1000_msix_other()
1898 ew32(ICS, (icr & adapter->eiac_mask)); in e1000_msix_other()
1901 hw->mac.get_link_status = true; in e1000_msix_other()
1903 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1904 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_msix_other()
1907 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1917 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msix_tx()
1918 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_intr_msix_tx()
1920 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1921 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1925 ew32(ICS, tx_ring->ims_val); in e1000_intr_msix_tx()
1927 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msix_tx()
1928 ew32(IMS, adapter->tx_ring->ims_val); in e1000_intr_msix_tx()
1937 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_intr_msix_rx()
1942 if (rx_ring->set_itr) { in e1000_intr_msix_rx()
1943 u32 itr = rx_ring->itr_val ? in e1000_intr_msix_rx()
1944 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1946 writel(itr, rx_ring->itr_register); in e1000_intr_msix_rx()
1947 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
1950 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msix_rx()
1951 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1952 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1953 __napi_schedule(&adapter->napi); in e1000_intr_msix_rx()
1959 * e1000_configure_msix - Configure MSI-X hardware
1963 * generate MSI-X interrupts.
1967 struct e1000_hw *hw = &adapter->hw; in e1000_configure_msix()
1968 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_msix()
1969 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_msix()
1973 adapter->eiac_mask = 0; in e1000_configure_msix()
1975 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ in e1000_configure_msix()
1976 if (hw->mac.type == e1000_82574) { in e1000_configure_msix()
1984 rx_ring->ims_val = E1000_IMS_RXQ0; in e1000_configure_msix()
1985 adapter->eiac_mask |= rx_ring->ims_val; in e1000_configure_msix()
1986 if (rx_ring->itr_val) in e1000_configure_msix()
1987 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
1988 rx_ring->itr_register); in e1000_configure_msix()
1990 writel(1, rx_ring->itr_register); in e1000_configure_msix()
1994 tx_ring->ims_val = E1000_IMS_TXQ0; in e1000_configure_msix()
1996 if (tx_ring->itr_val) in e1000_configure_msix()
1997 writel(1000000000 / (tx_ring->itr_val * 256), in e1000_configure_msix()
1998 tx_ring->itr_register); in e1000_configure_msix()
2000 writel(1, tx_ring->itr_register); in e1000_configure_msix()
2001 adapter->eiac_mask |= tx_ring->ims_val; in e1000_configure_msix()
2007 if (rx_ring->itr_val) in e1000_configure_msix()
2008 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2009 hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2011 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2018 /* enable MSI-X PBA support */ in e1000_configure_msix()
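
The repeated 1000000000 / (itr_val * 256) expression converts between the two ITR representations: the driver tracks a rate in interrupts per second, while the 82574 EITR registers take an interval in 256 ns units. A user-space check of the conversion:

#include <stdio.h>
#include <stdint.h>

/* rate (interrupts/s) -> EITR interval in 256 ns units, as written
 * to E1000_EITR_82574() above; 0 disables throttling.
 */
static uint32_t itr_to_eitr(uint32_t itr_val)
{
        return itr_val ? 1000000000u / (itr_val * 256u) : 0;
}

int main(void)
{
        printf("20000 ints/s -> EITR %u\n", itr_to_eitr(20000)); /* 195 */
        printf(" 4000 ints/s -> EITR %u\n", itr_to_eitr(4000));  /* 976 */
        return 0;
}
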
2027 if (adapter->msix_entries) { in e1000e_reset_interrupt_capability()
2028 pci_disable_msix(adapter->pdev); in e1000e_reset_interrupt_capability()
2029 kfree(adapter->msix_entries); in e1000e_reset_interrupt_capability()
2030 adapter->msix_entries = NULL; in e1000e_reset_interrupt_capability()
2031 } else if (adapter->flags & FLAG_MSI_ENABLED) { in e1000e_reset_interrupt_capability()
2032 pci_disable_msi(adapter->pdev); in e1000e_reset_interrupt_capability()
2033 adapter->flags &= ~FLAG_MSI_ENABLED; in e1000e_reset_interrupt_capability()
2038 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2049 switch (adapter->int_mode) { in e1000e_set_interrupt_capability()
2051 if (adapter->flags & FLAG_HAS_MSIX) { in e1000e_set_interrupt_capability()
2052 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ in e1000e_set_interrupt_capability()
2053 adapter->msix_entries = kcalloc(adapter->num_vectors, in e1000e_set_interrupt_capability()
2057 if (adapter->msix_entries) { in e1000e_set_interrupt_capability()
2060 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2061 adapter->msix_entries[i].entry = i; in e1000e_set_interrupt_capability()
2063 err = pci_enable_msix_range(a->pdev, in e1000e_set_interrupt_capability()
2064 a->msix_entries, in e1000e_set_interrupt_capability()
2065 a->num_vectors, in e1000e_set_interrupt_capability()
2066 a->num_vectors); in e1000e_set_interrupt_capability()
2070 /* MSI-X failed, so fall through and try MSI */ in e1000e_set_interrupt_capability()
2071 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); in e1000e_set_interrupt_capability()
2074 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000e_set_interrupt_capability()
2077 if (!pci_enable_msi(adapter->pdev)) { in e1000e_set_interrupt_capability()
2078 adapter->flags |= FLAG_MSI_ENABLED; in e1000e_set_interrupt_capability()
2080 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000e_set_interrupt_capability()
2090 adapter->num_vectors = 1; in e1000e_set_interrupt_capability()
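
Read in order, the fragments above form a three-step fallback: request exactly num_vectors MSI-X vectors (RxQ0, TxQ0 and "other"); if that fails, drop to plain MSI; if MSI also fails, run with a single legacy INTx vector. A hedged sketch of the MSI-X attempt (kernel context; error unwinding and the source's local alias of adapter trimmed):

adapter->num_vectors = 3;       /* RxQ0, TxQ0 and "other" */
adapter->msix_entries = kcalloc(adapter->num_vectors,
                                sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
        int i, err;

        for (i = 0; i < adapter->num_vectors; i++)
                adapter->msix_entries[i].entry = i;

        /* min == max: all-or-nothing vector allocation */
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    adapter->num_vectors,
                                    adapter->num_vectors);
        if (err > 0)
                return;                 /* MSI-X is up */
}

/* MSI-X failed: fall back to MSI, then to legacy interrupts */
adapter->int_mode = E1000E_INT_MODE_MSI;
if (pci_enable_msi(adapter->pdev))
        adapter->int_mode = E1000E_INT_MODE_LEGACY;
else
        adapter->flags |= FLAG_MSI_ENABLED;
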
2094 * e1000_request_msix - Initialize MSI-X interrupts
2097 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2102 struct net_device *netdev = adapter->netdev; in e1000_request_msix()
2105 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2106 snprintf(adapter->rx_ring->name, in e1000_request_msix()
2107 sizeof(adapter->rx_ring->name) - 1, in e1000_request_msix()
2108 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2110 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2111 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2112 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2116 adapter->rx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2118 adapter->rx_ring->itr_val = adapter->itr; in e1000_request_msix()
2121 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2122 snprintf(adapter->tx_ring->name, in e1000_request_msix()
2123 sizeof(adapter->tx_ring->name) - 1, in e1000_request_msix()
2124 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2126 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2127 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2128 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2132 adapter->tx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2134 adapter->tx_ring->itr_val = adapter->itr; in e1000_request_msix()
2137 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2138 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
2148 * e1000_request_irq - initialize interrupts
2156 struct net_device *netdev = adapter->netdev; in e1000_request_irq()
2159 if (adapter->msix_entries) { in e1000_request_irq()
2165 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000_request_irq()
2168 if (adapter->flags & FLAG_MSI_ENABLED) { in e1000_request_irq()
2169 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2170 netdev->name, netdev); in e1000_request_irq()
2176 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_request_irq()
2179 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, in e1000_request_irq()
2180 netdev->name, netdev); in e1000_request_irq()
2189 struct net_device *netdev = adapter->netdev; in e1000_free_irq()
2191 if (adapter->msix_entries) { in e1000_free_irq()
2194 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2197 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2201 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2205 free_irq(adapter->pdev->irq, netdev); in e1000_free_irq()
2209 * e1000_irq_disable - Mask off interrupt generation on the NIC
2214 struct e1000_hw *hw = &adapter->hw; in e1000_irq_disable()
2217 if (adapter->msix_entries) in e1000_irq_disable()
2221 if (adapter->msix_entries) { in e1000_irq_disable()
2224 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2225 synchronize_irq(adapter->msix_entries[i].vector); in e1000_irq_disable()
2227 synchronize_irq(adapter->pdev->irq); in e1000_irq_disable()
2232 * e1000_irq_enable - Enable default interrupt generation settings
2237 struct e1000_hw *hw = &adapter->hw; in e1000_irq_enable()
2239 if (adapter->msix_entries) { in e1000_irq_enable()
2240 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); in e1000_irq_enable()
2241 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | in e1000_irq_enable()
2243 } else if (hw->mac.type >= e1000_pch_lpt) { in e1000_irq_enable()
2252 * e1000e_get_hw_control - get control of the h/w from f/w
2262 struct e1000_hw *hw = &adapter->hw; in e1000e_get_hw_control()
2267 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_get_hw_control()
2270 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_get_hw_control()
2277 * e1000e_release_hw_control - release control of the h/w to f/w
2288 struct e1000_hw *hw = &adapter->hw; in e1000e_release_hw_control()
2293 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_release_hw_control()
2296 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_release_hw_control()
2303 * e1000_alloc_ring_dma - allocate memory for a ring structure
2310 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_ring_dma()
2312 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, in e1000_alloc_ring_dma()
2314 if (!ring->desc) in e1000_alloc_ring_dma()
2315 return -ENOMEM; in e1000_alloc_ring_dma()
2321 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2328 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_setup_tx_resources()
2329 int err = -ENOMEM, size; in e1000e_setup_tx_resources()
2331 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000e_setup_tx_resources()
2332 tx_ring->buffer_info = vzalloc(size); in e1000e_setup_tx_resources()
2333 if (!tx_ring->buffer_info) in e1000e_setup_tx_resources()
2337 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000e_setup_tx_resources()
2338 tx_ring->size = ALIGN(tx_ring->size, 4096); in e1000e_setup_tx_resources()
2344 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2345 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2349 vfree(tx_ring->buffer_info); in e1000e_setup_tx_resources()
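
The ALIGN(tx_ring->size, 4096) above rounds the descriptor area up to a whole page before the coherent DMA allocation. For a power-of-two boundary this is the usual mask trick, checked here in user space (descriptor count illustrative; a legacy Tx descriptor is 16 bytes):

#include <assert.h>
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint64_t a)
{
        return (x + a - 1) & ~(a - 1);  /* a must be a power of two */
}

int main(void)
{
        assert(align_up(256 * 16, 4096) == 4096);  /* 256 descriptors */
        assert(align_up(4096, 4096) == 4096);      /* already aligned */
        assert(align_up(4097, 4096) == 8192);      /* rounds up */
        return 0;
}
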
2355 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2362 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_setup_rx_resources()
2364 int i, size, desc_len, err = -ENOMEM; in e1000e_setup_rx_resources()
2366 size = sizeof(struct e1000_buffer) * rx_ring->count; in e1000e_setup_rx_resources()
2367 rx_ring->buffer_info = vzalloc(size); in e1000e_setup_rx_resources()
2368 if (!rx_ring->buffer_info) in e1000e_setup_rx_resources()
2371 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2372 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2373 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, in e1000e_setup_rx_resources()
2376 if (!buffer_info->ps_pages) in e1000e_setup_rx_resources()
2383 rx_ring->size = rx_ring->count * desc_len; in e1000e_setup_rx_resources()
2384 rx_ring->size = ALIGN(rx_ring->size, 4096); in e1000e_setup_rx_resources()
2390 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2391 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2392 rx_ring->rx_skb_top = NULL; in e1000e_setup_rx_resources()
2397 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2398 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2399 kfree(buffer_info->ps_pages); in e1000e_setup_rx_resources()
2402 vfree(rx_ring->buffer_info); in e1000e_setup_rx_resources()
2408 * e1000_clean_tx_ring - Free Tx Buffers
2413 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_ring()
2418 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2419 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2423 netdev_reset_queue(adapter->netdev); in e1000_clean_tx_ring()
2424 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2425 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2427 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2429 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2430 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2434 * e1000e_free_tx_resources - Free Tx Resources per Queue
2441 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_free_tx_resources()
2442 struct pci_dev *pdev = adapter->pdev; in e1000e_free_tx_resources()
2446 vfree(tx_ring->buffer_info); in e1000e_free_tx_resources()
2447 tx_ring->buffer_info = NULL; in e1000e_free_tx_resources()
2449 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000e_free_tx_resources()
2450 tx_ring->dma); in e1000e_free_tx_resources()
2451 tx_ring->desc = NULL; in e1000e_free_tx_resources()
2455 * e1000e_free_rx_resources - Free Rx Resources
2462 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_free_rx_resources()
2463 struct pci_dev *pdev = adapter->pdev; in e1000e_free_rx_resources()
2468 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2469 kfree(rx_ring->buffer_info[i].ps_pages); in e1000e_free_rx_resources()
2471 vfree(rx_ring->buffer_info); in e1000e_free_rx_resources()
2472 rx_ring->buffer_info = NULL; in e1000e_free_rx_resources()
2474 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in e1000e_free_rx_resources()
2475 rx_ring->dma); in e1000e_free_rx_resources()
2476 rx_ring->desc = NULL; in e1000e_free_rx_resources()
2480 * e1000_update_itr - update the dynamic ITR value based on statistics
2481 * @itr_setting: current adapter->itr
2540 u32 new_itr = adapter->itr; in e1000_set_itr()
2542 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in e1000_set_itr()
2543 if (adapter->link_speed != SPEED_1000) { in e1000_set_itr()
2548 if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000_set_itr()
2553 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, in e1000_set_itr()
2554 adapter->total_tx_packets, in e1000_set_itr()
2555 adapter->total_tx_bytes); in e1000_set_itr()
2557 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) in e1000_set_itr()
2558 adapter->tx_itr = low_latency; in e1000_set_itr()
2560 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, in e1000_set_itr()
2561 adapter->total_rx_packets, in e1000_set_itr()
2562 adapter->total_rx_bytes); in e1000_set_itr()
2564 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) in e1000_set_itr()
2565 adapter->rx_itr = low_latency; in e1000_set_itr()
2567 current_itr = max(adapter->rx_itr, adapter->tx_itr); in e1000_set_itr()
2585 if (new_itr != adapter->itr) { in e1000_set_itr()
2590 new_itr = new_itr > adapter->itr ? in e1000_set_itr()
2591 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; in e1000_set_itr()
2592 adapter->itr = new_itr; in e1000_set_itr()
2593 adapter->rx_ring->itr_val = new_itr; in e1000_set_itr()
2594 if (adapter->msix_entries) in e1000_set_itr()
2595 adapter->rx_ring->set_itr = 1; in e1000_set_itr()
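
The adjustment at the end of e1000_set_itr() is deliberately asymmetric: the interrupt rate is raised in damped steps (old + target/4, capped at the target) but lowered immediately, which keeps a single latency spike from whipsawing the moderation. A user-space model of that damping:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Mirrors "min(adapter->itr + (new_itr >> 2), new_itr)" above:
 * increases are smoothed, decreases take effect at once.
 */
static uint32_t smooth_itr(uint32_t cur, uint32_t target)
{
        return target > cur ? min_u32(cur + (target >> 2), target) : target;
}

int main(void)
{
        uint32_t itr = 4000;

        for (int step = 0; step < 5; step++) {
                itr = smooth_itr(itr, 20000);   /* ramp: 9000, 14000, ... */
                printf("step %d: itr = %u\n", step, itr);
        }
        printf("drop: itr = %u\n", smooth_itr(itr, 4000));  /* 4000 */
        return 0;
}
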
2602 * e1000e_write_itr - write the ITR value to the appropriate registers
2606 * e1000e_write_itr determines if the adapter is in MSI-X mode
2612 struct e1000_hw *hw = &adapter->hw; in e1000e_write_itr()
2615 if (adapter->msix_entries) { in e1000e_write_itr()
2618 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2619 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); in e1000e_write_itr()
2626 * e1000_alloc_queues - Allocate memory for all rings
2633 adapter->tx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2634 if (!adapter->tx_ring) in e1000_alloc_queues()
2636 adapter->tx_ring->count = adapter->tx_ring_count; in e1000_alloc_queues()
2637 adapter->tx_ring->adapter = adapter; in e1000_alloc_queues()
2639 adapter->rx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2640 if (!adapter->rx_ring) in e1000_alloc_queues()
2642 adapter->rx_ring->count = adapter->rx_ring_count; in e1000_alloc_queues()
2643 adapter->rx_ring->adapter = adapter; in e1000_alloc_queues()
2648 kfree(adapter->rx_ring); in e1000_alloc_queues()
2649 kfree(adapter->tx_ring); in e1000_alloc_queues()
2650 return -ENOMEM; in e1000_alloc_queues()
2654 * e1000e_poll - NAPI Rx polling callback
2662 struct e1000_hw *hw = &adapter->hw; in e1000e_poll()
2663 struct net_device *poll_dev = adapter->netdev; in e1000e_poll()
2668 if (!adapter->msix_entries || in e1000e_poll()
2669 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) in e1000e_poll()
2670 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); in e1000e_poll()
2672 adapter->clean_rx(adapter->rx_ring, &work_done, budget); in e1000e_poll()
2677 /* Exit the polling mode, but don't re-enable interrupts if stack might in e1000e_poll()
2678 * poll us due to busy-polling in e1000e_poll()
2681 if (adapter->itr_setting & 3) in e1000e_poll()
2683 if (!test_bit(__E1000_DOWN, &adapter->state)) { in e1000e_poll()
2684 if (adapter->msix_entries) in e1000e_poll()
2685 ew32(IMS, adapter->rx_ring->ims_val); in e1000e_poll()
2698 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_add_vid()
2702 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_add_vid()
2704 (vid == adapter->mng_vlan_id)) in e1000_vlan_rx_add_vid()
2708 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_add_vid()
2712 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_add_vid()
2715 set_bit(vid, adapter->active_vlans); in e1000_vlan_rx_add_vid()
2724 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_kill_vid()
2727 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_kill_vid()
2729 (vid == adapter->mng_vlan_id)) { in e1000_vlan_rx_kill_vid()
2736 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_kill_vid()
2740 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_kill_vid()
2743 clear_bit(vid, adapter->active_vlans); in e1000_vlan_rx_kill_vid()
2749 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2754 struct net_device *netdev = adapter->netdev; in e1000e_vlan_filter_disable()
2755 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_disable()
2758 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_disable()
2759 /* disable VLAN receive filtering */ in e1000e_vlan_filter_disable()
2764 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { in e1000e_vlan_filter_disable()
2766 adapter->mng_vlan_id); in e1000e_vlan_filter_disable()
2767 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_vlan_filter_disable()
2773 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2778 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_enable()
2781 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_enable()
2791 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2796 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_disable()
2799 /* disable VLAN tag insert/strip */ in e1000e_vlan_strip_disable()
2806 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2811 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_enable()
2822 struct net_device *netdev = adapter->netdev; in e1000_update_mng_vlan()
2823 u16 vid = adapter->hw.mng_cookie.vlan_id; in e1000_update_mng_vlan()
2824 u16 old_vid = adapter->mng_vlan_id; in e1000_update_mng_vlan()
2826 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in e1000_update_mng_vlan()
2828 adapter->mng_vlan_id = vid; in e1000_update_mng_vlan()
2839 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2841 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in e1000_restore_vlan()
2842 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in e1000_restore_vlan()
2847 struct e1000_hw *hw = &adapter->hw; in e1000_init_manageability_pt()
2850 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) in e1000_init_manageability_pt()
2862 switch (hw->mac.type) { in e1000_init_manageability_pt()
2868 /* Check if IPMI pass-through decision filter already exists; in e1000_init_manageability_pt()
2899 e_warn("Unable to create IPMI pass-through filter\n"); in e1000_init_manageability_pt()
2908 * e1000_configure_tx - Configure Transmit Unit after Reset
2915 struct e1000_hw *hw = &adapter->hw; in e1000_configure_tx()
2916 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_tx()
2921 tdba = tx_ring->dma; in e1000_configure_tx()
2922 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000_configure_tx()
2928 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2929 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2931 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_tx()
2935 ew32(TIDV, adapter->tx_int_delay); in e1000_configure_tx()
2937 ew32(TADV, adapter->tx_abs_int_delay); in e1000_configure_tx()
2939 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_tx()
2965 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { in e1000_configure_tx()
2976 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { in e1000_configure_tx()
2985 /* Setup Transmit Descriptor Settings for eop descriptor */ in e1000_configure_tx()
2986 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; in e1000_configure_tx()
2989 if (adapter->tx_int_delay) in e1000_configure_tx()
2990 adapter->txd_cmd |= E1000_TXD_CMD_IDE; in e1000_configure_tx()
2993 adapter->txd_cmd |= E1000_TXD_CMD_RS; in e1000_configure_tx()
2997 hw->mac.ops.config_collision_dist(hw); in e1000_configure_tx()
3000 if (hw->mac.type == e1000_pch_spt) { in e1000_configure_tx()
3019 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
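/* [editor's annotation] PAGE_USE_COUNT() is a ceiling division by
 * PAGE_SIZE: with 4 KiB pages, PAGE_USE_COUNT(9000) = (9000 >> 12) = 2,
 * plus 1 for the 808-byte remainder, so a 9000-byte MTU needs three
 * packet-split pages per frame.
 */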
3022 * e1000_setup_rctl - configure the receive control registers
3027 struct e1000_hw *hw = &adapter->hw; in e1000_setup_rctl()
3031 /* Workaround Si errata on PCHx - configure jumbo frame flow. in e1000_setup_rctl()
3035 if (hw->mac.type >= e1000_pch2lan) { in e1000_setup_rctl()
3038 if (adapter->netdev->mtu > ETH_DATA_LEN) in e1000_setup_rctl()
3044 e_dbg("failed to enable|disable jumbo frame workaround mode\n"); in e1000_setup_rctl()
3052 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in e1000_setup_rctl()
3058 if (adapter->netdev->mtu <= ETH_DATA_LEN) in e1000_setup_rctl()
3067 if (adapter->flags2 & FLAG2_CRC_STRIPPING) in e1000_setup_rctl()
3070 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ in e1000_setup_rctl()
3071 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { in e1000_setup_rctl()
3090 switch (adapter->rx_buffer_len) { in e1000_setup_rctl()
3112 /* 82571 and greater support packet-split where the protocol in e1000_setup_rctl()
3113 * header is placed in skb->data and the packet data is in e1000_setup_rctl()
3114 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. in e1000_setup_rctl()
3115 * In the case of a non-split, skb->data is linearly filled, in e1000_setup_rctl()
3116 * followed by the page buffers. Therefore, skb->data is in e1000_setup_rctl()
3126 pages = PAGE_USE_COUNT(adapter->netdev->mtu); in e1000_setup_rctl()
3128 adapter->rx_ps_pages = pages; in e1000_setup_rctl()
3130 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3132 if (adapter->rx_ps_pages) { in e1000_setup_rctl()
3138 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; in e1000_setup_rctl()
3140 switch (adapter->rx_ps_pages) { in e1000_setup_rctl()
3156 if (adapter->netdev->features & NETIF_F_RXALL) { in e1000_setup_rctl()
3164 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ in e1000_setup_rctl()
3174 adapter->flags &= ~FLAG_RESTART_NOW; in e1000_setup_rctl()
3178 * e1000_configure_rx - Configure Receive Unit after Reset
3185 struct e1000_hw *hw = &adapter->hw; in e1000_configure_rx()
3186 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_rx()
3190 if (adapter->rx_ps_pages) { in e1000_configure_rx()
3192 rdlen = rx_ring->count * in e1000_configure_rx()
3194 adapter->clean_rx = e1000_clean_rx_irq_ps; in e1000_configure_rx()
3195 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; in e1000_configure_rx()
3196 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { in e1000_configure_rx()
3197 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3198 adapter->clean_rx = e1000_clean_jumbo_rx_irq; in e1000_configure_rx()
3199 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; in e1000_configure_rx()
3201 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3202 adapter->clean_rx = e1000_clean_rx_irq; in e1000_configure_rx()
3203 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; in e1000_configure_rx()
3206 /* disable receives while setting up the descriptors */ in e1000_configure_rx()
3208 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000_configure_rx()
3213 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_rx()
3227 ew32(RDTR, adapter->rx_int_delay); in e1000_configure_rx()
3230 ew32(RADV, adapter->rx_abs_int_delay); in e1000_configure_rx()
3231 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3232 e1000e_write_itr(adapter, adapter->itr); in e1000_configure_rx()
3235 /* Auto-Mask interrupts upon ICR access */ in e1000_configure_rx()
3244 rdba = rx_ring->dma; in e1000_configure_rx()
3250 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3251 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3253 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_rx()
3258 if (adapter->netdev->features & NETIF_F_RXCSUM) in e1000_configure_rx()
3264 /* With jumbo frames, excessive C-state transition latencies result in e1000_configure_rx()
3267 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000_configure_rx()
3269 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - in e1000_configure_rx()
3270 adapter->max_frame_size) * 8 / 1000; in e1000_configure_rx()
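/* [editor's annotation] The expression converts spare Rx FIFO space to
 * a drain time: (FIFO bytes - one max frame) * 8 is bits, and at 1 Gb/s
 * a bit arrives each nanosecond, so /1000 yields microseconds. With an
 * assumed 24 KiB Rx allocation and 9018-byte frames this gives
 * (24576 - 9018) * 8 / 1000 ~= 124 us of tolerable C-state exit latency.
 */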
3272 if (adapter->flags & FLAG_IS_ICH) { in e1000_configure_rx()
3278 dev_info(&adapter->pdev->dev, in e1000_configure_rx()
3279 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); in e1000_configure_rx()
3280 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); in e1000_configure_rx()
3282 cpu_latency_qos_update_request(&adapter->pm_qos_req, in e1000_configure_rx()
3291 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3295 * Returns: -ENOMEM on failure
3302 struct e1000_hw *hw = &adapter->hw; in e1000e_write_mc_addr_list()
3309 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3315 return -ENOMEM; in e1000e_write_mc_addr_list()
3320 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in e1000e_write_mc_addr_list()
3322 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); in e1000e_write_mc_addr_list()
3329 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3333 * Returns: -ENOMEM on failure/insufficient address space
3340 struct e1000_hw *hw = &adapter->hw; in e1000e_write_uc_addr_list()
3344 rar_entries = hw->mac.ops.rar_get_count(hw); in e1000e_write_uc_addr_list()
3347 rar_entries--; in e1000e_write_uc_addr_list()
3350 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) in e1000e_write_uc_addr_list()
3351 rar_entries--; in e1000e_write_uc_addr_list()
3355 return -ENOMEM; in e1000e_write_uc_addr_list()
3368 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); in e1000e_write_uc_addr_list()
3370 return -ENOMEM; in e1000e_write_uc_addr_list()
3376 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
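/* [editor's annotation] The elided loop body zeroes the RAH/RAL pair of
 * every leftover slot, so unicast addresses from a previous
 * configuration cannot silently keep matching after the list shrinks.
 */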
3386 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3392 * promiscuous mode, and all-multi behavior.
3397 struct e1000_hw *hw = &adapter->hw; in e1000e_set_rx_mode()
3400 if (pm_runtime_suspended(netdev->dev.parent)) in e1000e_set_rx_mode()
3409 if (netdev->flags & IFF_PROMISC) { in e1000e_set_rx_mode()
3416 if (netdev->flags & IFF_ALLMULTI) { in e1000e_set_rx_mode()
3439 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in e1000e_set_rx_mode()
3447 struct e1000_hw *hw = &adapter->hw; in e1000e_setup_rss_hash()
3460 /* Disable raw packet checksumming so that RSS hash is placed in in e1000e_setup_rss_hash()
3478 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3487 struct e1000_hw *hw = &adapter->hw; in e1000e_get_base_timinca()
3493 if ((hw->mac.type >= e1000_pch_lpt) && in e1000e_get_base_timinca()
3504 switch (hw->mac.type) { in e1000e_get_base_timinca()
3510 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3518 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3524 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3532 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3546 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3552 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3561 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3564 return -EINVAL; in e1000e_get_base_timinca()
3574 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3579 * disable it when requested, although it shouldn't cause any overhead
3592 struct e1000_hw *hw = &adapter->hw; in e1000e_config_hwtstamp()
3601 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_config_hwtstamp()
3602 return -EINVAL; in e1000e_config_hwtstamp()
3604 switch (config->tx_type) { in e1000e_config_hwtstamp()
3611 return -ERANGE; in e1000e_config_hwtstamp()
3614 switch (config->rx_filter) { in e1000e_config_hwtstamp()
3666 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in e1000e_config_hwtstamp()
3672 * Delay Request messages but not both so fall-through to in e1000e_config_hwtstamp()
3681 config->rx_filter = HWTSTAMP_FILTER_ALL; in e1000e_config_hwtstamp()
3684 return -ERANGE; in e1000e_config_hwtstamp()
3687 adapter->hwtstamp_config = *config; in e1000e_config_hwtstamp()
3689 /* enable/disable Tx h/w time stamping */ in e1000e_config_hwtstamp()
3697 return -EAGAIN; in e1000e_config_hwtstamp()
3700 /* enable/disable Rx h/w time stamping */ in e1000e_config_hwtstamp()
3710 return -EAGAIN; in e1000e_config_hwtstamp()
3737 * e1000_configure - configure the hardware for Rx and Tx
3742 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure()
3744 e1000e_set_rx_mode(adapter->netdev); in e1000_configure()
3751 if (adapter->netdev->features & NETIF_F_RXHASH) in e1000_configure()
3755 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); in e1000_configure()
3759 * e1000e_power_up_phy - restore link in case the phy was powered down
3768 if (adapter->hw.phy.ops.power_up) in e1000e_power_up_phy()
3769 adapter->hw.phy.ops.power_up(&adapter->hw); in e1000e_power_up_phy()
3771 adapter->hw.mac.ops.setup_link(&adapter->hw); in e1000e_power_up_phy()
3775 * e1000_power_down_phy - Power down the PHY
3783 if (adapter->hw.phy.ops.power_down) in e1000_power_down_phy()
3784 adapter->hw.phy.ops.power_down(&adapter->hw); in e1000_power_down_phy()
3788 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3798 struct e1000_hw *hw = &adapter->hw; in e1000_flush_tx_ring()
3799 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_flush_tx_ring()
3807 BUG_ON(tdt != tx_ring->next_to_use); in e1000_flush_tx_ring()
3808 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); in e1000_flush_tx_ring()
3809 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); in e1000_flush_tx_ring()
3811 tx_desc->lower.data = cpu_to_le32(txd_lower | size); in e1000_flush_tx_ring()
3812 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3815 tx_ring->next_to_use++; in e1000_flush_tx_ring()
3816 if (tx_ring->next_to_use == tx_ring->count) in e1000_flush_tx_ring()
3817 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3818 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
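/* [editor's annotation] The flush queues one harmless descriptor whose
 * buffer address is the ring's own DMA base, then bumps TDT; the
 * resulting descriptor fetch forces the hardware to drain its internal
 * Tx descriptor cache ahead of the reset that follows.
 */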
3823 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3826 * Mark all descriptors in the RX ring as consumed and disable the rx ring
3831 struct e1000_hw *hw = &adapter->hw; in e1000_flush_rx_ring()
3856 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3871 struct e1000_hw *hw = &adapter->hw; in e1000_flush_desc_rings()
3873 /* First, disable MULR fix in FEXTNVM11 */ in e1000_flush_desc_rings()
3879 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3885 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3892 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3902 struct ptp_clock_info *info = &adapter->ptp_clock_info; in e1000e_systim_reset()
3903 struct e1000_hw *hw = &adapter->hw; in e1000e_systim_reset()
3908 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_systim_reset()
3911 if (info->adjfine) { in e1000e_systim_reset()
3913 ret_val = info->adjfine(info, adapter->ptp_delta); in e1000e_systim_reset()
3922 dev_warn(&adapter->pdev->dev, in e1000e_systim_reset()
3929 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_reset()
3930 timecounter_init(&adapter->tc, &adapter->cc, in e1000e_systim_reset()
3932 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_reset()
3935 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); in e1000e_systim_reset()
3939 * e1000e_reset - bring the hardware into a known good state
3943 * require a configuration cycle of the hardware - those cannot be
3949 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000e_reset()
3950 struct e1000_fc_info *fc = &adapter->hw.fc; in e1000e_reset()
3951 struct e1000_hw *hw = &adapter->hw; in e1000e_reset()
3953 u32 pba = adapter->pba; in e1000e_reset()
3959 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { in e1000e_reset()
3975 min_tx_space = (adapter->max_frame_size + in e1000e_reset()
3976 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; in e1000e_reset()
3980 min_rx_space = adapter->max_frame_size; in e1000e_reset()
3989 ((min_tx_space - tx_space) < pba)) { in e1000e_reset()
3990 pba -= min_tx_space - tx_space; in e1000e_reset()
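/* [editor's annotation] Tx needs room for two full frames (one on the
 * wire, one queued behind it) plus per-packet descriptor overhead; when
 * the current Tx allocation falls short, the deficit is carved out of
 * the Rx share of the PBA.
 */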
4007 * - 90% of the Rx FIFO size, and in e1000e_reset()
4008 * - the full Rx FIFO size minus one full frame in e1000e_reset()
4010 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) in e1000e_reset()
4011 fc->pause_time = 0xFFFF; in e1000e_reset()
4013 fc->pause_time = E1000_FC_PAUSE_TIME; in e1000e_reset()
4014 fc->send_xon = true; in e1000e_reset()
4015 fc->current_mode = fc->requested_mode; in e1000e_reset()
4017 switch (hw->mac.type) { in e1000e_reset()
4020 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4023 fc->high_water = 0x2800; in e1000e_reset()
4024 fc->low_water = fc->high_water - 8; in e1000e_reset()
4030 ((pba << 10) - adapter->max_frame_size)); in e1000e_reset()
4032 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ in e1000e_reset()
4033 fc->low_water = fc->high_water - 8; in e1000e_reset()
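/* [editor's annotation] With an assumed 20 KiB Rx allocation and
 * 1522-byte frames: min(20480 * 9 / 10, 20480 - 1522) = 18432 bytes, so
 * XOFF is sent once the FIFO fills past ~18 KiB and XON once it drains
 * 8 bytes below that threshold.
 */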
4039 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4040 fc->high_water = 0x3500; in e1000e_reset()
4041 fc->low_water = 0x1500; in e1000e_reset()
4043 fc->high_water = 0x5000; in e1000e_reset()
4044 fc->low_water = 0x3000; in e1000e_reset()
4046 fc->refresh_time = 0x1000; in e1000e_reset()
4058 fc->refresh_time = 0xFFFF; in e1000e_reset()
4059 fc->pause_time = 0xFFFF; in e1000e_reset()
4061 if (adapter->netdev->mtu <= ETH_DATA_LEN) { in e1000e_reset()
4062 fc->high_water = 0x05C20; in e1000e_reset()
4063 fc->low_water = 0x05048; in e1000e_reset()
4069 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; in e1000e_reset()
4070 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; in e1000e_reset()
4079 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, in e1000e_reset()
4082 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot in e1000e_reset()
4085 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4086 if ((adapter->max_frame_size * 2) > (pba << 10)) { in e1000e_reset()
4087 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { in e1000e_reset()
4088 dev_info(&adapter->pdev->dev, in e1000e_reset()
4090 adapter->flags2 |= FLAG2_DISABLE_AIM; in e1000e_reset()
4093 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000e_reset()
4094 dev_info(&adapter->pdev->dev, in e1000e_reset()
4096 adapter->flags2 &= ~FLAG2_DISABLE_AIM; in e1000e_reset()
4097 adapter->itr = 20000; in e1000e_reset()
4098 e1000e_write_itr(adapter, adapter->itr); in e1000e_reset()
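/* [editor's annotation] AIM is turned off when two maximum-size frames
 * would overflow the packet buffer, since moderation needs room to
 * coalesce: a 9018-byte jumbo frame requires 2 * 9018 = 18036 bytes, so
 * any PBA under ~18 KiB disables it; dropping back to a standard MTU
 * re-enables AIM at the 20000 ints/s default seen above.
 */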
4102 if (hw->mac.type >= e1000_pch_spt) in e1000e_reset()
4105 mac->ops.reset_hw(hw); in e1000e_reset()
4110 if (adapter->flags & FLAG_HAS_AMT) in e1000e_reset()
4115 if (mac->ops.init_hw(hw)) in e1000e_reset()
4129 if (adapter->flags2 & FLAG2_HAS_EEE) { in e1000e_reset()
4133 switch (hw->phy.type) { in e1000e_reset()
4141 dev_err(&adapter->pdev->dev, in e1000e_reset()
4146 ret_val = hw->phy.ops.acquire(hw); in e1000e_reset()
4148 dev_err(&adapter->pdev->dev, in e1000e_reset()
4149 "EEE advertisement - unable to acquire PHY\n"); in e1000e_reset()
4154 hw->dev_spec.ich8lan.eee_disable ? in e1000e_reset()
4155 0 : adapter->eee_advert); in e1000e_reset()
4157 hw->phy.ops.release(hw); in e1000e_reset()
4160 if (!netif_running(adapter->netdev) && in e1000e_reset()
4161 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_reset()
4166 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && in e1000e_reset()
4167 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { in e1000e_reset()
4177 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4194 * e1000e_trigger_lsc - trigger an LSC interrupt
4201 struct e1000_hw *hw = &adapter->hw; in e1000e_trigger_lsc()
4203 if (adapter->msix_entries) in e1000e_trigger_lsc()
4214 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_up()
4216 if (adapter->msix_entries) in e1000e_up()
4227 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_descriptors()
4229 if (!(adapter->flags2 & FLAG2_DMA_BURST)) in e1000e_flush_descriptors()
4233 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4234 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4242 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4243 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4252 * e1000e_down - quiesce the device and optionally reset the hardware
4258 struct net_device *netdev = adapter->netdev; in e1000e_down()
4259 struct e1000_hw *hw = &adapter->hw; in e1000e_down()
4265 set_bit(__E1000_DOWN, &adapter->state); in e1000e_down()
4269 /* disable receives in the hardware */ in e1000e_down()
4271 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000e_down()
4277 /* disable transmits in the hardware */ in e1000e_down()
4288 napi_synchronize(&adapter->napi); in e1000e_down()
4290 del_timer_sync(&adapter->watchdog_timer); in e1000e_down()
4291 del_timer_sync(&adapter->phy_info_timer); in e1000e_down()
4293 spin_lock(&adapter->stats64_lock); in e1000e_down()
4295 spin_unlock(&adapter->stats64_lock); in e1000e_down()
4299 adapter->link_speed = 0; in e1000e_down()
4300 adapter->link_duplex = 0; in e1000e_down()
4302 /* Disable Si errata workaround on PCHx for jumbo frame flow */ in e1000e_down()
4303 if ((hw->mac.type >= e1000_pch2lan) && in e1000e_down()
4304 (adapter->netdev->mtu > ETH_DATA_LEN) && in e1000e_down()
4306 e_dbg("failed to disable jumbo frame workaround mode\n"); in e1000e_down()
4308 if (!pci_channel_offline(adapter->pdev)) { in e1000e_down()
4311 else if (hw->mac.type >= e1000_pch_spt) in e1000e_down()
4314 e1000_clean_tx_ring(adapter->tx_ring); in e1000e_down()
4315 e1000_clean_rx_ring(adapter->rx_ring); in e1000e_down()
4321 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000e_reinit_locked()
4325 clear_bit(__E1000_RESETTING, &adapter->state); in e1000e_reinit_locked()
4329 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4355 time_delta = systim_next - systim; in e1000e_sanitize_systim()
4370 * e1000e_read_systim - read SYSTIM register
4378 struct e1000_hw *hw = &adapter->hw; in e1000e_read_systim()
4385 * to fix that we test for overflow and if true, we re-read systime. in e1000e_read_systim()
4392 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
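/* [editor's annotation] If SYSTIML is within one increment of wrapping,
 * SYSTIMH may carry between the two register reads and pair a new high
 * word with a stale low word; re-reading once the wrap window has
 * passed keeps the 64-bit timestamp monotonic.
 */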
4407 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) in e1000e_read_systim()
4414 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4426 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4435 struct net_device *netdev = adapter->netdev; in e1000_sw_init()
4437 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_sw_init()
4438 adapter->rx_ps_bsize0 = 128; in e1000_sw_init()
4439 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; in e1000_sw_init()
4440 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in e1000_sw_init()
4441 adapter->tx_ring_count = E1000_DEFAULT_TXD; in e1000_sw_init()
4442 adapter->rx_ring_count = E1000_DEFAULT_RXD; in e1000_sw_init()
4444 spin_lock_init(&adapter->stats64_lock); in e1000_sw_init()
4449 return -ENOMEM; in e1000_sw_init()
4452 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_sw_init()
4453 adapter->cc.read = e1000e_cyclecounter_read; in e1000_sw_init()
4454 adapter->cc.mask = CYCLECOUNTER_MASK(64); in e1000_sw_init()
4455 adapter->cc.mult = 1; in e1000_sw_init()
4458 spin_lock_init(&adapter->systim_lock); in e1000_sw_init()
4459 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); in e1000_sw_init()
4462 /* Explicitly disable IRQ since the NIC can be in any state. */ in e1000_sw_init()
4465 set_bit(__E1000_DOWN, &adapter->state); in e1000_sw_init()
4470 * e1000_intr_msi_test - Interrupt Handler
4478 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi_test()
4483 adapter->flags &= ~FLAG_MSI_TEST_FAILED; in e1000_intr_msi_test()
4494 * e1000_test_msi_interrupt - Returns 0 for successful test
4501 struct net_device *netdev = adapter->netdev; in e1000_test_msi_interrupt()
4502 struct e1000_hw *hw = &adapter->hw; in e1000_test_msi_interrupt()
4505 /* poll_enable hasn't been called yet, so don't need disable */ in e1000_test_msi_interrupt()
4516 adapter->flags |= FLAG_MSI_TEST_FAILED; in e1000_test_msi_interrupt()
4518 err = pci_enable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4522 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4523 netdev->name, netdev); in e1000_test_msi_interrupt()
4525 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4545 if (adapter->flags & FLAG_MSI_TEST_FAILED) { in e1000_test_msi_interrupt()
4546 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_test_msi_interrupt()
4552 free_irq(adapter->pdev->irq, netdev); in e1000_test_msi_interrupt()
4553 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4561 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4571 if (!(adapter->flags & FLAG_MSI_ENABLED)) in e1000_test_msi()
4574 /* disable SERR in case the MSI write causes a master abort */ in e1000_test_msi()
4575 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4577 pci_write_config_word(adapter->pdev, PCI_COMMAND, in e1000_test_msi()
4582 /* re-enable SERR */ in e1000_test_msi()
4584 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4586 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); in e1000_test_msi()
4593 * e1000e_open - Called when a network interface is made active
4607 struct e1000_hw *hw = &adapter->hw; in e1000e_open()
4608 struct pci_dev *pdev = adapter->pdev; in e1000e_open()
4613 if (test_bit(__E1000_TESTING, &adapter->state)) in e1000e_open()
4614 return -EBUSY; in e1000e_open()
4616 pm_runtime_get_sync(&pdev->dev); in e1000e_open()
4622 err = e1000e_setup_tx_resources(adapter->tx_ring); in e1000e_open()
4627 err = e1000e_setup_rx_resources(adapter->rx_ring); in e1000e_open()
4634 if (adapter->flags & FLAG_HAS_AMT) { in e1000e_open()
4641 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_open()
4642 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) in e1000e_open()
4646 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); in e1000e_open()
4663 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { in e1000e_open()
4672 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_open()
4674 if (adapter->int_mode == E1000E_INT_MODE_MSIX) in e1000e_open()
4675 irq = adapter->msix_entries[0].vector; in e1000e_open()
4677 irq = adapter->pdev->irq; in e1000e_open()
4679 netif_napi_set_irq(&adapter->napi, irq); in e1000e_open()
4680 napi_enable(&adapter->napi); in e1000e_open()
4681 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi); in e1000e_open()
4682 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi); in e1000e_open()
4686 adapter->tx_hang_recheck = false; in e1000e_open()
4688 hw->mac.get_link_status = true; in e1000e_open()
4689 pm_runtime_put(&pdev->dev); in e1000e_open()
4696 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_open()
4699 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_open()
4701 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_open()
4704 pm_runtime_put_sync(&pdev->dev); in e1000e_open()
4710 * e1000e_close - Disables a network interface
4715 * The close entry point is called when an interface is de-activated
4723 struct pci_dev *pdev = adapter->pdev; in e1000e_close()
4726 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_close()
4729 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_close()
4731 pm_runtime_get_sync(&pdev->dev); in e1000e_close()
4743 napi_disable(&adapter->napi); in e1000e_close()
4745 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_close()
4746 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_close()
4751 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) in e1000e_close()
4753 adapter->mng_vlan_id); in e1000e_close()
4758 if ((adapter->flags & FLAG_HAS_AMT) && in e1000e_close()
4759 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_close()
4762 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_close()
4764 pm_runtime_put_sync(&pdev->dev); in e1000e_close()
4770 * e1000_set_mac - Change the Ethernet Address of the NIC
4779 struct e1000_hw *hw = &adapter->hw; in e1000_set_mac()
4782 if (!is_valid_ether_addr(addr->sa_data)) in e1000_set_mac()
4783 return -EADDRNOTAVAIL; in e1000_set_mac()
4785 eth_hw_addr_set(netdev, addr->sa_data); in e1000_set_mac()
4786 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4788 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4790 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { in e1000_set_mac()
4792 e1000e_set_laa_state_82571(&adapter->hw, 1); in e1000_set_mac()
4801 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, in e1000_set_mac()
4802 adapter->hw.mac.rar_entry_count - 1); in e1000_set_mac()
4809 * e1000e_update_phy_task - work thread to update phy
4821 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_task()
4823 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_update_phy_task()
4829 if (hw->phy.type >= e1000_phy_82579) in e1000e_update_phy_task()
4834 * e1000_update_phy_info - timer call-back to update PHY info
4844 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_update_phy_info()
4847 schedule_work(&adapter->update_phy_task); in e1000_update_phy_info()
4851 * e1000e_update_phy_stats - Update the PHY statistics counters
4854 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4858 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_stats()
4862 ret_val = hw->phy.ops.acquire(hw); in e1000e_update_phy_stats()
4869 hw->phy.addr = 1; in e1000e_update_phy_stats()
4875 ret_val = hw->phy.ops.set_page(hw, in e1000e_update_phy_stats()
4882 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4883 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4885 adapter->stats.scc += phy_data; in e1000e_update_phy_stats()
4888 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4889 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4891 adapter->stats.ecol += phy_data; in e1000e_update_phy_stats()
4894 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4895 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4897 adapter->stats.mcc += phy_data; in e1000e_update_phy_stats()
4900 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4901 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4903 adapter->stats.latecol += phy_data; in e1000e_update_phy_stats()
4905 /* Collision Count - also used for adaptive IFS */ in e1000e_update_phy_stats()
4906 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); in e1000e_update_phy_stats()
4907 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); in e1000e_update_phy_stats()
4909 hw->mac.collision_delta = phy_data; in e1000e_update_phy_stats()
4912 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); in e1000e_update_phy_stats()
4913 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); in e1000e_update_phy_stats()
4915 adapter->stats.dc += phy_data; in e1000e_update_phy_stats()
4918 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); in e1000e_update_phy_stats()
4919 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); in e1000e_update_phy_stats()
4921 adapter->stats.tncrs += phy_data; in e1000e_update_phy_stats()
4924 hw->phy.ops.release(hw); in e1000e_update_phy_stats()
4928 * e1000e_update_stats - Update the board statistics counters
4933 struct net_device *netdev = adapter->netdev; in e1000e_update_stats()
4934 struct e1000_hw *hw = &adapter->hw; in e1000e_update_stats()
4935 struct pci_dev *pdev = adapter->pdev; in e1000e_update_stats()
4940 if (adapter->link_speed == 0) in e1000e_update_stats()
4945 adapter->stats.crcerrs += er32(CRCERRS); in e1000e_update_stats()
4946 adapter->stats.gprc += er32(GPRC); in e1000e_update_stats()
4947 adapter->stats.gorc += er32(GORCL); in e1000e_update_stats()
4949 adapter->stats.bprc += er32(BPRC); in e1000e_update_stats()
4950 adapter->stats.mprc += er32(MPRC); in e1000e_update_stats()
4951 adapter->stats.roc += er32(ROC); in e1000e_update_stats()
4953 adapter->stats.mpc += er32(MPC); in e1000e_update_stats()
4955 /* Half-duplex statistics */ in e1000e_update_stats()
4956 if (adapter->link_duplex == HALF_DUPLEX) { in e1000e_update_stats()
4957 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { in e1000e_update_stats()
4960 adapter->stats.scc += er32(SCC); in e1000e_update_stats()
4961 adapter->stats.ecol += er32(ECOL); in e1000e_update_stats()
4962 adapter->stats.mcc += er32(MCC); in e1000e_update_stats()
4963 adapter->stats.latecol += er32(LATECOL); in e1000e_update_stats()
4964 adapter->stats.dc += er32(DC); in e1000e_update_stats()
4966 hw->mac.collision_delta = er32(COLC); in e1000e_update_stats()
4968 if ((hw->mac.type != e1000_82574) && in e1000e_update_stats()
4969 (hw->mac.type != e1000_82583)) in e1000e_update_stats()
4970 adapter->stats.tncrs += er32(TNCRS); in e1000e_update_stats()
4972 adapter->stats.colc += hw->mac.collision_delta; in e1000e_update_stats()
4975 adapter->stats.xonrxc += er32(XONRXC); in e1000e_update_stats()
4976 adapter->stats.xontxc += er32(XONTXC); in e1000e_update_stats()
4977 adapter->stats.xoffrxc += er32(XOFFRXC); in e1000e_update_stats()
4978 adapter->stats.xofftxc += er32(XOFFTXC); in e1000e_update_stats()
4979 adapter->stats.gptc += er32(GPTC); in e1000e_update_stats()
4980 adapter->stats.gotc += er32(GOTCL); in e1000e_update_stats()
4982 adapter->stats.rnbc += er32(RNBC); in e1000e_update_stats()
4983 adapter->stats.ruc += er32(RUC); in e1000e_update_stats()
4985 adapter->stats.mptc += er32(MPTC); in e1000e_update_stats()
4986 adapter->stats.bptc += er32(BPTC); in e1000e_update_stats()
4990 hw->mac.tx_packet_delta = er32(TPT); in e1000e_update_stats()
4991 adapter->stats.tpt += hw->mac.tx_packet_delta; in e1000e_update_stats()
4993 adapter->stats.algnerrc += er32(ALGNERRC); in e1000e_update_stats()
4994 adapter->stats.rxerrc += er32(RXERRC); in e1000e_update_stats()
4995 adapter->stats.cexterr += er32(CEXTERR); in e1000e_update_stats()
4996 adapter->stats.tsctc += er32(TSCTC); in e1000e_update_stats()
4997 adapter->stats.tsctfc += er32(TSCTFC); in e1000e_update_stats()
5000 netdev->stats.multicast = adapter->stats.mprc; in e1000e_update_stats()
5001 netdev->stats.collisions = adapter->stats.colc; in e1000e_update_stats()
5008 netdev->stats.rx_errors = adapter->stats.rxerrc + in e1000e_update_stats()
5009 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_update_stats()
5010 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_update_stats()
5011 netdev->stats.rx_length_errors = adapter->stats.ruc + in e1000e_update_stats()
5012 adapter->stats.roc; in e1000e_update_stats()
5013 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; in e1000e_update_stats()
5014 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; in e1000e_update_stats()
5015 netdev->stats.rx_missed_errors = adapter->stats.mpc; in e1000e_update_stats()
5018 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_update_stats()
5019 netdev->stats.tx_aborted_errors = adapter->stats.ecol; in e1000e_update_stats()
5020 netdev->stats.tx_window_errors = adapter->stats.latecol; in e1000e_update_stats()
5021 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; in e1000e_update_stats()
5026 adapter->stats.mgptc += er32(MGTPTC); in e1000e_update_stats()
5027 adapter->stats.mgprc += er32(MGTPRC); in e1000e_update_stats()
5028 adapter->stats.mgpdc += er32(MGTPDC); in e1000e_update_stats()
5031 if (hw->mac.type >= e1000_pch_lpt) { in e1000e_update_stats()
5034 adapter->corr_errors += in e1000e_update_stats()
5036 adapter->uncorr_errors += in e1000e_update_stats()
5042 * e1000_phy_read_status - Update the PHY register status snapshot
5047 struct e1000_hw *hw = &adapter->hw; in e1000_phy_read_status()
5048 struct e1000_phy_regs *phy = &adapter->phy_regs; in e1000_phy_read_status()
5050 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && in e1000_phy_read_status()
5052 (adapter->hw.phy.media_type == e1000_media_type_copper)) { in e1000_phy_read_status()
5055 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); in e1000_phy_read_status()
5056 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); in e1000_phy_read_status()
5057 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); in e1000_phy_read_status()
5058 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); in e1000_phy_read_status()
5059 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); in e1000_phy_read_status()
5060 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); in e1000_phy_read_status()
5061 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); in e1000_phy_read_status()
5062 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); in e1000_phy_read_status()
5067 * Set values to typical power-on defaults in e1000_phy_read_status()
5069 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); in e1000_phy_read_status()
5070 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | in e1000_phy_read_status()
5073 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | in e1000_phy_read_status()
5075 phy->lpa = 0; in e1000_phy_read_status()
5076 phy->expansion = EXPANSION_ENABLENPAGE; in e1000_phy_read_status()
5077 phy->ctrl1000 = ADVERTISE_1000FULL; in e1000_phy_read_status()
5078 phy->stat1000 = 0; in e1000_phy_read_status()
5079 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); in e1000_phy_read_status()
5085 struct e1000_hw *hw = &adapter->hw; in e1000_print_link_info()
5089 netdev_info(adapter->netdev, in e1000_print_link_info()
5091 adapter->link_speed, in e1000_print_link_info()
5092 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", in e1000_print_link_info()
5100 struct e1000_hw *hw = &adapter->hw; in e1000e_has_link()
5109 switch (hw->phy.media_type) { in e1000e_has_link()
5111 if (hw->mac.get_link_status) { in e1000e_has_link()
5112 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5113 link_active = !hw->mac.get_link_status; in e1000e_has_link()
5119 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5123 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5124 link_active = hw->mac.serdes_has_link; in e1000e_has_link()
5131 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && in e1000e_has_link()
5143 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && in e1000e_enable_receives()
5144 (adapter->flags & FLAG_RESTART_NOW)) { in e1000e_enable_receives()
5145 struct e1000_hw *hw = &adapter->hw; in e1000e_enable_receives()
5149 adapter->flags &= ~FLAG_RESTART_NOW; in e1000e_enable_receives()
5155 struct e1000_hw *hw = &adapter->hw; in e1000e_check_82574_phy_workaround()
5161 adapter->phy_hang_count++; in e1000e_check_82574_phy_workaround()
5163 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5165 if (adapter->phy_hang_count > 1) { in e1000e_check_82574_phy_workaround()
5166 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5167 e_dbg("PHY appears hung - resetting\n"); in e1000e_check_82574_phy_workaround()
5168 schedule_work(&adapter->reset_task); in e1000e_check_82574_phy_workaround()
5173 * e1000_watchdog - Timer Call-back
5181 schedule_work(&adapter->watchdog_task); in e1000_watchdog()
5191 struct net_device *netdev = adapter->netdev; in e1000_watchdog_task()
5192 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000_watchdog_task()
5193 struct e1000_phy_info *phy = &adapter->hw.phy; in e1000_watchdog_task()
5194 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_watchdog_task()
5196 struct e1000_hw *hw = &adapter->hw; in e1000_watchdog_task()
5199 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5205 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5212 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) in e1000_watchdog_task()
5220 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5235 e1000_phy_hw_reset(&adapter->hw); in e1000_watchdog_task()
5241 mac->ops.get_link_up_info(&adapter->hw, in e1000_watchdog_task()
5242 &adapter->link_speed, in e1000_watchdog_task()
5243 &adapter->link_duplex); in e1000_watchdog_task()
5248 if (phy->speed_downgraded) in e1000_watchdog_task()
5255 if ((hw->phy.type == e1000_phy_igp_3 || in e1000_watchdog_task()
5256 hw->phy.type == e1000_phy_bm) && in e1000_watchdog_task()
5257 hw->mac.autoneg && in e1000_watchdog_task()
5258 (adapter->link_speed == SPEED_10 || in e1000_watchdog_task()
5259 adapter->link_speed == SPEED_100) && in e1000_watchdog_task()
5260 (adapter->link_duplex == HALF_DUPLEX)) { in e1000_watchdog_task()
5270 adapter->tx_timeout_factor = 1; in e1000_watchdog_task()
5271 switch (adapter->link_speed) { in e1000_watchdog_task()
5274 adapter->tx_timeout_factor = 16; in e1000_watchdog_task()
5278 adapter->tx_timeout_factor = 10; in e1000_watchdog_task()
5282 /* workaround: re-program speed mode bit after in e1000_watchdog_task()
5283 * link-up event in e1000_watchdog_task()
5285 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && in e1000_watchdog_task()
5301 /* Perform any post-link-up configuration before in e1000_watchdog_task()
5304 if (phy->ops.cfg_on_link_up) in e1000_watchdog_task()
5305 phy->ops.cfg_on_link_up(hw); in e1000_watchdog_task()
5310 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5311 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5316 adapter->link_speed = 0; in e1000_watchdog_task()
5317 adapter->link_duplex = 0; in e1000_watchdog_task()
5322 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5323 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5326 /* 80003ES2LAN requires an Rx packet buffer work-around in e1000_watchdog_task()
5330 if (adapter->flags & FLAG_RX_NEEDS_RESTART) in e1000_watchdog_task()
5331 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5333 pm_schedule_suspend(netdev->dev.parent, in e1000_watchdog_task()
5339 spin_lock(&adapter->stats64_lock); in e1000_watchdog_task()
5342 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; in e1000_watchdog_task()
5343 adapter->tpt_old = adapter->stats.tpt; in e1000_watchdog_task()
5344 mac->collision_delta = adapter->stats.colc - adapter->colc_old; in e1000_watchdog_task()
5345 adapter->colc_old = adapter->stats.colc; in e1000_watchdog_task()
5347 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; in e1000_watchdog_task()
5348 adapter->gorc_old = adapter->stats.gorc; in e1000_watchdog_task()
5349 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; in e1000_watchdog_task()
5350 adapter->gotc_old = adapter->stats.gotc; in e1000_watchdog_task()
5351 spin_unlock(&adapter->stats64_lock); in e1000_watchdog_task()
5358 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) in e1000_watchdog_task()
5359 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5362 if (adapter->flags & FLAG_RESTART_NOW) { in e1000_watchdog_task()
5363 schedule_work(&adapter->reset_task); in e1000_watchdog_task()
5368 e1000e_update_adaptive(&adapter->hw); in e1000_watchdog_task()
5371 if (adapter->itr_setting == 4) { in e1000_watchdog_task()
5374 * everyone else is between 2000-8000. in e1000_watchdog_task()
5376 u32 goc = (adapter->gotc + adapter->gorc) / 10000; in e1000_watchdog_task()
5377 u32 dif = (adapter->gotc > adapter->gorc ? in e1000_watchdog_task()
5378 adapter->gotc - adapter->gorc : in e1000_watchdog_task()
5379 adapter->gorc - adapter->gotc) / 10000; in e1000_watchdog_task()
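/* [editor's annotation] goc is total throughput and dif the Tx/Rx
 * imbalance, both in units of 10000 bytes; the elided next step
 * interpolates the interrupt rate between the 2000 ints/s floor for
 * symmetric traffic and the 8000 ints/s ceiling for one-sided traffic
 * described in the comment above.
 */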
5386 if (adapter->msix_entries) in e1000_watchdog_task()
5387 ew32(ICS, adapter->rx_ring->ims_val); in e1000_watchdog_task()
5395 adapter->detect_tx_hung = true; in e1000_watchdog_task()
5401 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5403 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) in e1000_watchdog_task()
5407 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { in e1000_watchdog_task()
5408 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && in e1000_watchdog_task()
5411 adapter->rx_hwtstamp_cleared++; in e1000_watchdog_task()
5413 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; in e1000_watchdog_task()
5418 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5419 mod_timer(&adapter->watchdog_timer, in e1000_watchdog_task()
5451 mss = skb_shinfo(skb)->gso_size; in e1000_tso()
5454 iph->tot_len = 0; in e1000_tso()
5455 iph->check = 0; in e1000_tso()
5456 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in e1000_tso()
5459 ipcse = skb_transport_offset(skb) - 1; in e1000_tso()
5465 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5467 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5470 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); in e1000_tso()
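/* [editor's annotation] For a plain TCP/IPv4 frame the offsets above
 * work out to ipcss = 14 (IP header after Ethernet), ipcse = 33 (last
 * byte of a 20-byte IP header, hence the "- 1"), tucss = 34 and
 * tucso = 50 (the TCP checksum field); cmd_length carries skb->len
 * minus the headers, i.e. the payload the hardware will segment.
 */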
5472 i = tx_ring->next_to_use; in e1000_tso()
5474 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
5476 context_desc->lower_setup.ip_fields.ipcss = ipcss; in e1000_tso()
5477 context_desc->lower_setup.ip_fields.ipcso = ipcso; in e1000_tso()
5478 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); in e1000_tso()
5479 context_desc->upper_setup.tcp_fields.tucss = tucss; in e1000_tso()
5480 context_desc->upper_setup.tcp_fields.tucso = tucso; in e1000_tso()
5481 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5482 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); in e1000_tso()
5483 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; in e1000_tso()
5484 context_desc->cmd_and_length = cpu_to_le32(cmd_length); in e1000_tso()
5486 buffer_info->time_stamp = jiffies; in e1000_tso()
5487 buffer_info->next_to_watch = i; in e1000_tso()
5490 if (i == tx_ring->count) in e1000_tso()
5492 tx_ring->next_to_use = i; in e1000_tso()
5500 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_csum()
5507 if (skb->ip_summed != CHECKSUM_PARTIAL) in e1000_tx_csum()
5512 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in e1000_tx_csum()
5517 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in e1000_tx_csum()
5529 i = tx_ring->next_to_use; in e1000_tx_csum()
5530 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
5533 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5534 context_desc->upper_setup.tcp_fields.tucss = css; in e1000_tx_csum()
5535 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; in e1000_tx_csum()
5536 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5537 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5538 context_desc->cmd_and_length = cpu_to_le32(cmd_len); in e1000_tx_csum()
5540 buffer_info->time_stamp = jiffies; in e1000_tx_csum()
5541 buffer_info->next_to_watch = i; in e1000_tx_csum()
5544 if (i == tx_ring->count) in e1000_tx_csum()
5546 tx_ring->next_to_use = i; in e1000_tx_csum()
5555 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_map()
5556 struct pci_dev *pdev = adapter->pdev; in e1000_tx_map()
5562 i = tx_ring->next_to_use; in e1000_tx_map()
5565 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5568 buffer_info->length = size; in e1000_tx_map()
5569 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5570 buffer_info->next_to_watch = i; in e1000_tx_map()
5571 buffer_info->dma = dma_map_single(&pdev->dev, in e1000_tx_map()
5572 skb->data + offset, in e1000_tx_map()
5574 buffer_info->mapped_as_page = false; in e1000_tx_map()
5575 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5578 len -= size; in e1000_tx_map()
5584 if (i == tx_ring->count) in e1000_tx_map()
5590 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in e1000_tx_map()
5597 if (i == tx_ring->count) in e1000_tx_map()
5600 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5603 buffer_info->length = size; in e1000_tx_map()
5604 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5605 buffer_info->next_to_watch = i; in e1000_tx_map()
5606 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, in e1000_tx_map()
5609 buffer_info->mapped_as_page = true; in e1000_tx_map()
5610 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5613 len -= size; in e1000_tx_map()
5619 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5621 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
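/* [editor's annotation] Each TSO segment re-emits the headers kept in
 * the linear area, so the bytes accounted at completion time are
 * skb->len plus one extra header per additional segment: two 1448-byte
 * segments behind a 54-byte header give (2 - 1) * 54 + 2950 = 3004
 * bytes on the wire.
 */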
5623 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
5624 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
5625 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
5626 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
5631 dev_err(&pdev->dev, "Tx DMA map failed\n"); in e1000_tx_map()
5632 buffer_info->dma = 0; in e1000_tx_map()
5634 count--; in e1000_tx_map()
5636 while (count--) { in e1000_tx_map()
5638 i += tx_ring->count; in e1000_tx_map()
5639 i--; in e1000_tx_map()
5640 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5649 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_queue()
5682 i = tx_ring->next_to_use; in e1000_tx_queue()
5685 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
5687 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_tx_queue()
5688 tx_desc->lower.data = cpu_to_le32(txd_lower | in e1000_tx_queue()
5689 buffer_info->length); in e1000_tx_queue()
5690 tx_desc->upper.data = cpu_to_le32(txd_upper); in e1000_tx_queue()
5693 if (i == tx_ring->count) in e1000_tx_queue()
5695 } while (--count > 0); in e1000_tx_queue()
5697 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); in e1000_tx_queue()
5699 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ in e1000_tx_queue()
5701 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); in e1000_tx_queue()
5705 * applicable for weak-ordered memory model archs, in e1000_tx_queue()
5706 * such as IA-64). in e1000_tx_queue()
5710 tx_ring->next_to_use = i; in e1000_tx_queue()
5717 struct e1000_hw *hw = &adapter->hw; in e1000_transfer_dhcp_info()
5721 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && in e1000_transfer_dhcp_info()
5722 (adapter->hw.mng_cookie.status & in e1000_transfer_dhcp_info()
5726 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) in e1000_transfer_dhcp_info()
5729 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) in e1000_transfer_dhcp_info()
5733 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); in e1000_transfer_dhcp_info()
5736 if (ip->protocol != IPPROTO_UDP) in e1000_transfer_dhcp_info()
5739 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); in e1000_transfer_dhcp_info()
5740 if (ntohs(udp->dest) != 67) in e1000_transfer_dhcp_info()
5743 offset = (u8 *)udp + 8 - skb->data; in e1000_transfer_dhcp_info()
5744 length = skb->len - offset; in e1000_transfer_dhcp_info()
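/* [editor's annotation] Port 67 is the BOOTP/DHCP server port and the
 * "+ 8" skips the fixed-size UDP header, so offset/length delimit just
 * the DHCP payload that the elided call below hands to the
 * manageability firmware.
 */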
5753 struct e1000_adapter *adapter = tx_ring->adapter; in __e1000_maybe_stop_tx()
5755 netif_stop_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5766 return -EBUSY; in __e1000_maybe_stop_tx()
5769 netif_start_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5770 ++adapter->restart_queue; in __e1000_maybe_stop_tx()
5776 BUG_ON(size > tx_ring->count); in e1000_maybe_stop_tx()
5787 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_xmit_frame()
5798 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_xmit_frame()
5803 if (skb->len <= 0) { in e1000_xmit_frame()
5814 mss = skb_shinfo(skb)->gso_size; in e1000_xmit_frame()
5818 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data in e1000_xmit_frame()
5820 * frags into skb->data in e1000_xmit_frame()
5823 /* we do this workaround for ES2LAN, but it is unnecessary, in e1000_xmit_frame()
5826 if (skb->data_len && (hdr_len == len)) { in e1000_xmit_frame()
5829 pull_size = min_t(unsigned int, 4, skb->data_len); in e1000_xmit_frame()
5840 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) in e1000_xmit_frame()
5844 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); in e1000_xmit_frame()
5846 nr_frags = skb_shinfo(skb)->nr_frags; in e1000_xmit_frame()
5848 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), in e1000_xmit_frame()
5849 adapter->tx_fifo_limit); in e1000_xmit_frame()
5851 if (adapter->hw.mac.tx_pkt_filtering) in e1000_xmit_frame()
5866 first = tx_ring->next_to_use; in e1000_xmit_frame()
5886 if (unlikely(skb->no_fcs)) in e1000_xmit_frame()
5890 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, in e1000_xmit_frame()
5893 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in e1000_xmit_frame()
5894 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { in e1000_xmit_frame()
5895 if (!adapter->tx_hwtstamp_skb) { in e1000_xmit_frame()
5896 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in e1000_xmit_frame()
5898 adapter->tx_hwtstamp_skb = skb_get(skb); in e1000_xmit_frame()
5899 adapter->tx_hwtstamp_start = jiffies; in e1000_xmit_frame()
5900 schedule_work(&adapter->tx_hwtstamp_work); in e1000_xmit_frame()
5902 adapter->tx_hwtstamp_skipped++; in e1000_xmit_frame()
5908 netdev_sent_queue(netdev, skb->len); in e1000_xmit_frame()
5914 adapter->tx_fifo_limit) + 4)); in e1000_xmit_frame()
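/* [editor's annotation] The partially elided reservation above covers
 * the worst case: each possible fragment may need
 * DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) descriptors, plus a small
 * fixed reserve (+ 4); if that many free slots are unavailable the
 * queue is stopped until cleanup frees descriptors.
 */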
5918 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_xmit_frame()
5920 tx_ring->next_to_use); in e1000_xmit_frame()
5922 writel(tx_ring->next_to_use, tx_ring->tail); in e1000_xmit_frame()
5926 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
5927 tx_ring->next_to_use = first; in e1000_xmit_frame()
5934 * e1000_tx_timeout - Respond to a Tx Hang
5943 adapter->tx_timeout_count++; in e1000_tx_timeout()
5944 schedule_work(&adapter->reset_task); in e1000_tx_timeout()
5954 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_reset_task()
5959 if (!(adapter->flags & FLAG_RESTART_NOW)) { in e1000_reset_task()
5968 * e1000e_get_stats64 - Get System Network Statistics
5979 spin_lock(&adapter->stats64_lock); in e1000e_get_stats64()
5982 stats->rx_bytes = adapter->stats.gorc; in e1000e_get_stats64()
5983 stats->rx_packets = adapter->stats.gprc; in e1000e_get_stats64()
5984 stats->tx_bytes = adapter->stats.gotc; in e1000e_get_stats64()
5985 stats->tx_packets = adapter->stats.gptc; in e1000e_get_stats64()
5986 stats->multicast = adapter->stats.mprc; in e1000e_get_stats64()
5987 stats->collisions = adapter->stats.colc; in e1000e_get_stats64()
5994 stats->rx_errors = adapter->stats.rxerrc + in e1000e_get_stats64()
5995 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_get_stats64()
5996 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_get_stats64()
5997 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; in e1000e_get_stats64()
5998 stats->rx_crc_errors = adapter->stats.crcerrs; in e1000e_get_stats64()
5999 stats->rx_frame_errors = adapter->stats.algnerrc; in e1000e_get_stats64()
6000 stats->rx_missed_errors = adapter->stats.mpc; in e1000e_get_stats64()
6003 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_get_stats64()
6004 stats->tx_aborted_errors = adapter->stats.ecol; in e1000e_get_stats64()
6005 stats->tx_window_errors = adapter->stats.latecol; in e1000e_get_stats64()
6006 stats->tx_carrier_errors = adapter->stats.tncrs; in e1000e_get_stats64()
6010 spin_unlock(&adapter->stats64_lock); in e1000e_get_stats64()
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 **/
	/* Jumbo frame support */
	if ((new_mtu > ETH_DATA_LEN) &&
	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
		e_err("Jumbo Frames not supported.\n");
		return -EINVAL;
	}

	/* Jumbo frame workaround on 82579 and newer requires CRC stripping */
	if ((adapter->hw.mac.type >= e1000_pch2lan) &&
	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
	    (new_mtu > ETH_DATA_LEN)) {
		/* ... */
		return -EINVAL;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 1100);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;
	e_info("changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	WRITE_ONCE(netdev->mtu, new_mtu);

	pm_runtime_get_sync(netdev->dev.parent);
	/* ... */
	/* NOTE: the 16-byte netdev_alloc_skb reserve plus NET_IP_ALIGN
	 * pushes a 2048-byte buffer into the next larger slab,
	 * i.e. RXBUFFER_2048 --> size-4096 slab; jumbo receives use
	 * fragmented skbs instead.
	 */
	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* use the exact length when a standard frame fits */
	if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
	/* ... */
	pm_runtime_put_sync(netdev->dev.parent);

	clear_bit(__E1000_RESETTING, &adapter->state);
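/* Illustrative restatement (a sketch, not a function in this driver) of
 * the buffer-length policy chosen above: a standard frame uses the
 * exact VLAN_ETH_FRAME_LEN + ETH_FCS_LEN length, anything larger rounds
 * up to a power-of-two, slab-friendly size.
 */
static unsigned int e1000e_rx_buf_len_example(unsigned int max_frame)
{
	/* standard frame fits exactly; no slab rounding needed */
	if (max_frame <= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
		return VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
	/* larger frames round up to a power-of-two buffer */
	return (max_frame <= 2048) ? 2048 : 4096;
}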
/* e1000_mii_ioctl(): read-only MII emulation from cached PHY state */
	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		/* ... refresh the cached phy_regs ... */
		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
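/* Hedged userspace sketch (not driver code): exercising the read-only
 * MII path above. SIOCGMIIPHY fills in the PHY address, after which
 * SIOCGMIIREG returns the cached register value; 'fd' is any
 * AF_INET/SOCK_DGRAM socket and "eth0" is an illustrative name.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int mii_read_bmcr(int fd, const char *ifname, unsigned int *out)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* sets mii->phy_id */
		return -1;
	mii->reg_num = MII_BMCR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)	/* returns phy_regs.bmcr */
		return -1;
	*out = mii->val_out;
	return 0;
}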
/**
 * e1000e_hwtstamp_set - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it.
 **/
	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
	/* ... validate the request and program the hardware ... */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;

/* e1000e_hwtstamp_get(): */
	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
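/* Hedged userspace sketch (not driver code): requesting timestamping
 * through the e1000e_hwtstamp_set()/_get() path above. The driver may
 * widen rx_filter (the config is copied back for exactly that reason),
 * so the caller re-reads it after the ioctl. "eth0" is illustrative.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int hwtstamp_enable(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;
	/* cfg.rx_filter now reports what the hardware actually enabled */
	return 0;
}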
/* e1000_ioctl(): commands other than the MII and hwtstamp ioctls
 * handled above fall through to:
 */
		return -EOPNOTSUPP;
/* e1000_init_phy_wakeup(): */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	retval = hw->phy.ops.acquire(hw);
	if (retval)
		return retval;
	/* ... enable access to the wakeup (BM_WUC) register page ... */

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	/* ... mirror the MAC RCTL bits into phy_reg ... */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
	/* ... */
	/* configure and enable PHY wakeup in PHY registers */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
	/* ... activate PHY wakeup, then drop the semaphore ... */
	hw->phy.ops.release(hw);
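/* The entire wakeup-programming sequence above runs under the PHY
 * semaphore: ops.acquire() is taken once, every access goes through the
 * *_reg_page() helpers (which assume the wakeup page is already
 * selected), and ops.release() is the single exit point, so an error in
 * the middle cannot leak the lock.
 */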
/* e1000e_flush_lpic(): best-effort dump of the EEE LPI counter */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	pm_runtime_get_sync(netdev->dev.parent);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto fl_out;
	/* ... read and log the LPI counter ... */
	hw->phy.ops.release(hw);
fl_out:
	pm_runtime_put_sync(netdev->dev.parent);
/* e1000e_s0ix_entry_flow(): */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (/* ... firmware-managed flow available ... */
	    hw->mac.type >= e1000_pch_adp) {
		/* ... let the ME firmware configure the device for S0ix ... */
	} else {
		/* Disable the periodic inband message,
		 * don't request PCIe clock in K1 page770_17[10:9] = 01b
		 */
		/* ... */
		/*
		 * page769_20[7] - PHY PLL stop
		 * page769_20[8] - PHY go to the electrical idle
		 * page769_20[9] - PHY serdes disable
		 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
		 */
		/* ... */
		/* Disable disconnected cable conditioning for Power Gating */
		/* ... */
		/* Disable the time synchronization clock */
		/* ... */
	}
/* e1000e_s0ix_exit_flow(): undo everything the entry flow configured */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (/* ... firmware-managed flow available ... */
	    hw->mac.type >= e1000_pch_adp) {
		/* ... let the ME firmware undo the S0ix configuration ... */
	} else {
		/* Cancel disable disconnected cable conditioning
		 * for Power Gating
		 */
		/* ... */
		/* Disable the Dynamic Clock Gating in the DMA and MAC */
		/* ... */
		/* Disable Dynamic Power Gating */
		/* ... */
		/* Disable the Dynamic Power Gating in the MAC */
		/* ... */
		/* Disable mPHY power gating for any link and speed */
		/* ... */
		/* Disable K1 off */
		/* ... */
		/* Disable Ungate PGCB clock */
		/* ... */
	}
/* e1000e_pm_freeze(): */
	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
		usleep_range(10000, 11000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
	/* ... quiesce the interface ... */
	e1000e_disable_pcie_master(&adapter->hw);
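/* Freeze order matters here: wait (bounded by 'count') for any
 * in-flight reset to finish, warn if one is still pending, quiesce the
 * interface, and only then ask the device to stop PCIe mastering so
 * that no DMA can race with the suspend that follows.
 */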
/* __e1000_shutdown(): */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	/* Runtime suspend should only enable wakeup for link changes */
	if (runtime)
		wufc = E1000_WUFC_LNKC;
	else if (device_may_wakeup(&pdev->dev))
		wufc = adapter->wol;
	else
		wufc = 0;
	/* ... */
	if (wufc) {
		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			/* ... set RCTL.MPE ... */
		}

		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* ... keep the laser running in D3 ... */
		}
		/* ... */
		if (adapter->flags & FLAG_IS_ICH)
			e1000_suspend_workarounds_ich8lan(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* ... enable wakeup by the PHY ... */
		} else {
			/* ... enable wakeup by the MAC ... */
		}
	}
	/* ... */
	if (adapter->hw.phy.type == e1000_phy_igp_3) {
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
	} else if (hw->mac.type >= e1000_pch_lpt) {
		/* ... consider ULP entry ... */
	}

	/* Ensure the appropriate LPI_CTRL bits are set for EEE in Sx */
	if ((hw->phy.type >= e1000_phy_i217) &&
	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
		retval = hw->phy.ops.acquire(hw);
		if (!retval) {
			if (adapter->eee_advert &
			    hw->dev_spec.ich8lan.eee_lp_ability &
			    I82579_EEE_100_SUPPORTED)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			if (adapter->eee_advert &
			    hw->dev_spec.ich8lan.eee_lp_ability &
			    I82579_EEE_1000_SUPPORTED)
				lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
			/* ... write LPI_CTRL back ... */
		}
		hw->phy.ops.release(hw);
	}
	/* ... */
	/* The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		/* ... */
	}
/**
 * __e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 * @locked: indication if this context holds pci_bus_sem locked.
 **/
	struct pci_dev *parent = pdev->bus->self;
	/* ... */
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
	/* ... */
	/* Double-check ASPM control.  If not disabled by the above, the
	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
	 * not enabled); override by writing PCI config space directly.
	 */
	/* ... */
	/* Disable ASPM in downstream component first and then upstream. */
	/* ... */

/**
 * e1000e_disable_aspm - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 **/

/**
 * e1000e_disable_aspm_locked - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 **/
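/* Hedged sketch (not driver code) of the fallback described above,
 * expressed with the generic PCIe capability accessors: clear the ASPM
 * control bits (PCI_EXP_LNKCTL_ASPM_L0S / PCI_EXP_LNKCTL_ASPM_L1) on
 * the downstream device first, then on its upstream bridge.
 */
static void aspm_force_clear_example(struct pci_dev *pdev, u16 aspm_bits)
{
	struct pci_dev *parent = pdev->bus->self;

	/* downstream component first ... */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_bits);
	/* ... then the upstream end of the link, if there is one */
	if (parent)
		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_bits);
}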
/* __e1000_resume(): */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	/* ... */
	if (hw->mac.type >= e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);
	/* ... */
	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data)
			e_info("PHY Wakeup cause - %s\n", /* ... */);
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		/* ... read the MAC WUS register ... */
		e_info("MAC Wakeup cause - %s\n", /* ... */);
	}
	/* ... */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
/* e1000e_pm_suspend(): */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_entry_flow(adapter);

/* e1000e_pm_resume(): */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_exit_flow(adapter);

/* e1000e_pm_runtime_idle(): preserve the EEE advertisement across the
 * link check, then refuse to idle while the link is up
 */
	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;

	if (!e1000e_has_link(adapter)) {
		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
		/* ... schedule a runtime suspend ... */
	}

	return -EBUSY;

/* e1000e_pm_runtime_resume(): */
	pdev->pme_poll = true;
	/* ... */
	if (netdev->flags & IFF_UP)
		e1000e_up(adapter);

/* e1000e_pm_runtime_suspend(): */
	if (netdev->flags & IFF_UP) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 11000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		/* ... down the device without resetting the hardware ... */
	}
	/* ... on shutdown failure, resume and report: */
		return -EBUSY;
/* e1000_shutdown(): */
	e1000e_pm_freeze(&pdev->dev);
	/* ... */
/* e1000_intr_msix() [netpoll path]: walk the Rx, Tx and "other" vectors */
	if (adapter->msix_entries) {
		int vector = 0, msix_irq;

		msix_irq = adapter->msix_entries[vector].vector;
		/* ... run the Rx handler under disable_hardirq() ... */
		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		/* ... likewise for the Tx handler ... */
		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		/* ... and for the "other" (link) handler ... */
	}
/* e1000_netpoll():
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default:	/* E1000E_INT_MODE_LEGACY */
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
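/* In every branch above, disable_hardirq() -- not disable_irq() -- does
 * the masking: it busy-waits for a running hard-IRQ handler instead of
 * sleeping, which keeps it safe in netpoll's atomic context, and it
 * returns false while a threaded handler is still active, in which case
 * the handler invocation is skipped and the line is simply re-enabled.
 */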
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 **/
	e1000e_pm_freeze(&pdev->dev);
	/* ... */

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the e1000e_pm_resume routine.
 **/
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	/* ... */
	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		/* ... report PCI_ERS_RESULT_DISCONNECT ... */
	} else {
		pdev->state_saved = true;
		/* ... restore config space, re-enable bus mastering ... */
	}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000e_pm_resume routine.
 **/
	e1000e_pm_thaw(&pdev->dev);
	/* ... */
	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
/* e1000_print_device_info(): */
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	/* ... read the PBA string ... */
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
/* e1000_eeprom_checks(): 82573-only sanity warnings */
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.type != e1000_82573)
		return;
	/* ... */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
/* e1000_fix_features(): */
	struct e1000_hw *hw = &adapter->hw;

	/* Jumbo frame workaround on 82579 and newer requires CRC stripping */
	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_RXFCS;

	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
/* e1000_set_features(): */
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;
	/* ... */
	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* Back to defaults, which might still mean
			 * stripping is disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}
	/* ... */
	netdev->features = features;
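/* NETIF_F_RXFCS and CRC stripping are two views of one knob: handing
 * the FCS to the stack requires the MAC to stop stripping it, while
 * turning RXFCS back off restores whatever stripping policy the
 * platform default (FLAG2_DFLT_CRC_STRIPPING) dictates.
 */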
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 **/
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	/* ... */
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	/* ... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_dma;
	}
	/* ... */
	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;
	/* ... */
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	/* map the MMIO BAR */
	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
	    (hw->mac.type < e1000_pch_spt)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* Set default EEE advertisement */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	/* ... */
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll);
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	adapter->bd_number = cards_found++;
	/* ... set up the software state ... */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
	    (hw->mac.type < e1000_pch_spt))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");
	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* disable TSO for pcie and 10/100 speeds to avoid
	 * some hardware issues
	 */
	if (!(adapter->flags & FLAG_TSO_FORCE)) {
		switch (adapter->link_speed) {
		case SPEED_10:
		case SPEED_100:
			netdev->features &= ~NETIF_F_TSO;
			netdev->features &= ~NETIF_F_TSO6;
			break;
		case SPEED_1000:
			netdev->features |= NETIF_F_TSO;
			netdev->features |= NETIF_F_TSO6;
			break;
		default:
			break;
		}
		/* ... */
		if (hw->mac.type == e1000_pch_spt) {
			netdev->features &= ~NETIF_F_TSO;
			netdev->features &= ~NETIF_F_TSO6;
		}
	}

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->vlan_features |= NETIF_F_HIGHDMA;
	/* MTU range: 68 - max_hw_frame_size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = adapter->max_hw_frame_size -
			  (VLAN_ETH_HLEN + ETH_FCS_LEN);

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/* systems with ASPM and others may see the checksum fail on the
	 * first attempt; let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}
	/* ... */
	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");

	eth_hw_addr_set(netdev, adapter->hw.mac.addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}

	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;
	/* Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_B,
						 1, &eeprom_data);
		else
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_A,
						 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (!ret_val && (eeprom_data & eeprom_apme_mask))
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board won't support wake
	 * on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
	if (ret_val)
		adapter->eeprom_vers = 0;
	/* ... reset the hardware with the new settings ... */
	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	if (hw->mac.type >= e1000_pch_cnp)
		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;

	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;
	/* ... */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	/* ... */
err_flashmap:
	iounmap(adapter->hw.hw_addr);
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}
	/* ... */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);
	/* ... */
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
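/* Teardown above mirrors probe in reverse: mark the adapter down so the
 * timers cannot re-arm, flush every work item (including a still-pending
 * Tx timestamp skb), drop the runtime-PM reference, and finally free
 * the rings and unmap the MMIO and flash regions.
 */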
/**
 * e1000_init_module - Driver Registration Routine
 **/
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
	return pci_register_driver(&e1000_driver);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 **/