Lines Matching +full:free +full:- +full:flowing
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
56 "Copyright (c) 2007-2014 Intel Corporation.";
208 static int debug = -1;
253 /* igb_regdump - register printout routine */
260 switch (reginfo->ofs) { in igb_regdump()
310 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); in igb_regdump()
314 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); in igb_regdump()
315 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], in igb_regdump()
319 /* igb_dump - Print registers, Tx-rings and Rx-rings */
322 struct net_device *netdev = adapter->netdev; in igb_dump()
323 struct e1000_hw *hw = &adapter->hw; in igb_dump()
338 dev_info(&adapter->pdev->dev, "Net device Info\n"); in igb_dump()
340 pr_info("%-15s %016lX %016lX\n", netdev->name, in igb_dump()
341 netdev->state, dev_trans_start(netdev)); in igb_dump()
345 dev_info(&adapter->pdev->dev, "Register Dump\n"); in igb_dump()
348 reginfo->name; reginfo++) { in igb_dump()
356 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in igb_dump()
357 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in igb_dump()
358 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
360 tx_ring = adapter->tx_ring[n]; in igb_dump()
361 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igb_dump()
363 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igb_dump()
366 buffer_info->next_to_watch, in igb_dump()
367 (u64)buffer_info->time_stamp); in igb_dump()
374 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in igb_dump()
379 * +--------------------------------------------------------------+ in igb_dump()
381 * +--------------------------------------------------------------+ in igb_dump()
383 * +--------------------------------------------------------------+ in igb_dump()
387 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
388 tx_ring = adapter->tx_ring[n]; in igb_dump()
389 pr_info("------------------------------------\n"); in igb_dump()
390 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); in igb_dump()
391 pr_info("------------------------------------\n"); in igb_dump()
392 …fo("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi-… in igb_dump()
394 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igb_dump()
398 buffer_info = &tx_ring->tx_buffer_info[i]; in igb_dump()
400 if (i == tx_ring->next_to_use && in igb_dump()
401 i == tx_ring->next_to_clean) in igb_dump()
403 else if (i == tx_ring->next_to_use) in igb_dump()
405 else if (i == tx_ring->next_to_clean) in igb_dump()
411 i, le64_to_cpu(u0->a), in igb_dump()
412 le64_to_cpu(u0->b), in igb_dump()
415 buffer_info->next_to_watch, in igb_dump()
416 (u64)buffer_info->time_stamp, in igb_dump()
417 buffer_info->skb, next_desc); in igb_dump()
419 if (netif_msg_pktdata(adapter) && buffer_info->skb) in igb_dump()
422 16, 1, buffer_info->skb->data, in igb_dump()
430 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in igb_dump()
432 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
433 rx_ring = adapter->rx_ring[n]; in igb_dump()
435 n, rx_ring->next_to_use, rx_ring->next_to_clean); in igb_dump()
442 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in igb_dump()
446 * +-----------------------------------------------------+ in igb_dump()
448 * +----------------------------------------------+------+ in igb_dump()
450 * +-----------------------------------------------------+ in igb_dump()
453 * Advanced Receive Descriptor (Write-Back) Format in igb_dump()
456 * +------------------------------------------------------+ in igb_dump()
459 * +------------------------------------------------------+ in igb_dump()
461 * +------------------------------------------------------+ in igb_dump()
465 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
466 rx_ring = adapter->rx_ring[n]; in igb_dump()
467 pr_info("------------------------------------\n"); in igb_dump()
468 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in igb_dump()
469 pr_info("------------------------------------\n"); in igb_dump()
470 …pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Re… in igb_dump()
471 …nfo("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-… in igb_dump()
473 for (i = 0; i < rx_ring->count; i++) { in igb_dump()
479 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igb_dump()
481 if (!rx_ring->xsk_pool) { in igb_dump()
482 buffer_info = &rx_ring->rx_buffer_info[i]; in igb_dump()
483 dma = buffer_info->dma; in igb_dump()
486 if (i == rx_ring->next_to_use) in igb_dump()
488 else if (i == rx_ring->next_to_clean) in igb_dump()
495 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", in igb_dump()
497 le64_to_cpu(u0->a), in igb_dump()
498 le64_to_cpu(u0->b), in igb_dump()
503 le64_to_cpu(u0->a), in igb_dump()
504 le64_to_cpu(u0->b), in igb_dump()
509 buffer_info && dma && buffer_info->page) { in igb_dump()
513 page_address(buffer_info->page) + in igb_dump()
514 buffer_info->page_offset, in igb_dump()
526 * igb_get_i2c_data - Reads the I2C SDA data bit
534 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_data()
541 * igb_set_i2c_data - Sets the I2C data bit
550 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_data()
565 * igb_set_i2c_clk - Sets the I2C SCL clock
574 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_clk()
588 * igb_get_i2c_clk - Gets the I2C SCL clock state
596 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_clk()
612 * igb_get_hw_dev - return device
619 struct igb_adapter *adapter = hw->back; in igb_get_hw_dev()
620 return adapter->netdev; in igb_get_hw_dev()
626 * igb_init_module - Driver Registration Routine
652 * igb_exit_module - Driver Exit Cleanup Routine
669 * igb_cache_ring_register - Descriptor ring to register mapping
672 * Once we know the feature-set enabled for the device, we'll cache
678 u32 rbase_offset = adapter->vfs_allocated_count; in igb_cache_ring_register()
680 switch (adapter->hw.mac.type) { in igb_cache_ring_register()
684 * In order to avoid collision we start at the first free queue in igb_cache_ring_register()
687 if (adapter->vfs_allocated_count) { in igb_cache_ring_register()
688 for (; i < adapter->rss_queues; i++) in igb_cache_ring_register()
689 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
700 for (; i < adapter->num_rx_queues; i++) in igb_cache_ring_register()
701 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
702 for (; j < adapter->num_tx_queues; j++) in igb_cache_ring_register()
703 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
711 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igb_rd32()
721 struct net_device *netdev = igb->netdev; in igb_rd32()
722 hw->hw_addr = NULL; in igb_rd32()
724 WARN(pci_device_is_present(igb->pdev), in igb_rd32()
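The igb_rd32() fragment above is the surprise-removal check: the MMIO base is fetched with READ_ONCE(), and a register read that comes back as all ones makes the driver latch hw_addr to NULL, warning only if the PCI core still believes the device is present. A minimal stand-alone sketch of that pattern (the fake_hw/fake_rd32 names are hypothetical, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the check igb_rd32() performs: an all-ones read from a PCIe
 * BAR usually means the device vanished, so the base pointer is latched to
 * NULL and later reads fail fast instead of touching a dead mapping.
 */
struct fake_hw {
	volatile uint32_t *hw_addr;	/* NULL once removal has been detected */
	bool device_present;		/* stand-in for pci_device_is_present() */
};

static uint32_t fake_rd32(struct fake_hw *hw, unsigned int reg)
{
	volatile uint32_t *base = hw->hw_addr;	/* like READ_ONCE(hw->hw_addr) */
	uint32_t value;

	if (!base)
		return ~0u;			/* mapping already marked dead */

	value = base[reg];
	if (value == ~0u) {			/* all ones: assume surprise removal */
		hw->hw_addr = NULL;
		if (hw->device_present)
			fprintf(stderr, "unexpected all-ones read, adapter disabled\n");
	}
	return value;
}

int main(void)
{
	uint32_t regs[2] = { 0x12345678, ~0u };
	struct fake_hw hw = { .hw_addr = regs, .device_present = true };

	printf("reg0 = 0x%08x\n", fake_rd32(&hw, 0));	/* normal read */
	printf("reg1 = 0x%08x\n", fake_rd32(&hw, 1));	/* takes the removal path */
	printf("hw_addr is now %s\n", hw.hw_addr ? "valid" : "NULL");
	return 0;
}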
732 * igb_write_ivar - configure ivar for given MSI-X vector
757 #define IGB_N0_QUEUE -1
760 struct igb_adapter *adapter = q_vector->adapter; in igb_assign_vector()
761 struct e1000_hw *hw = &adapter->hw; in igb_assign_vector()
766 if (q_vector->rx.ring) in igb_assign_vector()
767 rx_queue = q_vector->rx.ring->reg_idx; in igb_assign_vector()
768 if (q_vector->tx.ring) in igb_assign_vector()
769 tx_queue = q_vector->tx.ring->reg_idx; in igb_assign_vector()
771 switch (hw->mac.type) { in igb_assign_vector()
782 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) in igb_assign_vector()
785 q_vector->eims_value = msixbm; in igb_assign_vector()
789 * with 8 rows. The ordering is column-major so we use the in igb_assign_vector()
801 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
809 * however instead of ordering column-major we have things in igb_assign_vector()
810 * ordered row-major. So we traverse the table by using in igb_assign_vector()
822 q_vector->eims_value = BIT(msix_vector); in igb_assign_vector()
830 adapter->eims_enable_mask |= q_vector->eims_value; in igb_assign_vector()
833 q_vector->set_itr = 1; in igb_assign_vector()
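The comments above describe the IVAR layout as a small table packed into 32-bit registers, ordered column-major on 82576 and row-major on 82580 and later. The sketch below only illustrates what "low bits pick the row, a higher bit picks the column" means in practice; the register index and bit offsets are assumptions chosen for the example, not values taken from the datasheet:

#include <stdio.h>

/* Illustration only: place an 8-bit vector entry for a given queue into a
 * 32-bit IVAR-style register. Row = low 3 bits of the queue number, and a
 * higher queue bit selects the column (byte lane). The shifts here are
 * assumed for the example, not quoted from the 82576 documentation.
 */
static void ivar_slot(unsigned int queue, int is_tx,
		      unsigned int *index, unsigned int *offset)
{
	*index = queue & 0x7;			/* row: which IVAR register */
	*offset = (queue & 0x8) << 1;		/* column: bit offset 0 or 16 */
	if (is_tx)
		*offset += 8;			/* Tx entries sit one byte higher */
}

int main(void)
{
	for (unsigned int q = 0; q < 4; q++) {
		unsigned int idx, off;

		ivar_slot(q, 0, &idx, &off);
		printf("rx queue %u -> IVAR[%u], bits %u..%u\n", q, idx, off, off + 7);
		ivar_slot(q, 1, &idx, &off);
		printf("tx queue %u -> IVAR[%u], bits %u..%u\n", q, idx, off, off + 7);
	}
	return 0;
}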
837 * igb_configure_msix - Configure MSI-X hardware
841 * generate MSI-X interrupts.
847 struct e1000_hw *hw = &adapter->hw; in igb_configure_msix()
849 adapter->eims_enable_mask = 0; in igb_configure_msix()
852 switch (hw->mac.type) { in igb_configure_msix()
 855 /* enable MSI-X PBA support */ in igb_configure_msix()

858 /* Auto-Mask interrupts upon ICR read. */ in igb_configure_msix()
866 adapter->eims_other = E1000_EIMS_OTHER; in igb_configure_msix()
876 /* Turn on MSI-X capability first, or our settings in igb_configure_msix()
884 adapter->eims_other = BIT(vector); in igb_configure_msix()
890 /* do nothing, since nothing else supports MSI-X */ in igb_configure_msix()
892 } /* switch (hw->mac.type) */ in igb_configure_msix()
894 adapter->eims_enable_mask |= adapter->eims_other; in igb_configure_msix()
896 for (i = 0; i < adapter->num_q_vectors; i++) in igb_configure_msix()
897 igb_assign_vector(adapter->q_vector[i], vector++); in igb_configure_msix()
903 * igb_request_msix - Initialize MSI-X interrupts
906 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
911 unsigned int num_q_vectors = adapter->num_q_vectors; in igb_request_msix()
912 struct net_device *netdev = adapter->netdev; in igb_request_msix()
915 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
916 igb_msix_other, 0, netdev->name, adapter); in igb_request_msix()
922 dev_warn(&adapter->pdev->dev, in igb_request_msix()
924 adapter->num_q_vectors, MAX_Q_VECTORS); in igb_request_msix()
927 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_request_msix()
931 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); in igb_request_msix()
933 if (q_vector->rx.ring && q_vector->tx.ring) in igb_request_msix()
934 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igb_request_msix()
935 q_vector->rx.ring->queue_index); in igb_request_msix()
936 else if (q_vector->tx.ring) in igb_request_msix()
937 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igb_request_msix()
938 q_vector->tx.ring->queue_index); in igb_request_msix()
939 else if (q_vector->rx.ring) in igb_request_msix()
940 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igb_request_msix()
941 q_vector->rx.ring->queue_index); in igb_request_msix()
943 sprintf(q_vector->name, "%s-unused", netdev->name); in igb_request_msix()
945 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
946 igb_msix_ring, 0, q_vector->name, in igb_request_msix()
951 netif_napi_set_irq(&q_vector->napi, in igb_request_msix()
952 adapter->msix_entries[vector].vector); in igb_request_msix()
959 /* free already assigned IRQs */ in igb_request_msix()
960 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igb_request_msix()
962 vector--; in igb_request_msix()
964 free_irq(adapter->msix_entries[free_vector++].vector, in igb_request_msix()
965 adapter->q_vector[i]); in igb_request_msix()
972 * igb_free_q_vector - Free memory allocated for specific interrupt vector
980 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_free_q_vector()
982 adapter->q_vector[v_idx] = NULL; in igb_free_q_vector()
992 * igb_reset_q_vector - Reset config for interrupt vector
1001 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_reset_q_vector()
1009 if (q_vector->tx.ring) in igb_reset_q_vector()
1010 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1012 if (q_vector->rx.ring) in igb_reset_q_vector()
1013 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1015 netif_napi_del(&q_vector->napi); in igb_reset_q_vector()
1021 int v_idx = adapter->num_q_vectors; in igb_reset_interrupt_capability()
1023 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_reset_interrupt_capability()
1024 pci_disable_msix(adapter->pdev); in igb_reset_interrupt_capability()
1025 else if (adapter->flags & IGB_FLAG_HAS_MSI) in igb_reset_interrupt_capability()
1026 pci_disable_msi(adapter->pdev); in igb_reset_interrupt_capability()
1028 while (v_idx--) in igb_reset_interrupt_capability()
1033 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1042 int v_idx = adapter->num_q_vectors; in igb_free_q_vectors()
1044 adapter->num_tx_queues = 0; in igb_free_q_vectors()
1045 adapter->num_rx_queues = 0; in igb_free_q_vectors()
1046 adapter->num_q_vectors = 0; in igb_free_q_vectors()
1048 while (v_idx--) { in igb_free_q_vectors()
1055 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1059 * MSI-X interrupts allocated.
1068 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1082 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1085 adapter->num_rx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1086 if (adapter->vfs_allocated_count) in igb_set_interrupt_capability()
1087 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1089 adapter->num_tx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1092 numvecs = adapter->num_rx_queues; in igb_set_interrupt_capability()
1095 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) in igb_set_interrupt_capability()
1096 numvecs += adapter->num_tx_queues; in igb_set_interrupt_capability()
1099 adapter->num_q_vectors = numvecs; in igb_set_interrupt_capability()
1104 adapter->msix_entries[i].entry = i; in igb_set_interrupt_capability()
1106 err = pci_enable_msix_range(adapter->pdev, in igb_set_interrupt_capability()
1107 adapter->msix_entries, in igb_set_interrupt_capability()
1115 /* If we can't do MSI-X, try MSI */ in igb_set_interrupt_capability()
1117 adapter->flags &= ~IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1119 /* disable SR-IOV for non MSI-X configurations */ in igb_set_interrupt_capability()
1120 if (adapter->vf_data) { in igb_set_interrupt_capability()
1121 struct e1000_hw *hw = &adapter->hw; in igb_set_interrupt_capability()
1123 pci_disable_sriov(adapter->pdev); in igb_set_interrupt_capability()
1126 kfree(adapter->vf_mac_list); in igb_set_interrupt_capability()
1127 adapter->vf_mac_list = NULL; in igb_set_interrupt_capability()
1128 kfree(adapter->vf_data); in igb_set_interrupt_capability()
1129 adapter->vf_data = NULL; in igb_set_interrupt_capability()
1133 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); in igb_set_interrupt_capability()
1136 adapter->vfs_allocated_count = 0; in igb_set_interrupt_capability()
1137 adapter->rss_queues = 1; in igb_set_interrupt_capability()
1138 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_interrupt_capability()
1139 adapter->num_rx_queues = 1; in igb_set_interrupt_capability()
1140 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1141 adapter->num_q_vectors = 1; in igb_set_interrupt_capability()
1142 if (!pci_enable_msi(adapter->pdev)) in igb_set_interrupt_capability()
1143 adapter->flags |= IGB_FLAG_HAS_MSI; in igb_set_interrupt_capability()
1149 head->ring = ring; in igb_add_ring()
1150 head->count++; in igb_add_ring()
1154 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1163 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1177 return -ENOMEM; in igb_alloc_q_vector()
1183 q_vector = adapter->q_vector[v_idx]; in igb_alloc_q_vector()
1197 return -ENOMEM; in igb_alloc_q_vector()
1200 netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll, in igb_alloc_q_vector()
1204 adapter->q_vector[v_idx] = q_vector; in igb_alloc_q_vector()
1205 q_vector->adapter = adapter; in igb_alloc_q_vector()
1208 q_vector->tx.work_limit = adapter->tx_work_limit; in igb_alloc_q_vector()
1211 q_vector->itr_register = adapter->io_addr + E1000_EITR(0); in igb_alloc_q_vector()
1212 q_vector->itr_val = IGB_START_ITR; in igb_alloc_q_vector()
1215 ring = q_vector->ring; in igb_alloc_q_vector()
1220 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igb_alloc_q_vector()
1221 q_vector->itr_val = adapter->rx_itr_setting; in igb_alloc_q_vector()
1224 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igb_alloc_q_vector()
1225 q_vector->itr_val = adapter->tx_itr_setting; in igb_alloc_q_vector()
1230 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1231 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1234 ring->q_vector = q_vector; in igb_alloc_q_vector()
1237 igb_add_ring(ring, &q_vector->tx); in igb_alloc_q_vector()
1240 if (adapter->hw.mac.type == e1000_82575) in igb_alloc_q_vector()
1241 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); in igb_alloc_q_vector()
1244 ring->count = adapter->tx_ring_count; in igb_alloc_q_vector()
1245 ring->queue_index = txr_idx; in igb_alloc_q_vector()
1247 ring->cbs_enable = false; in igb_alloc_q_vector()
1248 ring->idleslope = 0; in igb_alloc_q_vector()
1249 ring->sendslope = 0; in igb_alloc_q_vector()
1250 ring->hicredit = 0; in igb_alloc_q_vector()
1251 ring->locredit = 0; in igb_alloc_q_vector()
1253 u64_stats_init(&ring->tx_syncp); in igb_alloc_q_vector()
1254 u64_stats_init(&ring->tx_syncp2); in igb_alloc_q_vector()
1257 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
1265 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1266 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1269 ring->q_vector = q_vector; in igb_alloc_q_vector()
1272 igb_add_ring(ring, &q_vector->rx); in igb_alloc_q_vector()
1275 if (adapter->hw.mac.type >= e1000_82576) in igb_alloc_q_vector()
1276 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); in igb_alloc_q_vector()
1279 * have the tag byte-swapped. in igb_alloc_q_vector()
1281 if (adapter->hw.mac.type >= e1000_i350) in igb_alloc_q_vector()
1282 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); in igb_alloc_q_vector()
1285 ring->count = adapter->rx_ring_count; in igb_alloc_q_vector()
1286 ring->queue_index = rxr_idx; in igb_alloc_q_vector()
1288 u64_stats_init(&ring->rx_syncp); in igb_alloc_q_vector()
1291 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1299 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1303 * return -ENOMEM.
1307 int q_vectors = adapter->num_q_vectors; in igb_alloc_q_vectors()
1308 int rxr_remaining = adapter->num_rx_queues; in igb_alloc_q_vectors()
1309 int txr_remaining = adapter->num_tx_queues; in igb_alloc_q_vectors()
1322 rxr_remaining--; in igb_alloc_q_vectors()
1328 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igb_alloc_q_vectors()
1329 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igb_alloc_q_vectors()
1338 rxr_remaining -= rqpv; in igb_alloc_q_vectors()
1339 txr_remaining -= tqpv; in igb_alloc_q_vectors()
1347 adapter->num_tx_queues = 0; in igb_alloc_q_vectors()
1348 adapter->num_rx_queues = 0; in igb_alloc_q_vectors()
1349 adapter->num_q_vectors = 0; in igb_alloc_q_vectors()
1351 while (v_idx--) in igb_alloc_q_vectors()
1354 return -ENOMEM; in igb_alloc_q_vectors()
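The two DIV_ROUND_UP() calls above are how the remaining Rx and Tx rings get spread across the q_vectors that are still unassigned. A stand-alone sketch of that arithmetic (the ring and vector counts are chosen only for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative only: shows how rqpv/tqpv spread leftover Rx/Tx rings
 * across the remaining q_vectors, the way the allocation loop above does.
 */
int main(void)
{
	int q_vectors = 3, rxr_remaining = 4, txr_remaining = 4;
	int rxr_idx = 0, txr_idx = 0;

	for (int v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d rx ring(s) from %d, %d tx ring(s) from %d\n",
		       v_idx, rqpv, rxr_idx, tqpv, txr_idx);

		rxr_remaining -= rqpv;
		rxr_idx += rqpv;
		txr_remaining -= tqpv;
		txr_idx += tqpv;
	}
	return 0;
}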
1358 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1366 struct pci_dev *pdev = adapter->pdev; in igb_init_interrupt_scheme()
1373 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); in igb_init_interrupt_scheme()
1387 * igb_request_irq - initialize interrupts
1395 struct net_device *netdev = adapter->netdev; in igb_request_irq()
1396 struct pci_dev *pdev = adapter->pdev; in igb_request_irq()
1399 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_request_irq()
1417 igb_assign_vector(adapter->q_vector[0], 0); in igb_request_irq()
1419 if (adapter->flags & IGB_FLAG_HAS_MSI) { in igb_request_irq()
1420 err = request_irq(pdev->irq, igb_intr_msi, 0, in igb_request_irq()
1421 netdev->name, adapter); in igb_request_irq()
1427 adapter->flags &= ~IGB_FLAG_HAS_MSI; in igb_request_irq()
1430 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, in igb_request_irq()
1431 netdev->name, adapter); in igb_request_irq()
1434 dev_err(&pdev->dev, "Error %d getting interrupt\n", in igb_request_irq()
1443 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_free_irq()
1446 free_irq(adapter->msix_entries[vector++].vector, adapter); in igb_free_irq()
1448 for (i = 0; i < adapter->num_q_vectors; i++) in igb_free_irq()
1449 free_irq(adapter->msix_entries[vector++].vector, in igb_free_irq()
1450 adapter->q_vector[i]); in igb_free_irq()
1452 free_irq(adapter->pdev->irq, adapter); in igb_free_irq()
1457 * igb_irq_disable - Mask off interrupt generation on the NIC
1462 struct e1000_hw *hw = &adapter->hw; in igb_irq_disable()
1468 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1471 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1472 wr32(E1000_EIMC, adapter->eims_enable_mask); in igb_irq_disable()
1474 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1480 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1483 for (i = 0; i < adapter->num_q_vectors; i++) in igb_irq_disable()
1484 synchronize_irq(adapter->msix_entries[i].vector); in igb_irq_disable()
1486 synchronize_irq(adapter->pdev->irq); in igb_irq_disable()
1491 * igb_irq_enable - Enable default interrupt generation settings
1496 struct e1000_hw *hw = &adapter->hw; in igb_irq_enable()
1498 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_enable()
1502 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); in igb_irq_enable()
1504 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); in igb_irq_enable()
1505 wr32(E1000_EIMS, adapter->eims_enable_mask); in igb_irq_enable()
1506 if (adapter->vfs_allocated_count) { in igb_irq_enable()
1521 struct e1000_hw *hw = &adapter->hw; in igb_update_mng_vlan()
1522 u16 pf_id = adapter->vfs_allocated_count; in igb_update_mng_vlan()
1523 u16 vid = adapter->hw.mng_cookie.vlan_id; in igb_update_mng_vlan()
1524 u16 old_vid = adapter->mng_vlan_id; in igb_update_mng_vlan()
1526 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in igb_update_mng_vlan()
1529 adapter->mng_vlan_id = vid; in igb_update_mng_vlan()
1531 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; in igb_update_mng_vlan()
1535 !test_bit(old_vid, adapter->active_vlans)) { in igb_update_mng_vlan()
1542 * igb_release_hw_control - release control of the h/w to f/w
1551 struct e1000_hw *hw = &adapter->hw; in igb_release_hw_control()
1561 * igb_get_hw_control - get control of the h/w from f/w
1570 struct e1000_hw *hw = &adapter->hw; in igb_get_hw_control()
1581 struct net_device *netdev = adapter->netdev; in enable_fqtss()
1582 struct e1000_hw *hw = &adapter->hw; in enable_fqtss()
1584 WARN_ON(hw->mac.type != e1000_i210); in enable_fqtss()
1587 adapter->flags |= IGB_FLAG_FQTSS; in enable_fqtss()
1589 adapter->flags &= ~IGB_FLAG_FQTSS; in enable_fqtss()
1592 schedule_work(&adapter->reset_task); in enable_fqtss()
1597 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; in is_fqtss_enabled()
1605 WARN_ON(hw->mac.type != e1000_i210); in set_tx_desc_fetch_prio()
1622 WARN_ON(hw->mac.type != e1000_i210); in set_queue_mode()
1639 for (i = 0; i < adapter->num_tx_queues; i++) { in is_any_cbs_enabled()
1640 if (adapter->tx_ring[i]->cbs_enable) in is_any_cbs_enabled()
1651 for (i = 0; i < adapter->num_tx_queues; i++) { in is_any_txtime_enabled()
1652 if (adapter->tx_ring[i]->launchtime_enable) in is_any_txtime_enabled()
1660 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
1671 struct net_device *netdev = adapter->netdev; in igb_config_tx_modes()
1672 struct e1000_hw *hw = &adapter->hw; in igb_config_tx_modes()
1677 WARN_ON(hw->mac.type != e1000_i210); in igb_config_tx_modes()
1679 ring = adapter->tx_ring[queue]; in igb_config_tx_modes()
1685 if (ring->cbs_enable || ring->launchtime_enable) { in igb_config_tx_modes()
1694 if (ring->cbs_enable || queue == 0) { in igb_config_tx_modes()
1704 if (queue == 0 && !ring->cbs_enable) { in igb_config_tx_modes()
1706 ring->idleslope = 1000000; in igb_config_tx_modes()
1707 ring->hicredit = ETH_FRAME_LEN; in igb_config_tx_modes()
1710 /* Always set data transfer arbitration to credit-based in igb_config_tx_modes()
1731 * Note that 'link-speed' is in Mbps. in igb_config_tx_modes()
1733 * value = BW * 0x7735 * 2 * link-speed in igb_config_tx_modes()
1734 * -------------- (E3) in igb_config_tx_modes()
1743 * ----------------- (E4) in igb_config_tx_modes()
1744 * link-speed * 1000 in igb_config_tx_modes()
1750 * value = idleSlope * 0x7735 * 2 * link-speed in igb_config_tx_modes()
1751 * ----------------- -------------- (E5) in igb_config_tx_modes()
1752 * link-speed * 1000 1000 in igb_config_tx_modes()
1754 * 'link-speed' is present in both sides of the fraction so in igb_config_tx_modes()
1758 * ----------------- (E6) in igb_config_tx_modes()
1775 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); in igb_config_tx_modes()
1783 0x80000000 + ring->hicredit * 0x7735); in igb_config_tx_modes()
1806 if (ring->launchtime_enable) { in igb_config_tx_modes()
1810 * - LaunchTime will be enabled for all SR queues. in igb_config_tx_modes()
1811 * - A fixed offset can be added relative to the launch in igb_config_tx_modes()
1838 ring->cbs_enable ? "enabled" : "disabled", in igb_config_tx_modes()
1839 ring->launchtime_enable ? "enabled" : "disabled", in igb_config_tx_modes()
1841 ring->idleslope, ring->sendslope, in igb_config_tx_modes()
1842 ring->hicredit, ring->locredit); in igb_config_tx_modes()
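Per the derivation sketched in the comments above, the credit value programmed for the idle slope reduces to DIV_ROUND_UP(idleslope * 61034, 1000000), with idleslope taken here to be in kbps (an assumption consistent with the link-speed * 1000 denominator in the comments). A quick arithmetic check with an illustrative slope:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

/* Illustration of the simplified result used above:
 * value = idleslope * 61034 / 1000000, rounded up,
 * where idleslope is the CBS idle slope (assumed kbps).
 */
int main(void)
{
	uint64_t idleslope = 200000;	/* e.g. reserve roughly 200 Mbps */
	uint64_t value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000ULL);

	printf("idleslope %llu kbps -> credit value %llu\n",
	       (unsigned long long)idleslope, (unsigned long long)value);
	/* 200000 * 61034 = 12206800000; /1e6 rounded up = 12207 */
	return 0;
}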
1850 if (queue < 0 || queue > adapter->num_tx_queues) in igb_save_txtime_params()
1851 return -EINVAL; in igb_save_txtime_params()
1853 ring = adapter->tx_ring[queue]; in igb_save_txtime_params()
1854 ring->launchtime_enable = enable; in igb_save_txtime_params()
1865 if (queue < 0 || queue > adapter->num_tx_queues) in igb_save_cbs_params()
1866 return -EINVAL; in igb_save_cbs_params()
1868 ring = adapter->tx_ring[queue]; in igb_save_cbs_params()
1870 ring->cbs_enable = enable; in igb_save_cbs_params()
1871 ring->idleslope = idleslope; in igb_save_cbs_params()
1872 ring->sendslope = sendslope; in igb_save_cbs_params()
1873 ring->hicredit = hicredit; in igb_save_cbs_params()
1874 ring->locredit = locredit; in igb_save_cbs_params()
1880 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1890 struct net_device *netdev = adapter->netdev; in igb_setup_tx_mode()
1891 struct e1000_hw *hw = &adapter->hw; in igb_setup_tx_mode()
1895 if (hw->mac.type != e1000_i210) in igb_setup_tx_mode()
 1933 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64. in igb_setup_tx_mode()
1935 val = (4096 - 1) / 64; in igb_setup_tx_mode()
1943 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? in igb_setup_tx_mode()
1944 adapter->num_tx_queues : I210_SR_QUEUES_NUM; in igb_setup_tx_mode()
1968 * igb_configure - configure the hardware for RX and TX
1973 struct net_device *netdev = adapter->netdev; in igb_configure()
1990 igb_rx_fifo_flush_82575(&adapter->hw); in igb_configure()
1996 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure()
1997 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
1998 if (ring->xsk_pool) in igb_configure()
1999 igb_alloc_rx_buffers_zc(ring, ring->xsk_pool, in igb_configure()
2007 * igb_power_up_link - Power up the phy/serdes link
2012 igb_reset_phy(&adapter->hw); in igb_power_up_link()
2014 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_up_link()
2015 igb_power_up_phy_copper(&adapter->hw); in igb_power_up_link()
2017 igb_power_up_serdes_link_82575(&adapter->hw); in igb_power_up_link()
2019 igb_setup_link(&adapter->hw); in igb_power_up_link()
2023 * igb_power_down_link - Power down the phy/serdes link
2028 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_down_link()
2029 igb_power_down_phy_copper_82575(&adapter->hw); in igb_power_down_link()
2031 igb_shutdown_serdes_link_82575(&adapter->hw); in igb_power_down_link()
2035 * igb_check_swap_media - Detect and switch function for Media Auto Sense
2040 struct e1000_hw *hw = &adapter->hw; in igb_check_swap_media()
2051 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_check_swap_media()
2054 } else if ((hw->phy.media_type != e1000_media_type_copper) && in igb_check_swap_media()
2057 if (adapter->copper_tries < 4) { in igb_check_swap_media()
2058 adapter->copper_tries++; in igb_check_swap_media()
2063 adapter->copper_tries = 0; in igb_check_swap_media()
2076 switch (hw->phy.media_type) { in igb_check_swap_media()
2078 netdev_info(adapter->netdev, in igb_check_swap_media()
2082 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
2083 adapter->copper_tries = 0; in igb_check_swap_media()
2087 netdev_info(adapter->netdev, in igb_check_swap_media()
2091 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
2095 netdev_err(adapter->netdev, in igb_check_swap_media()
2105 struct igb_q_vector *q_vector = adapter->q_vector[vector]; in igb_set_queue_napi()
2107 if (q_vector->rx.ring) in igb_set_queue_napi()
2108 netif_queue_set_napi(adapter->netdev, in igb_set_queue_napi()
2109 q_vector->rx.ring->queue_index, in igb_set_queue_napi()
2112 if (q_vector->tx.ring) in igb_set_queue_napi()
2113 netif_queue_set_napi(adapter->netdev, in igb_set_queue_napi()
2114 q_vector->tx.ring->queue_index, in igb_set_queue_napi()
2119 * igb_up - Open the interface and prepare it to handle traffic
2124 struct e1000_hw *hw = &adapter->hw; in igb_up()
2131 clear_bit(__IGB_DOWN, &adapter->state); in igb_up()
2133 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_up()
2134 napi = &adapter->q_vector[i]->napi; in igb_up()
2139 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_up()
2142 igb_assign_vector(adapter->q_vector[0], 0); in igb_up()
2150 if (adapter->vfs_allocated_count) { in igb_up()
2157 netif_tx_start_all_queues(adapter->netdev); in igb_up()
2160 hw->mac.get_link_status = 1; in igb_up()
2161 schedule_work(&adapter->watchdog_task); in igb_up()
2163 if ((adapter->flags & IGB_FLAG_EEE) && in igb_up()
2164 (!hw->dev_spec._82575.eee_disable)) in igb_up()
2165 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; in igb_up()
2172 struct net_device *netdev = adapter->netdev; in igb_down()
2173 struct e1000_hw *hw = &adapter->hw; in igb_down()
2180 set_bit(__IGB_DOWN, &adapter->state); in igb_down()
2202 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_down()
2204 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_down()
2205 if (adapter->q_vector[i]) { in igb_down()
2206 napi_synchronize(&adapter->q_vector[i]->napi); in igb_down()
2208 napi_disable(&adapter->q_vector[i]->napi); in igb_down()
2212 timer_delete_sync(&adapter->watchdog_timer); in igb_down()
2213 timer_delete_sync(&adapter->phy_info_timer); in igb_down()
2216 spin_lock(&adapter->stats64_lock); in igb_down()
2218 spin_unlock(&adapter->stats64_lock); in igb_down()
2220 adapter->link_speed = 0; in igb_down()
2221 adapter->link_duplex = 0; in igb_down()
2223 if (!pci_channel_offline(adapter->pdev)) in igb_down()
2227 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; in igb_down()
2240 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_reinit_locked()
2244 clear_bit(__IGB_RESETTING, &adapter->state); in igb_reinit_locked()
2247 /** igb_enable_mas - Media Autosense re-enable after swap
2253 struct e1000_hw *hw = &adapter->hw; in igb_enable_mas()
2257 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_enable_mas()
2268 * igb_set_i2c_bb - Init I2C interface
2292 struct pci_dev *pdev = adapter->pdev; in igb_reset()
2293 struct e1000_hw *hw = &adapter->hw; in igb_reset()
2294 struct e1000_mac_info *mac = &hw->mac; in igb_reset()
2295 struct e1000_fc_info *fc = &hw->fc; in igb_reset()
2301 switch (mac->type) { in igb_reset()
2320 if (mac->type == e1000_82575) { in igb_reset()
2340 min_tx_space = adapter->max_frame_size; in igb_reset()
2341 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; in igb_reset()
2345 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); in igb_reset()
2352 pba -= needed_tx_space; in igb_reset()
2371 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igb_reset()
2373 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igb_reset()
2375 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igb_reset()
2376 fc->low_water = fc->high_water - 16; in igb_reset()
2377 fc->pause_time = 0xFFFF; in igb_reset()
2378 fc->send_xon = 1; in igb_reset()
2379 fc->current_mode = fc->requested_mode; in igb_reset()
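The flow-control setup above derives the XOFF high-water mark from the Rx packet buffer size, rounds it to 16-byte granularity, and places the low-water mark 16 bytes below it. A worked example with illustrative sizes (the PBA and MAX_JUMBO_FRAME_SIZE values here are assumptions, not taken from the listing):

#include <stdio.h>

/* Illustrative numbers only: same arithmetic as the fc setup above.
 * pba is the Rx packet buffer size in KB; the jumbo frame size used
 * here (9728 bytes) is an assumed value for the example.
 */
int main(void)
{
	unsigned int pba = 40;			/* assumed Rx PBA, in KB */
	unsigned int max_frame_size = 1522;	/* 1500-byte MTU + L2 header + FCS */
	unsigned int max_jumbo_frame = 9728;	/* assumed MAX_JUMBO_FRAME_SIZE */
	unsigned int hwm, high_water, low_water;

	hwm = (pba << 10) - (max_frame_size + max_jumbo_frame);
	high_water = hwm & 0xFFFFFFF0;		/* 16-byte granularity */
	low_water = high_water - 16;

	printf("hwm=%u high_water=%u low_water=%u\n", hwm, high_water, low_water);
	/* 40960 - 11250 = 29710 -> high water 29696, low water 29680 */
	return 0;
}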
2382 if (adapter->vfs_allocated_count) { in igb_reset()
2385 for (i = 0 ; i < adapter->vfs_allocated_count; i++) in igb_reset()
2386 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_reset()
2397 hw->mac.ops.reset_hw(hw); in igb_reset()
2400 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_reset()
2402 adapter->ei.get_invariants(hw); in igb_reset()
2403 adapter->flags &= ~IGB_FLAG_MEDIA_RESET; in igb_reset()
2405 if ((mac->type == e1000_82575 || mac->type == e1000_i350) && in igb_reset()
2406 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_reset()
2409 if (hw->mac.ops.init_hw(hw)) in igb_reset()
2410 dev_err(&pdev->dev, "Hardware Error\n"); in igb_reset()
2414 __dev_uc_unsync(adapter->netdev, NULL); in igb_reset()
2422 if (!hw->mac.autoneg) in igb_reset()
2427 /* Re-initialize the thermal sensor on i350 devices. */ in igb_reset()
2428 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_reset()
2429 if (mac->type == e1000_i350 && hw->bus.func == 0) { in igb_reset()
2430 /* If present, re-initialize the external thermal sensor in igb_reset()
2433 if (adapter->ets) in igb_reset()
2435 mac->ops.init_thermal_sensor_thresh(hw); in igb_reset()
2439 /* Re-establish EEE setting */ in igb_reset()
2440 if (hw->phy.media_type == e1000_media_type_copper) { in igb_reset()
2441 switch (mac->type) { in igb_reset()
2454 if (!netif_running(adapter->netdev)) in igb_reset()
2462 /* Re-enable PTP, where applicable. */ in igb_reset()
2463 if (adapter->ptp_flags & IGB_PTP_ENABLED) in igb_reset()
2486 netdev_features_t changed = netdev->features ^ features; in igb_set_features()
2499 spin_lock(&adapter->nfc_lock); in igb_set_features()
2501 &adapter->nfc_filter_list, nfc_node) { in igb_set_features()
2503 hlist_del(&rule->nfc_node); in igb_set_features()
2506 spin_unlock(&adapter->nfc_lock); in igb_set_features()
2507 adapter->nfc_filter_count = 0; in igb_set_features()
2510 netdev->features = features; in igb_set_features()
2529 int vfn = adapter->vfs_allocated_count; in igb_ndo_fdb_add()
2532 return -ENOMEM; in igb_ndo_fdb_add()
2557 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igb_features_check()
2568 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igb_features_check()
2590 struct e1000_hw *hw = &adapter->hw; in igb_offload_cbs()
2594 if (hw->mac.type != e1000_i210) in igb_offload_cbs()
2595 return -EOPNOTSUPP; in igb_offload_cbs()
2598 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_cbs()
2599 return -EINVAL; in igb_offload_cbs()
2601 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, in igb_offload_cbs()
2602 qopt->idleslope, qopt->sendslope, in igb_offload_cbs()
2603 qopt->hicredit, qopt->locredit); in igb_offload_cbs()
2607 igb_offload_apply(adapter, qopt->queue); in igb_offload_cbs()
2621 struct flow_dissector *dissector = rule->match.dissector; in igb_parse_cls_flower()
2622 struct netlink_ext_ack *extack = f->common.extack; in igb_parse_cls_flower()
2624 if (dissector->used_keys & in igb_parse_cls_flower()
2631 return -EOPNOTSUPP; in igb_parse_cls_flower()
2635 return -EOPNOTSUPP; in igb_parse_cls_flower()
2641 if (!is_zero_ether_addr(match.mask->dst)) { in igb_parse_cls_flower()
2642 if (!is_broadcast_ether_addr(match.mask->dst)) { in igb_parse_cls_flower()
2644 return -EINVAL; in igb_parse_cls_flower()
2647 input->filter.match_flags |= in igb_parse_cls_flower()
2649 ether_addr_copy(input->filter.dst_addr, match.key->dst); in igb_parse_cls_flower()
2652 if (!is_zero_ether_addr(match.mask->src)) { in igb_parse_cls_flower()
2653 if (!is_broadcast_ether_addr(match.mask->src)) { in igb_parse_cls_flower()
2655 return -EINVAL; in igb_parse_cls_flower()
2658 input->filter.match_flags |= in igb_parse_cls_flower()
2660 ether_addr_copy(input->filter.src_addr, match.key->src); in igb_parse_cls_flower()
2668 if (match.mask->n_proto) { in igb_parse_cls_flower()
2669 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { in igb_parse_cls_flower()
2671 return -EINVAL; in igb_parse_cls_flower()
2674 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; in igb_parse_cls_flower()
2675 input->filter.etype = match.key->n_proto; in igb_parse_cls_flower()
2683 if (match.mask->vlan_priority) { in igb_parse_cls_flower()
2684 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { in igb_parse_cls_flower()
2686 return -EINVAL; in igb_parse_cls_flower()
2689 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; in igb_parse_cls_flower()
2690 input->filter.vlan_tci = in igb_parse_cls_flower()
2691 (__force __be16)match.key->vlan_priority; in igb_parse_cls_flower()
2695 input->action = traffic_class; in igb_parse_cls_flower()
2696 input->cookie = f->cookie; in igb_parse_cls_flower()
2704 struct netlink_ext_ack *extack = cls_flower->common.extack; in igb_configure_clsflower()
2708 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); in igb_configure_clsflower()
2711 return -EINVAL; in igb_configure_clsflower()
2716 return -ENOMEM; in igb_configure_clsflower()
2722 spin_lock(&adapter->nfc_lock); in igb_configure_clsflower()
2724 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { in igb_configure_clsflower()
2725 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { in igb_configure_clsflower()
2726 err = -EEXIST; in igb_configure_clsflower()
2733 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { in igb_configure_clsflower()
2734 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { in igb_configure_clsflower()
2735 err = -EEXIST; in igb_configure_clsflower()
2748 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); in igb_configure_clsflower()
2750 spin_unlock(&adapter->nfc_lock); in igb_configure_clsflower()
2755 spin_unlock(&adapter->nfc_lock); in igb_configure_clsflower()
2769 spin_lock(&adapter->nfc_lock); in igb_delete_clsflower()
2771 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) in igb_delete_clsflower()
2772 if (filter->cookie == cls_flower->cookie) in igb_delete_clsflower()
2776 err = -ENOENT; in igb_delete_clsflower()
2784 hlist_del(&filter->nfc_node); in igb_delete_clsflower()
2788 spin_unlock(&adapter->nfc_lock); in igb_delete_clsflower()
2796 switch (cls_flower->command) { in igb_setup_tc_cls_flower()
2802 return -EOPNOTSUPP; in igb_setup_tc_cls_flower()
2804 return -EOPNOTSUPP; in igb_setup_tc_cls_flower()
2813 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) in igb_setup_tc_block_cb()
2814 return -EOPNOTSUPP; in igb_setup_tc_block_cb()
2821 return -EOPNOTSUPP; in igb_setup_tc_block_cb()
2828 struct e1000_hw *hw = &adapter->hw; in igb_offload_txtime()
2832 if (hw->mac.type != e1000_i210) in igb_offload_txtime()
2833 return -EOPNOTSUPP; in igb_offload_txtime()
2836 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_txtime()
2837 return -EINVAL; in igb_offload_txtime()
2839 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); in igb_offload_txtime()
2843 igb_offload_apply(adapter, qopt->queue); in igb_offload_txtime()
2851 switch (base->type) { in igb_tc_query_caps()
2853 struct tc_taprio_caps *caps = base->caps; in igb_tc_query_caps()
2855 caps->broken_mqprio = true; in igb_tc_query_caps()
2860 return -EOPNOTSUPP; in igb_tc_query_caps()
2886 return -EOPNOTSUPP; in igb_setup_tc()
2892 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; in igb_xdp_setup()
2894 struct bpf_prog *prog = bpf->prog, *old_prog; in igb_xdp_setup()
2899 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_xdp_setup()
2900 struct igb_ring *ring = adapter->rx_ring[i]; in igb_xdp_setup()
2903 NL_SET_ERR_MSG_MOD(bpf->extack, in igb_xdp_setup()
2907 return -EINVAL; in igb_xdp_setup()
2911 old_prog = xchg(&adapter->xdp_prog, prog); in igb_xdp_setup()
2918 for (i = 0; i < adapter->num_rx_queues; i++) in igb_xdp_setup()
2919 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in igb_xdp_setup()
2920 adapter->xdp_prog); in igb_xdp_setup()
2946 switch (xdp->command) { in igb_xdp()
2950 return igb_xsk_pool_setup(adapter, xdp->xsk.pool, in igb_xdp()
2951 xdp->xsk.queue_id); in igb_xdp()
2953 return -EINVAL; in igb_xdp()
 2968 /* During program transitions it's possible adapter->xdp_prog is assigned in igb_xdp_xmit_back()
2996 if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) in igb_xdp_xmit()
2997 return -ENETDOWN; in igb_xdp_xmit()
3000 return -EINVAL; in igb_xdp_xmit()
 3002 /* During program transitions it's possible adapter->xdp_prog is assigned in igb_xdp_xmit()
3008 return -ENXIO; in igb_xdp_xmit()
3010 if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) in igb_xdp_xmit()
3011 return -ENXIO; in igb_xdp_xmit()
3069 * igb_set_fw_version - Configure version string for ethtool
3074 struct e1000_hw *hw = &adapter->hw; in igb_set_fw_version()
3079 switch (hw->mac.type) { in igb_set_fw_version()
3083 snprintf(adapter->fw_version, in igb_set_fw_version()
3084 sizeof(adapter->fw_version), in igb_set_fw_version()
3085 "%2d.%2d-%d", in igb_set_fw_version()
3094 snprintf(adapter->fw_version, in igb_set_fw_version()
3095 sizeof(adapter->fw_version), in igb_set_fw_version()
3101 snprintf(adapter->fw_version, in igb_set_fw_version()
3102 sizeof(adapter->fw_version), in igb_set_fw_version()
3106 snprintf(adapter->fw_version, in igb_set_fw_version()
3107 sizeof(adapter->fw_version), in igb_set_fw_version()
3116 * igb_init_mas - init Media Autosense feature if enabled in the NVM
3122 struct e1000_hw *hw = &adapter->hw; in igb_init_mas()
3125 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); in igb_init_mas()
3126 switch (hw->bus.func) { in igb_init_mas()
3129 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3130 netdev_info(adapter->netdev, in igb_init_mas()
3132 hw->bus.func); in igb_init_mas()
3137 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3138 netdev_info(adapter->netdev, in igb_init_mas()
3140 hw->bus.func); in igb_init_mas()
3145 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3146 netdev_info(adapter->netdev, in igb_init_mas()
3148 hw->bus.func); in igb_init_mas()
3153 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
3154 netdev_info(adapter->netdev, in igb_init_mas()
3156 hw->bus.func); in igb_init_mas()
3161 netdev_err(adapter->netdev, in igb_init_mas()
3168 * igb_init_i2c - Init I2C interface
3176 if (adapter->hw.mac.type != e1000_i350) in igb_init_i2c()
3183 adapter->i2c_adap.owner = THIS_MODULE; in igb_init_i2c()
3184 adapter->i2c_algo = igb_i2c_algo; in igb_init_i2c()
3185 adapter->i2c_algo.data = adapter; in igb_init_i2c()
3186 adapter->i2c_adap.algo_data = &adapter->i2c_algo; in igb_init_i2c()
3187 adapter->i2c_adap.dev.parent = &adapter->pdev->dev; in igb_init_i2c()
3188 strscpy(adapter->i2c_adap.name, "igb BB", in igb_init_i2c()
3189 sizeof(adapter->i2c_adap.name)); in igb_init_i2c()
3190 status = i2c_bit_add_bus(&adapter->i2c_adap); in igb_init_i2c()
3195 * igb_probe - Device Initialization Routine
3213 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; in igb_probe()
3218 * the PCIe SR-IOV capability. in igb_probe()
3220 if (pdev->is_virtfn) { in igb_probe()
3222 pci_name(pdev), pdev->vendor, pdev->device); in igb_probe()
3223 return -EINVAL; in igb_probe()
3230 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igb_probe()
3232 dev_err(&pdev->dev, in igb_probe()
3244 err = -ENOMEM; in igb_probe()
3250 SET_NETDEV_DEV(netdev, &pdev->dev); in igb_probe()
3254 adapter->netdev = netdev; in igb_probe()
3255 adapter->pdev = pdev; in igb_probe()
3256 hw = &adapter->hw; in igb_probe()
3257 hw->back = adapter; in igb_probe()
3258 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igb_probe()
3260 err = -EIO; in igb_probe()
3261 adapter->io_addr = pci_iomap(pdev, 0, 0); in igb_probe()
3262 if (!adapter->io_addr) in igb_probe()
3264 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ in igb_probe()
3265 hw->hw_addr = adapter->io_addr; in igb_probe()
3267 netdev->netdev_ops = &igb_netdev_ops; in igb_probe()
3269 netdev->watchdog_timeo = 5 * HZ; in igb_probe()
3271 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in igb_probe()
3273 netdev->mem_start = pci_resource_start(pdev, 0); in igb_probe()
3274 netdev->mem_end = pci_resource_end(pdev, 0); in igb_probe()
3277 hw->vendor_id = pdev->vendor; in igb_probe()
3278 hw->device_id = pdev->device; in igb_probe()
3279 hw->revision_id = pdev->revision; in igb_probe()
3280 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igb_probe()
3281 hw->subsystem_device_id = pdev->subsystem_device; in igb_probe()
3284 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igb_probe()
3285 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igb_probe()
3286 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); in igb_probe()
3287 /* Initialize skew-specific constants */ in igb_probe()
3288 err = ei->get_invariants(hw); in igb_probe()
3299 hw->phy.autoneg_wait_to_complete = false; in igb_probe()
3302 if (hw->phy.media_type == e1000_media_type_copper) { in igb_probe()
3303 hw->phy.mdix = AUTO_ALL_MODES; in igb_probe()
3304 hw->phy.disable_polarity_correction = false; in igb_probe()
3305 hw->phy.ms_type = e1000_ms_hw_default; in igb_probe()
3309 dev_info(&pdev->dev, in igb_probe()
3316 netdev->features |= NETIF_F_SG | in igb_probe()
3323 if (hw->mac.type >= e1000_82576) in igb_probe()
3324 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; in igb_probe()
3326 if (hw->mac.type >= e1000_i350) in igb_probe()
3327 netdev->features |= NETIF_F_HW_TC; in igb_probe()
3336 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; in igb_probe()
3337 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; in igb_probe()
3340 netdev->hw_features |= netdev->features | in igb_probe()
3345 if (hw->mac.type >= e1000_i350) in igb_probe()
3346 netdev->hw_features |= NETIF_F_NTUPLE; in igb_probe()
3348 netdev->features |= NETIF_F_HIGHDMA; in igb_probe()
3350 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igb_probe()
3351 netdev->mpls_features |= NETIF_F_HW_CSUM; in igb_probe()
3352 netdev->hw_enc_features |= netdev->vlan_features; in igb_probe()
3355 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in igb_probe()
3359 netdev->priv_flags |= IFF_SUPP_NOFCS; in igb_probe()
3361 netdev->priv_flags |= IFF_UNICAST_FLT; in igb_probe()
3362 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igb_probe()
3365 /* MTU range: 68 - 9216 */ in igb_probe()
3366 netdev->min_mtu = ETH_MIN_MTU; in igb_probe()
3367 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igb_probe()
3369 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); in igb_probe()
3374 hw->mac.ops.reset_hw(hw); in igb_probe()
3379 switch (hw->mac.type) { in igb_probe()
3383 if (hw->nvm.ops.validate(hw) < 0) { in igb_probe()
3384 dev_err(&pdev->dev, in igb_probe()
3386 err = -EIO; in igb_probe()
3392 if (hw->nvm.ops.validate(hw) < 0) { in igb_probe()
3393 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igb_probe()
3394 err = -EIO; in igb_probe()
3400 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igb_probe()
3402 if (hw->mac.ops.read_mac_addr(hw)) in igb_probe()
3403 dev_err(&pdev->dev, "NVM Read Error\n"); in igb_probe()
3406 eth_hw_addr_set(netdev, hw->mac.addr); in igb_probe()
3408 if (!is_valid_ether_addr(netdev->dev_addr)) { in igb_probe()
3409 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igb_probe()
3410 err = -EIO; in igb_probe()
3416 /* get firmware version for ethtool -i */ in igb_probe()
3420 if (hw->mac.type == e1000_i210) { in igb_probe()
3425 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); in igb_probe()
3426 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); in igb_probe()
3428 INIT_WORK(&adapter->reset_task, igb_reset_task); in igb_probe()
3429 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); in igb_probe()
3431 /* Initialize link properties that are user-changeable */ in igb_probe()
3432 adapter->fc_autoneg = true; in igb_probe()
3433 hw->mac.autoneg = true; in igb_probe()
3434 hw->phy.autoneg_advertised = 0x2f; in igb_probe()
3436 hw->fc.requested_mode = e1000_fc_default; in igb_probe()
3437 hw->fc.current_mode = e1000_fc_default; in igb_probe()
3442 if (hw->bus.func == 0) in igb_probe()
3443 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3445 /* Check the NVM for wake support on non-port A ports */ in igb_probe()
3446 if (hw->mac.type >= e1000_82580) in igb_probe()
3447 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + in igb_probe()
3448 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, in igb_probe()
3450 else if (hw->bus.func == 1) in igb_probe()
3451 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); in igb_probe()
3454 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3460 switch (pdev->device) { in igb_probe()
3462 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3471 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3477 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3479 adapter->flags |= IGB_FLAG_QUAD_PORT_A; in igb_probe()
3486 if (!device_can_wakeup(&adapter->pdev->dev)) in igb_probe()
3487 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3491 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) in igb_probe()
3492 adapter->wol |= E1000_WUFC_MAG; in igb_probe()
3495 if ((hw->mac.type == e1000_i350) && in igb_probe()
3496 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { in igb_probe()
3497 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3498 adapter->wol = 0; in igb_probe()
3504 if (((hw->mac.type == e1000_i350) || in igb_probe()
3505 (hw->mac.type == e1000_i354)) && in igb_probe()
3506 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { in igb_probe()
3507 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3508 adapter->wol = 0; in igb_probe()
3510 if (hw->mac.type == e1000_i350) { in igb_probe()
3511 if (((pdev->subsystem_device == 0x5001) || in igb_probe()
3512 (pdev->subsystem_device == 0x5002)) && in igb_probe()
3513 (hw->bus.func == 0)) { in igb_probe()
3514 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3515 adapter->wol = 0; in igb_probe()
3517 if (pdev->subsystem_device == 0x1F52) in igb_probe()
3518 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
3521 device_set_wakeup_enable(&adapter->pdev->dev, in igb_probe()
3522 adapter->flags & IGB_FLAG_WOL_SUPPORTED); in igb_probe()
3530 dev_err(&pdev->dev, "failed to init i2c interface\n"); in igb_probe()
3539 strcpy(netdev->name, "eth%d"); in igb_probe()
3548 if (dca_add_requester(&pdev->dev) == 0) { in igb_probe()
3549 adapter->flags |= IGB_FLAG_DCA_ENABLED; in igb_probe()
3550 dev_info(&pdev->dev, "DCA enabled\n"); in igb_probe()
3557 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { in igb_probe()
3563 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); in igb_probe()
3565 adapter->ets = true; in igb_probe()
3567 adapter->ets = false; in igb_probe()
3571 if (adapter->ets) in igb_probe()
3573 hw->mac.ops.init_thermal_sensor_thresh(hw); in igb_probe()
3575 dev_err(&pdev->dev, in igb_probe()
3578 adapter->ets = false; in igb_probe()
3582 adapter->ei = *ei; in igb_probe()
3583 if (hw->dev_spec._82575.mas_capable) in igb_probe()
3589 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); in igb_probe()
3591 if (hw->mac.type != e1000_i354) { in igb_probe()
3592 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", in igb_probe()
3593 netdev->name, in igb_probe()
3594 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : in igb_probe()
3595 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : in igb_probe()
3597 ((hw->bus.width == e1000_bus_width_pcie_x4) ? in igb_probe()
3599 (hw->bus.width == e1000_bus_width_pcie_x2) ? in igb_probe()
3601 (hw->bus.width == e1000_bus_width_pcie_x1) ? in igb_probe()
3602 "Width x1" : "unknown"), netdev->dev_addr); in igb_probe()
3605 if ((hw->mac.type == e1000_82576 && in igb_probe()
3607 (hw->mac.type >= e1000_i210 || in igb_probe()
3612 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; in igb_probe()
3617 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); in igb_probe()
3618 dev_info(&pdev->dev, in igb_probe()
3620 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : in igb_probe()
3621 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", in igb_probe()
3622 adapter->num_rx_queues, adapter->num_tx_queues); in igb_probe()
3623 if (hw->phy.media_type == e1000_media_type_copper) { in igb_probe()
3624 switch (hw->mac.type) { in igb_probe()
3631 (!hw->dev_spec._82575.eee_disable)) { in igb_probe()
3632 adapter->eee_advert = in igb_probe()
3634 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
3642 (!hw->dev_spec._82575.eee_disable)) { in igb_probe()
3643 adapter->eee_advert = in igb_probe()
3645 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
3654 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igb_probe()
3656 pm_runtime_put_noidle(&pdev->dev); in igb_probe()
3661 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); in igb_probe()
3666 if (hw->flash_address) in igb_probe()
3667 iounmap(hw->flash_address); in igb_probe()
3669 kfree(adapter->mac_table); in igb_probe()
3670 kfree(adapter->shadow_vfta); in igb_probe()
3675 pci_iounmap(pdev, adapter->io_addr); in igb_probe()
3691 struct pci_dev *pdev = adapter->pdev; in igb_sriov_reinit()
3706 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_sriov_reinit()
3707 return -ENOMEM; in igb_sriov_reinit()
3722 struct e1000_hw *hw = &adapter->hw; in igb_disable_sriov()
3726 if (adapter->vf_data) { in igb_disable_sriov()
3729 dev_warn(&pdev->dev, in igb_disable_sriov()
3730 …"Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\… in igb_disable_sriov()
3731 return -EPERM; in igb_disable_sriov()
3736 spin_lock_irqsave(&adapter->vfs_lock, flags); in igb_disable_sriov()
3737 kfree(adapter->vf_mac_list); in igb_disable_sriov()
3738 adapter->vf_mac_list = NULL; in igb_disable_sriov()
3739 kfree(adapter->vf_data); in igb_disable_sriov()
3740 adapter->vf_data = NULL; in igb_disable_sriov()
3741 adapter->vfs_allocated_count = 0; in igb_disable_sriov()
3742 spin_unlock_irqrestore(&adapter->vfs_lock, flags); in igb_disable_sriov()
3746 dev_info(&pdev->dev, "IOV Disabled\n"); in igb_disable_sriov()
3748 /* Re-enable DMA Coalescing flag since IOV is turned off */ in igb_disable_sriov()
3749 adapter->flags |= IGB_FLAG_DMAC; in igb_disable_sriov()
3764 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { in igb_enable_sriov()
3765 err = -EPERM; in igb_enable_sriov()
3772 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", in igb_enable_sriov()
3774 adapter->vfs_allocated_count = old_vfs; in igb_enable_sriov()
3776 adapter->vfs_allocated_count = num_vfs; in igb_enable_sriov()
3778 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, in igb_enable_sriov()
3781 /* if allocation failed then we do not support SR-IOV */ in igb_enable_sriov()
3782 if (!adapter->vf_data) { in igb_enable_sriov()
3783 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
3784 err = -ENOMEM; in igb_enable_sriov()
3793 num_vf_mac_filters = adapter->hw.mac.rar_entry_count - in igb_enable_sriov()
3795 adapter->vfs_allocated_count); in igb_enable_sriov()
3797 adapter->vf_mac_list = kcalloc(num_vf_mac_filters, in igb_enable_sriov()
3801 mac_list = adapter->vf_mac_list; in igb_enable_sriov()
3802 INIT_LIST_HEAD(&adapter->vf_macs.l); in igb_enable_sriov()
3804 if (adapter->vf_mac_list) { in igb_enable_sriov()
3807 mac_list->vf = -1; in igb_enable_sriov()
3808 mac_list->free = true; in igb_enable_sriov()
3809 list_add(&mac_list->l, &adapter->vf_macs.l); in igb_enable_sriov()
3816 dev_err(&pdev->dev, in igb_enable_sriov()
3820 dev_info(&pdev->dev, "%d VFs allocated\n", in igb_enable_sriov()
3821 adapter->vfs_allocated_count); in igb_enable_sriov()
3822 for (i = 0; i < adapter->vfs_allocated_count; i++) in igb_enable_sriov()
3826 adapter->flags &= ~IGB_FLAG_DMAC; in igb_enable_sriov()
3836 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); in igb_enable_sriov()
3844 kfree(adapter->vf_mac_list); in igb_enable_sriov()
3845 adapter->vf_mac_list = NULL; in igb_enable_sriov()
3846 kfree(adapter->vf_data); in igb_enable_sriov()
3847 adapter->vf_data = NULL; in igb_enable_sriov()
3848 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
3855 * igb_remove_i2c - Cleanup I2C interface
3860 /* free the adapter bus structure */ in igb_remove_i2c()
3861 i2c_del_adapter(&adapter->i2c_adap); in igb_remove_i2c()
3865 * igb_remove - Device Removal Routine
3870 * Hot-Plug event, or because the driver is going to be removed from
3877 struct e1000_hw *hw = &adapter->hw; in igb_remove()
3879 pm_runtime_get_noresume(&pdev->dev); in igb_remove()
3888 set_bit(__IGB_DOWN, &adapter->state); in igb_remove()
3889 timer_delete_sync(&adapter->watchdog_timer); in igb_remove()
3890 timer_delete_sync(&adapter->phy_info_timer); in igb_remove()
3892 cancel_work_sync(&adapter->reset_task); in igb_remove()
3893 cancel_work_sync(&adapter->watchdog_task); in igb_remove()
3896 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in igb_remove()
3897 dev_info(&pdev->dev, "DCA disabled\n"); in igb_remove()
3898 dca_remove_requester(&pdev->dev); in igb_remove()
3899 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in igb_remove()
3917 pci_iounmap(pdev, adapter->io_addr); in igb_remove()
3918 if (hw->flash_address) in igb_remove()
3919 iounmap(hw->flash_address); in igb_remove()
3922 kfree(adapter->mac_table); in igb_remove()
3923 kfree(adapter->shadow_vfta); in igb_remove()
3930 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3935 * more expensive time wise to disable SR-IOV than it is to allocate and free
3941 struct pci_dev *pdev = adapter->pdev; in igb_probe_vfs()
3942 struct e1000_hw *hw = &adapter->hw; in igb_probe_vfs()
3945 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || in igb_probe_vfs()
3946 (hw->mac.type == e1000_82580)) in igb_probe_vfs()
3964 struct e1000_hw *hw = &adapter->hw; in igb_get_max_rss_queues()
3968 switch (hw->mac.type) { in igb_get_max_rss_queues()
3977 /* I350 cannot do RSS and SR-IOV at the same time */ in igb_get_max_rss_queues()
3978 if (!!adapter->vfs_allocated_count) { in igb_get_max_rss_queues()
3984 if (!!adapter->vfs_allocated_count) { in igb_get_max_rss_queues()
4004 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igb_init_queue_configuration()
4012 struct e1000_hw *hw = &adapter->hw; in igb_set_flag_queue_pairs()
4015 switch (hw->mac.type) { in igb_set_flag_queue_pairs()
4029 if (adapter->rss_queues > (max_rss_queues / 2)) in igb_set_flag_queue_pairs()
4030 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
4032 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
4038 * igb_sw_init - Initialize general software structures (struct igb_adapter)
4047 struct e1000_hw *hw = &adapter->hw; in igb_sw_init()
4048 struct net_device *netdev = adapter->netdev; in igb_sw_init()
4049 struct pci_dev *pdev = adapter->pdev; in igb_sw_init()
4051 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igb_sw_init()
4054 adapter->tx_ring_count = IGB_DEFAULT_TXD; in igb_sw_init()
4055 adapter->rx_ring_count = IGB_DEFAULT_RXD; in igb_sw_init()
4058 adapter->rx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
4059 adapter->tx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
4062 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; in igb_sw_init()
4064 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; in igb_sw_init()
4065 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igb_sw_init()
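/* Note (added for clarity; values taken from igb.h and may vary by kernel
 * version): IGB_DEFAULT_TXD and IGB_DEFAULT_RXD are 256 descriptors per
 * ring, and an ITR setting of IGB_DEFAULT_ITR (3) selects dynamic
 * (adaptive) interrupt moderation rather than a fixed interval.
 */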
4067 spin_lock_init(&adapter->nfc_lock); in igb_sw_init()
4068 spin_lock_init(&adapter->stats64_lock); in igb_sw_init()
4071 spin_lock_init(&adapter->vfs_lock); in igb_sw_init()
4073 switch (hw->mac.type) { in igb_sw_init()
4077 dev_warn(&pdev->dev, in igb_sw_init()
4079 max_vfs = adapter->vfs_allocated_count = 7; in igb_sw_init()
4081 adapter->vfs_allocated_count = max_vfs; in igb_sw_init()
4082 if (adapter->vfs_allocated_count) in igb_sw_init()
4083 dev_warn(&pdev->dev, in igb_sw_init()
4084 …"Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface… in igb_sw_init()
4091 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igb_sw_init()
4092 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_sw_init()
4094 adapter->mac_table = kcalloc(hw->mac.rar_entry_count, in igb_sw_init()
4097 if (!adapter->mac_table) in igb_sw_init()
4098 return -ENOMEM; in igb_sw_init()
4105 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), in igb_sw_init()
4107 if (!adapter->shadow_vfta) in igb_sw_init()
4108 return -ENOMEM; in igb_sw_init()
4112 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_sw_init()
4113 return -ENOMEM; in igb_sw_init()
4119 if (hw->mac.type >= e1000_i350) in igb_sw_init()
4120 adapter->flags &= ~IGB_FLAG_DMAC; in igb_sw_init()
4122 set_bit(__IGB_DOWN, &adapter->state); in igb_sw_init()
4127 * __igb_open - Called when a network interface is made active
4142 struct pci_dev *pdev = adapter->pdev; in __igb_open()
4143 struct e1000_hw *hw = &adapter->hw; in __igb_open()
4149 if (test_bit(__IGB_TESTING, &adapter->state)) { in __igb_open()
4151 return -EBUSY; in __igb_open()
4155 pm_runtime_get_sync(&pdev->dev); in __igb_open()
4183 err = netif_set_real_num_tx_queues(adapter->netdev, in __igb_open()
4184 adapter->num_tx_queues); in __igb_open()
4188 err = netif_set_real_num_rx_queues(adapter->netdev, in __igb_open()
4189 adapter->num_rx_queues); in __igb_open()
4194 clear_bit(__IGB_DOWN, &adapter->state); in __igb_open()
4196 for (i = 0; i < adapter->num_q_vectors; i++) { in __igb_open()
4197 napi = &adapter->q_vector[i]->napi; in __igb_open()
4209 if (adapter->vfs_allocated_count) { in __igb_open()
4219 pm_runtime_put(&pdev->dev); in __igb_open()
4222 hw->mac.get_link_status = 1; in __igb_open()
4223 schedule_work(&adapter->watchdog_task); in __igb_open()
4238 pm_runtime_put(&pdev->dev); in __igb_open()
4249 * __igb_close - Disables a network interface
4255 * The close entry point is called when an interface is de-activated
4263 struct pci_dev *pdev = adapter->pdev; in __igb_close()
4265 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); in __igb_close()
4268 pm_runtime_get_sync(&pdev->dev); in __igb_close()
4277 pm_runtime_put_sync(&pdev->dev); in __igb_close()
4283 if (netif_device_present(netdev) || netdev->dismantle) in igb_close()
4289 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
4296 struct device *dev = tx_ring->dev; in igb_setup_tx_resources()
4299 size = sizeof(struct igb_tx_buffer) * tx_ring->count; in igb_setup_tx_resources()
4301 tx_ring->tx_buffer_info = vmalloc(size); in igb_setup_tx_resources()
4302 if (!tx_ring->tx_buffer_info) in igb_setup_tx_resources()
4306 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igb_setup_tx_resources()
4307 tx_ring->size = ALIGN(tx_ring->size, 4096); in igb_setup_tx_resources()
4309 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igb_setup_tx_resources()
4310 &tx_ring->dma, GFP_KERNEL); in igb_setup_tx_resources()
4311 if (!tx_ring->desc) in igb_setup_tx_resources()
4314 tx_ring->next_to_use = 0; in igb_setup_tx_resources()
4315 tx_ring->next_to_clean = 0; in igb_setup_tx_resources()
4320 vfree(tx_ring->tx_buffer_info); in igb_setup_tx_resources()
4321 tx_ring->tx_buffer_info = NULL; in igb_setup_tx_resources()
4323 return -ENOMEM; in igb_setup_tx_resources()
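/* Sizing sketch (illustrative, assuming the default 256-descriptor ring):
 * each advanced Tx descriptor is 16 bytes, so tx_ring->size starts at
 * 256 * 16 = 4096 bytes and ALIGN(..., 4096) leaves it unchanged; only
 * ring counts that are not multiples of 256 are actually rounded up to the
 * next 4 KB boundary before the coherent DMA allocation above.
 */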
4327 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
4335 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_tx_resources()
4338 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_setup_all_tx_resources()
4339 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
4341 dev_err(&pdev->dev, in igb_setup_all_tx_resources()
4343 for (i--; i >= 0; i--) in igb_setup_all_tx_resources()
4344 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
4353 * igb_setup_tctl - configure the transmit control registers
4358 struct e1000_hw *hw = &adapter->hw; in igb_setup_tctl()
4379 * igb_configure_tx_ring - Configure transmit ring after Reset
4388 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx_ring()
4390 u64 tdba = ring->dma; in igb_configure_tx_ring()
4391 int reg_idx = ring->reg_idx; in igb_configure_tx_ring()
4393 WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); in igb_configure_tx_ring()
4396 ring->count * sizeof(union e1000_adv_tx_desc)); in igb_configure_tx_ring()
4401 ring->tail = adapter->io_addr + E1000_TDT(reg_idx); in igb_configure_tx_ring()
4403 writel(0, ring->tail); in igb_configure_tx_ring()
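/* ring->tail maps the TDT (transmit descriptor tail) register for this
 * queue; zeroing it leaves the ring empty, and igb_tx_map() /
 * igb_xmit_xdp_ring() later write the new tail index to this address to
 * hand freshly built descriptors to hardware.
 */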
4410 memset(ring->tx_buffer_info, 0, in igb_configure_tx_ring()
4411 sizeof(struct igb_tx_buffer) * ring->count); in igb_configure_tx_ring()
4418 * igb_configure_tx - Configure transmit Unit after Reset
4425 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx()
4429 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
4430 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); in igb_configure_tx()
4435 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
4436 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
4440 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
4447 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); in igb_setup_rx_resources()
4448 struct device *dev = rx_ring->dev; in igb_setup_rx_resources()
4451 /* XDP RX-queue info */ in igb_setup_rx_resources()
4452 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igb_setup_rx_resources()
4453 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igb_setup_rx_resources()
4454 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in igb_setup_rx_resources()
4455 rx_ring->queue_index, 0); in igb_setup_rx_resources()
4458 rx_ring->queue_index); in igb_setup_rx_resources()
4462 size = sizeof(struct igb_rx_buffer) * rx_ring->count; in igb_setup_rx_resources()
4464 rx_ring->rx_buffer_info = vmalloc(size); in igb_setup_rx_resources()
4465 if (!rx_ring->rx_buffer_info) in igb_setup_rx_resources()
4469 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); in igb_setup_rx_resources()
4470 rx_ring->size = ALIGN(rx_ring->size, 4096); in igb_setup_rx_resources()
4472 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igb_setup_rx_resources()
4473 &rx_ring->dma, GFP_KERNEL); in igb_setup_rx_resources()
4474 if (!rx_ring->desc) in igb_setup_rx_resources()
4477 rx_ring->next_to_alloc = 0; in igb_setup_rx_resources()
4478 rx_ring->next_to_clean = 0; in igb_setup_rx_resources()
4479 rx_ring->next_to_use = 0; in igb_setup_rx_resources()
4481 rx_ring->xdp_prog = adapter->xdp_prog; in igb_setup_rx_resources()
4486 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igb_setup_rx_resources()
4487 vfree(rx_ring->rx_buffer_info); in igb_setup_rx_resources()
4488 rx_ring->rx_buffer_info = NULL; in igb_setup_rx_resources()
4490 return -ENOMEM; in igb_setup_rx_resources()
4494 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
4502 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_rx_resources()
4505 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_setup_all_rx_resources()
4506 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
4508 dev_err(&pdev->dev, in igb_setup_all_rx_resources()
4510 for (i--; i >= 0; i--) in igb_setup_all_rx_resources()
4511 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
4520 * igb_setup_mrqc - configure the multiple receive queue control registers
4525 struct e1000_hw *hw = &adapter->hw; in igb_setup_mrqc()
4534 num_rx_queues = adapter->rss_queues; in igb_setup_mrqc()
4536 switch (hw->mac.type) { in igb_setup_mrqc()
4538 /* 82576 supports 2 RSS queues for SR-IOV */ in igb_setup_mrqc()
4539 if (adapter->vfs_allocated_count) in igb_setup_mrqc()
4546 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igb_setup_mrqc()
4548 adapter->rss_indir_tbl[j] = in igb_setup_mrqc()
4550 adapter->rss_indir_tbl_init = num_rx_queues; in igb_setup_mrqc()
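/* Worked example (a sketch, assuming the 128-entry redirection table these
 * parts use): the fill above spreads the buckets evenly across the active
 * queues, so with rss_queues = 4, buckets 0-31 map to queue 0, 32-63 to
 * queue 1, 64-95 to queue 2 and 96-127 to queue 3.
 */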
4561 if (adapter->hw.mac.type >= e1000_82576) in igb_setup_mrqc()
4577 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) in igb_setup_mrqc()
4579 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) in igb_setup_mrqc()
4586 if (adapter->vfs_allocated_count) { in igb_setup_mrqc()
4587 if (hw->mac.type > e1000_82575) { in igb_setup_mrqc()
4593 vtctl |= adapter->vfs_allocated_count << in igb_setup_mrqc()
4597 if (adapter->rss_queues > 1) in igb_setup_mrqc()
4610 * igb_setup_rctl - configure the receive control registers
4615 struct e1000_hw *hw = &adapter->hw; in igb_setup_rctl()
4624 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in igb_setup_rctl()
4638 /* disable queue 0 to prevent tail write w/o re-config */ in igb_setup_rctl()
4641 /* Attention!!! For SR-IOV PF driver operations you must enable in igb_setup_rctl()
4643 * if an un-trusted VF does not provide descriptors to hardware. in igb_setup_rctl()
4645 if (adapter->vfs_allocated_count) { in igb_setup_rctl()
4651 if (adapter->netdev->features & NETIF_F_RXALL) { in igb_setup_rctl()
4672 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_rlpml()
4689 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan_strip()
4692 if (hw->mac.type < e1000_82576) in igb_set_vf_vlan_strip()
4695 if (hw->mac.type == e1000_i350) in igb_set_vf_vlan_strip()
4711 struct e1000_hw *hw = &adapter->hw; in igb_set_vmolr()
4717 if (hw->mac.type < e1000_82576) in igb_set_vmolr()
4729 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) in igb_set_vmolr()
4734 if (vfn <= adapter->vfs_allocated_count) in igb_set_vmolr()
4741 * igb_setup_srrctl - configure the split and replication receive control
4748 struct e1000_hw *hw = &adapter->hw; in igb_setup_srrctl()
4749 int reg_idx = ring->reg_idx; in igb_setup_srrctl()
4753 if (ring->xsk_pool) in igb_setup_srrctl()
4754 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igb_setup_srrctl()
4763 if (hw->mac.type >= e1000_82580) in igb_setup_srrctl()
4768 if (adapter->vfs_allocated_count || in igb_setup_srrctl()
4769 (!(hw->fc.current_mode & e1000_fc_rx_pause) && in igb_setup_srrctl()
4770 adapter->num_rx_queues > 1)) in igb_setup_srrctl()
4777 * igb_configure_rx_ring - Configure a receive ring after Reset
4786 struct e1000_hw *hw = &adapter->hw; in igb_configure_rx_ring()
4788 u64 rdba = ring->dma; in igb_configure_rx_ring()
4789 int reg_idx = ring->reg_idx; in igb_configure_rx_ring()
4792 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igb_configure_rx_ring()
4793 WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring)); in igb_configure_rx_ring()
4794 if (ring->xsk_pool) { in igb_configure_rx_ring()
4795 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igb_configure_rx_ring()
4798 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igb_configure_rx_ring()
4800 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igb_configure_rx_ring()
4813 ring->count * sizeof(union e1000_adv_rx_desc)); in igb_configure_rx_ring()
4816 ring->tail = adapter->io_addr + E1000_RDT(reg_idx); in igb_configure_rx_ring()
4818 writel(0, ring->tail); in igb_configure_rx_ring()
4830 if (ring->xsk_pool) in igb_configure_rx_ring()
4831 memset(ring->rx_buffer_info_zc, 0, in igb_configure_rx_ring()
4832 sizeof(*ring->rx_buffer_info_zc) * ring->count); in igb_configure_rx_ring()
4834 memset(ring->rx_buffer_info, 0, in igb_configure_rx_ring()
4835 sizeof(*ring->rx_buffer_info) * ring->count); in igb_configure_rx_ring()
4839 rx_desc->wb.upper.length = 0; in igb_configure_rx_ring()
4850 struct e1000_hw *hw = &adapter->hw; in igb_set_rx_buffer_len()
4857 if (adapter->flags & IGB_FLAG_RX_LEGACY) in igb_set_rx_buffer_len()
4863 if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || in igb_set_rx_buffer_len()
4871 * igb_configure_rx - Configure receive Unit after Reset
4886 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure_rx()
4887 struct igb_ring *rx_ring = adapter->rx_ring[i]; in igb_configure_rx()
4895 * igb_free_tx_resources - Free Tx Resources per Queue
4898 * Free all transmit software resources
4904 vfree(tx_ring->tx_buffer_info); in igb_free_tx_resources()
4905 tx_ring->tx_buffer_info = NULL; in igb_free_tx_resources()
4907 /* if not set, then don't free */ in igb_free_tx_resources()
4908 if (!tx_ring->desc) in igb_free_tx_resources()
4911 dma_free_coherent(tx_ring->dev, tx_ring->size, in igb_free_tx_resources()
4912 tx_ring->desc, tx_ring->dma); in igb_free_tx_resources()
4914 tx_ring->desc = NULL; in igb_free_tx_resources()
4918 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4921 * Free all transmit software resources
4927 for (i = 0; i < adapter->num_tx_queues; i++) in igb_free_all_tx_resources()
4928 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
4929 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
4933 * igb_clean_tx_ring - Free Tx Buffers
4938 u16 i = tx_ring->next_to_clean; in igb_clean_tx_ring()
4939 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_ring()
4942 while (i != tx_ring->next_to_use) { in igb_clean_tx_ring()
4945 /* Free all the Tx ring sk_buffs or xdp frames */ in igb_clean_tx_ring()
4946 if (tx_buffer->type == IGB_TYPE_SKB) { in igb_clean_tx_ring()
4947 dev_kfree_skb_any(tx_buffer->skb); in igb_clean_tx_ring()
4948 } else if (tx_buffer->type == IGB_TYPE_XDP) { in igb_clean_tx_ring()
4949 xdp_return_frame(tx_buffer->xdpf); in igb_clean_tx_ring()
4950 } else if (tx_buffer->type == IGB_TYPE_XSK) { in igb_clean_tx_ring()
4956 dma_unmap_single(tx_ring->dev, in igb_clean_tx_ring()
4962 eop_desc = tx_buffer->next_to_watch; in igb_clean_tx_ring()
4970 if (unlikely(i == tx_ring->count)) { in igb_clean_tx_ring()
4972 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_ring()
4978 dma_unmap_page(tx_ring->dev, in igb_clean_tx_ring()
4985 tx_buffer->next_to_watch = NULL; in igb_clean_tx_ring()
4990 if (unlikely(i == tx_ring->count)) { in igb_clean_tx_ring()
4992 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_ring()
4999 if (tx_ring->xsk_pool && xsk_frames) in igb_clean_tx_ring()
5000 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igb_clean_tx_ring()
5003 tx_ring->next_to_use = 0; in igb_clean_tx_ring()
5004 tx_ring->next_to_clean = 0; in igb_clean_tx_ring()
5008 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
5015 for (i = 0; i < adapter->num_tx_queues; i++) in igb_clean_all_tx_rings()
5016 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
5017 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
5021 * igb_free_rx_resources - Free Rx Resources
5024 * Free all receive software resources
5030 rx_ring->xdp_prog = NULL; in igb_free_rx_resources()
5031 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igb_free_rx_resources()
5032 if (rx_ring->xsk_pool) { in igb_free_rx_resources()
5033 vfree(rx_ring->rx_buffer_info_zc); in igb_free_rx_resources()
5034 rx_ring->rx_buffer_info_zc = NULL; in igb_free_rx_resources()
5036 vfree(rx_ring->rx_buffer_info); in igb_free_rx_resources()
5037 rx_ring->rx_buffer_info = NULL; in igb_free_rx_resources()
5040 /* if not set, then don't free */ in igb_free_rx_resources()
5041 if (!rx_ring->desc) in igb_free_rx_resources()
5044 dma_free_coherent(rx_ring->dev, rx_ring->size, in igb_free_rx_resources()
5045 rx_ring->desc, rx_ring->dma); in igb_free_rx_resources()
5047 rx_ring->desc = NULL; in igb_free_rx_resources()
5051 * igb_free_all_rx_resources - Free Rx Resources for All Queues
5054 * Free all receive software resources
5060 for (i = 0; i < adapter->num_rx_queues; i++) in igb_free_all_rx_resources()
5061 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
5062 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
5066 * igb_clean_rx_ring - Free Rx Buffers per Queue
5067 * @rx_ring: ring to free buffers from
5071 u16 i = rx_ring->next_to_clean; in igb_clean_rx_ring()
5073 dev_kfree_skb(rx_ring->skb); in igb_clean_rx_ring()
5074 rx_ring->skb = NULL; in igb_clean_rx_ring()
5076 if (rx_ring->xsk_pool) { in igb_clean_rx_ring()
5081 /* Free all the Rx ring sk_buffs */ in igb_clean_rx_ring()
5082 while (i != rx_ring->next_to_alloc) { in igb_clean_rx_ring()
5083 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igb_clean_rx_ring()
5088 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_clean_rx_ring()
5089 buffer_info->dma, in igb_clean_rx_ring()
5090 buffer_info->page_offset, in igb_clean_rx_ring()
5094 /* free resources associated with mapping */ in igb_clean_rx_ring()
5095 dma_unmap_page_attrs(rx_ring->dev, in igb_clean_rx_ring()
5096 buffer_info->dma, in igb_clean_rx_ring()
5100 __page_frag_cache_drain(buffer_info->page, in igb_clean_rx_ring()
5101 buffer_info->pagecnt_bias); in igb_clean_rx_ring()
5104 if (i == rx_ring->count) in igb_clean_rx_ring()
5109 rx_ring->next_to_alloc = 0; in igb_clean_rx_ring()
5110 rx_ring->next_to_clean = 0; in igb_clean_rx_ring()
5111 rx_ring->next_to_use = 0; in igb_clean_rx_ring()
5115 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
5122 for (i = 0; i < adapter->num_rx_queues; i++) in igb_clean_all_rx_rings()
5123 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
5124 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
5128 * igb_set_mac - Change the Ethernet Address of the NIC
5137 struct e1000_hw *hw = &adapter->hw; in igb_set_mac()
5140 if (!is_valid_ether_addr(addr->sa_data)) in igb_set_mac()
5141 return -EADDRNOTAVAIL; in igb_set_mac()
5143 eth_hw_addr_set(netdev, addr->sa_data); in igb_set_mac()
5144 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igb_set_mac()
5153 * igb_write_mc_addr_list - write multicast addresses to MTA
5157 * Returns: -ENOMEM on failure
5164 struct e1000_hw *hw = &adapter->hw; in igb_write_mc_addr_list()
5178 return -ENOMEM; in igb_write_mc_addr_list()
5183 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igb_write_mc_addr_list()
5193 struct e1000_hw *hw = &adapter->hw; in igb_vlan_promisc_enable()
5196 switch (hw->mac.type) { in igb_vlan_promisc_enable()
5201 if (adapter->netdev->features & NETIF_F_NTUPLE) in igb_vlan_promisc_enable()
5208 if (adapter->vfs_allocated_count) in igb_vlan_promisc_enable()
5216 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_vlan_promisc_enable()
5219 if (!adapter->vfs_allocated_count) in igb_vlan_promisc_enable()
5223 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_vlan_promisc_enable()
5225 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { in igb_vlan_promisc_enable()
5234 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) in igb_vlan_promisc_enable()
5235 hw->mac.ops.write_vfta(hw, i, ~0U); in igb_vlan_promisc_enable()
5238 adapter->flags |= IGB_FLAG_VLAN_PROMISC; in igb_vlan_promisc_enable()
5246 struct e1000_hw *hw = &adapter->hw; in igb_scrub_vfta()
5253 vid = adapter->mng_vlan_id; in igb_scrub_vfta()
5255 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in igb_scrub_vfta()
5257 if (!adapter->vfs_allocated_count) in igb_scrub_vfta()
5260 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_scrub_vfta()
5262 for (i = E1000_VLVF_ARRAY_SIZE; --i;) { in igb_scrub_vfta()
5274 vfta[(vid - vid_start) / 32] |= BIT(vid % 32); in igb_scrub_vfta()
5277 if (test_bit(vid, adapter->active_vlans)) in igb_scrub_vfta()
5289 for (i = VFTA_BLOCK_SIZE; i--;) { in igb_scrub_vfta()
5294 vfta[i] |= adapter->active_vlans[word] >> bits; in igb_scrub_vfta()
5296 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); in igb_scrub_vfta()
5305 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_promisc_disable()
5309 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; in igb_vlan_promisc_disable()
5316 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
5322 * promiscuous mode, and all-multi behavior.
5327 struct e1000_hw *hw = &adapter->hw; in igb_set_rx_mode()
5328 unsigned int vfn = adapter->vfs_allocated_count; in igb_set_rx_mode()
5333 if (netdev->flags & IFF_PROMISC) { in igb_set_rx_mode()
5338 if (hw->mac.type == e1000_82576) in igb_set_rx_mode()
5341 if (netdev->flags & IFF_ALLMULTI) { in igb_set_rx_mode()
5372 if ((netdev->flags & IFF_PROMISC) || in igb_set_rx_mode()
5373 (netdev->features & NETIF_F_RXALL)) { in igb_set_rx_mode()
5387 if (!adapter->vfs_allocated_count) { in igb_set_rx_mode()
5388 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) in igb_set_rx_mode()
5394 /* In order to support SR-IOV and eventually VMDq it is necessary to set in igb_set_rx_mode()
5399 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) in igb_set_rx_mode()
5411 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) in igb_set_rx_mode()
5425 struct e1000_hw *hw = &adapter->hw; in igb_check_wvbr()
5428 switch (hw->mac.type) { in igb_check_wvbr()
5439 adapter->wvbr |= wvbr; in igb_check_wvbr()
5448 if (!adapter->wvbr) in igb_spoof_check()
5451 for (j = 0; j < adapter->vfs_allocated_count; j++) { in igb_spoof_check()
5452 if (adapter->wvbr & BIT(j) || in igb_spoof_check()
5453 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { in igb_spoof_check()
5454 dev_warn(&adapter->pdev->dev, in igb_spoof_check()
5456 adapter->wvbr &= in igb_spoof_check()
5470 igb_get_phy_info(&adapter->hw); in igb_update_phy_info()
5474 * igb_has_link - check shared code for link and determine up/down
5479 struct e1000_hw *hw = &adapter->hw; in igb_has_link()
5487 switch (hw->phy.media_type) { in igb_has_link()
5489 if (!hw->mac.get_link_status) in igb_has_link()
5493 hw->mac.ops.check_for_link(hw); in igb_has_link()
5494 link_active = !hw->mac.get_link_status; in igb_has_link()
5501 if (((hw->mac.type == e1000_i210) || in igb_has_link()
5502 (hw->mac.type == e1000_i211)) && in igb_has_link()
5503 (hw->phy.id == I210_I_PHY_ID)) { in igb_has_link()
5504 if (!netif_carrier_ok(adapter->netdev)) { in igb_has_link()
5505 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
5506 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { in igb_has_link()
5507 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
5508 adapter->link_check_timeout = jiffies; in igb_has_link()
5521 if (hw->mac.type == e1000_i350) { in igb_thermal_sensor_event()
5525 if ((hw->phy.media_type == e1000_media_type_copper) && in igb_thermal_sensor_event()
5534 * igb_check_lvmmc - check for malformed packets received
5540 struct e1000_hw *hw = &adapter->hw; in igb_check_lvmmc()
5546 netdev_warn(adapter->netdev, in igb_check_lvmmc()
5554 * igb_watchdog - Timer Call-back
5562 schedule_work(&adapter->watchdog_task); in igb_watchdog()
5570 struct e1000_hw *hw = &adapter->hw; in igb_watchdog_task()
5571 struct e1000_phy_info *phy = &hw->phy; in igb_watchdog_task()
5572 struct net_device *netdev = adapter->netdev; in igb_watchdog_task()
5580 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { in igb_watchdog_task()
5581 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igb_watchdog_task()
5582 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_watchdog_task()
5588 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
5589 if (hw->phy.media_type == e1000_media_type_copper) { in igb_watchdog_task()
5597 if (hw->dev_spec._82575.media_changed) { in igb_watchdog_task()
5598 hw->dev_spec._82575.media_changed = false; in igb_watchdog_task()
5599 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_watchdog_task()
5603 pm_runtime_resume(netdev->dev.parent); in igb_watchdog_task()
5608 hw->mac.ops.get_speed_and_duplex(hw, in igb_watchdog_task()
5609 &adapter->link_speed, in igb_watchdog_task()
5610 &adapter->link_duplex); in igb_watchdog_task()
5616 netdev->name, in igb_watchdog_task()
5617 adapter->link_speed, in igb_watchdog_task()
5618 adapter->link_duplex == FULL_DUPLEX ? in igb_watchdog_task()
5626 if ((adapter->flags & IGB_FLAG_EEE) && in igb_watchdog_task()
5627 (adapter->link_duplex == HALF_DUPLEX)) { in igb_watchdog_task()
5628 dev_info(&adapter->pdev->dev, in igb_watchdog_task()
5629 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); in igb_watchdog_task()
5630 adapter->hw.dev_spec._82575.eee_disable = true; in igb_watchdog_task()
5631 adapter->flags &= ~IGB_FLAG_EEE; in igb_watchdog_task()
5636 if (phy->speed_downgraded) in igb_watchdog_task()
5645 adapter->tx_timeout_factor = 1; in igb_watchdog_task()
5646 switch (adapter->link_speed) { in igb_watchdog_task()
5648 adapter->tx_timeout_factor = 14; in igb_watchdog_task()
5655 if (adapter->link_speed != SPEED_1000 || in igb_watchdog_task()
5656 !hw->phy.ops.read_reg) in igb_watchdog_task()
5666 retry_count--; in igb_watchdog_task()
5669 dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); in igb_watchdog_task()
5672 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); in igb_watchdog_task()
5681 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
5682 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
5687 adapter->link_speed = 0; in igb_watchdog_task()
5688 adapter->link_duplex = 0; in igb_watchdog_task()
5698 netdev->name); in igb_watchdog_task()
5704 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
5705 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
5709 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
5711 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
5712 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5717 pm_schedule_suspend(netdev->dev.parent, in igb_watchdog_task()
5722 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_watchdog_task()
5724 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
5725 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5732 spin_lock(&adapter->stats64_lock); in igb_watchdog_task()
5734 spin_unlock(&adapter->stats64_lock); in igb_watchdog_task()
5736 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_watchdog_task()
5737 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task()
5744 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { in igb_watchdog_task()
5745 adapter->tx_timeout_count++; in igb_watchdog_task()
5746 schedule_work(&adapter->reset_task); in igb_watchdog_task()
5753 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_watchdog_task()
5757 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_watchdog_task()
5760 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_watchdog_task()
5761 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_watchdog_task()
5764 if (!q_vector->rx.ring) in igb_watchdog_task()
5767 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; in igb_watchdog_task()
5769 if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igb_watchdog_task()
5770 eics |= q_vector->eims_value; in igb_watchdog_task()
5771 clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igb_watchdog_task()
5777 struct igb_ring *rx_ring = adapter->rx_ring[0]; in igb_watchdog_task()
5779 if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igb_watchdog_task()
5780 clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igb_watchdog_task()
5790 if ((adapter->hw.mac.type == e1000_i350) || in igb_watchdog_task()
5791 (adapter->hw.mac.type == e1000_i354)) in igb_watchdog_task()
5795 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_watchdog_task()
5796 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) in igb_watchdog_task()
5797 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
5800 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
5813 * igb_update_ring_itr - update the dynamic ITR value based on packet size
5829 int new_val = q_vector->itr_val; in igb_update_ring_itr()
5831 struct igb_adapter *adapter = q_vector->adapter; in igb_update_ring_itr()
5834 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igb_update_ring_itr()
5835 * ints/sec - ITR timer value of 120 ticks. in igb_update_ring_itr()
5837 if (adapter->link_speed != SPEED_1000) { in igb_update_ring_itr()
5842 packets = q_vector->rx.total_packets; in igb_update_ring_itr()
5844 avg_wire_size = q_vector->rx.total_bytes / packets; in igb_update_ring_itr()
5846 packets = q_vector->tx.total_packets; in igb_update_ring_itr()
5849 q_vector->tx.total_bytes / packets); in igb_update_ring_itr()
5861 /* Give a little boost to mid-size frames */ in igb_update_ring_itr()
5869 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_update_ring_itr()
5870 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_update_ring_itr()
5874 if (new_val != q_vector->itr_val) { in igb_update_ring_itr()
5875 q_vector->itr_val = new_val; in igb_update_ring_itr()
5876 q_vector->set_itr = 1; in igb_update_ring_itr()
5879 q_vector->rx.total_bytes = 0; in igb_update_ring_itr()
5880 q_vector->rx.total_packets = 0; in igb_update_ring_itr()
5881 q_vector->tx.total_bytes = 0; in igb_update_ring_itr()
5882 q_vector->tx.total_packets = 0; in igb_update_ring_itr()
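/* Rough illustration of the heuristic above (approximate, following the
 * current upstream logic): avg_wire_size is padded for preamble/CRC/IFG
 * and clamped, mid-size frames get an interrupt interval of about size/3
 * while small or large frames get about size/2; e.g. steady 1500-byte
 * frames give avg_wire_size of roughly 1524 and an itr_val around 762.
 */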
5886 * igb_update_itr - update the dynamic ITR value based on statistics
5898 * NOTE: These calculations are only valid when operating in a single-
5904 unsigned int packets = ring_container->total_packets; in igb_update_itr()
5905 unsigned int bytes = ring_container->total_bytes; in igb_update_itr()
5906 u8 itrval = ring_container->itr; in igb_update_itr()
5946 ring_container->total_bytes = 0; in igb_update_itr()
5947 ring_container->total_packets = 0; in igb_update_itr()
5950 ring_container->itr = itrval; in igb_update_itr()
5955 struct igb_adapter *adapter = q_vector->adapter; in igb_set_itr()
5956 u32 new_itr = q_vector->itr_val; in igb_set_itr()
5959 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igb_set_itr()
5960 if (adapter->link_speed != SPEED_1000) { in igb_set_itr()
5966 igb_update_itr(q_vector, &q_vector->tx); in igb_set_itr()
5967 igb_update_itr(q_vector, &q_vector->rx); in igb_set_itr()
5969 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igb_set_itr()
5973 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_set_itr()
5974 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_set_itr()
5993 if (new_itr != q_vector->itr_val) { in igb_set_itr()
5998 new_itr = new_itr > q_vector->itr_val ? in igb_set_itr()
5999 max((new_itr * q_vector->itr_val) / in igb_set_itr()
6000 (new_itr + (q_vector->itr_val >> 2)), in igb_set_itr()
6008 q_vector->itr_val = new_itr; in igb_set_itr()
6009 q_vector->set_itr = 1; in igb_set_itr()
6019 u16 i = tx_ring->next_to_use; in igb_tx_ctxtdesc()
6025 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igb_tx_ctxtdesc()
6031 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_ctxtdesc()
6032 mss_l4len_idx |= tx_ring->reg_idx << 4; in igb_tx_ctxtdesc()
6034 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igb_tx_ctxtdesc()
6035 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igb_tx_ctxtdesc()
6036 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igb_tx_ctxtdesc()
6041 if (tx_ring->launchtime_enable) { in igb_tx_ctxtdesc()
6042 ts = ktime_to_timespec64(first->skb->tstamp); in igb_tx_ctxtdesc()
6043 skb_txtime_consumed(first->skb); in igb_tx_ctxtdesc()
6044 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); in igb_tx_ctxtdesc()
6046 context_desc->seqnum_seed = 0; in igb_tx_ctxtdesc()
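/* The division by 32 above converts the requested transmit time into the
 * 32 ns units the hardware expects in the context descriptor's
 * seqnum_seed field when launch-time offload is enabled; only the
 * nanosecond portion of the timestamp is programmed here.
 */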
6055 struct sk_buff *skb = first->skb; in igb_tso()
6069 if (skb->ip_summed != CHECKSUM_PARTIAL) in igb_tso()
6083 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? in igb_tso()
6087 if (ip.v4->version == 4) { in igb_tso()
6089 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igb_tso()
6094 ip.v4->check = csum_fold(csum_partial(trans_start, in igb_tso()
6095 csum_start - trans_start, in igb_tso()
6099 ip.v4->tot_len = 0; in igb_tso()
6100 first->tx_flags |= IGB_TX_FLAGS_TSO | in igb_tso()
6104 ip.v6->payload_len = 0; in igb_tso()
6105 first->tx_flags |= IGB_TX_FLAGS_TSO | in igb_tso()
6110 l4_offset = l4.hdr - skb->data; in igb_tso()
6113 paylen = skb->len - l4_offset; in igb_tso()
6116 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igb_tso()
6117 csum_replace_by_diff(&l4.tcp->check, in igb_tso()
6122 csum_replace_by_diff(&l4.udp->check, in igb_tso()
6127 first->gso_segs = skb_shinfo(skb)->gso_segs; in igb_tso()
6128 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igb_tso()
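/* Example of the accounting above (illustrative numbers): a 9000-byte TSO
 * payload with an MSS of 1500 produces gso_segs = 6, and because the
 * headers are replicated for every segment after the first, bytecount
 * grows by 5 * hdr_len on top of skb->len.
 */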
6131 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; in igb_tso()
6132 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; in igb_tso()
6135 vlan_macip_lens = l4.hdr - ip.hdr; in igb_tso()
6136 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; in igb_tso()
6137 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; in igb_tso()
6147 struct sk_buff *skb = first->skb; in igb_tx_csum()
6151 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igb_tx_csum()
6153 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && in igb_tx_csum()
6154 !tx_ring->launchtime_enable) in igb_tx_csum()
6159 switch (skb->csum_offset) { in igb_tx_csum()
6178 first->tx_flags |= IGB_TX_FLAGS_CSUM; in igb_tx_csum()
6179 vlan_macip_lens = skb_checksum_start_offset(skb) - in igb_tx_csum()
6183 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; in igb_tx_csum()
6213 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); in igb_tx_cmd_type()
6225 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_tx_olinfo_status()
6226 olinfo_status |= tx_ring->reg_idx << 4; in igb_tx_olinfo_status()
6238 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igb_tx_olinfo_status()
6243 struct net_device *netdev = tx_ring->netdev; in __igb_maybe_stop_tx()
6245 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
6257 return -EBUSY; in __igb_maybe_stop_tx()
6260 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igb_maybe_stop_tx()
6262 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
6263 tx_ring->tx_stats.restart_queue2++; in __igb_maybe_stop_tx()
6264 u64_stats_update_end(&tx_ring->tx_syncp2); in __igb_maybe_stop_tx()
6280 struct sk_buff *skb = first->skb; in igb_tx_map()
6286 u32 tx_flags = first->tx_flags; in igb_tx_map()
6288 u16 i = tx_ring->next_to_use; in igb_tx_map()
6292 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igb_tx_map()
6295 data_len = skb->data_len; in igb_tx_map()
6297 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igb_tx_map()
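/* The loop below maps the linear data first and then each page fragment;
 * any buffer larger than IGB_MAX_DATA_PER_TXD (32 KB, per the driver's
 * IGB_MAX_TXD_PWR definition) is split across multiple descriptors before
 * the next fragment is mapped.
 */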
6301 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igb_tx_map()
6302 if (dma_mapping_error(tx_ring->dev, dma)) in igb_tx_map()
6309 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_tx_map()
6312 tx_desc->read.cmd_type_len = in igb_tx_map()
6317 if (i == tx_ring->count) { in igb_tx_map()
6321 tx_desc->read.olinfo_status = 0; in igb_tx_map()
6324 size -= IGB_MAX_DATA_PER_TXD; in igb_tx_map()
6326 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_tx_map()
6332 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igb_tx_map()
6336 if (i == tx_ring->count) { in igb_tx_map()
6340 tx_desc->read.olinfo_status = 0; in igb_tx_map()
6343 data_len -= size; in igb_tx_map()
6345 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igb_tx_map()
6348 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6353 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igb_tx_map()
6355 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igb_tx_map()
6358 first->time_stamp = jiffies; in igb_tx_map()
6363 * are new descriptors to fetch. (Only applicable for weak-ordered in igb_tx_map()
6364 * memory model archs, such as IA-64). in igb_tx_map()
6372 first->next_to_watch = tx_desc; in igb_tx_map()
6375 if (i == tx_ring->count) in igb_tx_map()
6378 tx_ring->next_to_use = i; in igb_tx_map()
6384 writel(i, tx_ring->tail); in igb_tx_map()
6389 dev_err(tx_ring->dev, "TX DMA map failed\n"); in igb_tx_map()
6390 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6395 dma_unmap_page(tx_ring->dev, in igb_tx_map()
6401 if (i-- == 0) in igb_tx_map()
6402 i += tx_ring->count; in igb_tx_map()
6403 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_tx_map()
6407 dma_unmap_single(tx_ring->dev, in igb_tx_map()
6413 dev_kfree_skb_any(tx_buffer->skb); in igb_tx_map()
6414 tx_buffer->skb = NULL; in igb_tx_map()
6416 tx_ring->next_to_use = i; in igb_tx_map()
6418 return -1; in igb_tx_map()
6426 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igb_xmit_xdp_ring()
6427 u16 count, i, index = tx_ring->next_to_use; in igb_xmit_xdp_ring()
6428 struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index]; in igb_xmit_xdp_ring()
6431 u32 len = xdpf->len, cmd_type, olinfo_status; in igb_xmit_xdp_ring()
6432 void *data = xdpf->data; in igb_xmit_xdp_ring()
6436 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igb_xmit_xdp_ring()
6443 tx_head->bytecount = xdp_get_frame_len(xdpf); in igb_xmit_xdp_ring()
6444 tx_head->type = IGB_TYPE_XDP; in igb_xmit_xdp_ring()
6445 tx_head->gso_segs = 1; in igb_xmit_xdp_ring()
6446 tx_head->xdpf = xdpf; in igb_xmit_xdp_ring()
6448 olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT; in igb_xmit_xdp_ring()
6450 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igb_xmit_xdp_ring()
6451 olinfo_status |= tx_ring->reg_idx << 4; in igb_xmit_xdp_ring()
6452 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igb_xmit_xdp_ring()
6457 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in igb_xmit_xdp_ring()
6458 if (dma_mapping_error(tx_ring->dev, dma)) in igb_xmit_xdp_ring()
6469 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igb_xmit_xdp_ring()
6470 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igb_xmit_xdp_ring()
6472 tx_buffer->protocol = 0; in igb_xmit_xdp_ring()
6474 if (++index == tx_ring->count) in igb_xmit_xdp_ring()
6480 tx_buffer = &tx_ring->tx_buffer_info[index]; in igb_xmit_xdp_ring()
6482 tx_desc->read.olinfo_status = 0; in igb_xmit_xdp_ring()
6484 data = skb_frag_address(&sinfo->frags[i]); in igb_xmit_xdp_ring()
6485 len = skb_frag_size(&sinfo->frags[i]); in igb_xmit_xdp_ring()
6488 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD); in igb_xmit_xdp_ring()
6490 netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount); in igb_xmit_xdp_ring()
6492 tx_head->time_stamp = jiffies; in igb_xmit_xdp_ring()
6498 tx_head->next_to_watch = tx_desc; in igb_xmit_xdp_ring()
6499 tx_ring->next_to_use = index; in igb_xmit_xdp_ring()
6505 writel(index, tx_ring->tail); in igb_xmit_xdp_ring()
6511 tx_buffer = &tx_ring->tx_buffer_info[index]; in igb_xmit_xdp_ring()
6513 dma_unmap_page(tx_ring->dev, in igb_xmit_xdp_ring()
6522 index += tx_ring->count; in igb_xmit_xdp_ring()
6523 index--; in igb_xmit_xdp_ring()
6546 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igb_xmit_frame_ring()
6548 &skb_shinfo(skb)->frags[f])); in igb_xmit_frame_ring()
6555 if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))) in igb_xmit_frame_ring()
6559 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igb_xmit_frame_ring()
6560 first->type = IGB_TYPE_SKB; in igb_xmit_frame_ring()
6561 first->skb = skb; in igb_xmit_frame_ring()
6562 first->bytecount = skb->len; in igb_xmit_frame_ring()
6563 first->gso_segs = 1; in igb_xmit_frame_ring()
6565 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igb_xmit_frame_ring()
6566 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
6568 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && in igb_xmit_frame_ring()
6570 &adapter->state)) { in igb_xmit_frame_ring()
6571 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igb_xmit_frame_ring()
6574 adapter->ptp_tx_skb = skb_get(skb); in igb_xmit_frame_ring()
6575 adapter->ptp_tx_start = jiffies; in igb_xmit_frame_ring()
6576 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
6577 schedule_work(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
6579 adapter->tx_hwtstamp_skipped++; in igb_xmit_frame_ring()
6589 first->tx_flags = tx_flags; in igb_xmit_frame_ring()
6590 first->protocol = protocol; in igb_xmit_frame_ring()
6604 dev_kfree_skb_any(first->skb); in igb_xmit_frame_ring()
6605 first->skb = NULL; in igb_xmit_frame_ring()
6608 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring()
6610 dev_kfree_skb_any(adapter->ptp_tx_skb); in igb_xmit_frame_ring()
6611 adapter->ptp_tx_skb = NULL; in igb_xmit_frame_ring()
6612 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
6613 cancel_work_sync(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
6614 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); in igb_xmit_frame_ring()
6623 unsigned int r_idx = skb->queue_mapping; in igb_tx_queue_mapping()
6625 if (r_idx >= adapter->num_tx_queues) in igb_tx_queue_mapping()
6626 r_idx = r_idx % adapter->num_tx_queues; in igb_tx_queue_mapping()
6628 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
6646 * igb_tx_timeout - Respond to a Tx Hang
6653 struct e1000_hw *hw = &adapter->hw; in igb_tx_timeout()
6656 adapter->tx_timeout_count++; in igb_tx_timeout()
6658 if (hw->mac.type >= e1000_82580) in igb_tx_timeout()
6659 hw->dev_spec._82575.global_device_reset = true; in igb_tx_timeout()
6661 schedule_work(&adapter->reset_task); in igb_tx_timeout()
6663 (adapter->eims_enable_mask & ~adapter->eims_other)); in igb_tx_timeout()
6673 if (test_bit(__IGB_DOWN, &adapter->state) || in igb_reset_task()
6674 test_bit(__IGB_RESETTING, &adapter->state)) { in igb_reset_task()
6680 netdev_err(adapter->netdev, "Reset adapter\n"); in igb_reset_task()
6686 * igb_get_stats64 - Get System Network Statistics
6695 spin_lock(&adapter->stats64_lock); in igb_get_stats64()
6697 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igb_get_stats64()
6698 spin_unlock(&adapter->stats64_lock); in igb_get_stats64()
6702 * igb_change_mtu - Change the Maximum Transfer Unit
6716 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_change_mtu()
6717 struct igb_ring *ring = adapter->rx_ring[i]; in igb_change_mtu()
6720 netdev_warn(adapter->netdev, in igb_change_mtu()
6723 return -EINVAL; in igb_change_mtu()
6732 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_change_mtu()
6736 adapter->max_frame_size = max_frame; in igb_change_mtu()
6742 netdev->mtu, new_mtu); in igb_change_mtu()
6743 WRITE_ONCE(netdev->mtu, new_mtu); in igb_change_mtu()
6750 clear_bit(__IGB_RESETTING, &adapter->state); in igb_change_mtu()
6756 * igb_update_stats - Update the board statistics counters
6761 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igb_update_stats()
6762 struct e1000_hw *hw = &adapter->hw; in igb_update_stats()
6763 struct pci_dev *pdev = adapter->pdev; in igb_update_stats()
6773 if (adapter->link_speed == 0) in igb_update_stats()
6782 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_update_stats()
6783 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
6785 if (hw->mac.type >= e1000_i210) in igb_update_stats()
6789 ring->rx_stats.drops += rqdpc; in igb_update_stats()
6790 net_stats->rx_fifo_errors += rqdpc; in igb_update_stats()
6794 start = u64_stats_fetch_begin(&ring->rx_syncp); in igb_update_stats()
6795 _bytes = ring->rx_stats.bytes; in igb_update_stats()
6796 _packets = ring->rx_stats.packets; in igb_update_stats()
6797 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igb_update_stats()
6802 net_stats->rx_bytes = bytes; in igb_update_stats()
6803 net_stats->rx_packets = packets; in igb_update_stats()
6807 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_update_stats()
6808 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
6810 start = u64_stats_fetch_begin(&ring->tx_syncp); in igb_update_stats()
6811 _bytes = ring->tx_stats.bytes; in igb_update_stats()
6812 _packets = ring->tx_stats.packets; in igb_update_stats()
6813 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igb_update_stats()
6817 net_stats->tx_bytes = bytes; in igb_update_stats()
6818 net_stats->tx_packets = packets; in igb_update_stats()
6822 adapter->stats.crcerrs += rd32(E1000_CRCERRS); in igb_update_stats()
6823 adapter->stats.gprc += rd32(E1000_GPRC); in igb_update_stats()
6824 adapter->stats.gorc += rd32(E1000_GORCL); in igb_update_stats()
6826 adapter->stats.bprc += rd32(E1000_BPRC); in igb_update_stats()
6827 adapter->stats.mprc += rd32(E1000_MPRC); in igb_update_stats()
6828 adapter->stats.roc += rd32(E1000_ROC); in igb_update_stats()
6830 adapter->stats.prc64 += rd32(E1000_PRC64); in igb_update_stats()
6831 adapter->stats.prc127 += rd32(E1000_PRC127); in igb_update_stats()
6832 adapter->stats.prc255 += rd32(E1000_PRC255); in igb_update_stats()
6833 adapter->stats.prc511 += rd32(E1000_PRC511); in igb_update_stats()
6834 adapter->stats.prc1023 += rd32(E1000_PRC1023); in igb_update_stats()
6835 adapter->stats.prc1522 += rd32(E1000_PRC1522); in igb_update_stats()
6836 adapter->stats.symerrs += rd32(E1000_SYMERRS); in igb_update_stats()
6837 adapter->stats.sec += rd32(E1000_SEC); in igb_update_stats()
6840 adapter->stats.mpc += mpc; in igb_update_stats()
6841 net_stats->rx_fifo_errors += mpc; in igb_update_stats()
6842 adapter->stats.scc += rd32(E1000_SCC); in igb_update_stats()
6843 adapter->stats.ecol += rd32(E1000_ECOL); in igb_update_stats()
6844 adapter->stats.mcc += rd32(E1000_MCC); in igb_update_stats()
6845 adapter->stats.latecol += rd32(E1000_LATECOL); in igb_update_stats()
6846 adapter->stats.dc += rd32(E1000_DC); in igb_update_stats()
6847 adapter->stats.rlec += rd32(E1000_RLEC); in igb_update_stats()
6848 adapter->stats.xonrxc += rd32(E1000_XONRXC); in igb_update_stats()
6849 adapter->stats.xontxc += rd32(E1000_XONTXC); in igb_update_stats()
6850 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); in igb_update_stats()
6851 adapter->stats.xofftxc += rd32(E1000_XOFFTXC); in igb_update_stats()
6852 adapter->stats.fcruc += rd32(E1000_FCRUC); in igb_update_stats()
6853 adapter->stats.gptc += rd32(E1000_GPTC); in igb_update_stats()
6854 adapter->stats.gotc += rd32(E1000_GOTCL); in igb_update_stats()
6856 adapter->stats.rnbc += rd32(E1000_RNBC); in igb_update_stats()
6857 adapter->stats.ruc += rd32(E1000_RUC); in igb_update_stats()
6858 adapter->stats.rfc += rd32(E1000_RFC); in igb_update_stats()
6859 adapter->stats.rjc += rd32(E1000_RJC); in igb_update_stats()
6860 adapter->stats.tor += rd32(E1000_TORH); in igb_update_stats()
6861 adapter->stats.tot += rd32(E1000_TOTH); in igb_update_stats()
6862 adapter->stats.tpr += rd32(E1000_TPR); in igb_update_stats()
6864 adapter->stats.ptc64 += rd32(E1000_PTC64); in igb_update_stats()
6865 adapter->stats.ptc127 += rd32(E1000_PTC127); in igb_update_stats()
6866 adapter->stats.ptc255 += rd32(E1000_PTC255); in igb_update_stats()
6867 adapter->stats.ptc511 += rd32(E1000_PTC511); in igb_update_stats()
6868 adapter->stats.ptc1023 += rd32(E1000_PTC1023); in igb_update_stats()
6869 adapter->stats.ptc1522 += rd32(E1000_PTC1522); in igb_update_stats()
6871 adapter->stats.mptc += rd32(E1000_MPTC); in igb_update_stats()
6872 adapter->stats.bptc += rd32(E1000_BPTC); in igb_update_stats()
6874 adapter->stats.tpt += rd32(E1000_TPT); in igb_update_stats()
6875 adapter->stats.colc += rd32(E1000_COLC); in igb_update_stats()
6877 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); in igb_update_stats()
6881 adapter->stats.rxerrc += rd32(E1000_RXERRC); in igb_update_stats()
6884 if ((hw->mac.type != e1000_i210) && in igb_update_stats()
6885 (hw->mac.type != e1000_i211)) in igb_update_stats()
6886 adapter->stats.tncrs += rd32(E1000_TNCRS); in igb_update_stats()
6889 adapter->stats.tsctc += rd32(E1000_TSCTC); in igb_update_stats()
6890 adapter->stats.tsctfc += rd32(E1000_TSCTFC); in igb_update_stats()
6892 adapter->stats.iac += rd32(E1000_IAC); in igb_update_stats()
6893 adapter->stats.icrxoc += rd32(E1000_ICRXOC); in igb_update_stats()
6894 adapter->stats.icrxptc += rd32(E1000_ICRXPTC); in igb_update_stats()
6895 adapter->stats.icrxatc += rd32(E1000_ICRXATC); in igb_update_stats()
6896 adapter->stats.ictxptc += rd32(E1000_ICTXPTC); in igb_update_stats()
6897 adapter->stats.ictxatc += rd32(E1000_ICTXATC); in igb_update_stats()
6898 adapter->stats.ictxqec += rd32(E1000_ICTXQEC); in igb_update_stats()
6899 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); in igb_update_stats()
6900 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); in igb_update_stats()
6903 net_stats->multicast = adapter->stats.mprc; in igb_update_stats()
6904 net_stats->collisions = adapter->stats.colc; in igb_update_stats()
6911 net_stats->rx_errors = adapter->stats.rxerrc + in igb_update_stats()
6912 adapter->stats.crcerrs + adapter->stats.algnerrc + in igb_update_stats()
6913 adapter->stats.ruc + adapter->stats.roc + in igb_update_stats()
6914 adapter->stats.cexterr; in igb_update_stats()
6915 net_stats->rx_length_errors = adapter->stats.ruc + in igb_update_stats()
6916 adapter->stats.roc; in igb_update_stats()
6917 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igb_update_stats()
6918 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igb_update_stats()
6919 net_stats->rx_missed_errors = adapter->stats.mpc; in igb_update_stats()
6922 net_stats->tx_errors = adapter->stats.ecol + in igb_update_stats()
6923 adapter->stats.latecol; in igb_update_stats()
6924 net_stats->tx_aborted_errors = adapter->stats.ecol; in igb_update_stats()
6925 net_stats->tx_window_errors = adapter->stats.latecol; in igb_update_stats()
6926 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igb_update_stats()
6931 adapter->stats.mgptc += rd32(E1000_MGTPTC); in igb_update_stats()
6932 adapter->stats.mgprc += rd32(E1000_MGTPRC); in igb_update_stats()
6933 adapter->stats.mgpdc += rd32(E1000_MGTPDC); in igb_update_stats()
6938 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); in igb_update_stats()
6939 adapter->stats.o2bspc += rd32(E1000_O2BSPC); in igb_update_stats()
6940 adapter->stats.b2ospc += rd32(E1000_B2OSPC); in igb_update_stats()
6941 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); in igb_update_stats()
6947 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); in igb_perout()
6948 struct e1000_hw *hw = &adapter->hw; in igb_perout()
6955 spin_lock(&adapter->tmreg_lock); in igb_perout()
6957 if (hw->mac.type == e1000_82580 || in igb_perout()
6958 hw->mac.type == e1000_i354 || in igb_perout()
6959 hw->mac.type == e1000_i350) { in igb_perout()
6960 s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period); in igb_perout()
6969 now = timecounter_cyc2time(&adapter->tc, systim); in igb_perout()
6980 systim = systim + (ns - rem); in igb_perout()
6990 adapter->sdp_config[pin].name); in igb_perout()
6998 adapter->sdp_config[pin].name); in igb_perout()
7008 ts = timespec64_add(adapter->perout[tsintr_tt].start, in igb_perout()
7009 adapter->perout[tsintr_tt].period); in igb_perout()
7018 adapter->perout[tsintr_tt].start = ts; in igb_perout()
7020 spin_unlock(&adapter->tmreg_lock); in igb_perout()
7025 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); in igb_extts()
7028 struct e1000_hw *hw = &adapter->hw; in igb_extts()
7036 if (hw->mac.type == e1000_82580 || in igb_extts()
7037 hw->mac.type == e1000_i354 || in igb_extts()
7038 hw->mac.type == e1000_i350) { in igb_extts()
7042 spin_lock_irqsave(&adapter->tmreg_lock, flags); in igb_extts()
7043 ns = timecounter_cyc2time(&adapter->tc, ns); in igb_extts()
7044 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); in igb_extts()
7054 ptp_clock_event(adapter->ptp_clock, &event); in igb_extts()
7062 struct e1000_hw *hw = &adapter->hw; in igb_tsync_interrupt()
7066 if (hw->mac.type == e1000_82580) { in igb_tsync_interrupt()
7075 if (adapter->ptp_caps.pps) in igb_tsync_interrupt()
7076 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
7081 schedule_work(&adapter->ptp_tx_work); in igb_tsync_interrupt()
7100 struct e1000_hw *hw = &adapter->hw; in igb_msix_other()
7105 schedule_work(&adapter->reset_task); in igb_msix_other()
7109 adapter->stats.doosync++; in igb_msix_other()
7122 hw->mac.get_link_status = 1; in igb_msix_other()
7124 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_msix_other()
7125 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_msix_other()
7131 wr32(E1000_EIMS, adapter->eims_other); in igb_msix_other()
7138 struct igb_adapter *adapter = q_vector->adapter; in igb_write_itr()
7139 u32 itr_val = q_vector->itr_val & 0x7FFC; in igb_write_itr()
7141 if (!q_vector->set_itr) in igb_write_itr()
7147 if (adapter->hw.mac.type == e1000_82575) in igb_write_itr()
7152 writel(itr_val, q_vector->itr_register); in igb_write_itr()
7153 q_vector->set_itr = 0; in igb_write_itr()
7163 napi_schedule(&q_vector->napi); in igb_msix_ring()
7173 struct e1000_hw *hw = &adapter->hw; in igb_update_tx_dca()
7174 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); in igb_update_tx_dca()
7176 if (hw->mac.type != e1000_82575) in igb_update_tx_dca()
7187 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); in igb_update_tx_dca()
7194 struct e1000_hw *hw = &adapter->hw; in igb_update_rx_dca()
7195 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); in igb_update_rx_dca()
7197 if (hw->mac.type != e1000_82575) in igb_update_rx_dca()
7207 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); in igb_update_rx_dca()
7212 struct igb_adapter *adapter = q_vector->adapter; in igb_update_dca()
7215 if (q_vector->cpu == cpu) in igb_update_dca()
7218 if (q_vector->tx.ring) in igb_update_dca()
7219 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); in igb_update_dca()
7221 if (q_vector->rx.ring) in igb_update_dca()
7222 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); in igb_update_dca()
7224 q_vector->cpu = cpu; in igb_update_dca()
7231 struct e1000_hw *hw = &adapter->hw; in igb_setup_dca()
7234 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) in igb_setup_dca()
7240 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_setup_dca()
7241 adapter->q_vector[i]->cpu = -1; in igb_setup_dca()
7242 igb_update_dca(adapter->q_vector[i]); in igb_setup_dca()
7250 struct pci_dev *pdev = adapter->pdev; in __igb_notify_dca()
7251 struct e1000_hw *hw = &adapter->hw; in __igb_notify_dca()
7257 if (adapter->flags & IGB_FLAG_DCA_ENABLED) in __igb_notify_dca()
7260 adapter->flags |= IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
7261 dev_info(&pdev->dev, "DCA enabled\n"); in __igb_notify_dca()
7267 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in __igb_notify_dca()
7272 dev_info(&pdev->dev, "DCA disabled\n"); in __igb_notify_dca()
7273 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
7303 adapter->vf_data[vf].spoofchk_enabled = true; in igb_vf_configure()
7306 adapter->vf_data[vf].trusted = false; in igb_vf_configure()
7314 struct e1000_hw *hw = &adapter->hw; in igb_ping_all_vfs()
7318 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { in igb_ping_all_vfs()
7320 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) in igb_ping_all_vfs()
7328 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_promisc()
7330 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_promisc()
7332 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | in igb_set_vf_promisc()
7338 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; in igb_set_vf_promisc()
7345 if (vf_data->num_vf_mc_hashes > 30) { in igb_set_vf_promisc()
7347 } else if (vf_data->num_vf_mc_hashes) { in igb_set_vf_promisc()
7351 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) in igb_set_vf_promisc()
7352 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); in igb_set_vf_promisc()
7360 return -EINVAL; in igb_set_vf_promisc()
7370 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_multicasts()
7377 vf_data->num_vf_mc_hashes = n; in igb_set_vf_multicasts()
7385 vf_data->vf_mc_hashes[i] = hash_list[i]; in igb_set_vf_multicasts()
7388 igb_set_rx_mode(adapter->netdev); in igb_set_vf_multicasts()
7395 struct e1000_hw *hw = &adapter->hw; in igb_restore_vf_multicasts()
7399 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_restore_vf_multicasts()
7404 vf_data = &adapter->vf_data[i]; in igb_restore_vf_multicasts()
7406 if ((vf_data->num_vf_mc_hashes > 30) || in igb_restore_vf_multicasts()
7407 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { in igb_restore_vf_multicasts()
7409 } else if (vf_data->num_vf_mc_hashes) { in igb_restore_vf_multicasts()
7411 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) in igb_restore_vf_multicasts()
7412 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); in igb_restore_vf_multicasts()
7420 struct e1000_hw *hw = &adapter->hw; in igb_clear_vf_vfta()
7429 adapter->vfs_allocated_count); in igb_clear_vf_vfta()
7432 for (i = E1000_VLVF_ARRAY_SIZE; i--;) { in igb_clear_vf_vfta()
7455 vfta = adapter->shadow_vfta[vid / 32]; in igb_clear_vf_vfta()
7457 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); in igb_clear_vf_vfta()
7460 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_clear_vf_vfta()
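Editor's note: igb_clear_vf_vfta() indexes the VLAN filter table as an array of 32-bit words, selecting the word with vid / 32 and the bit within it with vid % 32, while keeping a software shadow copy. A minimal sketch of that indexing, assuming only a 4096-entry VLAN space and nothing about the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_N_VID	4096
	#define VFTA_WORDS	(VLAN_N_VID / 32)

	static uint32_t shadow_vfta[VFTA_WORDS];	/* software copy of the table */

	static void vfta_set_bit(uint16_t vid, int enable)
	{
		uint32_t mask = 1u << (vid % 32);

		if (enable)
			shadow_vfta[vid / 32] |= mask;
		else
			shadow_vfta[vid / 32] &= ~mask;
		/* a driver would now write shadow_vfta[vid / 32] to the hardware */
	}

	int main(void)
	{
		vfta_set_bit(100, 1);	/* VLAN 100 -> word 3, bit 4 */
		printf("word %u = 0x%08x\n", 100 / 32, shadow_vfta[100 / 32]);
		return 0;
	}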
7480 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { in igb_find_vlvf_entry()
7491 struct e1000_hw *hw = &adapter->hw; in igb_update_pf_vlvf()
7502 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; in igb_update_pf_vlvf()
7508 if (adapter->flags & IGB_FLAG_VLAN_PROMISC) in igb_update_pf_vlvf()
7518 int pf_id = adapter->vfs_allocated_count; in igb_set_vf_vlan()
7519 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan()
7527 if (add && test_bit(vid, adapter->active_vlans)) { in igb_set_vf_vlan()
7539 * we may need to drop the PF pool bit in order to allow us to free in igb_set_vf_vlan()
7542 if (test_bit(vid, adapter->active_vlans) || in igb_set_vf_vlan()
7543 (adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_set_vf_vlan()
7551 struct e1000_hw *hw = &adapter->hw; in igb_set_vmvir()
7572 if (vlan != adapter->vf_data[vf].pf_vlan) in igb_enable_port_vlan()
7573 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, in igb_enable_port_vlan()
7576 adapter->vf_data[vf].pf_vlan = vlan; in igb_enable_port_vlan()
7577 adapter->vf_data[vf].pf_qos = qos; in igb_enable_port_vlan()
7579 dev_info(&adapter->pdev->dev, in igb_enable_port_vlan()
7581 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_enable_port_vlan()
7582 dev_warn(&adapter->pdev->dev, in igb_enable_port_vlan()
7584 dev_warn(&adapter->pdev->dev, in igb_enable_port_vlan()
7600 if (adapter->vf_data[vf].pf_vlan) in igb_disable_port_vlan()
7601 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, in igb_disable_port_vlan()
7604 adapter->vf_data[vf].pf_vlan = 0; in igb_disable_port_vlan()
7605 adapter->vf_data[vf].pf_qos = 0; in igb_disable_port_vlan()
7616 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) in igb_ndo_set_vf_vlan()
7617 return -EINVAL; in igb_ndo_set_vf_vlan()
7620 return -EPROTONOSUPPORT; in igb_ndo_set_vf_vlan()
7632 if (adapter->vf_data[vf].pf_vlan) in igb_set_vf_vlan_msg()
7633 return -1; in igb_set_vf_vlan_msg()
7647 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_vf_reset()
7649 /* clear flags - except flag that indicates PF has set the MAC */ in igb_vf_reset()
7650 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_vf_reset()
7651 vf_data->last_nack = jiffies; in igb_vf_reset()
7655 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); in igb_vf_reset()
7656 igb_set_vmvir(adapter, vf_data->pf_vlan | in igb_vf_reset()
7657 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); in igb_vf_reset()
7658 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); in igb_vf_reset()
7659 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); in igb_vf_reset()
7662 adapter->vf_data[vf].num_vf_mc_hashes = 0; in igb_vf_reset()
7665 igb_set_rx_mode(adapter->netdev); in igb_vf_reset()
7670 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_event()
7673 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) in igb_vf_reset_event()
7682 struct e1000_hw *hw = &adapter->hw; in igb_vf_reset_msg()
7683 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_msg()
7699 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; in igb_vf_reset_msg()
7713 struct e1000_hw *hw = &adapter->hw; in igb_flush_mac_table()
7716 for (i = 0; i < hw->mac.rar_entry_count; i++) { in igb_flush_mac_table()
7717 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; in igb_flush_mac_table()
7718 eth_zero_addr(adapter->mac_table[i].addr); in igb_flush_mac_table()
7719 adapter->mac_table[i].queue = 0; in igb_flush_mac_table()
7726 struct e1000_hw *hw = &adapter->hw; in igb_available_rars()
7728 int rar_entries = hw->mac.rar_entry_count - in igb_available_rars()
7729 adapter->vfs_allocated_count; in igb_available_rars()
7734 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) in igb_available_rars()
7738 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && in igb_available_rars()
7739 (adapter->mac_table[i].queue != queue)) in igb_available_rars()
7751 struct igb_mac_addr *mac_table = &adapter->mac_table[0]; in igb_set_default_mac_filter()
7753 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); in igb_set_default_mac_filter()
7754 mac_table->queue = adapter->vfs_allocated_count; in igb_set_default_mac_filter()
7755 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; in igb_set_default_mac_filter()
7768 if (!(entry->state & IGB_MAC_STATE_IN_USE)) in igb_mac_entry_can_be_used()
7771 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != in igb_mac_entry_can_be_used()
7775 if (!ether_addr_equal(addr, entry->addr)) in igb_mac_entry_can_be_used()
7790 struct e1000_hw *hw = &adapter->hw; in igb_add_mac_filter_flags()
7791 int rar_entries = hw->mac.rar_entry_count - in igb_add_mac_filter_flags()
7792 adapter->vfs_allocated_count; in igb_add_mac_filter_flags()
7796 return -EINVAL; in igb_add_mac_filter_flags()
7803 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], in igb_add_mac_filter_flags()
7807 ether_addr_copy(adapter->mac_table[i].addr, addr); in igb_add_mac_filter_flags()
7808 adapter->mac_table[i].queue = queue; in igb_add_mac_filter_flags()
7809 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; in igb_add_mac_filter_flags()
7815 return -ENOSPC; in igb_add_mac_filter_flags()
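Editor's note: igb_add_mac_filter_flags() walks the fixed-size RAR-backed MAC table for the first usable slot and returns -ENOSPC when the table is full. A toy version of that first-fit search, using invented types rather than the driver's igb_mac_addr structure:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	#define TABLE_SIZE	16
	#define ENTRY_IN_USE	0x1

	struct toy_mac_entry {
		unsigned char addr[6];
		unsigned char state;
	};

	static struct toy_mac_entry table[TABLE_SIZE];

	/* Return the index the address was stored at, or -ENOSPC. */
	static int toy_add_filter(const unsigned char *addr)
	{
		int i;

		for (i = 0; i < TABLE_SIZE; i++) {
			if (table[i].state & ENTRY_IN_USE)
				continue;
			memcpy(table[i].addr, addr, 6);
			table[i].state |= ENTRY_IN_USE;
			return i;
		}
		return -ENOSPC;
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

		printf("stored at index %d\n", toy_add_filter(mac));
		return 0;
	}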
7834 struct e1000_hw *hw = &adapter->hw; in igb_del_mac_filter_flags()
7835 int rar_entries = hw->mac.rar_entry_count - in igb_del_mac_filter_flags()
7836 adapter->vfs_allocated_count; in igb_del_mac_filter_flags()
7840 return -EINVAL; in igb_del_mac_filter_flags()
7847 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) in igb_del_mac_filter_flags()
7849 if ((adapter->mac_table[i].state & flags) != flags) in igb_del_mac_filter_flags()
7851 if (adapter->mac_table[i].queue != queue) in igb_del_mac_filter_flags()
7853 if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) in igb_del_mac_filter_flags()
7859 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { in igb_del_mac_filter_flags()
7860 adapter->mac_table[i].state = in igb_del_mac_filter_flags()
7862 adapter->mac_table[i].queue = in igb_del_mac_filter_flags()
7863 adapter->vfs_allocated_count; in igb_del_mac_filter_flags()
7865 adapter->mac_table[i].state = 0; in igb_del_mac_filter_flags()
7866 adapter->mac_table[i].queue = 0; in igb_del_mac_filter_flags()
7867 eth_zero_addr(adapter->mac_table[i].addr); in igb_del_mac_filter_flags()
7874 return -ENOENT; in igb_del_mac_filter_flags()
7886 struct e1000_hw *hw = &adapter->hw; in igb_add_mac_steering_filter()
7891 if (hw->mac.type != e1000_i210) in igb_add_mac_steering_filter()
7892 return -EOPNOTSUPP; in igb_add_mac_steering_filter()
7910 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); in igb_uc_sync()
7919 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); in igb_uc_unsync()
7927 struct pci_dev *pdev = adapter->pdev; in igb_set_vf_mac_filter()
7928 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_mac_filter()
7933 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && in igb_set_vf_mac_filter()
7934 !vf_data->trusted) { in igb_set_vf_mac_filter()
7935 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7938 return -EINVAL; in igb_set_vf_mac_filter()
7941 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7944 return -EINVAL; in igb_set_vf_mac_filter()
7950 list_for_each_entry(entry, &adapter->vf_macs.l, l) { in igb_set_vf_mac_filter()
7951 if (entry->vf == vf) { in igb_set_vf_mac_filter()
7952 entry->vf = -1; in igb_set_vf_mac_filter()
7953 entry->free = true; in igb_set_vf_mac_filter()
7954 igb_del_mac_filter(adapter, entry->vf_mac, vf); in igb_set_vf_mac_filter()
7960 list_for_each_entry(entry, &adapter->vf_macs.l, l) { in igb_set_vf_mac_filter()
7961 if (entry->free) { in igb_set_vf_mac_filter()
7968 entry->free = false; in igb_set_vf_mac_filter()
7969 entry->vf = vf; in igb_set_vf_mac_filter()
7970 ether_addr_copy(entry->vf_mac, addr); in igb_set_vf_mac_filter()
7975 ret = -ENOSPC; in igb_set_vf_mac_filter()
7978 if (ret == -ENOSPC) in igb_set_vf_mac_filter()
7979 dev_warn(&pdev->dev, in igb_set_vf_mac_filter()
7984 ret = -EINVAL; in igb_set_vf_mac_filter()
7993 struct pci_dev *pdev = adapter->pdev; in igb_set_vf_mac_addr()
7994 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_mac_addr()
8004 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && in igb_set_vf_mac_addr()
8005 !vf_data->trusted) { in igb_set_vf_mac_addr()
8006 dev_warn(&pdev->dev, in igb_set_vf_mac_addr()
8009 return -EINVAL; in igb_set_vf_mac_addr()
8013 dev_warn(&pdev->dev, in igb_set_vf_mac_addr()
8016 return -EINVAL; in igb_set_vf_mac_addr()
8029 struct e1000_hw *hw = &adapter->hw; in igb_rcv_ack_from_vf()
8030 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_ack_from_vf()
8034 if (!(vf_data->flags & IGB_VF_FLAG_CTS) && in igb_rcv_ack_from_vf()
8035 time_after(jiffies, vf_data->last_nack + (2 * HZ))) { in igb_rcv_ack_from_vf()
8037 vf_data->last_nack = jiffies; in igb_rcv_ack_from_vf()
8043 struct pci_dev *pdev = adapter->pdev; in igb_rcv_msg_from_vf()
8045 struct e1000_hw *hw = &adapter->hw; in igb_rcv_msg_from_vf()
8046 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_msg_from_vf()
8053 dev_err(&pdev->dev, "Error receiving message from VF\n"); in igb_rcv_msg_from_vf()
8054 vf_data->flags &= ~IGB_VF_FLAG_CTS; in igb_rcv_msg_from_vf()
8055 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) in igb_rcv_msg_from_vf()
8073 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { in igb_rcv_msg_from_vf()
8074 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) in igb_rcv_msg_from_vf()
8076 retval = -1; in igb_rcv_msg_from_vf()
8094 retval = -1; in igb_rcv_msg_from_vf()
8095 if (vf_data->pf_vlan) in igb_rcv_msg_from_vf()
8096 dev_warn(&pdev->dev, in igb_rcv_msg_from_vf()
8103 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); in igb_rcv_msg_from_vf()
8104 retval = -1; in igb_rcv_msg_from_vf()
8126 struct e1000_hw *hw = &adapter->hw; in igb_msg_task()
8130 spin_lock_irqsave(&adapter->vfs_lock, flags); in igb_msg_task()
8131 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { in igb_msg_task()
8144 spin_unlock_irqrestore(&adapter->vfs_lock, flags); in igb_msg_task()
8148 * igb_set_uta - Set unicast filter table address
8152 * The unicast table address is a register array of 32-bit registers.
8160 struct e1000_hw *hw = &adapter->hw; in igb_set_uta()
8165 if (!adapter->vfs_allocated_count) in igb_set_uta()
8168 for (i = hw->mac.uta_reg_count; i--;) in igb_set_uta()
8173 * igb_intr_msi - Interrupt Handler
8180 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr_msi()
8181 struct e1000_hw *hw = &adapter->hw; in igb_intr_msi()
8188 schedule_work(&adapter->reset_task); in igb_intr_msi()
8192 adapter->stats.doosync++; in igb_intr_msi()
8196 hw->mac.get_link_status = 1; in igb_intr_msi()
8197 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr_msi()
8198 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr_msi()
8204 napi_schedule(&q_vector->napi); in igb_intr_msi()
8210 * igb_intr - Legacy Interrupt Handler
8217 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr()
8218 struct e1000_hw *hw = &adapter->hw; in igb_intr()
8219 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No need for the IMC write. */ in igb_intr()
8224 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is not set, the adapter did not send an interrupt. */ in igb_intr()
8233 schedule_work(&adapter->reset_task); in igb_intr()
8237 adapter->stats.doosync++; in igb_intr()
8241 hw->mac.get_link_status = 1; in igb_intr()
8243 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr()
8244 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr()
8250 napi_schedule(&q_vector->napi); in igb_intr()
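Editor's note: igb_intr() is the legacy (line-shared) interrupt handler, and the comments above it explain why it must confirm that this adapter actually asserted the interrupt (the ICR INT_ASSERTED bit) before claiming it; otherwise another device sharing the line would lose its interrupt. A self-contained illustration of that contract, with the enum and register bit invented for this sketch rather than taken from kernel headers:

	#include <stdio.h>

	enum toy_irqreturn { TOY_IRQ_NONE, TOY_IRQ_HANDLED };

	#define TOY_ICR_INT_ASSERTED	0x80000000u

	static enum toy_irqreturn toy_isr(unsigned int icr)
	{
		/* Not our interrupt: decline so other handlers on the line run. */
		if (!(icr & TOY_ICR_INT_ASSERTED))
			return TOY_IRQ_NONE;

		/* A real driver would note link changes, then schedule NAPI. */
		return TOY_IRQ_HANDLED;
	}

	int main(void)
	{
		printf("%d %d\n", toy_isr(0), toy_isr(TOY_ICR_INT_ASSERTED | 0x4));
		return 0;
	}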
8257 struct igb_adapter *adapter = q_vector->adapter; in igb_ring_irq_enable()
8258 struct e1000_hw *hw = &adapter->hw; in igb_ring_irq_enable()
8260 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igb_ring_irq_enable()
8261 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igb_ring_irq_enable()
8262 if ((adapter->num_q_vectors == 1) && !adapter->vf_data) in igb_ring_irq_enable()
8268 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_ring_irq_enable()
8269 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_ring_irq_enable()
8270 wr32(E1000_EIMS, q_vector->eims_value); in igb_ring_irq_enable()
8277 * igb_poll - NAPI Rx polling callback
8291 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) in igb_poll()
8294 if (q_vector->tx.ring) in igb_poll()
8297 if (q_vector->rx.ring) { in igb_poll()
8300 xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool); in igb_poll()
8314 /* Exit the polling mode, but don't re-enable interrupts if stack might in igb_poll()
8315 * poll us due to busy-polling in igb_poll()
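Editor's note: igb_poll() follows the standard NAPI contract: process at most "budget" packets per call, and only when less than the full budget was consumed is the poll considered complete (only then may interrupts be re-enabled, and even that is skipped under busy-polling, as the comment notes). A plain-C simulation of that budget accounting, independent of the kernel NAPI API:

	#include <stdio.h>

	/* Pretend there are 'pending' packets on a ring; consume up to 'budget'. */
	static int toy_poll(int *pending, int budget)
	{
		int done = 0;

		while (done < budget && *pending > 0) {
			(*pending)--;
			done++;
		}
		return done;
	}

	int main(void)
	{
		int pending = 100, budget = 64;

		while (1) {
			int done = toy_poll(&pending, budget);

			printf("cleaned %d, %d left\n", done, pending);
			if (done < budget)	/* ring drained: stop polling, re-arm IRQs */
				break;
		}
		return 0;
	}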
8324 * igb_clean_tx_irq - Reclaim resources after transmit completes
8333 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_tx_irq()
8334 unsigned int budget = q_vector->tx.work_limit; in igb_clean_tx_irq()
8335 struct igb_ring *tx_ring = q_vector->tx.ring; in igb_clean_tx_irq()
8336 unsigned int i = tx_ring->next_to_clean; in igb_clean_tx_irq()
8345 if (test_bit(__IGB_DOWN, &adapter->state)) in igb_clean_tx_irq()
8348 tx_buffer = &tx_ring->tx_buffer_info[i]; in igb_clean_tx_irq()
8350 i -= tx_ring->count; in igb_clean_tx_irq()
8353 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igb_clean_tx_irq()
8363 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) in igb_clean_tx_irq()
8367 tx_buffer->next_to_watch = NULL; in igb_clean_tx_irq()
8370 total_bytes += tx_buffer->bytecount; in igb_clean_tx_irq()
8371 total_packets += tx_buffer->gso_segs; in igb_clean_tx_irq()
8373 /* free the skb */ in igb_clean_tx_irq()
8374 if (tx_buffer->type == IGB_TYPE_SKB) { in igb_clean_tx_irq()
8375 napi_consume_skb(tx_buffer->skb, napi_budget); in igb_clean_tx_irq()
8376 } else if (tx_buffer->type == IGB_TYPE_XDP) { in igb_clean_tx_irq()
8377 xdp_return_frame(tx_buffer->xdpf); in igb_clean_tx_irq()
8378 } else if (tx_buffer->type == IGB_TYPE_XSK) { in igb_clean_tx_irq()
8384 dma_unmap_single(tx_ring->dev, in igb_clean_tx_irq()
8398 i -= tx_ring->count; in igb_clean_tx_irq()
8399 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
8405 dma_unmap_page(tx_ring->dev, in igb_clean_tx_irq()
8419 i -= tx_ring->count; in igb_clean_tx_irq()
8420 tx_buffer = tx_ring->tx_buffer_info; in igb_clean_tx_irq()
8428 budget--; in igb_clean_tx_irq()
8433 i += tx_ring->count; in igb_clean_tx_irq()
8434 tx_ring->next_to_clean = i; in igb_clean_tx_irq()
8435 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8436 tx_ring->tx_stats.bytes += total_bytes; in igb_clean_tx_irq()
8437 tx_ring->tx_stats.packets += total_packets; in igb_clean_tx_irq()
8438 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8439 q_vector->tx.total_bytes += total_bytes; in igb_clean_tx_irq()
8440 q_vector->tx.total_packets += total_packets; in igb_clean_tx_irq()
8442 xsk_pool = READ_ONCE(tx_ring->xsk_pool); in igb_clean_tx_irq()
8457 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igb_clean_tx_irq()
8458 struct e1000_hw *hw = &adapter->hw; in igb_clean_tx_irq()
8463 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igb_clean_tx_irq()
8464 if (tx_buffer->next_to_watch && in igb_clean_tx_irq()
8465 time_after(jiffies, tx_buffer->time_stamp + in igb_clean_tx_irq()
8466 (adapter->tx_timeout_factor * HZ)) && in igb_clean_tx_irq()
8470 dev_err(tx_ring->dev, in igb_clean_tx_irq()
8482 tx_ring->queue_index, in igb_clean_tx_irq()
8483 rd32(E1000_TDH(tx_ring->reg_idx)), in igb_clean_tx_irq()
8484 readl(tx_ring->tail), in igb_clean_tx_irq()
8485 tx_ring->next_to_use, in igb_clean_tx_irq()
8486 tx_ring->next_to_clean, in igb_clean_tx_irq()
8487 tx_buffer->time_stamp, in igb_clean_tx_irq()
8488 tx_buffer->next_to_watch, in igb_clean_tx_irq()
8490 tx_buffer->next_to_watch->wb.status); in igb_clean_tx_irq()
8491 netif_stop_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
8492 tx_ring->queue_index); in igb_clean_tx_irq()
8501 netif_carrier_ok(tx_ring->netdev) && in igb_clean_tx_irq()
8507 if (__netif_subqueue_stopped(tx_ring->netdev, in igb_clean_tx_irq()
8508 tx_ring->queue_index) && in igb_clean_tx_irq()
8509 !(test_bit(__IGB_DOWN, &adapter->state))) { in igb_clean_tx_irq()
8510 netif_wake_subqueue(tx_ring->netdev, in igb_clean_tx_irq()
8511 tx_ring->queue_index); in igb_clean_tx_irq()
8513 u64_stats_update_begin(&tx_ring->tx_syncp); in igb_clean_tx_irq()
8514 tx_ring->tx_stats.restart_queue++; in igb_clean_tx_irq()
8515 u64_stats_update_end(&tx_ring->tx_syncp); in igb_clean_tx_irq()
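Editor's note: a detail worth calling out in igb_clean_tx_irq() above is the index handling. The cleaner biases the ring index negative at the start ("i -= tx_ring->count"), so the hot loop's wrap test is simply "if (!i)", and it adds the count back before storing next_to_clean. A standalone sketch of the same trick on a plain array, with all names invented:

	#include <stdio.h>

	#define RING_SIZE	8

	/* Walk 'steps' entries of a ring starting at 'start', keeping the index
	 * in [-RING_SIZE, 0) so the wrap test is a simple compare against zero.
	 */
	static unsigned int walk_ring(unsigned int start, unsigned int steps)
	{
		int i = (int)start - RING_SIZE;

		while (steps--) {
			i++;
			if (!i)			/* reached the end: wrap to the start */
				i -= RING_SIZE;
		}
		return (unsigned int)(i + RING_SIZE);	/* back to a 0..RING_SIZE-1 index */
	}

	int main(void)
	{
		/* start at 6, advance 5 entries on an 8-entry ring -> index 3 */
		printf("%u\n", walk_ring(6, 5));
		return 0;
	}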
8523 * igb_reuse_rx_page - page flip buffer and store it back on the ring
8533 u16 nta = rx_ring->next_to_alloc; in igb_reuse_rx_page()
8535 new_buff = &rx_ring->rx_buffer_info[nta]; in igb_reuse_rx_page()
8539 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igb_reuse_rx_page()
8545 new_buff->dma = old_buff->dma; in igb_reuse_rx_page()
8546 new_buff->page = old_buff->page; in igb_reuse_rx_page()
8547 new_buff->page_offset = old_buff->page_offset; in igb_reuse_rx_page()
8548 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igb_reuse_rx_page()
8554 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igb_can_reuse_rx_page()
8555 struct page *page = rx_buffer->page; in igb_can_reuse_rx_page()
8557 /* avoid re-using remote and pfmemalloc pages */ in igb_can_reuse_rx_page()
8563 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) in igb_can_reuse_rx_page()
8567 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) in igb_can_reuse_rx_page()
8569 if (rx_buffer->page_offset > IGB_LAST_OFFSET) in igb_can_reuse_rx_page()
8578 page_ref_add(page, USHRT_MAX - 1); in igb_can_reuse_rx_page()
8579 rx_buffer->pagecnt_bias = USHRT_MAX; in igb_can_reuse_rx_page()
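Editor's note: igb_can_reuse_rx_page() decides whether a receive page may be flipped and handed back to the ring: only local, non-pfmemalloc pages qualify, and the gap between the page refcount and the driver's pagecnt_bias must show that at most the just-delivered reference is outstanding; the bias is then topped back up to USHRT_MAX so the atomic refcount is rarely touched. A rough, self-contained model of that bookkeeping (ordinary integers stand in for the real page refcount):

	#include <stdio.h>

	struct toy_rx_buffer {
		int page_refcount;	/* stands in for page_ref_count(page)     */
		int pagecnt_bias;	/* references the driver has "pre-paid"   */
	};

	/* Reusable only if nobody beyond the current pass still holds the page. */
	static int toy_can_reuse(const struct toy_rx_buffer *b)
	{
		return (b->page_refcount - b->pagecnt_bias) <= 1;
	}

	int main(void)
	{
		/* freshly allocated page: the driver pre-paid all references */
		struct toy_rx_buffer b = { .page_refcount = 8, .pagecnt_bias = 8 };

		b.pagecnt_bias--;	/* first half handed to an skb this pass     */
		printf("one half in flight:   reusable=%d\n", toy_can_reuse(&b));

		b.pagecnt_bias--;	/* second half handed out, skbs not yet freed */
		printf("both halves in flight: reusable=%d\n", toy_can_reuse(&b));
		return 0;
	}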
8586 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
8592 * This function will add the data contained in rx_buffer->page to the skb.
8606 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igb_add_rx_frag()
8607 rx_buffer->page_offset, size, truesize); in igb_add_rx_frag()
8609 rx_buffer->page_offset ^= truesize; in igb_add_rx_frag()
8611 rx_buffer->page_offset += truesize; in igb_add_rx_frag()
8623 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in igb_construct_skb()
8624 xdp->data_hard_start); in igb_construct_skb()
8626 unsigned int size = xdp->data_end - xdp->data; in igb_construct_skb()
8631 net_prefetch(xdp->data); in igb_construct_skb()
8634 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); in igb_construct_skb()
8639 skb_hwtstamps(skb)->hwtstamp = timestamp; in igb_construct_skb()
8644 headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); in igb_construct_skb()
8647 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); in igb_construct_skb()
8650 size -= headlen; in igb_construct_skb()
8652 skb_add_rx_frag(skb, 0, rx_buffer->page, in igb_construct_skb()
8653 (xdp->data + headlen) - page_address(rx_buffer->page), in igb_construct_skb()
8656 rx_buffer->page_offset ^= truesize; in igb_construct_skb()
8658 rx_buffer->page_offset += truesize; in igb_construct_skb()
8661 rx_buffer->pagecnt_bias++; in igb_construct_skb()
8676 SKB_DATA_ALIGN(xdp->data_end - in igb_build_skb()
8677 xdp->data_hard_start); in igb_build_skb()
8679 unsigned int metasize = xdp->data - xdp->data_meta; in igb_build_skb()
8683 net_prefetch(xdp->data_meta); in igb_build_skb()
8686 skb = napi_build_skb(xdp->data_hard_start, truesize); in igb_build_skb()
8691 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igb_build_skb()
8692 __skb_put(skb, xdp->data_end - xdp->data); in igb_build_skb()
8698 skb_hwtstamps(skb)->hwtstamp = timestamp; in igb_build_skb()
8702 rx_buffer->page_offset ^= truesize; in igb_build_skb()
8704 rx_buffer->page_offset += truesize; in igb_build_skb()
8717 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in igb_run_xdp()
8722 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in igb_run_xdp()
8734 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); in igb_run_xdp()
8740 bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); in igb_run_xdp()
8744 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in igb_run_xdp()
8760 truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in igb_rx_frame_truesize()
8776 rx_buffer->page_offset ^= truesize; in igb_rx_buffer_flip()
8778 rx_buffer->page_offset += truesize; in igb_rx_buffer_flip()
8793 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igb_rx_checksum()
8804 if (!((skb->len == 60) && in igb_rx_checksum()
8805 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igb_rx_checksum()
8806 u64_stats_update_begin(&ring->rx_syncp); in igb_rx_checksum()
8807 ring->rx_stats.csum_err++; in igb_rx_checksum()
8808 u64_stats_update_end(&ring->rx_syncp); in igb_rx_checksum()
8816 skb->ip_summed = CHECKSUM_UNNECESSARY; in igb_rx_checksum()
8818 dev_dbg(ring->dev, "cksum success: bits %08X\n", in igb_rx_checksum()
8819 le32_to_cpu(rx_desc->wb.upper.status_error)); in igb_rx_checksum()
8826 if (ring->netdev->features & NETIF_F_RXHASH) in igb_rx_hash()
8828 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in igb_rx_hash()
8833 * igb_is_non_eop - process handling of non-EOP buffers
8840 * that this is in fact a non-EOP buffer.
8845 u32 ntc = rx_ring->next_to_clean + 1; in igb_is_non_eop()
8848 ntc = (ntc < rx_ring->count) ? ntc : 0; in igb_is_non_eop()
8849 rx_ring->next_to_clean = ntc; in igb_is_non_eop()
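Editor's note: igb_is_non_eop() advances next_to_clean and reports whether the just-consumed descriptor lacked the end-of-packet bit, in which case the caller keeps accumulating fragments into the same skb. A compact illustration of consuming descriptors until an EOP flag is seen (the descriptor layout here is invented, not the adapter's):

	#include <stdio.h>

	#define TOY_EOP	0x1

	struct toy_desc {
		int len;
		int flags;
	};

	/* Count how many descriptors make up the frame starting at 'first'. */
	static int toy_frame_descs(const struct toy_desc *ring, int count, int first)
	{
		int i = first, used = 0;

		for (;;) {
			used++;
			if (ring[i].flags & TOY_EOP)
				break;
			i = (i + 1 < count) ? i + 1 : 0;	/* advance, wrapping */
		}
		return used;
	}

	int main(void)
	{
		struct toy_desc ring[4] = {
			{ 2048, 0 }, { 2048, 0 }, { 512, TOY_EOP }, { 0, 0 },
		};

		printf("frame spans %d descriptors\n", toy_frame_descs(ring, 4, 0));
		return 0;
	}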
8860 * igb_cleanup_headers - Correct corrupted or empty headers
8879 struct net_device *netdev = rx_ring->netdev; in igb_cleanup_headers()
8880 if (!(netdev->features & NETIF_F_RXALL)) { in igb_cleanup_headers()
8894 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
8907 struct net_device *dev = rx_ring->netdev; in igb_process_skb_fields()
8915 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); in igb_process_skb_fields()
8917 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igb_process_skb_fields()
8922 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igb_process_skb_fields()
8923 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igb_process_skb_fields()
8925 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igb_process_skb_fields()
8930 skb_record_rx_queue(skb, rx_ring->queue_index); in igb_process_skb_fields()
8932 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igb_process_skb_fields()
8945 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igb_get_rx_buffer()
8948 page_count(rx_buffer->page); in igb_get_rx_buffer()
8952 prefetchw(rx_buffer->page); in igb_get_rx_buffer()
8955 dma_sync_single_range_for_cpu(rx_ring->dev, in igb_get_rx_buffer()
8956 rx_buffer->dma, in igb_get_rx_buffer()
8957 rx_buffer->page_offset, in igb_get_rx_buffer()
8961 rx_buffer->pagecnt_bias--; in igb_get_rx_buffer()
8973 /* We are not reusing the buffer so unmap it and free associated resources. */ in igb_put_rx_buffer()
8976 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igb_put_rx_buffer()
8979 __page_frag_cache_drain(rx_buffer->page, in igb_put_rx_buffer()
8980 rx_buffer->pagecnt_bias); in igb_put_rx_buffer()
8984 rx_buffer->page = NULL; in igb_put_rx_buffer()
9008 struct igb_ring *ring = q_vector->rx.ring; in igb_update_rx_stats()
9010 u64_stats_update_begin(&ring->rx_syncp); in igb_update_rx_stats()
9011 ring->rx_stats.packets += packets; in igb_update_rx_stats()
9012 ring->rx_stats.bytes += bytes; in igb_update_rx_stats()
9013 u64_stats_update_end(&ring->rx_syncp); in igb_update_rx_stats()
9015 q_vector->rx.total_packets += packets; in igb_update_rx_stats()
9016 q_vector->rx.total_bytes += bytes; in igb_update_rx_stats()
9022 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_rx_irq()
9023 struct igb_ring *rx_ring = q_vector->rx.ring; in igb_clean_rx_irq()
9025 struct sk_buff *skb = rx_ring->skb; in igb_clean_rx_irq()
9036 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in igb_clean_rx_irq()
9052 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); in igb_clean_rx_irq()
9053 size = le16_to_cpu(rx_desc->wb.upper.length); in igb_clean_rx_irq()
9064 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igb_clean_rx_irq()
9070 ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, in igb_clean_rx_irq()
9074 size -= ts_hdr_len; in igb_clean_rx_irq()
9079 unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring); in igb_clean_rx_irq()
9096 rx_buffer->pagecnt_bias++; in igb_clean_rx_irq()
9111 rx_ring->rx_stats.alloc_failed++; in igb_clean_rx_irq()
9112 rx_buffer->pagecnt_bias++; in igb_clean_rx_irq()
9113 set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igb_clean_rx_irq()
9120 /* fetch next buffer in frame if non-eop */ in igb_clean_rx_irq()
9131 total_bytes += skb->len; in igb_clean_rx_irq()
9136 napi_gro_receive(&q_vector->napi, skb); in igb_clean_rx_irq()
9146 rx_ring->skb = skb; in igb_clean_rx_irq()
9162 struct page *page = bi->page; in igb_alloc_mapped_page()
9172 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
9173 set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igb_alloc_mapped_page()
9178 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igb_alloc_mapped_page()
9183 /* if mapping failed free memory back to system since in igb_alloc_mapped_page()
9186 if (dma_mapping_error(rx_ring->dev, dma)) { in igb_alloc_mapped_page()
9189 rx_ring->rx_stats.alloc_failed++; in igb_alloc_mapped_page()
9190 set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igb_alloc_mapped_page()
9194 bi->dma = dma; in igb_alloc_mapped_page()
9195 bi->page = page; in igb_alloc_mapped_page()
9196 bi->page_offset = igb_rx_offset(rx_ring); in igb_alloc_mapped_page()
9197 page_ref_add(page, USHRT_MAX - 1); in igb_alloc_mapped_page()
9198 bi->pagecnt_bias = USHRT_MAX; in igb_alloc_mapped_page()
9204 * igb_alloc_rx_buffers - Replace used receive buffers
9212 u16 i = rx_ring->next_to_use; in igb_alloc_rx_buffers()
9220 bi = &rx_ring->rx_buffer_info[i]; in igb_alloc_rx_buffers()
9221 i -= rx_ring->count; in igb_alloc_rx_buffers()
9230 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igb_alloc_rx_buffers()
9231 bi->page_offset, bufsz, in igb_alloc_rx_buffers()
9235 * because each write-back erases this info. in igb_alloc_rx_buffers()
9237 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igb_alloc_rx_buffers()
9244 bi = rx_ring->rx_buffer_info; in igb_alloc_rx_buffers()
9245 i -= rx_ring->count; in igb_alloc_rx_buffers()
9249 rx_desc->wb.upper.length = 0; in igb_alloc_rx_buffers()
9251 cleaned_count--; in igb_alloc_rx_buffers()
9254 i += rx_ring->count; in igb_alloc_rx_buffers()
9256 if (rx_ring->next_to_use != i) { in igb_alloc_rx_buffers()
9258 rx_ring->next_to_use = i; in igb_alloc_rx_buffers()
9261 rx_ring->next_to_alloc = i; in igb_alloc_rx_buffers()
9265 * applicable for weak-ordered memory model archs, in igb_alloc_rx_buffers()
9266 * such as IA-64). in igb_alloc_rx_buffers()
9269 writel(i, rx_ring->tail); in igb_alloc_rx_buffers()
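Editor's note: the refill loop in igb_alloc_rx_buffers() writes new buffer addresses into the descriptors and only then publishes them to the NIC by writing the tail register, with a write memory barrier in between so the descriptor stores are visible before the doorbell on weakly ordered machines (the IA-64 comment above). A user-space analogue of that producer pattern using a C11 release fence (the kernel itself uses wmb()/writel(), not this API):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 4

	static uint64_t desc[RING_SIZE];	/* stands in for the descriptor ring */
	static _Atomic unsigned int tail;	/* stands in for the tail doorbell   */

	static void publish_buffers(const uint64_t *dma, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			desc[i] = dma[i];	/* fill descriptors first */

		/* Make the descriptor writes visible before the producer index moves. */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&tail, n, memory_order_relaxed);
	}

	int main(void)
	{
		uint64_t dma[2] = { 0x1000, 0x2000 };

		publish_buffers(dma, 2);
		printf("tail=%u desc0=%#llx\n", atomic_load(&tail),
		       (unsigned long long)desc[0]);
		return 0;
	}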
9274 * igb_mii_ioctl - handle MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) in igb_mii_ioctl()
9284 if (adapter->hw.phy.media_type != e1000_media_type_copper) in igb_mii_ioctl()
9285 return -EOPNOTSUPP; in igb_mii_ioctl()
9289 data->phy_id = adapter->hw.phy.addr; in igb_mii_ioctl()
9292 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in igb_mii_ioctl()
9293 &data->val_out)) in igb_mii_ioctl()
9294 return -EIO; in igb_mii_ioctl()
9297 if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, in igb_mii_ioctl()
9298 data->val_in)) in igb_mii_ioctl()
9299 return -EIO; in igb_mii_ioctl()
9302 return -EOPNOTSUPP; in igb_mii_ioctl()
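Editor's note: igb_mii_ioctl() services the standard MII ioctls for copper ports, so PHY registers can be read from user space through the generic SIOCGMIIPHY/SIOCGMIIREG interface. A small user-space sketch of that interface; "eth0" is only a placeholder interface name, error handling is minimal, and register writes via SIOCSMIIREG would additionally be privileged:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>
	#include <unistd.h>

	int main(void)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */

		if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {		/* fills mii->phy_id */
			perror("SIOCGMIIPHY");
			return 1;
		}

		mii->reg_num = MII_BMSR;	/* basic mode status register */
		if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
			perror("SIOCGMIIREG");
			return 1;
		}
		printf("PHY %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
		close(fd);
		return 0;
	}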
9308 * igb_ioctl - entry point for device ioctl calls in igb_ioctl()
9321 return -EOPNOTSUPP; in igb_ioctl()
9327 struct igb_adapter *adapter = hw->back; in igb_read_pci_cfg()
9329 pci_read_config_word(adapter->pdev, reg, value); in igb_read_pci_cfg()
9334 struct igb_adapter *adapter = hw->back; in igb_write_pci_cfg()
9336 pci_write_config_word(adapter->pdev, reg, *value); in igb_write_pci_cfg()
9341 struct igb_adapter *adapter = hw->back; in igb_read_pcie_cap_reg()
9343 if (pcie_capability_read_word(adapter->pdev, reg, value)) in igb_read_pcie_cap_reg()
9344 return -E1000_ERR_CONFIG; in igb_read_pcie_cap_reg()
9351 struct igb_adapter *adapter = hw->back; in igb_write_pcie_cap_reg()
9353 if (pcie_capability_write_word(adapter->pdev, reg, *value)) in igb_write_pcie_cap_reg()
9354 return -E1000_ERR_CONFIG; in igb_write_pcie_cap_reg()
9362 struct e1000_hw *hw = &adapter->hw; in igb_vlan_mode()
9383 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); in igb_vlan_mode()
9390 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_add_vid()
9391 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_add_vid()
9394 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_rx_add_vid()
9397 set_bit(vid, adapter->active_vlans); in igb_vlan_rx_add_vid()
9406 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_kill_vid()
9407 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_kill_vid()
9410 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) in igb_vlan_rx_kill_vid()
9413 clear_bit(vid, adapter->active_vlans); in igb_vlan_rx_kill_vid()
9422 igb_vlan_mode(adapter->netdev, adapter->netdev->features); in igb_restore_vlan()
9423 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in igb_restore_vlan()
9425 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) in igb_restore_vlan()
9426 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igb_restore_vlan()
9431 struct pci_dev *pdev = adapter->pdev; in igb_set_spd_dplx()
9432 struct e1000_mac_info *mac = &adapter->hw.mac; in igb_set_spd_dplx()
9434 mac->autoneg = 0; in igb_set_spd_dplx()
9445 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { in igb_set_spd_dplx()
9458 mac->forced_speed_duplex = ADVERTISE_10_HALF; in igb_set_spd_dplx()
9461 mac->forced_speed_duplex = ADVERTISE_10_FULL; in igb_set_spd_dplx()
9464 mac->forced_speed_duplex = ADVERTISE_100_HALF; in igb_set_spd_dplx()
9467 mac->forced_speed_duplex = ADVERTISE_100_FULL; in igb_set_spd_dplx()
9470 mac->autoneg = 1; in igb_set_spd_dplx()
9471 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; in igb_set_spd_dplx()
9478 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ in igb_set_spd_dplx()
9479 adapter->hw.phy.mdix = AUTO_ALL_MODES; in igb_set_spd_dplx()
9484 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); in igb_set_spd_dplx()
9485 return -EINVAL; in igb_set_spd_dplx()
9493 struct e1000_hw *hw = &adapter->hw; in __igb_shutdown()
9495 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; in __igb_shutdown()
9517 /* turn on all-multi mode if wake on multicast is enabled */ in __igb_shutdown()
9538 wake = wufc || adapter->en_mng_pt; in __igb_shutdown()
9560 struct e1000_hw *hw = &adapter->hw; in igb_deliver_wake_packet()
9578 /* Ensure reads are 32-bit aligned */ in igb_deliver_wake_packet()
9581 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); in igb_deliver_wake_packet()
9583 skb->protocol = eth_type_trans(skb, netdev); in igb_deliver_wake_packet()
9597 struct e1000_hw *hw = &adapter->hw; in __igb_resume()
9605 return -ENODEV; in __igb_resume()
9608 dev_err(&pdev->dev, in __igb_resume()
9618 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in __igb_resume()
9619 return -ENOMEM; in __igb_resume()
9661 return -EBUSY; in igb_runtime_idle()
9702 * igb_io_error_detected - called when PCI error is detected
9716 dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n"); in igb_io_error_detected()
9737 * igb_io_slot_reset - called after the pci bus has been reset.
9740 * Restart the card from scratch, as if from a cold-boot. Implementation
9741 * resembles the first-half of the __igb_resume routine.
9747 struct e1000_hw *hw = &adapter->hw; in igb_io_slot_reset()
9751 dev_err(&pdev->dev, in igb_io_slot_reset()
9752 "Cannot re-enable PCI device after reset.\n"); in igb_io_slot_reset()
9763 * so we should re-assign it here. in igb_io_slot_reset()
9765 hw->hw_addr = adapter->io_addr; in igb_io_slot_reset()
9776 * igb_io_resume - called when traffic can start flowing again.
9781 * second-half of the __igb_resume routine.
9790 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_io_resume()
9791 dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n"); in igb_io_resume()
9797 dev_err(&pdev->dev, "igb_up failed after reset\n"); in igb_io_resume()
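Editor's note: the three callbacks above (error_detected, slot_reset, resume) make up the PCI AER recovery flow. They are normally collected into a struct pci_error_handlers that the pci_driver points at; a minimal sketch of that wiring, assuming only the standard structure from include/linux/pci.h and not quoting the driver's actual definition:

	/* Sketch only: how PCI error-recovery callbacks are typically registered. */
	static const struct pci_error_handlers igb_err_handler_sketch = {
		.error_detected	= igb_io_error_detected,
		.slot_reset	= igb_io_slot_reset,
		.resume		= igb_io_resume,
	};

	static struct pci_driver igb_driver_sketch = {
		.name		= "igb",
		/* .id_table, .probe, .remove, ... omitted in this sketch */
		.err_handler	= &igb_err_handler_sketch,
	};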
9813 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9819 struct e1000_hw *hw = &adapter->hw; in igb_rar_set_index()
9821 u8 *addr = adapter->mac_table[index].addr; in igb_rar_set_index()
9832 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { in igb_rar_set_index()
9836 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) in igb_rar_set_index()
9839 switch (hw->mac.type) { in igb_rar_set_index()
9842 if (adapter->mac_table[index].state & in igb_rar_set_index()
9847 adapter->mac_table[index].queue; in igb_rar_set_index()
9851 adapter->mac_table[index].queue; in igb_rar_set_index()
9865 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_mac()
9869 int rar_entry = hw->mac.rar_entry_count - (vf + 1); in igb_set_vf_mac()
9870 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; in igb_set_vf_mac()
9873 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); in igb_set_vf_mac()
9874 adapter->mac_table[rar_entry].queue = vf; in igb_set_vf_mac()
9875 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; in igb_set_vf_mac()
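Editor's note: igb_set_vf_mac() places each VF's unicast address at rar_entry_count - (vf + 1), so VF filters grow down from the top of the receive-address table while the PF's filters fill from the bottom; the igb_available_rars() fragment earlier subtracts vfs_allocated_count for the same reason. A short arithmetic check of that layout, with the entry counts invented for the example:

	#include <stdio.h>

	int main(void)
	{
		int rar_entry_count = 24, num_vfs = 7, vf;

		for (vf = 0; vf < num_vfs; vf++)
			printf("VF %d -> RAR[%d]\n", vf, rar_entry_count - (vf + 1));
		printf("entries left for the PF: %d\n", rar_entry_count - num_vfs);
		return 0;
	}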
9885 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_mac()
9886 return -EINVAL; in igb_ndo_set_vf_mac()
9891 * MAC after unbinding vfio-pci and reloading igbvf after shutting in igb_ndo_set_vf_mac()
9895 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
9896 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9900 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
9901 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", in igb_ndo_set_vf_mac()
9903 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9906 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_mac()
9907 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9909 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
9913 return -EINVAL; in igb_ndo_set_vf_mac()
9939 rf_dec = (link_speed - (rf_int * tx_rate)); in igb_set_vf_rate_limit()
9952 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. in igb_set_vf_rate_limit()
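Editor's note: igb_set_vf_rate_limit() programs a per-VF transmit cap by writing link_speed / tx_rate as a fixed-point "rate factor" (integer part plus a binary fraction; the "rf_dec = link_speed - rf_int * tx_rate" line computes the remainder that becomes the fraction). A standalone version of that arithmetic; the 14-bit fraction width is an assumption made for this sketch, not quoted from the register spec:

	#include <stdio.h>

	#define RF_FRAC_BITS	14	/* assumed fraction width for this sketch */

	int main(void)
	{
		unsigned int link_speed = 1000;	/* Mbps */
		unsigned int tx_rate = 300;	/* Mbps cap for the VF */
		unsigned int rf_int, rf_dec;

		rf_int = link_speed / tx_rate;			/* integer part: 3  */
		rf_dec = link_speed - rf_int * tx_rate;		/* remainder: 100   */
		rf_dec = (rf_dec << RF_FRAC_BITS) / tx_rate;	/* fraction: ~0.333 */

		printf("rate factor = %u + %u/%u (~%.3f)\n",
		       rf_int, rf_dec, 1u << RF_FRAC_BITS,
		       rf_int + (double)rf_dec / (1u << RF_FRAC_BITS));
		return 0;
	}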
9964 if ((adapter->vf_rate_link_speed == 0) || in igb_check_vf_rate_limit()
9965 (adapter->hw.mac.type != e1000_82576)) in igb_check_vf_rate_limit()
9968 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_check_vf_rate_limit()
9969 if (actual_link_speed != adapter->vf_rate_link_speed) { in igb_check_vf_rate_limit()
9971 adapter->vf_rate_link_speed = 0; in igb_check_vf_rate_limit()
9972 dev_info(&adapter->pdev->dev, in igb_check_vf_rate_limit()
9976 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_check_vf_rate_limit()
9978 adapter->vf_data[i].tx_rate = 0; in igb_check_vf_rate_limit()
9980 igb_set_vf_rate_limit(&adapter->hw, i, in igb_check_vf_rate_limit()
9981 adapter->vf_data[i].tx_rate, in igb_check_vf_rate_limit()
9990 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_bw()
9993 if (hw->mac.type != e1000_82576) in igb_ndo_set_vf_bw()
9994 return -EOPNOTSUPP; in igb_ndo_set_vf_bw()
9997 return -EINVAL; in igb_ndo_set_vf_bw()
9999 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_ndo_set_vf_bw()
10000 if ((vf >= adapter->vfs_allocated_count) || in igb_ndo_set_vf_bw()
10004 return -EINVAL; in igb_ndo_set_vf_bw()
10006 adapter->vf_rate_link_speed = actual_link_speed; in igb_ndo_set_vf_bw()
10007 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; in igb_ndo_set_vf_bw()
10017 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_spoofchk()
10020 if (!adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
10021 return -EOPNOTSUPP; in igb_ndo_set_vf_spoofchk()
10023 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
10024 return -EINVAL; in igb_ndo_set_vf_spoofchk()
10026 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; in igb_ndo_set_vf_spoofchk()
10036 adapter->vf_data[vf].spoofchk_enabled = setting; in igb_ndo_set_vf_spoofchk()
10044 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_trust()
10045 return -EINVAL; in igb_ndo_set_vf_trust()
10046 if (adapter->vf_data[vf].trusted == setting) in igb_ndo_set_vf_trust()
10049 adapter->vf_data[vf].trusted = setting; in igb_ndo_set_vf_trust()
10051 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", in igb_ndo_set_vf_trust()
10060 if (vf >= adapter->vfs_allocated_count) in igb_ndo_get_vf_config()
10061 return -EINVAL; in igb_ndo_get_vf_config()
10062 ivi->vf = vf; in igb_ndo_get_vf_config()
10063 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); in igb_ndo_get_vf_config()
10064 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; in igb_ndo_get_vf_config()
10065 ivi->min_tx_rate = 0; in igb_ndo_get_vf_config()
10066 ivi->vlan = adapter->vf_data[vf].pf_vlan; in igb_ndo_get_vf_config()
10067 ivi->qos = adapter->vf_data[vf].pf_qos; in igb_ndo_get_vf_config()
10068 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; in igb_ndo_get_vf_config()
10069 ivi->trusted = adapter->vf_data[vf].trusted; in igb_ndo_get_vf_config()
10075 struct e1000_hw *hw = &adapter->hw; in igb_vmm_control()
10078 switch (hw->mac.type) { in igb_vmm_control()
10103 if (adapter->vfs_allocated_count) { in igb_vmm_control()
10107 adapter->vfs_allocated_count); in igb_vmm_control()
10116 struct e1000_hw *hw = &adapter->hw; in igb_init_dmac()
10121 if (hw->mac.type > e1000_82580) { in igb_init_dmac()
10122 if (adapter->flags & IGB_FLAG_DMAC) { in igb_init_dmac()
10127 * than the Rx threshold. Set hwm to PBA - max frame in igb_init_dmac()
10128 * size in 16B units, capping it at PBA - 6KB. in igb_init_dmac()
10130 hwm = 64 * (pba - 6); in igb_init_dmac()
10136 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max in igb_init_dmac()
10137 * frame size, capping it at PBA - 10KB. in igb_init_dmac()
10139 dmac_thr = pba - 10; in igb_init_dmac()
10147 /* watchdog timer = +/- 1000 usec in 32 usec intervals */ in igb_init_dmac()
10150 /* Disable BMC-to-OS Watchdog Enable */ in igb_init_dmac()
10151 if (hw->mac.type != e1000_i354) in igb_init_dmac()
10156 * coalescing (smart FIFO) - UTRESH=0 in igb_init_dmac()
10164 /* free space in tx packet buffer to wake from in igb_init_dmac()
10167 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - in igb_init_dmac()
10168 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); in igb_init_dmac()
10171 if (hw->mac.type >= e1000_i210 || in igb_init_dmac()
10172 (adapter->flags & IGB_FLAG_DMAC)) { in igb_init_dmac()
10176 } /* endif adapter->dmac is not disabled */ in igb_init_dmac()
10177 } else if (hw->mac.type == e1000_82580) { in igb_init_dmac()
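Editor's note: the DMA-coalescing setup above derives two thresholds from the packet-buffer allocation. The high-water mark is "PBA minus max frame size" expressed in 16-byte units and capped at PBA - 6 KB (hence hwm = 64 * (pba - 6), since 1 KB equals 64 units of 16 bytes), and the Rx threshold is capped at PBA - 10 KB. A worked example of that arithmetic for an assumed 34 KB packet buffer:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba_kb = 34;	/* assumed Rx packet buffer size in KB */

		/* cap at (PBA - 6 KB), expressed in 16-byte units: 1 KB == 64 units */
		unsigned int hwm = 64 * (pba_kb - 6);

		/* DMA coalescing Rx threshold capped at (PBA - 10 KB), in KB */
		unsigned int dmac_thr = pba_kb - 10;

		printf("hwm = %u x 16B (= %u KB), dmac_thr = %u KB\n",
		       hwm, hwm / 64, dmac_thr);
		return 0;
	}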
10186 * igb_read_i2c_byte - Reads 8 bit word over I2C
10199 struct i2c_client *this_client = adapter->i2c_client; in igb_read_i2c_byte()
10208 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) in igb_read_i2c_byte()
10212 hw->mac.ops.release_swfw_sync(hw, swfw_mask); in igb_read_i2c_byte()
10223 * igb_write_i2c_byte - Writes 8 bit word over I2C
10236 struct i2c_client *this_client = adapter->i2c_client; in igb_write_i2c_byte()
10243 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) in igb_write_i2c_byte()
10246 hw->mac.ops.release_swfw_sync(hw, swfw_mask); in igb_write_i2c_byte()
10257 struct net_device *netdev = adapter->netdev; in igb_reinit_queues()
10258 struct pci_dev *pdev = adapter->pdev; in igb_reinit_queues()
10267 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in igb_reinit_queues()
10268 return -ENOMEM; in igb_reinit_queues()
10281 spin_lock(&adapter->nfc_lock); in igb_nfc_filter_exit()
10283 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) in igb_nfc_filter_exit()
10286 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) in igb_nfc_filter_exit()
10289 spin_unlock(&adapter->nfc_lock); in igb_nfc_filter_exit()
10296 spin_lock(&adapter->nfc_lock); in igb_nfc_filter_restore()
10298 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) in igb_nfc_filter_restore()
10301 spin_unlock(&adapter->nfc_lock); in igb_nfc_filter_restore()