Lines Matching +full:1 +full:qbv (Linux kernel igc Ethernet driver, igc_main.c; the leading number on each hit is the source line)
30 #define IGC_XDP_TX BIT(1)
33 static int debug = -1;
74 low_latency = 1,
102 fc->send_xon = 1; in igc_reset()
752 txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) | in igc_configure_tx_ring()
807 /* Don't need to set TUOFL or IPOFL, they default to 1 */ in igc_setup_mrqc()
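The TUOFL/IPOFL note above refers to receive-checksum-offload bits that come out of reset already set, so igc_setup_mrqc() leaves them alone. A minimal sketch of that "rely on the hardware default" pattern; the register model and bit positions below are assumptions for illustration, not the real RXCSUM layout:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed bit positions, for illustration only */
    #define RXCSUM_IPOFL (1u << 8)   /* IPv4 checksum offload */
    #define RXCSUM_TUOFL (1u << 9)   /* TCP/UDP checksum offload */

    int main(void)
    {
        uint32_t rxcsum = RXCSUM_IPOFL | RXCSUM_TUOFL; /* reset default: both 1 */
        int offload_enabled = 1;

        /* when enabling, there is nothing to write ("they default to 1");
         * only the disable path needs a read-modify-write */
        if (!offload_enabled)
            rxcsum &= ~(RXCSUM_IPOFL | RXCSUM_TUOFL);

        printf("rxcsum=0x%08x\n", (unsigned int)rxcsum);
        return 0;
    }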
970 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
1102 buffer->gso_segs = 1; in igc_init_empty_frame()
1280 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
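IGC_SET_FLAG() translates a flag from one bit position to another by multiplying or dividing by the ratio of the two masks; XOR-ing the result here clears the default-on IFCS (insert FCS) bit only when skb->no_fcs is set. A standalone sketch of the idiom; the macro body and the 0x02000000 mask are modeled on the driver, not quoted from it:

    #include <stdint.h>
    #include <stdio.h>

    /* modeled on the driver's IGC_SET_FLAG: scale a source bit into
     * the destination bit position using compile-time constants */
    #define SET_FLAG(input, flag, result)                          \
        (((flag) <= (result)) ?                                    \
         ((uint32_t)((input) & (flag)) * ((result) / (flag))) :    \
         ((uint32_t)((input) & (flag)) / ((flag) / (result))))

    int main(void)
    {
        uint32_t ifcs_bit = 0x02000000; /* assumed IFCS mask */
        uint32_t cmd_type = ifcs_bit;   /* IFCS starts out set */
        uint32_t no_fcs = 1;            /* pretend skb->no_fcs is set */

        /* XOR flips the default-on bit off only when no_fcs == 1 */
        cmd_type ^= SET_FLAG(no_fcs, 1, ifcs_bit);
        printf("IFCS %s\n", (cmd_type & ifcs_bit) ? "kept" : "cleared");
        return 0;
    }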
1441 return -1; in igc_tx_map()
1522 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1536 return 1; in igc_tso()
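igc_tso() returns 1 to signal that a context descriptor was produced. The bytecount adjustment two hits above accounts for header replication: skb->len already includes one copy of the headers, and the hardware emits one more copy per extra segment. A worked sketch of the arithmetic (all sizes hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int skb_len = 65226; /* headers + TSO payload */
        unsigned int hdr_len = 66;    /* Ethernet + IPv4 + TCP */
        unsigned int mss = 1448;
        unsigned int payload = skb_len - hdr_len;

        /* DIV_ROUND_UP(payload, mss): number of wire segments */
        unsigned int gso_segs = (payload + mss - 1) / mss;

        /* one header is already in skb_len; each extra segment
         * repeats it, hence (gso_segs - 1) * hdr_len */
        unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

        printf("segs=%u wire_bytes=%u\n", gso_segs, bytecount);
        return 0;
    }

This prints segs=45 wire_bytes=68130, the on-wire byte total the Tx statistics later report.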
1608 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, in igc_xmit_frame_ring()
1609 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, in igc_xmit_frame_ring()
1611 * + 1 desc for context descriptor, in igc_xmit_frame_ring()
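The three comment fragments above enumerate the worst-case descriptor budget checked before queuing a frame: the linear head and every page fragment may each need several data descriptors, plus one context descriptor (the driver also reserves a small gap so tail never touches head). A sketch of the count, with an assumed per-descriptor data limit:

    #include <stdio.h>

    #define MAX_DATA_PER_TXD 16384u /* assumed IGC_MAX_DATA_PER_TXD */

    static unsigned int txd_count(unsigned int size)
    {
        return (size + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
    }

    int main(void)
    {
        unsigned int headlen = 1514;
        unsigned int frag_size[] = { 32768, 32768, 4096 };
        unsigned int needed = txd_count(headlen) + 1; /* head + context */

        for (unsigned int f = 0; f < 3; f++)
            needed += txd_count(frag_size[f]);

        needed += 2; /* gap so tail never touches head */
        printf("descriptors needed: %u\n", needed);
        return 0;
    }

If that many descriptors are not free, the driver stops the queue instead of transmitting.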
1634 * "dirties" the current Qbv cycle. This ensures that the in igc_xmit_frame_ring()
1635 * upcoming packet, which is scheduled in the next Qbv cycle, in igc_xmit_frame_ring()
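These comment fragments describe launch-time bookkeeping: the ring remembers which Qbv cycle last carried a launch-time packet, and a packet scheduled past the current cycle boundary is marked as the first of its cycle (which may require inserting an empty frame). A simplified sketch of the boundary arithmetic; the names and the modular computation are illustrative, not the driver's exact code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t base_time  = 0;        /* Qbv schedule start (ns) */
        int64_t cycle_time = 1000000;  /* 1 ms cycle */
        int64_t last_tx_cycle = 4;     /* cycle already "dirtied" */
        int64_t txtime = 5200000;      /* requested launch time (ns) */

        int64_t cycle = (txtime - base_time) / cycle_time;
        bool first_flag = (cycle != last_tx_cycle); /* first pkt of new cycle */

        printf("cycle=%lld first=%d\n", (long long)cycle, (int)first_flag);
        return 0;
    }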
1650 first->gso_segs = 1; in igc_xmit_frame_ring()
2086 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2100 if (unlikely(pagecnt_bias == 1)) { in igc_can_reuse_rx_page()
2101 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
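These three hits are the page-recycling fast path: the driver front-loads a page's refcount and hands references to the stack by decrementing a local pagecnt_bias, so deciding whether a page can be reused is a plain comparison rather than an atomic operation per packet. A self-contained model of both checks:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct rx_buffer {
        unsigned int page_refcount; /* stands in for page_count(page) */
        unsigned int pagecnt_bias;  /* references the driver still owns */
    };

    static bool can_reuse(struct rx_buffer *b)
    {
        /* references given away = page_refcount - pagecnt_bias;
         * more than one means the stack still holds packet data */
        if ((b->page_refcount - b->pagecnt_bias) > 1)
            return false;

        /* bias nearly spent: recharge with one bulk ref, as in
         * page_ref_add(page, USHRT_MAX - 1) */
        if (b->pagecnt_bias == 1) {
            b->page_refcount += USHRT_MAX - 1;
            b->pagecnt_bias = USHRT_MAX;
        }
        return true;
    }

    int main(void)
    {
        struct rx_buffer b = { .page_refcount = 2, .pagecnt_bias = 1 };
        bool reuse = can_reuse(&b);

        printf("reuse=%d bias=%u\n", (int)reuse, b.pagecnt_bias);
        return 0;
    }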
2121 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2241 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2402 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
3021 used_desc += 1; in igc_xsk_request_launch_time()
3054 * descriptor. When the launch time falls into the next Qbv cycle, we in igc_xdp_xmit_zc()
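In the zero-copy path, a launch time that lands in the next Qbv cycle costs an extra descriptor (an empty frame that closes out the current cycle), so the transmit loop budgets used_desc before committing. A toy model of that budgeting; the cycle-crossing test here is fabricated for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int avail = 8;  /* free Tx descriptors */
        unsigned int budget = 8, used_desc = 0, sent = 0;

        for (unsigned int i = 0; i < budget; i++) {
            unsigned int need = 1;            /* the data descriptor */
            int crosses_cycle = (i % 3 == 0); /* pretend every third launch
                                               * time enters the next cycle */
            if (crosses_cycle)
                need += 1;                    /* extra empty-frame descriptor */

            if (used_desc + need > avail)
                break;                        /* never overrun the ring */
            used_desc += need;
            sent++;
        }
        printf("sent=%u descriptors=%u\n", sent, used_desc);
        return 0;
    }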
3101 bi->gso_segs = 1; in igc_xdp_xmit_zc()
3338 return -1; in igc_find_mac_filter()
3355 return -1; in igc_get_avail_mac_filter_slot()
3416 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3492 return -1; in igc_get_avail_etype_filter_slot()
3548 return -1; in igc_find_etype_filter()
3663 (data[data_idx + 1] << 8) | in igc_write_flex_filter_ll()
4069 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
4094 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1); in igc_enable_empty_addr_recv()
4182 * at least 1 descriptor unused to make sure in igc_configure()
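Keeping at least one descriptor unused is what lets head == tail unambiguously mean "empty"; a full ring stops with tail one slot behind head. The driver's unused-count formula (igc_desc_unused) reduces to the sketch below:

    #include <stdio.h>

    static unsigned int desc_unused(unsigned int count,
                                    unsigned int ntc, /* next_to_clean */
                                    unsigned int ntu) /* next_to_use */
    {
        /* the "- 1" reserves the slot that keeps full != empty */
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
        printf("%u\n", desc_unused(256, 0, 0));  /* empty ring: 255 usable */
        printf("%u\n", desc_unused(256, 10, 9)); /* full ring: 0 */
        return 0;
    }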
4236 rx_queue >> 1, in igc_assign_vector()
4240 tx_queue >> 1, in igc_assign_vector()
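The `>> 1` in both hits reflects the IVAR layout: one 32-bit interrupt-vector register serves two queues, so the queue number's upper bits select the register and the low bit selects the half. The field layout and valid bit below are assumptions for the sketch, not the documented register format:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ivar[4] = { 0 };  /* toy IVAR bank: 2 queues per entry */
        unsigned int rx_queue = 3, msix_vector = 2;

        uint32_t *reg = &ivar[rx_queue >> 1];     /* which register */
        unsigned int shift = (rx_queue & 1) * 16; /* which half (assumed) */

        *reg &= ~(0xFFu << shift);
        *reg |= ((msix_vector | 0x80u) << shift); /* 0x80 = valid (assumed) */

        printf("ivar[%u]=0x%08x\n", rx_queue >> 1, (unsigned int)*reg);
        return 0;
    }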
4253 q_vector->set_itr = 1; in igc_assign_vector()
4580 q_vector->set_itr = 1; in igc_set_itr()
4626 /* if Tx handler is separate add 1 for every Tx queue */ in igc_set_interrupt_capability()
4633 /* add 1 vector for link status interrupts */ in igc_set_interrupt_capability()
4661 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4663 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4664 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4665 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
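Taken together, these hits show the MSI-X budget and its fallback: one vector per paired Rx/Tx queue, one more per Tx queue when Tx has its own handler, plus one for link/other causes; if allocation fails, everything collapses to a single queue pair and vector. The budget arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rss_queues = 4;
        int queue_pairs = 1; /* Tx shares the Rx vector */
        unsigned int numvecs = rss_queues;

        if (!queue_pairs)
            numvecs += rss_queues; /* 1 for every separate Tx queue */
        numvecs += 1;              /* 1 for link status interrupts */

        printf("MSI-X vectors requested: %u\n", numvecs);
        /* on failure: rss_queues = num_rx_queues = num_tx_queues =
         *             num_q_vectors = 1, as in the hits above */
        return 0;
    }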
4737 q_vector->set_itr = 1; in igc_update_ring_itr()
4753 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4833 return min(work_done, budget - 1); in igc_poll()
4857 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ in igc_alloc_q_vector()
4858 if (txr_count > 1 || rxr_count > 1) in igc_alloc_q_vector()
4963 0, 0, 1, rxr_idx); in igc_alloc_q_vectors()
5532 return 1; in igc_set_features()
5601 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5602 adapter->perout[1].period); in igc_tsync_interrupt()
5608 adapter->perout[1].start = ts; in igc_tsync_interrupt()
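For periodic output (perout), each target-time interrupt re-arms the hardware by advancing the start time one period: ts = start + period, then start = ts. The normalization timespec64_add performs looks like this sketch:

    #include <stdio.h>

    struct ts64 { long long sec; long nsec; };

    static struct ts64 ts_add(struct ts64 a, struct ts64 b)
    {
        struct ts64 r = { a.sec + b.sec, a.nsec + b.nsec };

        if (r.nsec >= 1000000000L) { /* carry nanoseconds into seconds */
            r.nsec -= 1000000000L;
            r.sec += 1;
        }
        return r;
    }

    int main(void)
    {
        struct ts64 start = { 100, 900000000L };  /* current edge */
        struct ts64 period = { 0, 500000000L };   /* 0.5 s period */
        struct ts64 next = ts_add(start, period); /* next edge to program */

        printf("next=%lld.%09ld\n", next.sec, next.nsec);
        return 0;
    }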
5625 event.index = 1; in igc_tsync_interrupt()
5655 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5889 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5897 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5974 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
6054 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
6100 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
6378 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6395 if (queue_uses[i] > 1 && in validate_schedule()
6527 * 1. Qbv users can specify a cycle time that is not equal in igc_save_qbv_schedule()
6537 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
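validate_schedule() enforces the i225 restriction that each queue may be open for only one contiguous stretch per Qbv cycle: a queue's use count may exceed 1 only when the previous entry also had its gate open (hence the prev pointer in the first hit above). A compact model of that check:

    #include <stdio.h>

    int main(void)
    {
        /* gate mask per entry; queue 0 closes and reopens: invalid */
        unsigned int gates[] = { 0x1, 0x3, 0x2, 0x1 };
        unsigned int n = 4, uses[4] = { 0 };

        for (unsigned int e = 0; e < n; e++) {
            unsigned int prev = e ? gates[e - 1] : 0;

            for (unsigned int q = 0; q < 4; q++) {
                if (!(gates[e] & (1u << q)))
                    continue;
                /* a repeat use is fine only if it extends the
                 * previous entry's open gate */
                if (++uses[q] > 1 && !(prev & (1u << q))) {
                    printf("queue %u opens twice\n", q);
                    return 1;
                }
            }
        }
        printf("schedule ok\n");
        return 0;
    }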
6650 if (queue < 0 || queue > 1) in igc_save_cbs_params()
6663 if (queue == 1 && !cbs_status[0]) { in igc_save_cbs_params()
6669 if (queue == 0 && cbs_status[1]) { in igc_save_cbs_params()
6694 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
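Both CBS hits encode the same hardware ordering rule: credit-based shaping exists only on queues 0 and 1, queue 1 cannot be enabled unless queue 0 already is, and queue 0 cannot be disabled while queue 1 is still shaping. A standalone model (error values are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cbs_status[2]; /* CBS state for queues 0 and 1 */

    static int save_cbs(int queue, bool enable)
    {
        if (queue < 0 || queue > 1)
            return -1;                 /* CBS only on queues 0 and 1 */
        if (enable && queue == 1 && !cbs_status[0])
            return -1;                 /* queue 1 needs queue 0 first */
        if (!enable && queue == 0 && cbs_status[1])
            return -1;                 /* queue 1 still depends on queue 0 */
        cbs_status[queue] = enable;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", save_cbs(1, true));  /* -1: queue 0 not enabled */
        printf("%d\n", save_cbs(0, true));  /*  0 */
        printf("%d\n", save_cbs(1, true));  /*  0 */
        printf("%d\n", save_cbs(0, false)); /* -1: queue 1 still active */
        return 0;
    }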
6755 for (i = 1; i < num_tc; i++) { in igc_tsn_is_tc_to_queue_priority_ordered()
6756 if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i]) in igc_tsn_is_tc_to_queue_priority_ordered()
6789 if (mqprio->qopt.count[i] != 1) { in igc_tsn_enable_mqprio()
6810 err = netdev_set_tc_queue(adapter->netdev, i, 1, in igc_tsn_enable_mqprio()
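The mqprio hits validate the TC-to-queue map: every traffic class must own exactly one queue, and queue offsets must not decrease with TC index, so hardware priority order matches the qopt layout. A minimal version of the validation:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num_tc = 4;
        unsigned int offset[4] = { 0, 1, 2, 3 }; /* queue per TC */
        unsigned int count[4]  = { 1, 1, 1, 1 }; /* exactly one each */
        bool ok = true;

        for (unsigned int i = 0; i < num_tc; i++) {
            if (count[i] != 1)
                ok = false; /* one queue per TC is required */
            if (i && offset[i - 1] > offset[i])
                ok = false; /* priority order violated */
        }
        printf("%s\n", ok ? "accepted" : "rejected");
        return 0;
    }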
7490 return __igc_shutdown(to_pci_dev(dev), NULL, 1); in igc_runtime_suspend()