/linux/drivers/net/ethernet/google/gve/
gve_ethtool.c
    96   int num_tx_queues;  in gve_get_strings() local
    99   num_tx_queues = gve_num_tx_queues(priv);  in gve_get_strings()
    110  for (i = 0; i < num_tx_queues; i++)  in gve_get_strings()
    133  int num_tx_queues;  in gve_get_sset_count() local
    135  num_tx_queues = gve_num_tx_queues(priv);  in gve_get_sset_count()
    140  (num_tx_queues * NUM_GVE_TX_CNTS);  in gve_get_sset_count()
    168  int num_tx_queues;  in gve_get_ethtool_stats() local
    175  num_tx_queues = gve_num_tx_queues(priv);  in gve_get_ethtool_stats()
    186  tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,  in gve_get_ethtool_stats()
    192  for (ring = 0; ring < num_tx_queues; ring++) {  in gve_get_ethtool_stats()
    [all …]
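
The gve hits above are the usual ethtool bookkeeping: gve_get_sset_count() sizes the stat set as a fixed block plus num_tx_queues blocks of per-queue counters, and the strings/stats callbacks loop over the same count. A minimal standalone C model of that arithmetic follows; the counter names and the two counts are invented placeholders, not gve's NUM_GVE_TX_CNTS.

#include <stdio.h>

#define NUM_MAIN_CNTS 3   /* hypothetical device-wide counters */
#define NUM_TXQ_CNTS  2   /* hypothetical per-TX-queue counters */

static int get_sset_count(int num_tx_queues)
{
        return NUM_MAIN_CNTS + num_tx_queues * NUM_TXQ_CNTS;
}

static void get_strings(char *buf, size_t len_each, int num_tx_queues)
{
        static const char *main_names[NUM_MAIN_CNTS] = {
                "rx_packets", "tx_packets", "tx_dropped",
        };
        char *p = buf;
        int i, j;

        for (i = 0; i < NUM_MAIN_CNTS; i++, p += len_each)
                snprintf(p, len_each, "%s", main_names[i]);

        /* one block of per-queue counter names per TX queue */
        for (i = 0; i < num_tx_queues; i++)
                for (j = 0; j < NUM_TXQ_CNTS; j++, p += len_each)
                        snprintf(p, len_each, "tx_q%d_cnt%d", i, j);
}

int main(void)
{
        enum { QUEUES = 4, LEN = 32 };
        char names[(NUM_MAIN_CNTS + QUEUES * NUM_TXQ_CNTS) * LEN];

        get_strings(names, LEN, QUEUES);
        printf("%d stat names, last one: %s\n", get_sset_count(QUEUES),
               names + (get_sset_count(QUEUES) - 1) * LEN);
        return 0;
}
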

/linux/net/sched/
sch_mq.c
    62   for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)  in mq_destroy()
    83   priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mq_init()
    88   for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_init()
    113  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_attach()
    143  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_dump()
    164  if (ntx >= dev->num_tx_queues)  in mq_queue_get()
    249  for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {  in mq_walk()
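
The mq hits show the one-child-qdisc-per-TX-queue layout: mq_init() kcallocs an array of dev->num_tx_queues pointers and fills it, while mq_destroy() stops at the first NULL slot so a partially initialised array tears down cleanly. A standalone sketch of that allocate/teardown shape, with plain C stand-ins for the kernel types:

#include <stdlib.h>

struct child {                        /* stand-in for a per-queue child qdisc */
        unsigned int queue_index;
};

struct mq_priv {
        unsigned int num_tx_queues;
        struct child **children;
};

static int mq_like_init(struct mq_priv *priv, unsigned int num_tx_queues)
{
        unsigned int ntx;

        priv->num_tx_queues = num_tx_queues;
        priv->children = calloc(num_tx_queues, sizeof(priv->children[0]));
        if (!priv->children)
                return -1;

        for (ntx = 0; ntx < num_tx_queues; ntx++) {
                priv->children[ntx] = calloc(1, sizeof(struct child));
                if (!priv->children[ntx])
                        return -1;    /* leave later slots NULL for destroy() */
                priv->children[ntx]->queue_index = ntx;
        }
        return 0;
}

static void mq_like_destroy(struct mq_priv *priv)
{
        unsigned int ntx;

        if (!priv->children)
                return;
        /* stop at the first NULL: init may have failed part-way through */
        for (ntx = 0; ntx < priv->num_tx_queues && priv->children[ntx]; ntx++)
                free(priv->children[ntx]);
        free(priv->children);
}

int main(void)
{
        struct mq_priv priv = { 0 };

        mq_like_init(&priv, 8);   /* destroy copes even if this failed part-way */
        mq_like_destroy(&priv);
        return 0;
}
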

sch_mqprio.c
    103  ntx < dev->num_tx_queues && priv->qdiscs[ntx];  in mqprio_destroy()
    367  if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)  in mqprio_init()
    391  priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mqprio_init()
    396  for (i = 0; i < dev->num_tx_queues; i++) {  in mqprio_init()
    440  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_attach()
    458  if (ntx >= dev->num_tx_queues)  in mqprio_queue_get()
    571  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_dump()
    632  return (ntx <= dev->num_tx_queues) ? ntx : 0;  in mqprio_find()
    739  for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {  in mqprio_walk()
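
mqprio_find() and mqprio_queue_get() map a class number onto a TX queue index and reject anything beyond num_tx_queues. The sketch below assumes the 1-based class minor / 0-based queue convention that the "<=" test suggests; that reading is mine, not stated in the hits.

#include <stdio.h>

/* returns a 0-based queue index, or -1 if the class number is out of range */
static long class_to_txq(unsigned long class_minor, unsigned int num_tx_queues)
{
        if (!class_minor || class_minor > num_tx_queues)
                return -1;
        return (long)(class_minor - 1);
}

int main(void)
{
        unsigned int num_tx_queues = 4;
        unsigned long cls;

        for (cls = 0; cls <= num_tx_queues + 1; cls++)
                printf("class %lu -> queue %ld\n", cls,
                       class_to_txq(cls, num_tx_queues));
        return 0;
}
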

sch_taprio.c
    792   if (q->cur_txq[tc] >= dev->num_tx_queues)  in taprio_dequeue_tc_priority()
    814   for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_dequeue_txq_priority()
    1188  if (qopt->num_tc > dev->num_tx_queues) {  in taprio_parse_mqprio_opt()
    2005  for (i = 0; i < dev->num_tx_queues; i++)  in taprio_reset()
    2029  for (i = 0; i < dev->num_tx_queues; i++)  in taprio_destroy()
    2082  q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),  in taprio_init()
    2090  for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_init()
    2124  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in taprio_attach()
    2159  if (ntx >= dev->num_tx_queues)  in taprio_queue_get()
    2443  if (ntx >= dev->num_tx_queues)  in taprio_leaf()
    [all …]
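
taprio_dequeue_tc_priority() keeps a per-traffic-class cursor (cur_txq[tc]) that is wrapped when it reaches dev->num_tx_queues. A standalone sketch of such a wrapping round-robin cursor; the first_txq/count bookkeeping is illustrative, not taprio's actual state.

#include <stdio.h>

struct tc_cursor {
        unsigned int first_txq;   /* first TX queue owned by this TC */
        unsigned int txq_count;   /* number of TX queues owned by this TC */
        unsigned int cur_txq;     /* next queue to try for this TC */
};

static unsigned int next_txq(struct tc_cursor *tc, unsigned int num_tx_queues)
{
        unsigned int txq = tc->cur_txq;

        tc->cur_txq++;
        /* wrap both at the end of the TC's range and at the device limit */
        if (tc->cur_txq >= tc->first_txq + tc->txq_count ||
            tc->cur_txq >= num_tx_queues)
                tc->cur_txq = tc->first_txq;
        return txq;
}

int main(void)
{
        struct tc_cursor tc = { .first_txq = 2, .txq_count = 3, .cur_txq = 2 };
        unsigned int num_tx_queues = 4;   /* deliberately clips the TC's range */
        int i;

        for (i = 0; i < 8; i++)
                printf("%u ", next_txq(&tc, num_tx_queues));
        printf("\n");   /* 2 3 2 3 ... because queue 4 exceeds num_tx_queues */
        return 0;
}
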

/linux/drivers/net/ethernet/engleder/
tsnep_ethtool.c
    100  max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;  in tsnep_ethtool_get_regs_len()
    136  int tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_strings()
    172  int tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_ethtool_stats()
    194  for (i = 0; i < adapter->num_tx_queues; i++) {  in tsnep_ethtool_get_ethtool_stats()
    249  tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_sset_count()
    413  if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))  in tsnep_ethtool_get_per_queue_coalesce()
    436  if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))  in tsnep_ethtool_set_per_queue_coalesce()
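
The two per-queue-coalesce hits validate a user-supplied queue index against the larger of the TX and RX queue counts. A small sketch of that check, with placeholder types:

#include <errno.h>
#include <stdio.h>

struct adapter_counts {
        unsigned int num_tx_queues;
        unsigned int num_rx_queues;
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

static int check_queue_index(const struct adapter_counts *a, unsigned int queue)
{
        if (queue >= max_u(a->num_tx_queues, a->num_rx_queues))
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct adapter_counts a = { .num_tx_queues = 2, .num_rx_queues = 4 };

        printf("queue 3: %d\n", check_queue_index(&a, 3));  /* ok: 0 */
        printf("queue 4: %d\n", check_queue_index(&a, 4));  /* -EINVAL */
        return 0;
}
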

tsnep_xdp.c
    28  queue_id >= adapter->num_tx_queues)  in tsnep_xdp_enable_pool()
    64  queue_id >= adapter->num_tx_queues)  in tsnep_xdp_disable_pool()

/linux/drivers/net/ethernet/wangxun/libwx/
wx_ethtool.c
    56   #define WX_NUM_RX_QUEUES netdev->num_tx_queues
    57   #define WX_NUM_TX_QUEUES netdev->num_tx_queues
    94   for (i = 0; i < netdev->num_tx_queues; i++) {  in wx_get_strings()
    131  for (j = 0; j < netdev->num_tx_queues; j++) {  in wx_get_ethtool_stats()
    205  if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {  in wx_get_drvinfo()
    207  (WX_NUM_TX_QUEUES - wx->num_tx_queues) *  in wx_get_drvinfo()

wx_lib.c
    1532  if (r_idx >= wx->num_tx_queues)  in wx_xmit_frame()
    1533  r_idx = r_idx % wx->num_tx_queues;  in wx_xmit_frame()
    1600  wx->num_tx_queues = f->indices;  in wx_set_rss_queues()
    1607  wx->num_tx_queues = 1;  in wx_set_num_queues()
    1627  nvecs = max(wx->num_rx_queues, wx->num_tx_queues);  in wx_acquire_msix_vectors()
    1730  for (i = 0; i < wx->num_tx_queues; i++)  in wx_cache_ring_rss()
    1890  unsigned int txr_remaining = wx->num_tx_queues;  in wx_alloc_q_vectors()
    1915  wx->num_tx_queues = 0;  in wx_alloc_q_vectors()
    1937  wx->num_tx_queues = 0;  in wx_free_q_vectors()
    2373  for (i = 0; i < wx->num_tx_queues; i++)  in wx_clean_all_tx_rings()
    [all …]
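
In the wx_xmit_frame() hits the queue index chosen by the stack is folded back into the configured range with a guarded modulo. A standalone sketch of that clamp, with a placeholder struct:

#include <stdio.h>

struct dev_state {
        unsigned int num_tx_queues;   /* queues currently configured */
};

static unsigned int pick_tx_ring(const struct dev_state *dev,
                                 unsigned int queue_mapping)
{
        unsigned int r_idx = queue_mapping;

        /* only pay for the division when the index is actually out of range */
        if (r_idx >= dev->num_tx_queues)
                r_idx = r_idx % dev->num_tx_queues;
        return r_idx;
}

int main(void)
{
        struct dev_state dev = { .num_tx_queues = 4 };

        printf("%u %u %u\n", pick_tx_ring(&dev, 1),
               pick_tx_ring(&dev, 4), pick_tx_ring(&dev, 9));  /* 1 0 1 */
        return 0;
}
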

/linux/drivers/net/ethernet/broadcom/
bcmsysport.c
    338   return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_sset_count()
    362   for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_strings()
    434   for (q = 0; q < priv->netdev->num_tx_queues; q++) {  in bcm_sysport_update_tx_stats()
    498   dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_stats()
    500   for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_stats()
    632   for (i = 0; i < dev->num_tx_queues; i++)  in bcm_sysport_set_coalesce()
    988   for (q = 0; q < priv->netdev->num_tx_queues; q++)  in bcm_sysport_tx_reclaim_all()
    1138  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_rx_isr()
    1171  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_tx_isr()
    2004  for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_open()
    [all …]

/linux/drivers/net/ethernet/intel/igc/
igc_tsn.c
    12   for (i = 0; i < adapter->num_tx_queues; i++) {  in is_any_launchtime()
    26   for (i = 0; i < adapter->num_tx_queues; i++) {  in is_cbs_enabled()
    151  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_disable_offload()
    242  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_enable_offload()
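
is_any_launchtime() and is_cbs_enabled() are "does any TX ring have this feature?" scans bounded by num_tx_queues. A sketch with an invented per-ring flag:

#include <stdbool.h>
#include <stdio.h>

struct tx_ring_cfg {
        bool launchtime_enable;   /* placeholder per-ring flag */
};

struct adapter_cfg {
        unsigned int num_tx_queues;
        struct tx_ring_cfg tx_ring[8];
};

static bool any_launchtime(const struct adapter_cfg *adapter)
{
        unsigned int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i].launchtime_enable)
                        return true;
        return false;
}

int main(void)
{
        struct adapter_cfg adapter = { .num_tx_queues = 4 };

        adapter.tx_ring[2].launchtime_enable = true;
        printf("%d\n", any_launchtime(&adapter));   /* 1 */
        return 0;
}
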

igc_dump.c
    135  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()
    166  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()

igc_main.c
    302   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_free_all_tx_resources()
    314   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_clean_all_tx_rings()
    339   for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_disable_all_tx_rings_hw()
    395   for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_setup_all_tx_resources()
    770   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_configure_tx()
    1693  if (r_idx >= adapter->num_tx_queues)  in igc_tx_queue_mapping()
    1694  r_idx = r_idx % adapter->num_tx_queues;  in igc_tx_queue_mapping()
    2456  while (index >= adapter->num_tx_queues)  in igc_xdp_get_tx_ring()
    2457  index -= adapter->num_tx_queues;  in igc_xdp_get_tx_ring()
    4302  adapter->num_tx_queues = 0;  in igc_free_q_vectors()
    [all …]
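
The igc hits fold an out-of-range index two ways: igc_tx_queue_mapping() with a guarded modulo, igc_xdp_get_tx_ring() by repeated subtraction (cheap when the index is already near the range, e.g. a CPU id; that rationale is my reading, not the driver's stated reason). A sketch showing the two agree:

#include <assert.h>
#include <stdio.h>

static unsigned int fold_mod(unsigned int index, unsigned int num_tx_queues)
{
        if (index >= num_tx_queues)
                index = index % num_tx_queues;
        return index;
}

static unsigned int fold_sub(unsigned int index, unsigned int num_tx_queues)
{
        while (index >= num_tx_queues)
                index -= num_tx_queues;
        return index;
}

int main(void)
{
        unsigned int num_tx_queues = 4, i;

        for (i = 0; i < 32; i++)
                assert(fold_mod(i, num_tx_queues) == fold_sub(i, num_tx_queues));
        printf("both fold 11 -> %u\n", fold_mod(11, num_tx_queues));  /* 3 */
        return 0;
}
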

/linux/drivers/net/ethernet/intel/fm10k/
fm10k_netdev.c
    59    for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_setup_all_tx_resources()
    226   for (i = 0; i < interface->num_tx_queues; i++)  in fm10k_clean_all_tx_rings()
    238   int i = interface->num_tx_queues;  in fm10k_free_all_tx_resources()
    462   interface->num_tx_queues);  in fm10k_open()
    513   int num_tx_queues = READ_ONCE(interface->num_tx_queues);  in fm10k_xmit_frame() local
    517   if (!num_tx_queues)  in fm10k_xmit_frame()
    572   if (r_idx >= num_tx_queues)  in fm10k_xmit_frame()
    573   r_idx %= num_tx_queues;  in fm10k_xmit_frame()
    591   if (txqueue >= interface->num_tx_queues) {  in fm10k_tx_timeout()
    1241  for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_get_stats64()
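
fm10k_xmit_frame() snapshots num_tx_queues once with READ_ONCE(), rejects a zero count, and does the later bounds math on that same local, so a concurrent queue reconfiguration cannot split the check from the modulo. A userspace sketch using C11 atomics in place of READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int live_num_tx_queues = 4;   /* updated elsewhere */

static int select_tx_queue(unsigned int queue_mapping, unsigned int *out)
{
        /* one snapshot; later reads of the live value could differ */
        unsigned int num_tx_queues =
                atomic_load_explicit(&live_num_tx_queues, memory_order_relaxed);

        if (!num_tx_queues)
                return -1;            /* device being reconfigured, drop */

        if (queue_mapping >= num_tx_queues)
                queue_mapping %= num_tx_queues;
        *out = queue_mapping;
        return 0;
}

int main(void)
{
        unsigned int q;

        if (!select_tx_queue(9, &q))
                printf("mapped to queue %u\n", q);   /* 1 with 4 queues */

        atomic_store(&live_num_tx_queues, 0);
        printf("with 0 queues: %d\n", select_tx_queue(9, &q));  /* -1 */
        return 0;
}
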

fm10k_main.c
    1510  interface->num_tx_queues = rss_i * pcs;  in fm10k_set_qos_queues()
    1536  interface->num_tx_queues = rss_i;  in fm10k_set_rss_queues()
    1571  interface->num_tx_queues = 0;  in fm10k_reset_num_queues()
    1718  unsigned int txr_remaining = interface->num_tx_queues;  in fm10k_alloc_q_vectors()
    1813  v_budget = max(interface->num_rx_queues, interface->num_tx_queues);  in fm10k_init_msix_capability()
    1900  for (i = 0; i < interface->num_tx_queues; i++)  in fm10k_cache_ring_rss()
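
fm10k_set_qos_queues() derives the TX queue count as rss_i * pcs, and fm10k_init_msix_capability() budgets vectors from the larger of the RX and TX counts. A sketch of that arithmetic; reading rss_i and pcs as the per-class RSS width and the number of traffic classes is my assumption.

#include <stdio.h>

static unsigned int qos_tx_queue_count(unsigned int rss_width,
                                       unsigned int num_traffic_classes)
{
        /* one RSS-wide block of TX queues per traffic class */
        return rss_width * num_traffic_classes;
}

static unsigned int vector_budget(unsigned int num_rx_queues,
                                  unsigned int num_tx_queues)
{
        return num_rx_queues > num_tx_queues ? num_rx_queues : num_tx_queues;
}

int main(void)
{
        unsigned int num_tx_queues = qos_tx_queue_count(4, 2);  /* 8 */
        unsigned int num_rx_queues = 4;                         /* arbitrary */

        printf("tx=%u vectors=%u\n", num_tx_queues,
               vector_budget(num_rx_queues, num_tx_queues));    /* tx=8 vectors=8 */
        return 0;
}
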

/linux/drivers/net/ethernet/intel/ixgbevf/
ethtool.c
    61   (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
    266  for (i = 0; i < adapter->num_tx_queues; i++)  in ixgbevf_set_ringparam()
    280  adapter->num_tx_queues +  in ixgbevf_set_ringparam()
    287  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_set_ringparam()
    361  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_set_ringparam()
    396  i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)  in ixgbevf_set_ringparam()
    452  for (j = 0; j < adapter->num_tx_queues; j++) {  in ixgbevf_get_ethtool_stats()
    522  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_get_strings()

/linux/drivers/net/dsa/microchip/
ksz_common.c
    1324  .num_tx_queues = 4,
    1355  .num_tx_queues = 4,
    1396  .num_tx_queues = 4,
    1423  .num_tx_queues = 4,
    1450  .num_tx_queues = 4,
    1484  .num_tx_queues = 4,
    1507  .num_tx_queues = 4,
    1531  .num_tx_queues = 4,
    1568  .num_tx_queues = 4,
    1603  .num_tx_queues = 4,
    [all …]
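
These initialisers come from a static per-chip capability table: each supported switch model declares its fixed egress queue count alongside its other properties. An illustrative table in the same designated-initialiser style; the chip names and the extra field are made up, not the ksz driver's entries.

#include <stdio.h>

struct chip_info {
        const char *dev_name;
        unsigned int num_tx_queues;
        unsigned int port_cnt;
};

static const struct chip_info chip_table[] = {
        { .dev_name = "chip-a", .num_tx_queues = 4, .port_cnt = 3 },
        { .dev_name = "chip-b", .num_tx_queues = 4, .port_cnt = 7 },
        { .dev_name = "chip-c", .num_tx_queues = 2, .port_cnt = 5 },
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(chip_table) / sizeof(chip_table[0]); i++)
                printf("%s: %u TX queues, %u ports\n",
                       chip_table[i].dev_name,
                       chip_table[i].num_tx_queues,
                       chip_table[i].port_cnt);
        return 0;
}
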

ksz_dcb.c
    375  ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);  in ksz_init_global_dscp_map()
    427  dev->info->num_tx_queues);  in ksz_port_del_dscp_prio()
    609  return ksz8_all_queues_split(dev, dev->info->num_tx_queues);  in ksz88x3_port2_apptrust_quirk()
    775  dev->info->num_tx_queues);  in ksz_dcb_init_port()

/linux/drivers/crypto/caam/
dpseci.h
    53  u8 num_tx_queues;  member
    77  u8 num_tx_queues;  member

dpseci.c
    178  attr->num_tx_queues = rsp_params->num_tx_queues;  in dpseci_get_attributes()

dpseci_cmd.h
    71  u8 num_tx_queues;  member

/linux/drivers/net/ethernet/wangxun/ngbe/
ngbe_ethtool.c
    72  for (i = 0; i < wx->num_tx_queues; i++)  in ngbe_set_ringparam()
    83  i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);  in ngbe_set_ringparam()

/linux/drivers/net/vmxnet3/
vmxnet3_ethtool.c
    154   for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_stats64()
    192   adapter->num_tx_queues +  in vmxnet3_get_sset_count()
    216   (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +  in vmxnet3_get_regs_len()
    246   for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_strings()
    481   for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_ethtool_stats()
    548   buf[j++] = adapter->num_tx_queues;  in vmxnet3_get_regs()
    549   for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_regs()
    1325  ec->combined_count = adapter->num_tx_queues;  in vmxnet3_get_channels()
    1330  1 : adapter->num_tx_queues;  in vmxnet3_get_channels()

/linux/net/core/
dev.h
    211  netdev->num_tx_queues);  in netdev_set_defer_hard_irqs()
    257  netdev->num_tx_queues);  in netdev_set_gro_flush_timeout()

/linux/drivers/infiniband/hw/hfi1/
ipoib_tx.c
    697  priv->txqs = kcalloc_node(dev->num_tx_queues,  in hfi1_ipoib_txreq_init()
    704  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_init()
    798  for (i = 0; i < priv->netdev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_deinit()
    821  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_enable()
    833  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_disable()

/linux/drivers/net/ethernet/freescale/
gianfar.c
    132   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_tx_rx_base()
    246   for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {  in gfar_configure_coalescing()
    288   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_get_stats64()
    414   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_alloc_tx_queues()
    448   for (i = 0; i < priv->num_tx_queues; i++)  in gfar_free_tx_queues()
    560   for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {  in gfar_parse_group()
    563   grp->num_tx_queues++;  in gfar_parse_group()
    694   priv->num_tx_queues = num_tx_qs;  in gfar_of_init()
    1133  for (i = 0; i < priv->num_tx_queues; i++) {  in free_skb_resources()
    1294  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_bds()
    [all …]
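
The for_each_set_bit() hits walk only the TX queues whose bit is set in a group's mask, with num_tx_queues as the upper bound of the search. A plain-C model of that bounded bitmap walk:

#include <stdio.h>

int main(void)
{
        unsigned long tx_bit_map = 0x2D;   /* queues 0, 2, 3, 5 belong to the group */
        unsigned int num_tx_queues = 4;    /* ...but only 4 queues exist */
        unsigned int i;

        /* same effect as for_each_set_bit(i, &tx_bit_map, num_tx_queues) */
        for (i = 0; i < num_tx_queues; i++) {
                if (!(tx_bit_map & (1UL << i)))
                        continue;
                printf("configure TX queue %u\n", i);   /* prints 0, 2, 3 */
        }
        return 0;
}
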