/linux/drivers/net/ethernet/intel/ixgbe/
  ixgbe_lib.c
      48  for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {  in ixgbe_cache_ring_dcb_sriov()
      78  for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {  in ixgbe_cache_ring_dcb_sriov()
     225  for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {  in ixgbe_cache_ring_sriov()
     239  for (; i < adapter->num_tx_queues; i++, reg_idx++)  in ixgbe_cache_ring_sriov()
     262  for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)  in ixgbe_cache_ring_rss()
     385  adapter->num_tx_queues = vmdq_i * tcs;  in ixgbe_set_dcb_sriov_queues()
     404  adapter->num_tx_queues += fcoe_i;  in ixgbe_set_dcb_sriov_queues()
     441  rss_i = dev->num_tx_queues / tcs;  in ixgbe_set_dcb_queues()
     483  adapter->num_tx_queues = rss_i * tcs;  in ixgbe_set_dcb_queues()
     556  adapter->num_tx_queues = vmdq_i * rss_i;  in ixgbe_set_sriov_queues()
    [all …]
/linux/drivers/net/ethernet/google/gve/
  gve_ethtool.c
      96  int num_tx_queues;  in gve_get_strings() local
      99  num_tx_queues = gve_num_tx_queues(priv);  in gve_get_strings()
     110  for (i = 0; i < num_tx_queues; i++)  in gve_get_strings()
     133  int num_tx_queues;  in gve_get_sset_count() local
     135  num_tx_queues = gve_num_tx_queues(priv);  in gve_get_sset_count()
     140  (num_tx_queues * NUM_GVE_TX_CNTS);  in gve_get_sset_count()
     168  int num_tx_queues;  in gve_get_ethtool_stats() local
     175  num_tx_queues = gve_num_tx_queues(priv);  in gve_get_ethtool_stats()
     186  tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,  in gve_get_ethtool_stats()
     192  for (ring = 0; ring < num_tx_queues; ring++) {  in gve_get_ethtool_stats()
    [all …]
/linux/net/sched/
  sch_mq.c
      62  for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)  in mq_destroy()
      83  priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mq_init()
      88  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_init()
     113  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_attach()
     143  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_dump()
     164  if (ntx >= dev->num_tx_queues)  in mq_queue_get()
     249  for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {  in mq_walk()
  sch_mqprio.c
     103  ntx < dev->num_tx_queues && priv->qdiscs[ntx];  in mqprio_destroy()
     367  if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)  in mqprio_init()
     391  priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mqprio_init()
     396  for (i = 0; i < dev->num_tx_queues; i++) {  in mqprio_init()
     440  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_attach()
     458  if (ntx >= dev->num_tx_queues)  in mqprio_queue_get()
     571  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_dump()
     632  return (ntx <= dev->num_tx_queues) ? ntx : 0;  in mqprio_find()
     739  for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {  in mqprio_walk()
  sch_taprio.c
     797  if (q->cur_txq[tc] >= dev->num_tx_queues)  in taprio_dequeue_tc_priority()
     819  for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_dequeue_txq_priority()
    1193  if (qopt->num_tc > dev->num_tx_queues) {  in taprio_parse_mqprio_opt()
    2017  for (i = 0; i < dev->num_tx_queues; i++)  in taprio_reset()
    2041  for (i = 0; i < dev->num_tx_queues; i++)  in taprio_destroy()
    2093  q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),  in taprio_init()
    2101  for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_init()
    2135  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in taprio_attach()
    2170  if (ntx >= dev->num_tx_queues)  in taprio_queue_get()
    2454  if (ntx >= dev->num_tx_queues)  in taprio_leaf()
    [all …]
  sch_generic.c
     436  for (i = 1; i < dev->num_tx_queues; i++) {  in dev_trans_start()
     452  for (i = 0; i < dev->num_tx_queues; i++) {  in netif_freeze_queues()
     478  for (i = 0; i < dev->num_tx_queues; i++) {  in netif_unfreeze_queues()
     512  for (i = 0; i < dev->num_tx_queues; i++) {  in dev_watchdog()
    1328  for (i = 0; i < dev->num_tx_queues; i++) {  in some_qdisc_is_busy()
    1459  for (i = 0; i < dev->num_tx_queues; i++) {  in dev_qdisc_change_tx_queue_len()
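The sch_mq.c, sch_mqprio.c and sch_taprio.c hits above share one shape: mq_init()/mqprio_init()/taprio_init() kcalloc() an array of dev->num_tx_queues child pointers, then loop once per TX queue of the device to create a child, and the destroy paths walk the same range. The following is a minimal user-space sketch of that pattern only, not kernel code; struct netdev_model, struct child and make_child() are invented stand-ins for struct net_device and the per-queue qdiscs.

/* One child object per TX queue, modelled after the mq_init()/mq_destroy()
 * fragments above.  All names here are illustrative stand-ins, not kernel symbols. */
#include <stdlib.h>

struct child {                    /* stands in for a per-queue qdisc */
	unsigned int queue_index;
};

struct netdev_model {             /* stands in for struct net_device */
	unsigned int num_tx_queues;
};

static struct child *make_child(unsigned int ntx)
{
	struct child *c = malloc(sizeof(*c));

	if (c)
		c->queue_index = ntx;
	return c;
}

int main(void)
{
	struct netdev_model dev = { .num_tx_queues = 8 };
	struct child **children;
	unsigned int ntx;

	/* analogue of kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), ...) */
	children = calloc(dev.num_tx_queues, sizeof(children[0]));
	if (!children)
		return 1;

	/* analogue of the for (ntx = 0; ntx < dev->num_tx_queues; ntx++) setup loop */
	for (ntx = 0; ntx < dev.num_tx_queues; ntx++) {
		children[ntx] = make_child(ntx);
		if (!children[ntx])
			break;
	}

	/* teardown mirrors mq_destroy(): stop at the first missing child */
	for (ntx = 0; ntx < dev.num_tx_queues && children[ntx]; ntx++)
		free(children[ntx]);
	free(children);
	return 0;
}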
/linux/drivers/net/ethernet/engleder/
  tsnep_ethtool.c
     100  max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;  in tsnep_ethtool_get_regs_len()
     136  int tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_strings()
     172  int tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_ethtool_stats()
     194  for (i = 0; i < adapter->num_tx_queues; i++) {  in tsnep_ethtool_get_ethtool_stats()
     249  tx_count = adapter->num_tx_queues;  in tsnep_ethtool_get_sset_count()
     413  if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))  in tsnep_ethtool_get_per_queue_coalesce()
     436  if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))  in tsnep_ethtool_set_per_queue_coalesce()
  tsnep_xdp.c
      28  queue_id >= adapter->num_tx_queues)  in tsnep_xdp_enable_pool()
      64  queue_id >= adapter->num_tx_queues)  in tsnep_xdp_disable_pool()
/linux/drivers/net/ethernet/broadcom/
  bcmsysport.c
     338  return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_sset_count()
     362  for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_strings()
     434  for (q = 0; q < priv->netdev->num_tx_queues; q++) {  in bcm_sysport_update_tx_stats()
     498  dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_stats()
     500  for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_stats()
     632  for (i = 0; i < dev->num_tx_queues; i++)  in bcm_sysport_set_coalesce()
     988  for (q = 0; q < priv->netdev->num_tx_queues; q++)  in bcm_sysport_tx_reclaim_all()
    1138  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_rx_isr()
    1171  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_tx_isr()
    2004  for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_open()
    [all …]
/linux/drivers/net/ethernet/intel/igc/
  igc_tsn.c
     165  for (int i = 0; i < adapter->num_tx_queues; i++) {  in igc_fpe_clear_preempt_queue()
     198  for (int i = 0; i < adapter->num_tx_queues; i++) {  in igc_fpe_save_preempt_queue()
     209  for (i = 0; i < adapter->num_tx_queues; i++) {  in is_any_launchtime()
     223  for (i = 0; i < adapter->num_tx_queues; i++) {  in is_cbs_enabled()
     379  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_disable_offload()
     468  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_enable_offload()
  igc_xdp.c
      68  queue_id >= adapter->num_tx_queues)  in igc_xdp_enable_pool()
     126  queue_id >= adapter->num_tx_queues)  in igc_xdp_disable_pool()
  igc_dump.c
     135  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()
     166  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()
  igc_main.c
     302  for (i = 0; i < adapter->num_tx_queues; i++)  in igc_free_all_tx_resources()
     314  for (i = 0; i < adapter->num_tx_queues; i++)  in igc_clean_all_tx_rings()
     339  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_disable_all_tx_rings_hw()
     395  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_setup_all_tx_resources()
     768  for (i = 0; i < adapter->num_tx_queues; i++)  in igc_configure_tx()
    1719  if (r_idx >= adapter->num_tx_queues)  in igc_tx_queue_mapping()
    1720  r_idx = r_idx % adapter->num_tx_queues;  in igc_tx_queue_mapping()
    2481  while (index >= adapter->num_tx_queues)  in igc_get_tx_ring()
    2482  index -= adapter->num_tx_queues;  in igc_get_tx_ring()
    4443  adapter->num_tx_queues = 0;  in igc_free_q_vectors()
    [all …]
/linux/drivers/net/ethernet/intel/fm10k/
  fm10k_netdev.c
      59  for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_setup_all_tx_resources()
     226  for (i = 0; i < interface->num_tx_queues; i++)  in fm10k_clean_all_tx_rings()
     238  int i = interface->num_tx_queues;  in fm10k_free_all_tx_resources()
     462  interface->num_tx_queues);  in fm10k_open()
     513  int num_tx_queues = READ_ONCE(interface->num_tx_queues);  in fm10k_xmit_frame() local
     517  if (!num_tx_queues)  in fm10k_xmit_frame()
     572  if (r_idx >= num_tx_queues)  in fm10k_xmit_frame()
     573  r_idx %= num_tx_queues;  in fm10k_xmit_frame()
     591  if (txqueue >= interface->num_tx_queues) {  in fm10k_tx_timeout()
    1241  for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_get_stats64()
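igc_main.c (igc_tx_queue_mapping(), lines 1719-1720) and fm10k_netdev.c (fm10k_xmit_frame(), lines 572-573) both reduce a candidate queue index into the valid range [0, num_tx_queues) before using it; fm10k additionally bails out earlier when num_tx_queues reads as zero (line 517). A minimal sketch of that clamp, assuming num_tx_queues > 0; the helper name is invented for illustration:

/* Clamp an arbitrary candidate index to a valid TX queue index, mirroring the
 * "if (r_idx >= num_tx_queues) r_idx %= num_tx_queues;" fragments above.
 * Caller must guarantee num_tx_queues > 0 (fm10k checks that separately). */
#include <assert.h>

static unsigned int tx_queue_clamp(unsigned int r_idx, unsigned int num_tx_queues)
{
	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;
	return r_idx;
}

int main(void)
{
	assert(tx_queue_clamp(3, 8) == 3);    /* already in range, untouched */
	assert(tx_queue_clamp(11, 8) == 3);   /* wrapped modulo num_tx_queues */
	return 0;
}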
/linux/drivers/net/vmxnet3/
  vmxnet3_drv.c
     221  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_check_link()
     230  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_check_link()
     258  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_process_events()
     562  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_destroy_all()
     662  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_cleanup_all()
     940  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_init_all()
    1439  BUG_ON(skb->queue_mapping > adapter->num_tx_queues);  in vmxnet3_xmit_frame()
    2358  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_do_poll()
    2435  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_msix_tx()
    2560  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_request_irqs()
    [all …]
  vmxnet3_ethtool.c
     154  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_stats64()
     192  adapter->num_tx_queues +  in vmxnet3_get_sset_count()
     216  (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +  in vmxnet3_get_regs_len()
     246  for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_strings()
     481  for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_ethtool_stats()
     548  buf[j++] = adapter->num_tx_queues;  in vmxnet3_get_regs()
     549  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_regs()
    1300  ec->combined_count = adapter->num_tx_queues;  in vmxnet3_get_channels()
    1305  1 : adapter->num_tx_queues;  in vmxnet3_get_channels()
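The ethtool hits above size their buffers (stat strings, stat values, register dumps) the same way: a fixed device-wide part plus num_tx_queues copies of a per-queue block, as in gve_get_sset_count() (line 140), bcm_sysport_get_sset_count() (line 338) and vmxnet3_get_regs_len() (line 216). A minimal sketch of that arithmetic; the two constants are made-up examples, not values from any of these drivers:

/* total ethtool entries = device-wide entries + per-queue entries * num_tx_queues */
#include <stdio.h>

#define NUM_GLOBAL_STATS 5   /* hypothetical device-wide counters */
#define NUM_TXQ_STATS    4   /* hypothetical per-TX-queue counters */

static unsigned int get_sset_count(unsigned int num_tx_queues)
{
	return NUM_GLOBAL_STATS + num_tx_queues * NUM_TXQ_STATS;
}

int main(void)
{
	/* e.g. 8 TX queues -> 5 + 8 * 4 = 37 stat strings/values */
	printf("%u\n", get_sset_count(8));
	return 0;
}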
/linux/drivers/crypto/caam/
  dpseci.h
      53  u8 num_tx_queues;  member
      77  u8 num_tx_queues;  member
  dpseci_cmd.h
      71  u8 num_tx_queues;  member
/linux/drivers/net/
  ifb.c
     163  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_stats64()
     190  txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);  in ifb_dev_init()
     194  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_dev_init()
     302  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_dev_free()
/linux/drivers/net/dsa/microchip/
  ksz_dcb.c
     339  ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);  in ksz_init_global_dscp_map()
     391  dev->info->num_tx_queues);  in ksz_port_del_dscp_prio()
     584  dev->info->num_tx_queues);  in ksz_dcb_init_port()
/linux/drivers/net/ethernet/freescale/
  gianfar.c
     133  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_tx_rx_base()
     247  for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {  in gfar_configure_coalescing()
     289  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_get_stats64()
     415  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_alloc_tx_queues()
     449  for (i = 0; i < priv->num_tx_queues; i++)  in gfar_free_tx_queues()
     561  for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {  in gfar_parse_group()
     564  grp->num_tx_queues++;  in gfar_parse_group()
     685  priv->num_tx_queues = num_tx_qs;  in gfar_of_init()
    1124  for (i = 0; i < priv->num_tx_queues; i++) {  in free_skb_resources()
    1285  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_bds()
    [all …]
/linux/drivers/infiniband/hw/hfi1/
  ipoib_tx.c
     697  priv->txqs = kcalloc_node(dev->num_tx_queues,  in hfi1_ipoib_txreq_init()
     704  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_init()
     798  for (i = 0; i < priv->netdev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_deinit()
     821  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_enable()
     833  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_disable()
/linux/net/core/
  dev.h
     235  netdev->num_tx_queues);  in netdev_set_defer_hard_irqs()
     281  netdev->num_tx_queues);  in netdev_set_gro_flush_timeout()
/linux/include/net/
  netdev_lock.h
     127  for (i = 0; i < (dev)->num_tx_queues; i++) \
/linux/drivers/net/ethernet/meta/fbnic/
  fbnic_netdev.h
      61  u16 num_tx_queues;  member