Home
last modified time | relevance | path

Searched refs:q_idx (Results 1 – 25 of 50) sorted by relevance

12

/linux/drivers/net/ethernet/intel/ice/
H A Dice_base.c754 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) in ice_vsi_cfg_single_rxq() argument
756 if (q_idx >= vsi->num_rxq) in ice_vsi_cfg_single_rxq()
759 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); in ice_vsi_cfg_single_rxq()
1133 u16 q_idx) in ice_vsi_cfg_single_txq() argument
1137 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) in ice_vsi_cfg_single_txq()
1142 return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); in ice_vsi_cfg_single_txq()
1159 u16 q_idx; in ice_vsi_cfg_txqs() local
1163 for (q_idx = 0; q_idx < count; q_idx++) { in ice_vsi_cfg_txqs()
1164 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); in ice_vsi_cfg_txqs()
1394 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) in ice_qp_reset_stats() argument
[all …]
H A Dice_lib.c1987 u16 q_idx; in ice_vsi_stop_tx_rings() local
1992 for (q_idx = 0; q_idx < count; q_idx++) { in ice_vsi_stop_tx_rings()
1996 if (!rings || !rings[q_idx]) in ice_vsi_stop_tx_rings()
1999 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
2001 rings[q_idx], &txq_meta); in ice_vsi_stop_tx_rings()
2786 int q_idx, v_idx; in ice_vsi_set_napi_queues() local
2792 ice_for_each_rxq(vsi, q_idx) in ice_vsi_set_napi_queues()
2793 if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector) in ice_vsi_set_napi_queues()
2794 netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, in ice_vsi_set_napi_queues()
2795 &vsi->rx_rings[q_idx]->q_vector->napi); in ice_vsi_set_napi_queues()
[all …]
/linux/drivers/accel/habanalabs/common/
H A Dhw_queue.c420 u32 q_idx; in init_signal_cs() local
423 q_idx = job->hw_queue_id; in init_signal_cs()
424 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_signal_cs()
432 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx, in init_signal_cs()
441 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1, in init_signal_cs()
480 u32 q_idx; in init_wait_cs() local
482 q_idx = job->hw_queue_id; in init_wait_cs()
483 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in init_wait_cs()
497 cs->encaps_sig_hdl->q_idx, in init_wait_cs()
533 prop->base_mon_id, q_idx, cs->sequence); in init_wait_cs()
[all …]
H A Dcommand_submission.c156 hw_sob->q_idx, hw_sob->sob_id); in hl_sob_reset_error()
1798 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx, in hl_cs_signal_sob_wraparound_handler() argument
1806 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; in hl_cs_signal_sob_wraparound_handler()
1829 q_idx); in hl_cs_signal_sob_wraparound_handler()
1870 prop->curr_sob_offset, q_idx); in hl_cs_signal_sob_wraparound_handler()
1938 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset) in cs_ioctl_signal_wait_create_jobs() argument
1973 job->hw_queue_id = q_idx; in cs_ioctl_signal_wait_create_jobs()
2002 u32 q_idx, u32 count, in cs_ioctl_reserve_signals() argument
2022 if (q_idx >= hdev->asic_prop.max_queues) { in cs_ioctl_reserve_signals()
2024 q_idx); in cs_ioctl_reserve_signals()
[all …]
/linux/drivers/net/hyperv/
H A Dnetvsc_bpf.c230 struct xdp_frame *frame, u16 q_idx) in netvsc_ndoxdp_xmit_fm() argument
240 skb_record_rx_queue(skb, q_idx); in netvsc_ndoxdp_xmit_fm()
256 u16 q_idx; in netvsc_ndoxdp_xmit() local
276 q_idx = smp_processor_id() % ndev->real_num_tx_queues; in netvsc_ndoxdp_xmit()
279 if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx)) in netvsc_ndoxdp_xmit()
285 tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats; in netvsc_ndoxdp_xmit()
H A Dnetvsc.c321 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) in netvsc_alloc_recv_comp_ring() argument
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; in netvsc_alloc_recv_comp_ring()
765 u16 q_idx = 0; in netvsc_send_tx_complete() local
786 q_idx = packet->q_idx; in netvsc_send_tx_complete()
788 tx_stats = &net_device->chan_table[q_idx].tx_stats; in netvsc_send_tx_complete()
800 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); in netvsc_send_tx_complete()
806 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); in netvsc_send_tx_complete()
1103 &net_device->chan_table[packet->q_idx]; in netvsc_send_pkt()
1107 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); in netvsc_send_pkt()
1248 nvchan = &net_device->chan_table[packet->q_idx]; in netvsc_send()
[all …]
H A Dnetvsc_drv.c255 int q_idx; in netvsc_get_tx_queue() local
257 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & in netvsc_get_tx_queue()
261 if (q_idx != old_idx && in netvsc_get_tx_queue()
263 sk_tx_queue_set(sk, q_idx); in netvsc_get_tx_queue()
265 return q_idx; in netvsc_get_tx_queue()
281 int q_idx = sk_tx_queue_get(skb->sk); in netvsc_pick_tx() local
283 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { in netvsc_pick_tx()
288 q_idx = skb_get_rx_queue(skb); in netvsc_pick_tx()
290 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); in netvsc_pick_tx()
293 return q_idx; in netvsc_pick_tx()
[all …]
/linux/drivers/net/ethernet/cavium/thunder/
H A Dnicvf_queues.h354 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
355 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
356 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
357 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
H A Dnicvf_queues.c1722 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) in nicvf_int_type_to_mask() argument
1728 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); in nicvf_int_type_to_mask()
1731 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); in nicvf_int_type_to_mask()
1734 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); in nicvf_int_type_to_mask()
1756 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_enable_intr() argument
1758 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_enable_intr()
1770 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_disable_intr() argument
1772 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_disable_intr()
1784 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) in nicvf_clear_intr() argument
1786 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); in nicvf_clear_intr()
[all …]
/linux/drivers/net/ethernet/ti/icssg/
H A Dicssg_common.c97 unsigned int q_idx) in emac_xsk_xmit_zc() argument
99 struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx]; in emac_xsk_xmit_zc()
141 (emac->port_id | (q_idx << 8))); in emac_xsk_xmit_zc()
689 unsigned int q_idx, in emac_xmit_xdp_frame() argument
702 if (q_idx >= PRUETH_MAX_TX_QUEUES) { in emac_xmit_xdp_frame()
703 netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx); in emac_xmit_xdp_frame()
707 tx_chn = &emac->tx_chns[q_idx]; in emac_xmit_xdp_frame()
742 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); in emac_xmit_xdp_frame()
788 int q_idx, err; in emac_run_xdp() local
803 q_idx = cpu % emac->tx_ch_num; in emac_run_xdp()
[all …]
/linux/drivers/net/ethernet/meta/fbnic/
H A Dfbnic_txrx.c118 return netdev_get_tx_queue(dev, ring->q_idx); in txring_txq()
1221 skb_record_rx_queue(skb, rcq->q_idx); in fbnic_populate_skb_fields()
1472 WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); in fbnic_remove_tx_ring()
1473 fbn->tx[txr->q_idx] = NULL; in fbnic_remove_tx_ring()
1485 WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr); in fbnic_remove_xdp_ring()
1486 fbn->tx[xdpr->q_idx] = NULL; in fbnic_remove_xdp_ring()
1498 WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); in fbnic_remove_rx_ring()
1499 fbn->rx[rxr->q_idx] = NULL; in fbnic_remove_rx_ring()
1606 int q_idx, u8 flags) in fbnic_ring_init() argument
1610 ring->q_idx = q_idx; in fbnic_ring_init()
[all …]
H A Dfbnic_txrx.h122 u8 q_idx; /* Logical netdev ring index */ member
/linux/drivers/net/ethernet/intel/iavf/
H A Diavf_main.c1161 int q_idx; in iavf_napi_enable_all() local
1165 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_enable_all()
1168 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_enable_all()
1180 int q_idx; in iavf_napi_disable_all() local
1184 for (q_idx = 0; q_idx < q_vectors; q_idx++) { in iavf_napi_disable_all()
1185 q_vector = &adapter->q_vectors[q_idx]; in iavf_napi_disable_all()
1766 int q_idx = 0, num_q_vectors, irq_num; in iavf_alloc_q_vectors() local
1774 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { in iavf_alloc_q_vectors()
1775 irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector; in iavf_alloc_q_vectors()
1776 q_vector = &adapter->q_vectors[q_idx]; in iavf_alloc_q_vectors()
[all …]
/linux/net/sched/
H A Dsch_api.c1828 int ret = 0, q_idx = *q_idx_p; in tc_dump_qdisc_root() local
1836 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1837 q_idx++; in tc_dump_qdisc_root()
1844 q_idx++; in tc_dump_qdisc_root()
1857 if (q_idx < s_q_idx) { in tc_dump_qdisc_root()
1858 q_idx++; in tc_dump_qdisc_root()
1866 q_idx++; in tc_dump_qdisc_root()
1870 *q_idx_p = q_idx; in tc_dump_qdisc_root()
1880 int idx, q_idx; in tc_dump_qdisc() local
1888 s_q_idx = q_idx in tc_dump_qdisc()
[all …]
/linux/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/
H A Dtrx.c534 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) in rtl92ee_get_available_desc() argument
542 get_desc_addr_fr_q_idx(q_idx)); in rtl92ee_get_available_desc()
889 u8 q_idx = *val; in rtl92ee_set_desc() local
900 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; in rtl92ee_set_desc()
903 if (q_idx == BEACON_QUEUE) { in rtl92ee_set_desc()
914 get_desc_addr_fr_q_idx(q_idx), in rtl92ee_set_desc()
/linux/drivers/infiniband/hw/hfi1/
H A Dipoib_tx.c57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
64 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
723 txq->q_idx = i; in hfi1_ipoib_txreq_init()
789 txq->q_idx, in hfi1_ipoib_drain_tx_list()
848 __netif_subqueue_stopped(dev, txq->q_idx), in hfi1_ipoib_tx_timeout()
H A Dipoib.h114 u8 q_idx; member
/linux/drivers/net/wireless/mediatek/mt76/mt7925/
H A Dmac.c732 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0; in mt7925_mac_write_txwi() local
754 q_idx = MT_LMAC_ALTX0; in mt7925_mac_write_txwi()
757 q_idx = MT_LMAC_BCN0; in mt7925_mac_write_txwi()
760 q_idx = MT_LMAC_ALTX0; in mt7925_mac_write_txwi()
763 q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS + in mt7925_mac_write_txwi()
773 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); in mt7925_mac_write_txwi()
808 (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))) in mt7925_mac_write_txwi()
/linux/drivers/net/ethernet/fungible/funeth/
H A Dfuneth_tx.c576 int i, q_idx; in fun_xdp_xmit_frames() local
585 q_idx = smp_processor_id(); in fun_xdp_xmit_frames()
586 if (unlikely(q_idx >= fp->num_xdpqs)) in fun_xdp_xmit_frames()
589 for (q = xdpqs[q_idx], i = 0; i < n; i++) in fun_xdp_xmit_frames()
/linux/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_device_queue_manager.c3340 int q_idx = QUEUE_NOT_FOUND; in resume_queues() local
3343 q_idx = q_array_get_index( in resume_queues()
3348 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { in resume_queues()
3353 queue_ids[q_idx] &= in resume_queues()
3356 queue_ids[q_idx] |= in resume_queues()
3385 int q_idx = q_array_get_index( in resume_queues() local
3391 if (q_idx != QUEUE_NOT_FOUND) in resume_queues()
3392 queue_ids[q_idx] |= in resume_queues()
3444 int q_idx = q_array_get_index(q->properties.queue_id, in suspend_queues() local
3448 if (q_idx != QUEUE_NOT_FOUND) { in suspend_queues()
[all …]
/linux/drivers/net/wireless/mediatek/mt76/
H A Dmt76_connac_mac.c504 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0; in mt76_connac2_mac_write_txwi() local
526 q_idx = MT_LMAC_ALTX0; in mt76_connac2_mac_write_txwi()
529 q_idx = MT_LMAC_BCN0; in mt76_connac2_mac_write_txwi()
532 q_idx = MT_LMAC_ALTX0; in mt76_connac2_mac_write_txwi()
535 q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS + in mt76_connac2_mac_write_txwi()
545 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); in mt76_connac2_mac_write_txwi()
/linux/drivers/scsi/mpi3mr/
H A Dmpi3mr_fw.c1926 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) in mpi3mr_free_op_req_q_segments() argument
1932 segments = mrioc->req_qinfo[q_idx].q_segments; in mpi3mr_free_op_req_q_segments()
1938 if (mrioc->req_qinfo[q_idx].q_segment_list) { in mpi3mr_free_op_req_q_segments()
1941 mrioc->req_qinfo[q_idx].q_segment_list, in mpi3mr_free_op_req_q_segments()
1942 mrioc->req_qinfo[q_idx].q_segment_list_dma); in mpi3mr_free_op_req_q_segments()
1943 mrioc->req_qinfo[q_idx].q_segment_list = NULL; in mpi3mr_free_op_req_q_segments()
1946 size = mrioc->req_qinfo[q_idx].segment_qd * in mpi3mr_free_op_req_q_segments()
1949 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { in mpi3mr_free_op_req_q_segments()
1956 kfree(mrioc->req_qinfo[q_idx].q_segments); in mpi3mr_free_op_req_q_segments()
1957 mrioc->req_qinfo[q_idx].q_segments = NULL; in mpi3mr_free_op_req_q_segments()
[all …]
/linux/drivers/net/ethernet/intel/igc/
H A Digc_defines.h722 #define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) argument
/linux/drivers/net/ethernet/cavium/liquidio/
H A Dlio_vf_main.c1404 int q_idx = 0, iq_no = 0; in liquidio_xmit() local
1416 q_idx = skb_iq(lio->oct_dev, skb); in liquidio_xmit()
1417 tag = q_idx; in liquidio_xmit()
1418 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
1496 spin_lock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1498 lio_list_delete_head(&lio->glist[q_idx]); in liquidio_xmit()
1499 spin_unlock(&lio->glist_lock[q_idx]); in liquidio_xmit()
1593 netif_stop_subqueue(netdev, q_idx); in liquidio_xmit()
/linux/drivers/net/ethernet/intel/igb/
H A De1000_defines.h1043 #define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) argument

12