
Searched refs:qid (Results 1 – 25 of 206) sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
htb.c
17 u16 qid; member
40 if (node->qid == MLX5E_QOS_QID_INNER) in mlx5e_htb_enumerate_leaves()
42 err = callback(data, node->qid, node->hw_id); in mlx5e_htb_enumerate_leaves()
70 mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid, in mlx5e_htb_node_create_leaf() argument
81 node->qid = qid; in mlx5e_htb_node_create_leaf()
82 __set_bit(qid, htb->qos_used_qids); in mlx5e_htb_node_create_leaf()
100 node->qid = MLX5E_QOS_QID_INNER; in mlx5e_htb_node_create_root()
134 if (node->qid != MLX5E_QOS_QID_INNER) { in mlx5e_htb_node_delete()
135 __clear_bit(node->qid, htb->qos_used_qids); in mlx5e_htb_node_delete()
150 u16 qid; in mlx5e_htb_get_txq_by_classid() local
[all …]
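
The htb.c hits show mlx5e's leaf/inner split: only leaf nodes carry a real qid (tracked in the qos_used_qids bitmap), while inner nodes hold the reserved MLX5E_QOS_QID_INNER value, so the qid field doubles as a leaf test. A toy user-space sketch of that sentinel convention (the sentinel value here is invented, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define QID_INNER 0xffff   /* stand-in for MLX5E_QOS_QID_INNER */

    struct htb_node { unsigned short qid; };

    /* mirrors the node->qid != MLX5E_QOS_QID_INNER checks above */
    static bool is_leaf(const struct htb_node *n)
    {
        return n->qid != QID_INNER;
    }

    int main(void)
    {
        struct htb_node root = { QID_INNER }, leaf = { 3 };

        printf("root is leaf: %d, node 3 is leaf: %d\n",
               is_leaf(&root), is_leaf(&leaf));
        return 0;
    }
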
qos.c
38 u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) in mlx5e_qid_from_qos() argument
49 return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid; in mlx5e_qid_from_qos()
54 static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid) in mlx5e_get_qos_sq() argument
61 ix = qid % params->num_channels; in mlx5e_get_qos_sq()
62 qid /= params->num_channels; in mlx5e_get_qos_sq()
66 return mlx5e_state_dereference(priv, qos_sqs[qid]); in mlx5e_get_qos_sq()
76 int txq_ix, ix, qid, err = 0; in mlx5e_open_qos_sq() local
113 qid = node_qid / params->num_channels; in mlx5e_open_qos_sq()
139 rcu_assign_pointer(qos_sqs[qid], sq); in mlx5e_open_qos_sq()
161 u16 qid; in mlx5e_activate_qos_sq() local
[all …]
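
The qos.c hits encode how a QoS qid maps onto the queue layout: mlx5e_qid_from_qos() places QoS txqs after the regular (channels + PTP) × TCs block, and mlx5e_get_qos_sq() stripes the qid across channels (channel = qid % num_channels, per-channel slot = qid / num_channels). A minimal user-space sketch of that arithmetic, with all parameter values invented:

    #include <stdio.h>

    /* QoS txqs follow the regular per-channel/per-TC txqs,
     * mirroring mlx5e_qid_from_qos() */
    static unsigned int txq_from_qos_qid(unsigned int num_channels,
                                         unsigned int num_tc,
                                         unsigned int is_ptp,
                                         unsigned int qid)
    {
        return (num_channels + is_ptp) * num_tc + qid;
    }

    int main(void)
    {
        unsigned int num_channels = 4, num_tc = 2, qid = 9;
        unsigned int ix = qid % num_channels;    /* owning channel */
        unsigned int slot = qid / num_channels;  /* index into qos_sqs[] */

        printf("qos qid %u -> channel %u, slot %u, netdev txq %u\n",
               qid, ix, slot, txq_from_qos_qid(num_channels, num_tc, 0, qid));
        return 0;
    }
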
qos.h
24 void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
25 void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
26 void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
27 void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
38 u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
/linux/drivers/vdpa/pds/
vdpa_dev.c
72 static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, in pds_vdpa_set_vq_address() argument
77 pdsv->vqs[qid].desc_addr = desc_addr; in pds_vdpa_set_vq_address()
78 pdsv->vqs[qid].avail_addr = driver_addr; in pds_vdpa_set_vq_address()
79 pdsv->vqs[qid].used_addr = device_addr; in pds_vdpa_set_vq_address()
84 static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num) in pds_vdpa_set_vq_num() argument
88 pdsv->vqs[qid].q_len = num; in pds_vdpa_set_vq_num()
91 static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid) in pds_vdpa_kick_vq() argument
95 iowrite16(qid, pdsv->vqs[qid].notify); in pds_vdpa_kick_vq()
98 static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, in pds_vdpa_set_vq_cb() argument
103 pdsv->vqs[qid].event_cb = *cb; in pds_vdpa_set_vq_cb()
[all …]
cmds.c
125 int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, in pds_vdpa_cmd_init_vq() argument
134 .vdpa_vq_init.qid = cpu_to_le16(qid), in pds_vdpa_cmd_init_vq()
139 .vdpa_vq_init.intr_index = cpu_to_le16(qid), in pds_vdpa_cmd_init_vq()
147 __func__, qid, ilog2(vq_info->q_len), in pds_vdpa_cmd_init_vq()
154 qid, comp.status, ERR_PTR(err)); in pds_vdpa_cmd_init_vq()
159 int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, in pds_vdpa_cmd_reset_vq() argument
168 .vdpa_vq_reset.qid = cpu_to_le16(qid), in pds_vdpa_cmd_reset_vq()
177 qid, comp.status, ERR_PTR(err)); in pds_vdpa_cmd_reset_vq()
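
Both pds commands convert qid with cpu_to_le16() before placing it in the admin command, since the device ABI is little-endian regardless of host byte order (the init command also reuses qid as its interrupt index). A user-space analogue using htole16(); the command layout is invented, not the real pds ABI:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_vq_init_cmd {       /* hypothetical layout */
        uint16_t qid;               /* little-endian on the wire */
        uint16_t intr_index;
    };

    int main(void)
    {
        uint16_t qid = 3;
        struct fake_vq_init_cmd cmd = {
            .qid = htole16(qid),         /* cpu_to_le16() analogue */
            .intr_index = htole16(qid),  /* snippet reuses qid as vector */
        };

        printf("qid %u -> wire value 0x%04x\n",
               (unsigned)qid, (unsigned)cmd.qid);
        return 0;
    }
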
/linux/drivers/infiniband/hw/cxgb4/
resource.c
111 u32 qid; in c4iw_get_cqid() local
119 qid = entry->qid; in c4iw_get_cqid()
122 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_cqid()
123 if (!qid) in c4iw_get_cqid()
126 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_cqid()
128 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
132 entry->qid = i; in c4iw_get_cqid()
143 entry->qid = qid; in c4iw_get_cqid()
145 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
149 entry->qid = i; in c4iw_get_cqid()
[all …]
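
The c4iw_get_cqid() hits suggest a block-carving allocator: the qid table hands out ids in naturally aligned blocks of qpmask+1, one id goes to the caller, and the `i & qpmask` loop stashes the remaining ids of that block as free-list entries. A user-space sketch of the block walk (block size invented):

    #include <stdio.h>

    #define QPMASK 3 /* block of 4 ids; the kernel derives this from hw */

    int main(void)
    {
        unsigned int qid = 8; /* pretend this came from the qid table */
        unsigned int i;

        printf("allocated qid %u; spare ids in its block:", qid);
        /* i & QPMASK stays nonzero while i is inside qid's aligned block */
        for (i = qid + 1; i & QPMASK; i++)
            printf(" %u", i);
        printf("\n"); /* prints: 9 10 11 */
        return 0;
    }
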
/linux/drivers/nvme/host/
auth.c
29 int qid; member
60 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, in nvme_auth_submit() argument
68 if (qid != 0) { in nvme_auth_submit()
86 qid == 0 ? NVME_QID_ANY : qid, flags); in nvme_auth_submit()
89 "qid %d auth_send failed with status %d\n", qid, ret); in nvme_auth_submit()
92 "qid %d auth_send failed with error %d\n", qid, ret); in nvme_auth_submit()
96 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, in nvme_auth_receive_validate() argument
101 __func__, qid, data->auth_type, data->auth_id); in nvme_auth_receive_validate()
111 qid, data->auth_type, data->auth_id); in nvme_auth_receive_validate()
117 qid, le16_to_cpu(data->t_id)); in nvme_auth_receive_validate()
[all …]
trace.h
26 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
29 ((qid) ? \
53 __field(int, qid)
64 __entry->qid = nvme_req_qid(req);
77 __entry->qid, __entry->cid, __entry->nsid,
79 show_opcode_name(__entry->qid, __entry->opcode,
81 parse_nvme_cmd(__entry->qid, __entry->opcode,
91 __field(int, qid)
100 __entry->qid = nvme_req_qid(req);
110 __entry->qid, __entry->cid, __entry->result,
[all …]
/linux/drivers/vdpa/ifcvf/
ifcvf_base.c
13 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector) in ifcvf_set_vq_vector() argument
17 vp_iowrite16(qid, &cfg->queue_select); in ifcvf_set_vq_vector()
72 u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid) in ifcvf_get_vq_size() argument
76 if (qid >= hw->nr_vring) in ifcvf_get_vq_size()
79 vp_iowrite16(qid, &hw->common_cfg->queue_select); in ifcvf_get_vq_size()
87 u16 queue_size, max_size, qid; in ifcvf_get_max_vq_size() local
90 for (qid = 1; qid < hw->nr_vring; qid++) { in ifcvf_get_max_vq_size()
91 queue_size = ifcvf_get_vq_size(hw, qid); in ifcvf_get_max_vq_size()
328 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) in ifcvf_get_vq_state() argument
333 last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2); in ifcvf_get_vq_state()
[all …]
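
The ifcvf hits follow the virtio-pci common-config convention: write the queue id into queue_select, after which the per-queue registers (queue_size, the LM state region, and so on) refer to that queue. A user-space mock of the select-then-access pattern; the register block is faked, not the real layout:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_common_cfg {        /* stand-in for the PCI common config */
        uint16_t queue_select;
        uint16_t queue_size[8];     /* per-queue register, by selection */
    };

    static uint16_t get_vq_size(struct fake_common_cfg *cfg, uint16_t qid,
                                uint16_t nr_vring)
    {
        if (qid >= nr_vring)        /* same bound check as the snippet */
            return 0;
        cfg->queue_select = qid;    /* vp_iowrite16(qid, &cfg->queue_select) */
        return cfg->queue_size[cfg->queue_select];
    }

    int main(void)
    {
        struct fake_common_cfg cfg = { .queue_size = { 256, 128, 64 } };

        printf("vq 1 size: %u\n", (unsigned)get_vq_size(&cfg, 1, 3));
        return 0;
    }
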
ifcvf_base.h
108 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
119 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
120 int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
122 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
124 void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
125 int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
127 bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
128 void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
132 u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
/linux/fs/fuse/
dev_uring.c
124 int qid; in fuse_uring_abort_end_requests() local
128 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_abort_end_requests()
129 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
164 int qid; in fuse_uring_request_expired() local
169 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_request_expired()
170 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_request_expired()
191 int qid; in fuse_uring_destruct() local
196 for (qid = 0; qid < ring->nr_queues; qid++) { in fuse_uring_destruct()
197 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct()
216 ring->queues[qid] = NULL; in fuse_uring_destruct()
[all …]
/linux/drivers/nvme/target/
fabrics-cmd-auth.c
21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); in nvmet_auth_expired_work()
41 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
51 if (req->sq->qid) in nvmet_auth_negotiate()
89 __func__, ctrl->cntlid, req->sq->qid); in nvmet_auth_negotiate()
93 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
117 __func__, ctrl->cntlid, req->sq->qid); in nvmet_auth_negotiate()
121 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
128 ctrl->cntlid, req->sq->qid); in nvmet_auth_negotiate()
132 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
145 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_reply()
[all …]
trace.h
28 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
31 (qid ? \
73 __field(int, qid)
86 __entry->qid = req->sq->qid;
100 __entry->qid, __entry->cid, __entry->nsid,
102 show_opcode_name(__entry->qid, __entry->opcode,
104 parse_nvme_cmd(__entry->qid, __entry->opcode,
114 __field(int, qid)
121 __entry->qid = req->cq->qid;
130 __entry->qid, __entry->cid, __entry->result, __entry->status)
/linux/drivers/net/ethernet/intel/igb/
igb_xsk.c
34 static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid) in igb_txrx_ring_disable() argument
36 struct igb_ring *tx_ring = adapter->tx_ring[qid]; in igb_txrx_ring_disable()
37 struct igb_ring *rx_ring = adapter->rx_ring[qid]; in igb_txrx_ring_disable()
57 static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid) in igb_txrx_ring_enable() argument
59 struct igb_ring *tx_ring = adapter->tx_ring[qid]; in igb_txrx_ring_enable()
60 struct igb_ring *rx_ring = adapter->rx_ring[qid]; in igb_txrx_ring_enable()
86 int qid = ring->queue_index; in igb_xsk_pool() local
89 pool = xsk_get_pool_from_qid(adapter->netdev, qid); in igb_xsk_pool()
99 u16 qid) in igb_xsk_pool_enable() argument
106 if (qid >= adapter->num_rx_queues) in igb_xsk_pool_enable()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
qos.c
98 if (node->qid == OTX2_QOS_QID_NONE) { in otx2_config_sched_shaping()
181 if (node->qid == OTX2_QOS_QID_INNER && !node->parent) { in __otx2_qos_txschq_cfg()
279 if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) { in otx2_qos_sw_node_delete()
280 __clear_bit(node->qid, pfvf->qos.qos_sq_bmap); in otx2_qos_sw_node_delete()
422 WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); in otx2_qos_alloc_root()
473 WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE); in otx2_qos_alloc_txschq_node()
506 u32 quantum, u16 qid, bool static_cfg) in otx2_qos_sw_create_leaf_node() argument
518 WRITE_ONCE(node->qid, qid); in otx2_qos_sw_create_leaf_node()
528 __set_bit(qid, pfvf->qos.qos_sq_bmap); in otx2_qos_sw_create_leaf_node()
553 *otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid) in otx2_sw_node_find_by_qid() argument
[all …]
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
15 int qid = ring->ring_idx; in ixgbe_xsk_pool() local
17 if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) in ixgbe_xsk_pool()
20 return xsk_get_pool_from_qid(adapter->netdev, qid); in ixgbe_xsk_pool()
25 u16 qid) in ixgbe_xsk_pool_enable() argument
31 if (qid >= adapter->num_rx_queues) in ixgbe_xsk_pool_enable()
34 if (qid >= netdev->real_num_rx_queues || in ixgbe_xsk_pool_enable()
35 qid >= netdev->real_num_tx_queues) in ixgbe_xsk_pool_enable()
46 ixgbe_txrx_ring_disable(adapter, qid); in ixgbe_xsk_pool_enable()
48 set_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
51 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_enable()
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
34 struct process_queue_manager *pqm, unsigned int qid) in get_queue_by_qid() argument
39 if ((pqn->q && pqn->q->properties.queue_id == qid) || in get_queue_by_qid()
40 (pqn->kq && pqn->kq->queue->properties.queue_id == qid)) in get_queue_by_qid()
48 unsigned int qid) in assign_queue_slot_by_qid() argument
50 if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) in assign_queue_slot_by_qid()
53 if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) { in assign_queue_slot_by_qid()
54 pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid); in assign_queue_slot_by_qid()
62 unsigned int *qid) in find_available_queue_slot() argument
78 *qid = found; in find_available_queue_slot()
103 int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, in pqm_set_gws() argument
[all …]
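
The amdkfd snippets manage per-process qids as bits in queue_slot_bitmap: a caller-requested qid is claimed atomically with __test_and_set_bit() and rejected if already in use, while find_available_queue_slot() scans for the first free bit. A user-space sketch of the claim path (bitmap width invented; the kernel bound is KFD_MAX_NUM_OF_QUEUES_PER_PROCESS):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_QIDS 64

    static unsigned long long slot_bitmap; /* bit n set = qid n in use */

    static bool claim_qid(unsigned int qid)
    {
        if (qid >= MAX_QIDS)
            return false;
        if (slot_bitmap & (1ULL << qid))   /* __test_and_set_bit() analogue */
            return false;                  /* qid already in use */
        slot_bitmap |= 1ULL << qid;
        return true;
    }

    int main(void)
    {
        printf("claim qid 5: %d\n", claim_qid(5)); /* 1: ok */
        printf("claim qid 5: %d\n", claim_qid(5)); /* 0: in use */
        return 0;
    }
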
/linux/include/linux/
quota.h
79 extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
81 extern bool qid_valid(struct kqid qid);
98 enum quota_type type, qid_t qid) in make_kqid() argument
105 kqid.uid = make_kuid(from, qid); in make_kqid()
108 kqid.gid = make_kgid(from, qid); in make_kqid()
111 kqid.projid = make_kprojid(from, qid); in make_kqid()
187 static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) in qid_has_mapping() argument
189 return from_kqid(ns, qid) != (qid_t) -1; in qid_has_mapping()
320 …int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structu…
[all …]
/linux/drivers/net/ethernet/intel/libeth/
xsk.c
222 void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid) in libeth_xsk_wakeup() argument
230 if (unlikely(qid >= nr_cpu_ids)) in libeth_xsk_wakeup()
231 qid %= nr_cpu_ids; in libeth_xsk_wakeup()
233 if (qid != raw_smp_processor_id() && cpu_online(qid)) in libeth_xsk_wakeup()
234 smp_call_function_single_async(qid, csd); in libeth_xsk_wakeup()
255 int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable) in libeth_xsk_setup_pool() argument
259 pool = xsk_get_pool_from_qid(dev, qid); in libeth_xsk_setup_pool()
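
libeth_xsk_wakeup() appears to repurpose the queue id as the target CPU of an async wakeup IPI, folding out-of-range ids back into the CPU space before calling smp_call_function_single_async(). The clamp in isolation, with an invented CPU count:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_cpu_ids = 8;   /* invented for the example */
        unsigned int qid = 11;         /* queue id past the CPU range */

        if (qid >= nr_cpu_ids)
            qid %= nr_cpu_ids;         /* 11 -> 3, as in the snippet */
        printf("wakeup IPI target: cpu %u\n", qid);
        return 0;
    }
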
/linux/net/9p/
client.c
991 struct p9_qid qid; in p9_client_attach() local
1009 err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid); in p9_client_attach()
1017 qid.type, qid.path, qid.version); in p9_client_attach()
1019 memmove(&fid->qid, &qid, sizeof(struct p9_qid)); in p9_client_attach()
1086 memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid)); in p9_client_walk()
1088 memmove(&fid->qid, &oldfid->qid, sizeof(struct p9_qid)); in p9_client_walk()
1111 struct p9_qid qid; in p9_client_open() local
1130 err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit); in p9_client_open()
1137 p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type, in p9_client_open()
1138 qid.path, qid.version, iounit); in p9_client_open()
[all …]
/linux/drivers/vdpa/octeon_ep/
octep_vdpa_main.c
201 u16 qid; in octep_vdpa_reset() local
206 for (qid = 0; qid < oct_hw->nr_vring; qid++) { in octep_vdpa_reset()
207 oct_hw->vqs[qid].cb.callback = NULL; in octep_vdpa_reset()
208 oct_hw->vqs[qid].cb.private = NULL; in octep_vdpa_reset()
227 static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, in octep_vdpa_get_vq_state() argument
232 return octep_get_vq_state(oct_hw, qid, state); in octep_vdpa_get_vq_state()
235 static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, in octep_vdpa_set_vq_state() argument
240 return octep_set_vq_state(oct_hw, qid, state); in octep_vdpa_set_vq_state()
243 static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb) in octep_vdpa_set_vq_cb() argument
247 oct_hw->vqs[qid].cb = *cb; in octep_vdpa_set_vq_cb()
[all …]
/linux/fs/9p/
vfs_inode_dotl.c
68 if (memcmp(&v9inode->qid.version, in v9fs_test_inode_dotl()
69 &st->qid.version, sizeof(v9inode->qid.version))) in v9fs_test_inode_dotl()
72 if (v9inode->qid.type != st->qid.type) in v9fs_test_inode_dotl()
75 if (v9inode->qid.path != st->qid.path) in v9fs_test_inode_dotl()
91 memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); in v9fs_set_inode_dotl()
97 struct p9_qid *qid, in v9fs_qid_iget_dotl() argument
112 inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st); in v9fs_qid_iget_dotl()
122 inode->i_ino = QID2INO(qid); in v9fs_qid_iget_dotl()
154 inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); in v9fs_inode_from_fid_dotl()
235 struct p9_qid qid; in v9fs_vfs_atomic_open_dotl() local
[all …]
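
On the fs side, v9fs_test_inode_dotl() treats the server-issued qid as the object's identity: an existing inode matches only if the qid's type, version and path all agree, and QID2INO() derives the inode number from the qid. A user-space sketch of that identity test, assuming only the type/version/path fields visible in the snippets:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct p9_qid_like {      /* fields as used by the snippets above */
        uint8_t  type;
        uint32_t version;
        uint64_t path;
    };

    static bool same_object(const struct p9_qid_like *a,
                            const struct p9_qid_like *b)
    {
        return a->type == b->type &&
               a->version == b->version &&
               a->path == b->path;
    }

    int main(void)
    {
        struct p9_qid_like f1 = { 0, 1, 0xabcd };
        struct p9_qid_like f2 = { 0, 2, 0xabcd }; /* same path, new version */

        printf("same object: %d\n", same_object(&f1, &f2)); /* 0 */
        return 0;
    }
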
/linux/fs/quota/
kqid.c
120 bool qid_valid(struct kqid qid) in qid_valid() argument
122 switch (qid.type) { in qid_valid()
124 return uid_valid(qid.uid); in qid_valid()
126 return gid_valid(qid.gid); in qid_valid()
128 return projid_valid(qid.projid); in qid_valid()
quota.c
204 struct kqid qid; in quota_getquota() local
211 qid = make_kqid(current_user_ns(), type, id); in quota_getquota()
212 if (!qid_has_mapping(sb->s_user_ns, qid)) in quota_getquota()
214 ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); in quota_getquota()
240 struct kqid qid; in quota_getnextquota() local
247 qid = make_kqid(current_user_ns(), type, id); in quota_getnextquota()
248 if (!qid_has_mapping(sb->s_user_ns, qid)) in quota_getnextquota()
250 ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq); in quota_getnextquota()
255 idq.dqb_id = from_kqid(current_user_ns(), qid); in quota_getnextquota()
292 struct kqid qid; in quota_setquota() local
[all …]
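
quota_getquota() shows the standard validation flow for a raw id from user space: convert it to a namespaced kqid with make_kqid(), reject it if the filesystem's user namespace has no mapping (qid_has_mapping() is literally from_kqid() != (qid_t)-1, per quota.h above), and only then call ->get_dqblk(). A user-space sketch of that flow, with the namespace mapping simulated by a simple range check:

    #include <stdbool.h>
    #include <stdio.h>

    enum quota_type { USR, GRP, PRJ };    /* simplified stand-ins */

    struct kqid_like { enum quota_type type; long id; }; /* -1 = unmapped */

    /* make_kqid() stand-in: here ids >= 100000 have no mapping */
    static struct kqid_like make_kqid_like(enum quota_type t, unsigned int qid)
    {
        return (struct kqid_like){ t, qid < 100000 ? (long)qid : -1 };
    }

    static bool qid_has_mapping_like(struct kqid_like q)
    {
        return q.id != -1;                /* from_kqid() != (qid_t)-1 */
    }

    int main(void)
    {
        struct kqid_like q = make_kqid_like(USR, 1000);

        if (!qid_has_mapping_like(q)) {
            puts("-EINVAL: id not mapped in this namespace");
            return 1;
        }
        printf("would call ->get_dqblk() for id %ld\n", q.id);
        return 0;
    }
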
/linux/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
84 u16 qid) in i40e_xsk_pool_enable() argument
93 if (qid >= vsi->num_queue_pairs) in i40e_xsk_pool_enable()
96 if (qid >= netdev->real_num_rx_queues || in i40e_xsk_pool_enable()
97 qid >= netdev->real_num_tx_queues) in i40e_xsk_pool_enable()
104 set_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_enable()
109 err = i40e_queue_pair_disable(vsi, qid); in i40e_xsk_pool_enable()
113 err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); in i40e_xsk_pool_enable()
117 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_pool_enable()
122 err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); in i40e_xsk_pool_enable()
138 static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) in i40e_xsk_pool_disable() argument
[all …]
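
The igb, ixgbe and i40e xsk_pool_enable() hits all apply the same qid validation before swapping an AF_XDP pool in: the qid must name a queue pair the driver owns and stay below the netdev's real_num_rx_queues and real_num_tx_queues; only then are the rings for that qid disabled, the pool bit set, and the rings re-enabled. A user-space sketch of just the shared bound checks (all structures and numbers invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_netdev {
        unsigned int real_num_rx_queues;
        unsigned int real_num_tx_queues;
    };

    static bool xsk_qid_ok(const struct fake_netdev *nd,
                           unsigned int num_queue_pairs, unsigned int qid)
    {
        if (qid >= num_queue_pairs)           /* driver-side bound */
            return false;
        if (qid >= nd->real_num_rx_queues ||  /* netdev-side bounds */
            qid >= nd->real_num_tx_queues)
            return false;
        return true;
    }

    int main(void)
    {
        struct fake_netdev nd = { .real_num_rx_queues = 8,
                                  .real_num_tx_queues = 4 };

        printf("qid 6 ok: %d\n", xsk_qid_ok(&nd, 8, 6)); /* 0: no txq 6 */
        printf("qid 2 ok: %d\n", xsk_qid_ok(&nd, 8, 2)); /* 1 */
        return 0;
    }
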
