
Searched refs: q_id (Results 1 – 25 of 42) sorted by relevance


/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_eqs.c
33 HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
34 HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
37 HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
38 HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
41 HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
42 HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
45 HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
46 HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
80 container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
83 container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
[all …]
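
The two container_of() macros on lines 80 and 83 use a neat idiom: given a pointer to one event queue, stepping back q_id elements lands on element zero of the embedded array, from which container_of() recovers the owning aeqs/ceqs structure. A minimal sketch of the idiom, with hypothetical struct names:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct eq {
            int q_id;               /* index of this eq within the parent's array */
    };

    struct eqs {
            int num_eqs;
            struct eq eq[4];        /* invariant: eq[i].q_id == i */
    };

    /* (eq - eq->q_id) points at eq[0], so container_of() on the
     * array's first element yields the enclosing struct eqs. */
    static inline struct eqs *eq_to_eqs(struct eq *eq)
    {
            return container_of(eq - eq->q_id, struct eqs, eq[0]);
    }
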
hinic_hw_csr.h
87 #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ argument
88 (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
91 #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ argument
92 (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
95 #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ argument
96 (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
99 #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ argument
100 (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
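
These CSR helpers compute one register address per (queue, page) pair: a per-queue MTT block located by q_id, plus a HI/LO register pair for each page of the queue's physical address table. A sketch of the shape of such macros; the base and stride constants below are illustrative, not HINIC's real offsets:

    /* Illustrative constants only; the real offsets live in hinic_hw_csr.h. */
    #define EQ_MTT_BASE             0x200
    #define EQ_MTT_Q_STRIDE         0x40
    #define EQ_MTT_OFF(q_id)        (EQ_MTT_BASE + (q_id) * EQ_MTT_Q_STRIDE)

    /* Each page of the queue's address table gets a HI/LO pair. */
    #define EQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
            (EQ_MTT_OFF(q_id) + (pg_num) * 8)
    #define EQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
            (EQ_MTT_OFF(q_id) + (pg_num) * 8 + 4)
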
hinic_hw_io.c
30 #define CI_ADDR(base_addr, q_id) ((base_addr) + \ argument
31 (q_id) * CI_Q_ADDR_SIZE)
132 base_qpn + qp->q_id); in write_sq_ctxts()
176 base_qpn + qp->q_id); in write_rq_ctxts()
272 struct hinic_qp *qp, int q_id, in init_qp() argument
281 qp->q_id = q_id; in init_qp()
283 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], in init_qp()
291 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], in init_qp()
306 func_to_io->sq_db[q_id] = db_base; in init_qp()
308 qp->sq.qid = q_id; in init_qp()
[all …]
hinic_tx.c
494 u16 prod_idx, q_id = skb->queue_mapping; in hinic_lb_xmit_frame() local
502 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame()
514 netif_stop_subqueue(netdev, qp->q_id); in hinic_lb_xmit_frame()
518 netif_wake_subqueue(nic_dev->netdev, qp->q_id); in hinic_lb_xmit_frame()
537 netdev_txq = netdev_get_tx_queue(netdev, q_id); in hinic_lb_xmit_frame()
555 u16 prod_idx, q_id = skb->queue_mapping; in hinic_xmit_frame() local
563 txq = &nic_dev->txqs[q_id]; in hinic_xmit_frame()
595 netif_stop_subqueue(netdev, qp->q_id); in hinic_xmit_frame()
602 netif_wake_subqueue(nic_dev->netdev, qp->q_id); in hinic_xmit_frame()
626 netdev_txq = netdev_get_tx_queue(netdev, q_id); in hinic_xmit_frame()
[all …]
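
Both transmit paths above follow the standard multiqueue TX discipline: select the txq from skb->queue_mapping, stop only that subqueue when its ring fills, then re-check and wake it if completions freed space in the meantime, so a racing wakeup is not lost. A condensed sketch of the pattern; my_dev/my_txq and the helper functions are hypothetical:

    #include <linux/netdevice.h>

    struct my_txq { /* ring state elided */ };
    struct my_dev { struct my_txq *txqs; };

    bool my_txq_has_room(struct my_txq *txq);                     /* hypothetical */
    void my_post_descriptor(struct my_txq *txq, struct sk_buff *skb);

    static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
    {
            struct my_dev *dev = netdev_priv(netdev);
            u16 q_id = skb->queue_mapping;
            struct my_txq *txq = &dev->txqs[q_id];

            if (!my_txq_has_room(txq)) {
                    netif_stop_subqueue(netdev, q_id);  /* stall only this queue */

                    /* Completions may have freed space after the check above;
                     * re-check so we do not sleep on a queue that has room. */
                    if (!my_txq_has_room(txq))
                            return NETDEV_TX_BUSY;

                    netif_wake_subqueue(netdev, q_id);
            }

            my_post_descriptor(txq, skb);
            return NETDEV_TX_OK;
    }
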
hinic_rx.c
421 skb_record_rx_queue(skb, qp->q_id); in rxq_recv()
532 intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id]; in rx_request_irq()
549 cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); in rx_request_irq()
594 "%s_rxq%d", netdev->name, qp->q_id); in hinic_init_rxq()
hinic_main.c
820 u16 num_sqs, q_id; in hinic_tx_timeout() local
826 for (q_id = 0; q_id < num_sqs; q_id++) { in hinic_tx_timeout()
827 if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) in hinic_tx_timeout()
830 sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id); in hinic_tx_timeout()
835 q_id, sw_pi, hw_ci, sw_ci, in hinic_tx_timeout()
836 nic_dev->txqs[q_id].napi.state); in hinic_tx_timeout()
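
hinic_tx_timeout() walks every send queue but dumps state only for queues the stack has actually stopped, comparing the software producer index against the hardware consumer index. A sketch of that diagnostic loop; the my_* helpers and fields are hypothetical:

    #include <linux/netdevice.h>

    struct my_dev { u16 num_sqs; };

    u16 my_sq_sw_pi(struct my_dev *dev, u16 q_id);  /* hypothetical */
    u16 my_sq_hw_ci(struct my_dev *dev, u16 q_id);

    static void my_tx_timeout(struct net_device *netdev, unsigned int txqueue)
    {
            struct my_dev *dev = netdev_priv(netdev);
            u16 q_id;

            for (q_id = 0; q_id < dev->num_sqs; q_id++) {
                    /* Skip queues the stack still considers healthy. */
                    if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
                            continue;

                    netdev_err(netdev, "txq %u stalled: sw_pi=%u hw_ci=%u\n",
                               q_id, my_sq_sw_pi(dev, q_id), my_sq_hw_ci(dev, q_id));
            }
    }
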
hinic_hw_qp.c
42 #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ argument
43 (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
45 #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ argument
47 (max_sqs + (q_id)) * Q_CTXT_SIZE)
625 HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); in sq_prepare_db()
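
The two offset macros describe one contiguous context table: a reserved header sized by the total queue count, then all SQ contexts, then the RQ contexts, each Q_CTXT_SIZE bytes, so RQ q_id lands max_sqs slots past the SQ region. A hedged reconstruction (the RQ macro's first term is truncated above, so its reserved-area term is an assumption, and the sizes are illustrative):

    #define CTXT_RSVD       240     /* illustrative, not HINIC's real value */
    #define Q_CTXT_SIZE     48      /* illustrative */

    /* SQ contexts start right after the reserved region. */
    #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
            (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)

    /* RQ contexts follow all max_sqs SQ contexts. */
    #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
            (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
             ((max_sqs) + (q_id)) * Q_CTXT_SIZE)
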
hinic_hw_cmdq.c
508 enum hinic_set_arm_qtype q_type, u32 q_id) in hinic_set_arm_bit() argument
517 arm_bit.q_id = q_id; in hinic_set_arm_bit()
521 dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); in hinic_set_arm_bit()
hinic_ethtool.c
677 static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id, in set_queue_coalesce() argument
687 intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] : in set_queue_coalesce()
688 &nic_dev->tx_intr_coalesce[q_id]; in set_queue_coalesce()
697 q_id >= nic_dev->num_qps) in set_queue_coalesce()
700 msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry : in set_queue_coalesce()
701 nic_dev->txqs[q_id].sq->msix_entry; in set_queue_coalesce()
711 set_rx_coal ? "rx" : "tx", q_id); in set_queue_coalesce()
/linux/tools/testing/selftests/ublk/
null.c
46 struct io_uring_sqe *sqe, int q_id) in __setup_nop_io() argument
55 sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1); in __setup_nop_io()
66 io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); in null_queue_zc_io()
68 ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); in null_queue_zc_io()
71 __setup_nop_io(tag, iod, sqe[1], q->q_id); in null_queue_zc_io()
74 io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index); in null_queue_zc_io()
75 sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); in null_queue_zc_io()
88 __setup_nop_io(tag, iod, sqe[0], q->q_id); in null_queue_auto_zc_io()
kublk.h
162 int q_id; member
211 static inline __u64 ublk_user_copy_offset(unsigned q_id, unsigned tag) in ublk_user_copy_offset()
214 ((__u64)q_id << UBLK_QID_OFF | (__u64)tag << UBLK_TAG_OFF); in ublk_user_copy_offset()
223 unsigned tgt_data, unsigned q_id, unsigned is_target_io) in build_user_data()
225 /* we only have 7 bits to encode q_id */ in build_user_data()
227 assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7)); in build_user_data()
230 (__u64)q_id << 56 | (__u64)is_target_io << 63; in build_user_data()
295 struct ublk_queue *q, int tag, int q_id, __u64 index) in __io_uring_prep_buf_reg_unreg()
309 cmd->q_id = q_id; in io_uring_prep_buf_register()
220 build_user_data(unsigned tag, unsigned op, unsigned tgt_data, unsigned q_id, unsigned is_target_io) build_user_data() argument
292 __io_uring_prep_buf_reg_unreg(struct io_uring_sqe *sqe, struct ublk_queue *q, int tag, int q_id, __u64 index) __io_uring_prep_buf_reg_unreg() argument
310 io_uring_prep_buf_register(struct io_uring_sqe *sqe, struct ublk_queue *q, int tag, int q_id, __u64 index) io_uring_prep_buf_register() argument
317 io_uring_prep_buf_unregister(struct io_uring_sqe *sqe, struct ublk_queue *q, int tag, int q_id, __u64 index) io_uring_prep_buf_unregister() argument
[all …]
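
build_user_data() packs everything the CQE handler needs into io_uring's 64-bit user_data: tag in bits 0–15, opcode in 16–23, target data in 24–39, q_id in 56–62 (the reason for the 7-bit assert), and an is-target-IO flag in bit 63. A self-contained sketch of the encode side plus decode helpers; the exact tag/op/tgt_data shifts and the decode bodies are inferred from the asserted field widths, not copied from kublk.h:

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t build_user_data(unsigned tag, unsigned op,
                                           unsigned tgt_data, unsigned q_id,
                                           unsigned is_target_io)
    {
            /* we only have 7 bits to encode q_id */
            assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16) && !(q_id >> 7));

            return tag | (uint64_t)op << 16 | (uint64_t)tgt_data << 24 |
                   (uint64_t)q_id << 56 | (uint64_t)is_target_io << 63;
    }

    /* Assumed inverses, matching the layout above. */
    static inline unsigned user_data_to_tag(uint64_t d)  { return d & 0xffff; }
    static inline unsigned user_data_to_op(uint64_t d)   { return (d >> 16) & 0xff; }
    static inline unsigned user_data_to_q_id(uint64_t d) { return (d >> 56) & 0x7f; }
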
file_backed.c
26 sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); in loop_queue_flush_io()
54 sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); in loop_queue_tgt_rw_io()
60 io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index); in loop_queue_tgt_rw_io()
63 ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); in loop_queue_tgt_rw_io()
70 sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1); in loop_queue_tgt_rw_io()
72 io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index); in loop_queue_tgt_rw_io()
73 sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1); in loop_queue_tgt_rw_io()
stripe.c
145 io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index); in stripe_queue_tgt_rw_io()
148 ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1); in stripe_queue_tgt_rw_io()
166 sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, q->q_id, 1); in stripe_queue_tgt_rw_io()
171 io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, io->buf_index); in stripe_queue_tgt_rw_io()
173 tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1); in stripe_queue_tgt_rw_io()
191 sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, q->q_id, 1); in handle_flush()
kublk.c
453 off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz(); in ublk_queue_init()
458 q->dev->dev_info.dev_id, q->q_id); in ublk_queue_init()
474 dev->dev_info.dev_id, q->q_id, i); in ublk_queue_init()
483 dev->dev_info.dev_id, q->q_id); in ublk_queue_init()
606 __u64 off = ublk_user_copy_offset(q->q_id, io->tag); in ublk_queue_io_cmd()
686 cmd->q_id = q->q_id; in ublk_submit_fetch_commands()
695 user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0); in ublk_submit_fetch_commands()
703 __func__, t->idx, q->q_id, io->tag, cmd_op, in ublk_submit_fetch_commands()
728 int q_id in ublksrv_handle_tgt_cqe()
693 int q_id = i / dinfo->queue_depth; ublk_submit_fetch_commands() local
776 unsigned q_id = user_data_to_q_id(cqe->user_data); ublk_handle_cqe() local
[all …]
fault_inject.c
52 sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1); in ublk_fault_inject_queue_io()
/linux/tools/cgroup/
iocost_monitor.py
64 def __init__(self, root_blkcg, q_id, include_dying=False): argument
67 self.walk(root_blkcg, q_id, '')
72 def walk(self, blkcg, q_id, parent_path): argument
80 address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
88 self.walk(c, q_id, path)
224 q_id = None variable
232 q_id = blkg.q.id.value_() variable
258 for path, blkg in BlkgIterator(blkcg_root, q_id):
/linux/drivers/block/
ublk_drv.c
196 int q_id; member
255 u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
808 ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) in ublk_queue_cmd_buf()
810 return ublk_get_queue(ub, q_id)->io_cmd_buf; in ublk_queue_cmd_buf()
1306 __func__, ubq->q_id, req->tag, io->flags, in ublk_dispatch_req()
1331 __func__, ubq->q_id, req->tag, io->flags); in ublk_dispatch_req()
1775 int q_id, ret = 0; in ublk_ch_mmap()
1794 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz; in ublk_ch_mmap()
1796 __func__, q_id, current->pid, vma->vm_start, in ublk_ch_mmap()
1802 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT; in ublk_ch_mmap()
793 ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) ublk_queue_cmd_buf() argument
1760 int q_id, ret = 0; ublk_ch_mmap() local
2178 ublk_register_io_buf(struct io_uring_cmd *cmd, struct ublk_device *ub, u16 q_id, u16 tag, struct ublk_io *io, unsigned int index, unsigned int issue_flags) ublk_register_io_buf() argument
2205 ublk_daemon_register_io_buf(struct io_uring_cmd *cmd, struct ublk_device *ub, u16 q_id, u16 tag, struct ublk_io *io, unsigned index, unsigned issue_flags) ublk_daemon_register_io_buf() argument
2358 u16 q_id = READ_ONCE(ub_src->q_id); ublk_ch_uring_cmd_local() local
2482 __ublk_check_and_get_req(struct ublk_device *ub, u16 q_id, u16 tag, struct ublk_io *io, size_t offset) __ublk_check_and_get_req() argument
2563 u16 tag, q_id; ublk_check_and_get_req() local
2644 ublk_deinit_queue(struct ublk_device *ub, int q_id) ublk_deinit_queue() argument
2669 ublk_get_queue_numa_node(struct ublk_device *ub, int q_id) ublk_get_queue_numa_node() argument
2682 ublk_init_queue(struct ublk_device *ub, int q_id) ublk_init_queue() argument
[all …]
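
ublk_ch_mmap() and the selftests agree on a simple contract: queue q_id's command buffer is mmap()ed at UBLKSRV_CMD_BUF_OFFSET + q_id * max_cmd_buf_size, and the driver inverts that arithmetic from the fault offset (line 1794 above). A sketch of both directions; the offset constant's value here is illustrative, not the ABI's:

    #include <stdint.h>

    #define UBLKSRV_CMD_BUF_OFFSET  0x8000000ULL    /* illustrative value */

    /* Userspace: file offset at which to mmap queue q_id's buffer. */
    static inline uint64_t cmd_buf_offset(unsigned q_id, uint64_t max_sz)
    {
            return UBLKSRV_CMD_BUF_OFFSET + q_id * max_sz;
    }

    /* Driver: recover q_id from the offset userspace mapped. */
    static inline unsigned cmd_buf_q_id(uint64_t phys_off, uint64_t max_sz)
    {
            return (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
    }
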
/linux/drivers/net/ethernet/intel/ice/
ice_idc.c
135 u16 q_id; in ice_del_rdma_qset() local
147 q_id = qset->qs_handle; in ice_del_rdma_qset()
150 return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); in ice_del_rdma_qset()
ice_switch.h
120 u16 q_id:11; member
172 u16 q_id:11; member
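
ice_switch.h declares q_id as an 11-bit bitfield, so a forwarding rule can address 2^11 = 2048 queues while leaving the rest of the u16 for other per-rule state. A minimal illustration; the struct and sibling field are hypothetical:

    #include <linux/types.h>

    struct fwd_rule_info {
            u16 q_id:11;    /* destination queue, 0..2047 */
            u16 flags:5;    /* hypothetical use of the remaining bits */
    };
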
/linux/fs/xfs/scrub/
dqiterate.c
158 *next_incore_id = dq->q_id; in xchk_dquot_iter_advance_incore()
208 cursor->id = dq->q_id + 1; in xchk_dquot_iter()
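
The scrub iterator resumes each step at q_id + 1, the usual cursor pattern for walking a sparse, concurrently modified ID space without holding locks across iterations: whatever the next lookup returns, the cursor has already moved strictly past the previous hit. A sketch with hypothetical tree helpers:

    #include <stddef.h>
    #include <stdint.h>

    struct tree;
    struct obj { uint32_t id; };

    /* Hypothetical: smallest id >= cursor, or NULL when exhausted. */
    struct obj *lookup_at_or_above(struct tree *t, uint32_t cursor);
    void process(struct obj *o);        /* hypothetical per-object work */

    void walk_all(struct tree *t)
    {
            uint32_t cursor = 0;
            struct obj *o;

            while ((o = lookup_at_or_above(t, cursor)) != NULL) {
                    process(o);
                    cursor = o->id + 1; /* resume strictly after this object */
            }
    }
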
/linux/drivers/net/ethernet/intel/idpf/
idpf_txrx.h
571 u32 q_id; member
707 u32 q_id; member
786 u32 q_id; member
846 u32 q_id; member
idpf_virtchnl.c
1705 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_txq_config_chunk()
1718 val = q->complq->q_id; in idpf_fill_txq_config_chunk()
1720 val = q->txq_grp->complq->q_id; in idpf_fill_txq_config_chunk()
1744 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_complq_config_chunk()
1872 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_rxq_config_chunk()
1899 qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); in idpf_fill_rxq_config_chunk()
1902 qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id); in idpf_fill_rxq_config_chunk()
1925 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_bufq_config_chunk()
2126 qid = q->rxq->q_id; in idpf_send_ena_dis_queue_set_msg()
2129 qid = q->txq->q_id; in idpf_send_ena_dis_queue_set_msg()
[all …]
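
Every q_id idpf hands to firmware goes through cpu_to_le32()/cpu_to_le16(), because the virtchnl descriptors are defined little-endian regardless of host byte order. A minimal userspace analogue using htole32() in place of the kernel helper; the wire struct is hypothetical:

    #include <endian.h>
    #include <stdint.h>

    struct qcfg_wire {
            uint32_t queue_id;      /* little-endian on the wire */
    };

    static void fill_qcfg(struct qcfg_wire *qi, uint32_t q_id)
    {
            /* No-op on LE hosts, byte swap on BE; either way the
             * firmware sees the same byte order. */
            qi->queue_id = htole32(q_id);
    }
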
idpf_controlq_api.h
101 int q_id; member
/linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_tm.h
266 int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
267 int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
355 retval = assign_queue_slot_by_qid(pqm, q_data->q_id); in pqm_create_queue()
356 *qid = q_data->q_id; in pqm_create_queue()
838 q_data->q_id = q->properties.queue_id; in criu_checkpoint_queue()
868 pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id); in criu_checkpoint_queue()
1065 ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws); in kfd_criu_restore_queue()
