| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| en_dim.c |
| 38 struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) in mlx5e_complete_dim_work() argument |
| 40 mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts, in mlx5e_complete_dim_work() |
| 52 mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq); in mlx5e_rx_dim_work() |
| 62 mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); in mlx5e_tx_dim_work() |
| 67 u8 cq_period_mode, struct mlx5_core_cq *mcq, in mlx5e_dim_enable() argument |
| 82 err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode); in mlx5e_dim_enable() |
| 107 c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq); in mlx5e_dim_rx_change() |
| 134 c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq); in mlx5e_dim_tx_change() |
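These en_dim.c hits are the back half of mlx5's dynamic interrupt moderation (DIM): a deferred worker turns the profile picked by the net_dim library into a hardware CQ moderation update. A minimal sketch of the RX work pattern, assuming dim->priv carries the RQ back-pointer and that mlx5e_modify_cq_moderation() takes the period mode as its fifth argument (both inferred, not confirmed by the truncated hits):

```c
#include <linux/dim.h>

/* Hedged sketch of mlx5e_rx_dim_work(); not the verbatim driver code. */
static void rx_dim_work_sketch(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct mlx5e_rq *rq = dim->priv;	/* assumed back-pointer */
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	/* Push the chosen usec/packet limits into the CQ context (hit 40). */
	mlx5e_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
				   moder.usec, moder.pkts, dim->mode);

	dim->state = DIM_START_MEASURE;		/* ready for the next sample */
}
```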
|
| wc.c |
| 25 struct mlx5_core_cq mcq; member |
| 51 struct mlx5_core_cq *mcq = &cq->mcq; in mlx5_wc_create_cqwq() local |
| 60 mcq->cqe_sz = 64; in mlx5_wc_create_cqwq() |
| 61 mcq->set_ci_db = cq->wq_ctrl.db.db; in mlx5_wc_create_cqwq() |
| 62 mcq->arm_db = cq->wq_ctrl.db.db + 1; in mlx5_wc_create_cqwq() |
| 79 struct mlx5_core_cq *mcq = &cq->mcq; in create_wc_cq() local |
| 107 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); in create_wc_cq() |
| 152 mlx5_core_destroy_cq(cq->mdev, &cq->mcq); in mlx5_wc_destroy_cq() |
| 173 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); in create_wc_sq() |
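The wc.c hits show the CQ bring-up pattern this listing repeats in en_main.c, hws/send.c and sws/dr_send.c: the two-dword doorbell record is split between the consumer-index doorbell and the arm doorbell before mlx5_core_create_cq() is issued. A sketch of just that setup step, with field names following struct mlx5_core_cq and struct mlx5_wq_ctrl:

```c
static void cq_init_sw_state(struct mlx5_core_cq *mcq,
			     struct mlx5_wq_ctrl *wq_ctrl)
{
	mcq->cqe_sz	= 64;			/* 64-byte CQEs */
	mcq->set_ci_db	= wq_ctrl->db.db;	/* consumer-index doorbell */
	mcq->arm_db	= wq_ctrl->db.db + 1;	/* arm/event-request doorbell */
	*mcq->set_ci_db	= 0;			/* nothing consumed yet */
}
```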
|
| cq.c |
| 49 struct mlx5_core_cq *mcq; in mlx5_cq_tasklet_cb() local |
| 56 list_for_each_entry_safe(mcq, temp, &ctx->process_list, in mlx5_cq_tasklet_cb() |
| 58 list_del_init(&mcq->tasklet_ctx.list); in mlx5_cq_tasklet_cb() |
| 59 mcq->tasklet_ctx.comp(mcq, NULL); in mlx5_cq_tasklet_cb() |
| 60 mlx5_cq_put(mcq); in mlx5_cq_tasklet_cb() |
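mlx5_cq_tasklet_cb() drains CQs that the EQ handler queued for deferred completion: each CQ's comp() handler runs once, then the reference taken at enqueue time is dropped. The shape of the loop per the hits, with the locking that protects process_list omitted:

```c
static void cq_tasklet_drain(struct mlx5_eq_tasklet *ctx)
{
	struct mlx5_core_cq *mcq, *temp;

	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
				 tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq, NULL);	/* completion handler */
		mlx5_cq_put(mcq);	/* drop the enqueue-time reference */
	}
}
```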
|
| en_main.c |
| 1157 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); in mlx5e_create_rq() |
| 1899 csp.cqn = sq->cq.mcq.cqn; in mlx5e_open_txqsq() |
| 2070 csp.cqn = sq->cq.mcq.cqn; in mlx5e_open_icosq() |
| 2131 csp.cqn = sq->cq.mcq.cqn; in mlx5e_open_xdpsq() |
| 2209 struct mlx5_core_cq *mcq = &cq->mcq; in mlx5e_alloc_cq_common() local |
| 2218 mcq->cqe_sz = 64; in mlx5e_alloc_cq_common() |
| 2219 mcq->set_ci_db = cq->wq_ctrl.db.db; in mlx5e_alloc_cq_common() |
| 2220 mcq->arm_db = cq->wq_ctrl.db.db + 1; in mlx5e_alloc_cq_common() |
| 2221 *mcq->set_ci_db = 0; in mlx5e_alloc_cq_common() |
| 2222 mcq->vector = param->eq_ix; in mlx5e_alloc_cq_common() |
| [all …] |
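The en_main.c hits add two pieces on top of the doorbell setup above: the completion vector is chosen before creation, and the CQN that creation assigns is copied into every queue completing onto the CQ (rqc for the RQ, csp.cqn for the SQ variants). A hedged composite of those steps, not a real driver function:

```c
static int create_rq_cq_sketch(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq,
			       int eq_ix, void *rqc, u32 *cq_in, int cq_inlen)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
	int err;

	rq->cq.mcq.vector = eq_ix;	/* EQ that services this CQ (hit 2222) */

	err = mlx5_core_create_cq(mdev, &rq->cq.mcq, cq_in, cq_inlen,
				  out, sizeof(out));
	if (err)
		return err;

	/* Route the RQ's completions to the freshly assigned CQN (hit 1157). */
	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	return 0;
}
```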
|
| en_ethtool.c |
| 712 mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, in mlx5e_set_priv_channels_tx_coalesce() |
| 732 mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts, in mlx5e_set_priv_channels_rx_coalesce() |
| 914 mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, in mlx5e_ethtool_set_per_queue_coalesce() |
| 924 mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, in mlx5e_ethtool_set_per_queue_coalesce() |
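Both ethtool paths walk the live channels and reprogram moderation per CQ; the per-queue variant calls the exported core helper directly. A sketch of the RX walk, assuming mlx5e's priv->channels layout (the TX walk adds an inner loop over c->sq[tc]):

```c
static void set_rx_coalesce_sketch(struct mlx5e_priv *priv, u16 usec, u16 pkts)
{
	int i;

	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		/* Exported core helper, as in hits 914/924. */
		mlx5_core_modify_cq_moderation(priv->mdev, &c->rq.cq.mcq,
					       usec, pkts);
	}
}
```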
|
| en.h |
| 349 struct mlx5_core_cq mcq; member |
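Every "member" hit in this listing (en.h, conn.h, cmd.h, dr_types.h, mlx5_vnet.c) is the same idiom: the core CQ is embedded inside a driver-private wrapper so completion and event callbacks can recover the wrapper via container_of(). Shape only; the real structs carry more fields:

```c
struct driver_cq {			/* hypothetical wrapper */
	struct mlx5_wq_ctrl wq_ctrl;	/* CQE ring + doorbell memory */
	struct mlx5_core_cq mcq;	/* core state: cqn, doorbells, comp() */
};
```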
|
| /linux/drivers/infiniband/hw/mlx5/ |
| cq.c |
| 51 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) in mlx5_ib_cq_event() argument |
| 53 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() |
| 60 type, mcq->cqn); in mlx5_ib_cq_event() |
| 87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe() |
| 99 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw() |
| 470 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one() |
| 472 ++cq->mcq.cons_index; in mlx5_poll_one() |
| 529 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one() |
| 570 cq->mcq.cqn, sig->err_item.key, in mlx5_poll_one() |
| 596 cq->mcq.cqn); in poll_soft_wc() |
| [all …] |
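The IB poll path owns a CQE only when its ownership bit matches the parity of the consumer index's sweep over the power-of-two ring; with 128-byte CQEs the 64-byte report sits in the second half (hits 87/470). A hedged reconstruction of get_sw_cqe(), where get_cqe() is the driver-local ring lookup and cq->ibcq.cqe serves as the index mask:

```c
static struct mlx5_cqe64 *sw_cqe_sketch(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (get_cqe_opcode(cqe64) != MLX5_CQE_INVALID &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
	      !!(n & (cq->ibcq.cqe + 1))))
		return cqe64;		/* software-owned: safe to consume */
	return NULL;			/* still owned by hardware */
}
```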
|
| qp.c |
| 2034 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); in create_xrc_tgt_qp() |
| 2035 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); in create_xrc_tgt_qp() |
| 2177 to_mcq(init_attr->send_cq)->mcq.cqn); in create_dci() |
| 2181 to_mcq(init_attr->recv_cq)->mcq.cqn); in create_dci() |
| 2358 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); in create_user_qp() |
| 2373 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); in create_user_qp() |
| 2376 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); in create_user_qp() |
| 2516 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn); in create_kernel_qp() |
| 2519 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn); in create_kernel_qp() |
| 2572 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx5_ib_lock_cqs() |
| [all …] |
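Hit 2572 is the interesting one: when a verb must hold both of a QP's CQ locks, mlx5_ib takes the CQ with the smaller cqn first, so two QPs sharing a CQ pair can never lock in opposite orders. A sketch of that rule (the real helper also handles send_cq == recv_cq and NULL CQs):

```c
static void lock_cq_pair(struct mlx5_ib_cq *send_cq,
			 struct mlx5_ib_cq *recv_cq)
{
	if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
```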
|
| srq.c |
| 281 in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn; in mlx5_ib_create_srq() |
| 283 in.cqn = to_mcq(dev->devr.c0)->mcq.cqn; in mlx5_ib_create_srq() |
|
| main.c |
| 2740 struct mlx5_core_cq *mcq; in mlx5_ib_handle_internal_error() local |
| 2755 if (send_mcq->mcq.comp && in mlx5_ib_handle_internal_error() |
| 2757 if (!send_mcq->mcq.reset_notify_added) { in mlx5_ib_handle_internal_error() |
| 2758 send_mcq->mcq.reset_notify_added = 1; in mlx5_ib_handle_internal_error() |
| 2759 list_add_tail(&send_mcq->mcq.reset_notify, in mlx5_ib_handle_internal_error() |
| 2772 if (recv_mcq->mcq.comp && in mlx5_ib_handle_internal_error() |
| 2774 if (!recv_mcq->mcq.reset_notify_added) { in mlx5_ib_handle_internal_error() |
| 2775 recv_mcq->mcq.reset_notify_added = 1; in mlx5_ib_handle_internal_error() |
| 2776 list_add_tail(&recv_mcq->mcq.reset_notify, in mlx5_ib_handle_internal_error() |
| 2789 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { in mlx5_ib_handle_internal_error() |
| [all …] |
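On a device-fatal error, every CQ reachable from a QP is queued exactly once (reset_notify_added is the dedup guard) and its completion handler is then run so consumers observe the flushed state. An outline with the QP-list walk and its locking elided:

```c
static void queue_cq_for_flush(struct mlx5_ib_cq *cq, struct list_head *armed)
{
	if (cq->mcq.comp && !cq->mcq.reset_notify_added) {
		cq->mcq.reset_notify_added = 1;		/* queue at most once */
		list_add_tail(&cq->mcq.reset_notify, armed);
	}
}

static void run_flushed_completions(struct list_head *armed)
{
	struct mlx5_core_cq *mcq;

	list_for_each_entry(mcq, armed, reset_notify)
		mcq->comp(mcq, NULL);	/* wake pollers on the dead device */
}
```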
|
| devx.c |
| 646 to_mcq(uobj->object)->mcq.cqn) == in devx_is_valid_obj_id() |
| 1492 static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) in devx_cq_comp() argument |
| 1494 struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq); in devx_cq_comp() |
| 1498 u32 obj_id = mcq->cqn; in devx_cq_comp() |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
| conn.c |
| 361 mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT, in mlx5_fpga_conn_arm_cq() |
| 400 static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq, in mlx5_fpga_conn_cq_complete() argument |
| 405 conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); in mlx5_fpga_conn_cq_complete() |
| 424 conn->cq.mcq.cqe_sz = 64; in mlx5_fpga_conn_create_cq() |
| 425 conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db; in mlx5_fpga_conn_create_cq() |
| 426 conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1; in mlx5_fpga_conn_create_cq() |
| 427 *conn->cq.mcq.set_ci_db = 0; in mlx5_fpga_conn_create_cq() |
| 428 conn->cq.mcq.vector = 0; in mlx5_fpga_conn_create_cq() |
| 429 conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; in mlx5_fpga_conn_create_cq() |
| 472 err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out)); in mlx5_fpga_conn_create_cq() |
| [all …] |
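Hit 361 shows the other half of the doorbell record in use: re-arming the CQ so the next completion raises an event. A sketch of the call; the first two arguments come from the hit, the UAR page and consumer counter past the truncation are assumed from the surrounding driver:

```c
static void conn_arm_cq_sketch(struct mlx5_fpga_conn *conn)
{
	mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,	/* request next event */
		    conn->fdev->conn_res.uar->map,	/* UAR doorbell page */
		    conn->cq.wq.cc);			/* current consumer count */
}
```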
|
| conn.h |
| 58 struct mlx5_core_cq mcq; member |
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| cq.c |
| 63 struct mlx4_cq *mcq, *temp; in mlx4_cq_tasklet_cb() local |
| 69 list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { in mlx4_cq_tasklet_cb() |
| 70 list_del_init(&mcq->tasklet_ctx.list); in mlx4_cq_tasklet_cb() |
| 71 mcq->tasklet_ctx.comp(mcq); in mlx4_cq_tasklet_cb() |
| 72 if (refcount_dec_and_test(&mcq->refcount)) in mlx4_cq_tasklet_cb() |
| 73 complete(&mcq->free); in mlx4_cq_tasklet_cb() |
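mlx4's tasklet is the twin of the mlx5 loop above (note comp() takes no EQE argument here), and it makes the CQ lifetime rule explicit: dropping the last reference signals the completion that the CQ free path waits on. A sketch of that rule:

```c
static void mlx4_cq_put_sketch(struct mlx4_cq *mcq)
{
	if (refcount_dec_and_test(&mcq->refcount))
		complete(&mcq->free);	/* unblocks the CQ free path */
}
```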
|
| en_rx.c |
| 337 ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; in mlx4_en_activate_rx_rings() |
| 711 index = cq->mcq.cons_index & ring->size_mask; in mlx4_en_process_rx_cq() |
| 716 cq->mcq.cons_index & cq->size)) { in mlx4_en_process_rx_cq() |
| 932 ++cq->mcq.cons_index; in mlx4_en_process_rx_cq() |
| 933 index = (cq->mcq.cons_index) & ring->size_mask; in mlx4_en_process_rx_cq() |
| 948 mlx4_cq_set_ci(&cq->mcq); in mlx4_en_process_rx_cq() |
| 950 ring->cons = cq->mcq.cons_index; in mlx4_en_process_rx_cq() |
| 959 void mlx4_en_rx_irq(struct mlx4_cq *mcq) in mlx4_en_rx_irq() argument |
| 961 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); in mlx4_en_rx_irq() |
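Two invariants drive these RX loop hits: ring slots come from masking a free-running consumer index (ring sizes are powers of two), and hit 716's owner test compares the CQE ownership bit with the sweep-parity bit of that index. Hedged helpers capturing both:

```c
static inline u32 rx_slot(u32 cons_index, u32 size_mask)
{
	return cons_index & size_mask;	/* size_mask == ring size - 1 */
}

static inline bool cqe_is_sw_owned(u8 owner_sr_opcode, u32 cons_index,
				   u32 size)
{
	/* Expectation flips each time cons_index wraps the ring (XNOR). */
	return !!(owner_sr_opcode & MLX4_CQE_OWNER_MASK) ==
	       !!(cons_index & size);
}
```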
|
| en_tx.c |
| 428 struct mlx4_cq *mcq = &cq->mcq; in mlx4_en_process_tx_cq() local |
| 434 u32 cons_index = mcq->cons_index; in mlx4_en_process_tx_cq() |
| 514 mcq->cons_index = cons_index; in mlx4_en_process_tx_cq() |
| 515 mlx4_cq_set_ci(mcq); in mlx4_en_process_tx_cq() |
| 538 void mlx4_en_tx_irq(struct mlx4_cq *mcq) in mlx4_en_tx_irq() argument |
| 540 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); in mlx4_en_tx_irq() |
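The TX side reads the consumer index once, advances it per reclaimed CQE, and publishes it back through mlx4_cq_set_ci(). A condensed shape, with validity checking behind a hypothetical tx_cqe_is_valid() helper (the real loop inspects the CQE ownership bit inline):

```c
static void process_tx_cq_sketch(struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_cq *mcq = &cq->mcq;
	u32 cons_index = mcq->cons_index;	/* snapshot (hit 434) */
	int done = 0;

	while (done < budget && tx_cqe_is_valid(cq, cons_index)) {
		/* ... unmap and free the descriptors this CQE covers ... */
		++cons_index;
		++done;
	}

	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);	/* publish the new consumer index to HW */
}
```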
|
| /linux/drivers/vfio/pci/mlx5/ |
| cmd.c |
| 1089 mlx5_core_destroy_cq(mdev, &cq->mcq); in mlx5vf_destroy_cq() |
| 1094 static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) in mlx5vf_cq_event() argument |
| 1099 set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device, in mlx5vf_cq_event() |
| 1100 tracker.cq.mcq)); in mlx5vf_cq_event() |
| 1143 static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq, in mlx5vf_cq_complete() argument |
| 1147 container_of(mcq, struct mlx5vf_pci_core_device, in mlx5vf_cq_complete() |
| 1148 tracker.cq.mcq); in mlx5vf_cq_complete() |
| 1172 cq->mcq.set_ci_db = cq->db.db; in mlx5vf_create_cq() |
| 1173 cq->mcq.arm_db = cq->db.db + 1; in mlx5vf_create_cq() |
| 1174 cq->mcq.cqe_sz = cqe_size; in mlx5vf_create_cq() |
| [all …] |
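The vfio tracker resolves its device from the core CQ with container_of(), the standard callback pattern across this listing. A sketch of the event hook, with the event filter assumed rather than shown in the hits:

```c
static void tracker_cq_event_sketch(struct mlx5_core_cq *mcq,
				    enum mlx5_event type)
{
	struct mlx5vf_pci_core_device *mvdev =
		container_of(mcq, struct mlx5vf_pci_core_device,
			     tracker.cq.mcq);

	if (type == MLX5_EVENT_TYPE_CQ_ERROR)	/* assumed filter */
		set_tracker_error(mvdev);	/* driver-local helper (hit 1099) */
}
```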
|
| cmd.h |
| 126 struct mlx5_core_cq mcq; member |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
| send.c |
| 756 MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn); in hws_send_ring_create_sq() |
| 882 struct mlx5_core_cq *mcq = &cq->mcq; in hws_send_ring_alloc_cq() local |
| 895 mcq->cqe_sz = 64; in hws_send_ring_alloc_cq() |
| 896 mcq->set_ci_db = cq->wq_ctrl.db.db; in hws_send_ring_alloc_cq() |
| 897 mcq->arm_db = cq->wq_ctrl.db.db + 1; in hws_send_ring_alloc_cq() |
| 915 struct mlx5_core_cq *mcq = &cq->mcq; in hws_send_ring_create_cq() local |
| 940 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); in hws_send_ring_create_cq() |
| 983 mlx5_core_destroy_cq(cq->mdev, &cq->mcq); in hws_send_ring_close_cq() |
|
| debug.c |
| 253 cq->mcq.cqn, in hws_debug_dump_context_send_engine() |
| 260 cq->mcq.cqe_sz, in hws_debug_dump_context_send_engine() |
|
| /linux/drivers/ufs/core/ |
| Makefile |
| 4 ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o |
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/ |
| dr_send.c |
| 1086 cq->mcq.cqe_sz = 64; in dr_create_cq() |
| 1087 cq->mcq.set_ci_db = cq->wq_ctrl.db.db; in dr_create_cq() |
| 1088 cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; in dr_create_cq() |
| 1089 *cq->mcq.set_ci_db = 0; in dr_create_cq() |
| 1090 cq->mcq.vector = 0; in dr_create_cq() |
| 1116 err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); in dr_create_cq() |
| 1133 mlx5_core_destroy_cq(mdev, &cq->mcq); in dr_destroy_cq() |
| 1222 init_attr.cqn = dmn->send_ring->cq->mcq.cqn; in mlx5dr_send_ring_alloc() |
|
| dr_types.h |
| 1441 struct mlx5_core_cq mcq; member |
|
| /linux/drivers/vdpa/mlx5/net/ |
| mlx5_vnet.c |
| 71 struct mlx5_core_cq mcq; member |
| 400 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare() |
| 496 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw() |
| 507 vcq->mcq.cons_index++; in mlx5_vdpa_poll_one() |
| 517 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions() |
| 528 static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) in mlx5_vdpa_cq_comp() argument |
| 530 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() |
| 552 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp() |
| 573 vcq->mcq.set_ci_db = vcq->db.db; in cq_create() |
| 574 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create() |
| [all …] |
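The vDPA hits combine everything above into one completion path: poll all software-owned CQEs (next_cqe_sw() plays the same ownership game as the IB driver), publish the consumer index, then re-arm. A hedged sketch, with buffer forwarding elided and uar_page coming from driver resources in the real code; the mlx5_vdpa_cq wrapper type is assumed from hit 71:

```c
static void vdpa_handle_completions_sketch(struct mlx5_vdpa_virtqueue *mvq,
					   void __iomem *uar_page)
{
	struct mlx5_vdpa_cq *vcq = &mvq->cq;

	while (next_cqe_sw(vcq)) {
		vcq->mcq.cons_index++;	/* consume one CQE (hit 507) */
		/* ... report the used buffer to the virtio ring ... */
	}

	mlx5_cq_set_ci(&vcq->mcq);	/* publish consumer index (hit 517) */
	mlx5_cq_arm(&vcq->mcq, MLX5_CQ_DB_REQ_NOT, uar_page,
		    vcq->mcq.cons_index);	/* request next event (hit 552) */
}
```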
|
| /linux/drivers/infiniband/hw/mlx4/ |
| srq.c |
| 181 to_mcq(init_attr->ext.cq)->mcq.cqn : 0; in mlx4_ib_create_srq() |
|