/freebsd/sys/dev/mlx5/mlx5_ib/

mlx5_ib_cq.c
      43   static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)   in mlx5_ib_cq_event() argument
      45   struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);   in mlx5_ib_cq_event()
      52   type, mcq->cqn);   in mlx5_ib_cq_event()
      71   return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);   in get_cqe()
      84   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;   in get_sw_cqe()
      96   return get_sw_cqe(cq, cq->mcq.cons_index);   in next_cqe_sw()
     538   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;   in mlx5_poll_one()
     540   ++cq->mcq.cons_index;   in mlx5_poll_one()
     597   "Requestor" : "Responder", cq->mcq   in mlx5_poll_one()
    1020   struct mlx5_ib_cq *mcq = to_mcq(cq);   mlx5_ib_destroy_cq() local
    1098   struct mlx5_ib_cq *mcq = to_mcq(cq);   mlx5_ib_modify_cq() local
    [all …]

mlx5_ib_qp.c
    1802   MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);   in create_qp_common()
    1803   MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);   in create_qp_common()
    1808   MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);   in create_qp_common()
    1823   MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);   in create_qp_common()
    1826   MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);   in create_qp_common()
    1893   if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx5_ib_lock_cqs()
    1897   } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {   in mlx5_ib_lock_cqs()
    1923   if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx5_ib_unlock_cqs()
    1926   } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {   in mlx5_ib_unlock_cqs()
    2167   init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,   in mlx5_ib_create_qp()
    [all …]
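The mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() matches above compare the two CQ numbers before taking any locks. The sketch below is a minimal userspace illustration of that lock-ordering rule (always lock the lower-numbered CQ first, and take only one lock when send and receive share a CQ); the toy_* names are stand-ins, not the driver's, and pthread mutexes replace the kernel spinlocks.

#include <pthread.h>

struct toy_cq {                         /* stand-in for the driver CQ */
	unsigned int cqn;               /* hardware CQ number */
	pthread_mutex_t lock;
};

/* Lock two CQs in a globally consistent order so that two threads
 * locking the same pair can never deadlock. */
static void toy_lock_cq_pair(struct toy_cq *send_cq, struct toy_cq *recv_cq)
{
	if (send_cq->cqn < recv_cq->cqn) {
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else if (send_cq->cqn == recv_cq->cqn) {
		pthread_mutex_lock(&send_cq->lock);     /* same CQ: one lock */
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

static void toy_unlock_cq_pair(struct toy_cq *send_cq, struct toy_cq *recv_cq)
{
	pthread_mutex_unlock(&send_cq->lock);
	if (send_cq->cqn != recv_cq->cqn)
		pthread_mutex_unlock(&recv_cq->lock);
}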
mlx5_ib.h
     468   struct mlx5_core_cq mcq;   member
     795   static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)   in to_mibcq() argument
     797   return container_of(mcq, struct mlx5_ib_cq, mcq);   in to_mibcq()
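The mlx5_ib.h matches show the layout trick behind most of the hits in this listing: the driver-private CQ embeds the core CQ as its mcq member, and to_mibcq() recovers the wrapper from a pointer to that member with container_of(). Below is a small, self-contained userspace version of the same idiom; the struct names are reduced stand-ins and container_of() is defined locally.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_cq {                        /* stand-in for struct mlx5_core_cq */
	unsigned int cqn;
};

struct ib_cq_wrapper {                  /* stand-in for struct mlx5_ib_cq */
	int driver_state;
	struct core_cq mcq;             /* embedded core CQ, as in mlx5_ib.h */
};

static struct ib_cq_wrapper *to_wrapper(struct core_cq *mcq)
{
	return container_of(mcq, struct ib_cq_wrapper, mcq);
}

int main(void)
{
	struct ib_cq_wrapper cq = { .driver_state = 7, .mcq = { .cqn = 42 } };

	/* The core layer only ever hands back &cq.mcq (e.g. in completion
	 * or event callbacks); the driver recovers its own structure from
	 * that pointer. */
	struct ib_cq_wrapper *back = to_wrapper(&cq.mcq);

	printf("cqn=%u state=%d\n", back->mcq.cqn, back->driver_state);
	return 0;
}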
mlx5_ib_main.c
    2602   struct mlx5_core_cq *mcq;   in mlx5_ib_handle_internal_error() local
    2617   if (send_mcq->mcq.comp &&   in mlx5_ib_handle_internal_error()
    2619   if (!send_mcq->mcq.reset_notify_added) {   in mlx5_ib_handle_internal_error()
    2620   send_mcq->mcq.reset_notify_added = 1;   in mlx5_ib_handle_internal_error()
    2621   list_add_tail(&send_mcq->mcq.reset_notify,   in mlx5_ib_handle_internal_error()
    2634   if (recv_mcq->mcq.comp &&   in mlx5_ib_handle_internal_error()
    2636   if (!recv_mcq->mcq.reset_notify_added) {   in mlx5_ib_handle_internal_error()
    2637   recv_mcq->mcq.reset_notify_added = 1;   in mlx5_ib_handle_internal_error()
    2638   list_add_tail(&recv_mcq->mcq.reset_notify,   in mlx5_ib_handle_internal_error()
    2651   list_for_each_entry(mcq, &cq_armed_list, reset_notify) {   in mlx5_ib_handle_internal_error()
    [all …]
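mlx5_ib_handle_internal_error() (and its mlx4 counterpart further down) follows a two-pass pattern: walk all QPs, queue each attached CQ at most once on a notify list (guarded by reset_notify_added), then invoke every queued CQ's completion handler so consumers observe the error. A compilable sketch of that idea, with a plain array standing in for the kernel's intrusive list and invented toy_* names:

#include <stddef.h>
#include <stdio.h>

struct toy_cq {
	void (*comp)(struct toy_cq *cq);        /* completion callback */
	int reset_notify_added;                 /* already queued? */
};

static void toy_comp(struct toy_cq *cq)
{
	printf("flushing CQ %p\n", (void *)cq);
}

/* cqs[] is the set of send/recv CQs reachable from all QPs; the same CQ
 * may appear many times, but is flushed exactly once. */
static void toy_handle_internal_error(struct toy_cq **cqs, size_t n)
{
	struct toy_cq *armed[16];
	size_t armed_n = 0;

	for (size_t i = 0; i < n && armed_n < 16; i++) {
		struct toy_cq *cq = cqs[i];

		if (cq->comp && !cq->reset_notify_added) {
			cq->reset_notify_added = 1;     /* queue only once */
			armed[armed_n++] = cq;
		}
	}
	for (size_t i = 0; i < armed_n; i++)
		armed[i]->comp(armed[i]);               /* notify consumers */
}

int main(void)
{
	struct toy_cq a = { toy_comp, 0 }, b = { toy_comp, 0 };
	struct toy_cq *qp_cqs[] = { &a, &b, &a };       /* &a attached twice */

	toy_handle_internal_error(qp_cqs, 3);           /* flushes a and b once each */
	return 0;
}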
mlx5_ib_srq.c
     304   in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;   in mlx5_ib_create_srq()
     306   in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;   in mlx5_ib_create_srq()

/freebsd/sys/dev/mlx4/mlx4_ib/

mlx4_ib_cq.c
      91   return get_sw_cqe(cq, cq->mcq.cons_index);   in next_cqe_sw()
      96   struct mlx4_ib_cq *mcq = to_mcq(cq);   in mlx4_ib_modify_cq() local
      99   return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);   in mlx4_ib_modify_cq()
     226   cq->mcq.set_ci_db = cq->db.db;   in mlx4_ib_create_cq()
     227   cq->mcq.arm_db = cq->db.db + 1;   in mlx4_ib_create_cq()
     228   *cq->mcq.set_ci_db = 0;   in mlx4_ib_create_cq()
     229   *cq->mcq.arm_db = 0;   in mlx4_ib_create_cq()
     242   cq->db.dma, &cq->mcq, vector, 0,   in mlx4_ib_create_cq()
     247   cq->mcq.comp = mlx4_ib_cq_comp;   in mlx4_ib_create_cq()
     248   cq->mcq.event = mlx4_ib_cq_event;   in mlx4_ib_create_cq()
     [all …]
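mlx4_ib_create_cq() above carves one doorbell record into two consecutive 32-bit words: the first becomes set_ci_db (consumer-index doorbell) and the second arm_db, and both are zeroed before the CQ is handed to the device. A rough userspace sketch of that split, with calloc() standing in for the driver's DMA-coherent doorbell allocation and toy_* names that are not the driver's:

#include <stdint.h>
#include <stdlib.h>

struct toy_mcq {
	volatile uint32_t *set_ci_db;   /* consumer-index doorbell word */
	volatile uint32_t *arm_db;      /* arm-request doorbell word */
};

static int toy_cq_init_doorbells(struct toy_mcq *mcq)
{
	/* Stand-in for the DMA doorbell record (two consecutive u32s). */
	uint32_t *db = calloc(2, sizeof(*db));

	if (db == NULL)
		return -1;

	mcq->set_ci_db = &db[0];        /* cq->db.db     */
	mcq->arm_db    = &db[1];        /* cq->db.db + 1 */
	*mcq->set_ci_db = 0;            /* start with a clean record */
	*mcq->arm_db    = 0;
	return 0;
}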
mlx4_ib.h
     118   struct mlx4_cq mcq;   member
     654   static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)   in to_mibcq() argument
     656   return container_of(mcq, struct mlx4_ib_cq, mcq);   in to_mibcq()

mlx4_ib_main.c
    2951   struct mlx4_cq *mcq;   in mlx4_ib_handle_catas_error() local
    2965   if (send_mcq->mcq.comp &&   in mlx4_ib_handle_catas_error()
    2967   if (!send_mcq->mcq.reset_notify_added) {   in mlx4_ib_handle_catas_error()
    2968   send_mcq->mcq.reset_notify_added = 1;   in mlx4_ib_handle_catas_error()
    2969   list_add_tail(&send_mcq->mcq.reset_notify,   in mlx4_ib_handle_catas_error()
    2983   if (recv_mcq->mcq.comp &&   in mlx4_ib_handle_catas_error()
    2985   if (!recv_mcq->mcq.reset_notify_added) {   in mlx4_ib_handle_catas_error()
    2986   recv_mcq->mcq.reset_notify_added = 1;   in mlx4_ib_handle_catas_error()
    2987   list_add_tail(&recv_mcq->mcq.reset_notify,   in mlx4_ib_handle_catas_error()
    2998   list_for_each_entry(mcq, &cq_notify_list, reset_notify) {   in mlx4_ib_handle_catas_error()
    [all …]

mlx4_ib_qp.c
     653   struct mlx4_ib_cq *mcq;   in create_qp_common() local
     897   mcq = to_mcq(init_attr->send_cq);   in create_qp_common()
     898   list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);   in create_qp_common()
     899   mcq = to_mcq(init_attr->recv_cq);   in create_qp_common()
     900   list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);   in create_qp_common()
     964   } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx4_ib_lock_cqs()
     979   } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {   in mlx4_ib_unlock_cqs()
    1847   context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);   in __mlx4_ib_modify_qp()
    1848   context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);   in __mlx4_ib_modify_qp()

/freebsd/sys/dev/mlx4/mlx4_en/

mlx4_en_cq.c
     122   cq->mcq.set_ci_db = cq->wqres.db.db;   in mlx4_en_activate_cq()
     123   cq->mcq.arm_db = cq->wqres.db.db + 1;   in mlx4_en_activate_cq()
     124   *cq->mcq.set_ci_db = 0;   in mlx4_en_activate_cq()
     125   *cq->mcq.arm_db = 0;   in mlx4_en_activate_cq()
     158   &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,   in mlx4_en_activate_cq()
     163   cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;   in mlx4_en_activate_cq()
     164   cq->mcq.event = mlx4_en_cq_event;   in mlx4_en_activate_cq()
     210   mlx4_cq_free(priv->mdev->dev, &cq->mcq);   in mlx4_en_deactivate_cq()
     216   return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,   in mlx4_en_set_cq_moder()
     222   mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,   in mlx4_en_arm_cq()

mlx4_en_rx.c
     478   ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;   in mlx4_en_activate_rx_rings()
     746   struct mlx4_cq *mcq = &cq->mcq;   in mlx4_en_process_rx_cq() local
     751   u32 cons_index = mcq->cons_index;   in mlx4_en_process_rx_cq()
     856   mcq->cons_index = cons_index;   in mlx4_en_process_rx_cq()
     857   mlx4_cq_set_ci(mcq);   in mlx4_en_process_rx_cq()
     859   ring->cons = mcq->cons_index;   in mlx4_en_process_rx_cq()
     880   void mlx4_en_rx_irq(struct mlx4_cq *mcq)   in mlx4_en_rx_irq() argument
     882   struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);   in mlx4_en_rx_irq()
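mlx4_en_process_rx_cq() (and the Tx variant below) copies mcq->cons_index into a local, consumes CQEs against that local copy, and only writes the index back and calls mlx4_cq_set_ci() once the batch is done, so the hardware-visible doorbell is updated once per poll rather than once per CQE. A simplified, compilable sketch of that bookkeeping, with invented toy_* names and the CQE parsing elided:

#include <stdint.h>
#include <stdio.h>

struct toy_mcq {
	uint32_t cons_index;            /* software consumer index */
	volatile uint32_t *set_ci_db;   /* doorbell word seen by "hardware" */
};

/* Stand-in for next_cqe_sw(): pretend four CQEs are pending. */
static int toy_next_cqe_sw(uint32_t idx)
{
	return idx < 4;
}

static void toy_cq_set_ci(struct toy_mcq *mcq)
{
	/* The real driver byte-swaps and masks; a plain store is enough here. */
	*mcq->set_ci_db = mcq->cons_index & 0xffffff;
}

static void toy_process_cq(struct toy_mcq *mcq)
{
	uint32_t cons_index = mcq->cons_index;

	while (toy_next_cqe_sw(cons_index)) {
		/* ... handle the CQE at cons_index ... */
		++cons_index;
	}
	mcq->cons_index = cons_index;
	toy_cq_set_ci(mcq);             /* one doorbell write per batch */
}

int main(void)
{
	uint32_t db = 0;
	struct toy_mcq mcq = { .cons_index = 0, .set_ci_db = &db };

	toy_process_cq(&mcq);
	printf("consumed up to %u, doorbell=%u\n", mcq.cons_index, db);
	return 0;
}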
mlx4_en_tx.c
     351   struct mlx4_cq *mcq = &cq->mcq;   in mlx4_en_process_tx_cq() local
     358   u32 cons_index = mcq->cons_index;   in mlx4_en_process_tx_cq()
     415   mcq->cons_index = cons_index;   in mlx4_en_process_tx_cq()
     416   mlx4_cq_set_ci(mcq);   in mlx4_en_process_tx_cq()
     423   void mlx4_en_tx_irq(struct mlx4_cq *mcq)   in mlx4_en_tx_irq() argument
     425   struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);   in mlx4_en_tx_irq()

en.h
     391   struct mlx4_cq mcq;   member
     808   void mlx4_en_tx_irq(struct mlx4_cq *mcq);
     855   void mlx4_en_rx_irq(struct mlx4_cq *mcq);

/freebsd/sys/dev/mlx5/mlx5_en/

mlx5_en_txrx.c
      50   mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event)   in mlx5e_cq_error_event() argument
      52   struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);   in mlx5e_cq_error_event()
      55   mcq->cqn, event);   in mlx5e_cq_error_event()
      74   cq->mcq.cqn, ci, qn, err_cqe->op_own >> 4,   in mlx5e_dump_err_cqe()

mlx5_en_iq.c
      93   mlx5e_iq_completion(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)   in mlx5e_iq_completion() argument
      95   struct mlx5e_iq *iq = container_of(mcq, struct mlx5e_iq, cq.mcq);   in mlx5e_iq_completion()
     260   MLX5_SET(sqc, sqc, cqn, iq->cq.mcq.cqn);   in mlx5e_iq_enable()
     392   iq->cq.mcq.comp(&iq->cq.mcq, NULL);   in mlx5e_iq_drain()
     407   iq->cq.mcq.comp(&iq->cq.mcq, NULL);   in mlx5e_iq_drain()

mlx5_en_main.c
    1436   MLX5_SET(rqc, param->rqc, cqn, c->rq.cq.mcq.cqn);   in mlx5e_open_rq()
    1495   mlx5e_open_drop_rq_comp(struct mlx5_core_cq *mcq __unused, struct mlx5_eqe *eqe __unused)   in mlx5e_open_drop_rq_comp()
    1536   MLX5_SET(rqc, param_rq.rqc, cqn, drop_rq->cq.mcq.cqn);   in mlx5e_open_drop_rq()
    1760   MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);   in mlx5e_enable_sq()
    1945   sq->cq.mcq.comp(&sq->cq.mcq, NULL);   in mlx5e_drain_sq()
    1964   sq->cq.mcq.comp(&sq->cq.mcq, NULL);   in mlx5e_drain_sq()
    1987   struct mlx5_core_cq *mcq = &cq->mcq;   in mlx5e_create_cq() local
    2002   mcq->cqe_sz = 64;   in mlx5e_create_cq()
    2003   mcq->set_ci_db = cq->wq_ctrl.db.db;   in mlx5e_create_cq()
    2004   mcq->arm_db = cq->wq_ctrl.db.db + 1;   in mlx5e_create_cq()
    [all …]
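Several of the mlx5_en matches (mlx5e_drain_sq() and mlx5e_iq_drain() above, mlx5e_rl_open_channel() further down) call the CQ's comp handler directly, as in sq->cq.mcq.comp(&sq->cq.mcq, NULL): during teardown, when no interrupt can be relied on, the driver polls by invoking the same completion routine the event queue would normally call. A tiny sketch of that polling-by-callback idea, with invented names and a trivial completion counter standing in for real CQE processing:

#include <stdio.h>

struct toy_cq;
typedef void (*toy_comp_fn)(struct toy_cq *cq, void *eqe);

struct toy_cq {
	toy_comp_fn comp;       /* normally run from the interrupt path */
	int pending;            /* completions not yet reaped */
};

static void toy_cq_comp(struct toy_cq *cq, void *eqe)
{
	(void)eqe;              /* NULL when called from the drain path */
	cq->pending = 0;        /* pretend we reaped everything */
}

/* Drain path: no interrupt will fire, so call the handler ourselves
 * until the queue is empty. */
static void toy_drain(struct toy_cq *cq)
{
	while (cq->pending)
		cq->comp(cq, NULL);
}

int main(void)
{
	struct toy_cq cq = { .comp = toy_cq_comp, .pending = 3 };

	toy_drain(&cq);
	printf("pending after drain: %d\n", cq.pending);
	return 0;
}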
mlx5_en_rx.c
     667   mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)   in mlx5e_rx_cq_comp() argument
     669   struct mlx5e_channel *c = container_of(mcq, struct mlx5e_channel, rq.cq.mcq);   in mlx5e_rx_cq_comp()
     670   struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);   in mlx5e_rx_cq_comp()

en.h
     742   struct mlx5_core_cq mcq;   member
    1208   void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
    1264   struct mlx5_core_cq *mcq;   in mlx5e_cq_arm() local
    1266   mcq = &cq->mcq;   in mlx5e_cq_arm()
    1267   mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, dblock, cq->wq.cc);   in mlx5e_cq_arm()
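The mlx5e_cq_arm() inline above passes the current consumer counter (cq->wq.cc) and MLX5_CQ_DB_REQ_NOT to mlx5_cq_arm(); that is how the driver asks for an event only for CQEs newer than what it has already processed. The sketch below captures the arming idea conceptually; the doorbell encoding and all names are deliberately toy ones, not the mlx5 hardware format.

#include <stdint.h>

#define TOY_CQ_DB_REQ_NOT 0x1u          /* illustrative command, not the HW value */

struct toy_cq {
	volatile uint32_t *arm_db;      /* arm word of the doorbell record */
	volatile uint64_t *uar_map;     /* stand-in for the device UAR register */
	uint32_t cqn;                   /* CQ number */
	uint32_t cc;                    /* consumer counter processed so far */
};

static void toy_cq_arm(struct toy_cq *cq)
{
	/* Toy encoding: command in the top bits, consumer counter below. */
	uint32_t db_val = (TOY_CQ_DB_REQ_NOT << 28) | (cq->cc & 0x00ffffffu);

	*cq->arm_db = db_val;           /* 1) doorbell record in host memory */

	/* 2) ring the device doorbell; the real driver puts a write barrier
	 *    between the two stores and uses the HW-defined layout. */
	*cq->uar_map = ((uint64_t)db_val << 32) | cq->cqn;
}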
mlx5_en_dim.c
      91   mlx5_core_modify_cq_moderation(c->priv->mdev, &rq->cq.mcq,   in mlx5e_dim_work()

mlx5_en_rl.c
     270   sq->cq.mcq.comp(&sq->cq.mcq, NULL);   in mlx5e_rl_open_channel()
    1335   mlx5_core_modify_cq_moderation_mode(rl->priv->mdev, &sq->cq.mcq,   in mlx5e_rl_refresh_channel_params()
    1340   mlx5_core_modify_cq_moderation(rl->priv->mdev, &sq->cq.mcq,   in mlx5e_rl_refresh_channel_params()

mlx5_en_tx.c
    1172   mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)   in mlx5e_tx_cq_comp()
    1174   struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);   in mlx5e_tx_cq_comp()
    1169   mlx5e_tx_cq_comp(struct mlx5_core_cq * mcq,struct mlx5_eqe * eqe __unused)   mlx5e_tx_cq_comp() argument

/freebsd/sys/dev/mlx5/mlx5_fpga/

mlx5fpga_conn.c
     359   mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,   in mlx5_fpga_conn_arm_cq()
     363   static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,   in mlx5_fpga_conn_cq_event() argument
     368   conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);   in mlx5_fpga_conn_cq_event()
     369   mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);   in mlx5_fpga_conn_cq_event()
     415   static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)   in mlx5_fpga_conn_cq_complete() argument
     419   conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);   in mlx5_fpga_conn_cq_complete()
     477   err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);   in mlx5_fpga_conn_create_cq()
     483   conn->cq.mcq.cqe_sz = 64;   in mlx5_fpga_conn_create_cq()
     484   conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;   in mlx5_fpga_conn_create_cq()
     485   conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;   in mlx5_fpga_conn_create_cq()
     [all …]

conn.h
      57   struct mlx5_core_cq mcq;   member

/freebsd/contrib/ofed/libmlx4/

srq.c
     310   struct mlx4_cq *mcq;   in mlx4_destroy_xrc_srq() local
     313   mcq = to_mcq(msrq->verbs_srq.cq);   in mlx4_destroy_xrc_srq()
     314   mlx4_cq_clean(mcq, 0, msrq);   in mlx4_destroy_xrc_srq()
     315   pthread_spin_lock(&mcq->lock);   in mlx4_destroy_xrc_srq()
     317   pthread_spin_unlock(&mcq->lock);   in mlx4_destroy_xrc_srq()
     321   pthread_spin_lock(&mcq->lock);   in mlx4_destroy_xrc_srq()
     323   pthread_spin_unlock(&mcq->lock);   in mlx4_destroy_xrc_srq()
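mlx4_destroy_xrc_srq() above cleans the completion queue before tearing the SRQ down: mlx4_cq_clean(mcq, 0, msrq) removes CQEs that still reference the SRQ, and the remaining bookkeeping is done under the CQ spinlock. Below is a loose userspace sketch of that "purge matching CQEs under the CQ lock, then release the resource" ordering; the structures and names are invented and a fixed array stands in for the real CQE ring.

#include <pthread.h>
#include <stddef.h>

struct toy_cqe {
	unsigned int srqn;      /* which SRQ produced this completion */
	int valid;
};

struct toy_cq {
	pthread_spinlock_t lock;
	struct toy_cqe cqes[64];
};

/* Drop every CQE that refers to the SRQ being destroyed, so nobody can
 * later poll a completion for a resource that no longer exists. */
static void toy_cq_clean(struct toy_cq *cq, unsigned int srqn)
{
	pthread_spin_lock(&cq->lock);
	for (size_t i = 0; i < 64; i++) {
		if (cq->cqes[i].valid && cq->cqes[i].srqn == srqn)
			cq->cqes[i].valid = 0;
	}
	pthread_spin_unlock(&cq->lock);
}

static void toy_destroy_srq(struct toy_cq *cq, unsigned int srqn)
{
	toy_cq_clean(cq, srqn); /* 1) purge stale completions */
	/* 2) ... then release the SRQ number and its buffers ... */
}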
/freebsd/contrib/ofed/libmlx5/

mlx5.c
     679   struct mlx5_cq *mcq = to_mcq(cq_in);   in mlx5dv_get_cq() local
     683   cq_out->cqn = mcq->cqn;   in mlx5dv_get_cq()
     684   cq_out->cqe_cnt = mcq->ibv_cq.cqe + 1;   in mlx5dv_get_cq()
     685   cq_out->cqe_size = mcq->cqe_sz;   in mlx5dv_get_cq()
     686   cq_out->buf = mcq->active_buf->buf;   in mlx5dv_get_cq()
     687   cq_out->dbrec = mcq->dbrec;   in mlx5dv_get_cq()
     690   mcq->flags |= MLX5_CQ_FLAGS_DV_OWNED;   in mlx5dv_get_cq()