Lines matching refs:mcq (each hit below is prefixed with its line number in the searched source file)
43 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)
45 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
52 type, mcq->cqn);
71 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
96 return get_sw_cqe(cq, cq->mcq.cons_index);
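The hits at lines 71 and 84 show how a CQE is located: the buffer lookup indexes a slot in the ring, and when the ring uses 128-byte entries the 64-byte CQE written by hardware sits in the second half of the slot. A minimal standalone sketch of that arithmetic, with illustrative types and simplified signatures (not the driver's own structures):

#include <stdint.h>
#include <stddef.h>

struct cq_buf {
	uint8_t *base;   /* start of the CQE ring */
	int      cqe_sz; /* bytes per ring entry: 64 or 128 */
	int      nent;   /* entries in the ring, a power of two */
};

/* Slot n maps onto the ring by masking with (nent - 1). */
static void *get_cqe_from_ring(struct cq_buf *buf, int n)
{
	return buf->base + (size_t)(n & (buf->nent - 1)) * buf->cqe_sz;
}

/* With 128-byte entries the hardware-written 64-byte CQE occupies the
 * second half of the slot, hence the "cqe + 64" seen in the matches. */
static void *get_cqe64(struct cq_buf *buf, int n)
{
	uint8_t *cqe = get_cqe_from_ring(buf, n);
	return (buf->cqe_sz == 64) ? cqe : cqe + 64;
}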
538 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
540 ++cq->mcq.cons_index;
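The poll-path hits at 538-540 (and the next_cqe_sw lookup at 96) depend on the CQE ownership convention used by mlx5-style rings: the hardware toggles the owner bit each pass around the ring, so a CQE is software-owned when its owner bit matches the parity of the consumer index's wrap count, and the consumer index only ever increments. A hedged sketch of that test; the mask value and names are assumptions, not taken from the listing:

#include <stdint.h>
#include <stdbool.h>

#define CQE_OWNER_MASK 0x1   /* illustrative stand-in for the real owner-bit mask */

/* nent is the ring size (a power of two); cons_index is the running
 * consumer index, incremented once per polled CQE and never masked. */
static bool sw_owns_cqe(uint8_t op_own, uint32_t cons_index, uint32_t nent)
{
	uint32_t sw_parity = !!(cons_index & nent);     /* flips on every wrap */
	return (op_own & CQE_OWNER_MASK) == sw_parity;  /* equal => ours to poll */
}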
597 "Requestor" : "Responder", cq->mcq.cqn);
633 cq->mcq.cqn, mr->sig->err_item.key,
658 cq->mcq.cqn);
693 mlx5_cq_set_ci(&cq->mcq);
719 mlx5_cq_arm(&cq->mcq,
724 cq->mcq.cons_index);
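The hits at 693 and 719-724 are the two doorbell operations on the poll/notify path: mlx5_cq_set_ci publishes the new consumer index, and mlx5_cq_arm re-arms the CQ using the consumer index it has seen. A sketch of how such an arm value is commonly packed on mlx5-style hardware; the field layout here is an assumption, not taken from the lines above:

#include <stdint.h>

/* Pack an arm doorbell word: 2-bit arm sequence number in the top nibble,
 * the arm command bits, and the low 24 bits of the consumer index. */
static uint32_t pack_cq_arm_value(uint32_t arm_sn, uint32_t cmd, uint32_t cons_index)
{
	uint32_t sn = arm_sn & 3;
	uint32_t ci = cons_index & 0xffffff;

	return (sn << 28) | cmd | ci;
}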
865 cq->mcq.set_ci_db = cq->db.db;
866 cq->mcq.arm_db = cq->db.db + 1;
867 cq->mcq.cqe_sz = cqe_size;
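Lines 865-867 set up the kernel CQ's doorbell record: two consecutive 32-bit words in coherent memory, the first for the published consumer index ("set CI") and the second for the arm state. A tiny sketch of that wiring with illustrative names:

#include <stdint.h>

/* Word 0 holds the last consumer index published to hardware,
 * word 1 holds the arm state; pointer names mirror the matches above. */
static void wire_cq_doorbell(uint32_t *db,
			     volatile uint32_t **set_ci_db,
			     volatile uint32_t **arm_db)
{
	*set_ci_db = db;       /* cq->mcq.set_ci_db = cq->db.db     */
	*arm_db    = db + 1;   /* cq->mcq.arm_db    = cq->db.db + 1 */
}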
984 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
988 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
989 cq->mcq.irqn = irqn;
990 cq->mcq.comp = mlx5_ib_cq_comp;
991 cq->mcq.event = mlx5_ib_cq_event;
996 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
1006 mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
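The creation hits at 984-1006 show the usual shape: create the core CQ, record its number and IRQ, point the completion and async-event hooks at the driver's handlers, report the CQ number to user space, and destroy the core CQ again if that report fails. A stand-in sketch of just the wiring step; every name here is hypothetical, not the driver's:

#include <stdint.h>

struct core_cq {
	uint32_t cqn;                                /* CQ number assigned by firmware */
	uint32_t irqn;                               /* completion IRQ */
	void (*comp)(struct core_cq *cq);            /* completion interrupt hook */
	void (*event)(struct core_cq *cq, int type); /* async error event hook */
};

static void cq_comp_handler(struct core_cq *cq)            { (void)cq; }
static void cq_event_handler(struct core_cq *cq, int type) { (void)cq; (void)type; }

static void wire_cq_callbacks(struct core_cq *cq, uint32_t irqn)
{
	cq->irqn  = irqn;
	cq->comp  = cq_comp_handler;   /* mirrors cq->mcq.comp  = mlx5_ib_cq_comp  */
	cq->event = cq_event_handler;  /* mirrors cq->mcq.event = mlx5_ib_cq_event */
}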
1020 struct mlx5_ib_cq *mcq = to_mcq(cq);
1022 mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
1024 destroy_cq_user(mcq, udata);
1026 destroy_cq_kernel(dev, mcq);
1051 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1052 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1058 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1060 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1067 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1069 memcpy(dest, cqe, cq->mcq.cqe_sz);
1076 cq->mcq.cons_index += nfreed;
1081 mlx5_cq_set_ci(&cq->mcq);
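The hits at 1051-1081 are the CQ-clean sweep used when a QP is torn down: walk software-owned CQEs forward from the consumer index to find the current producer index, then sweep backward dropping CQEs that belong to the dying QP and sliding the survivors over the freed slots, and finally advance the consumer index past the removed entries and ring the set-CI doorbell. A simplified model of the backward sweep over plain integers; an assumption-heavy sketch, not the driver code:

#include <stdint.h>

/* ring[] holds one "owner QP number" per CQE slot; mask = ring size - 1. */
static void clean_cq(uint32_t *ring, uint32_t mask,
		     uint32_t *cons_index, uint32_t prod_index,
		     uint32_t dead_qpn)
{
	uint32_t nfreed = 0;
	uint32_t i = prod_index;

	/* Sweep backward from the newest entry toward the consumer index. */
	while ((int)--i - (int)*cons_index >= 0) {
		if (ring[i & mask] == dead_qpn) {
			nfreed++;                            /* drop this CQE */
		} else if (nfreed) {
			/* Slide the survivor forward over the freed slots. */
			ring[(i + nfreed) & mask] = ring[i & mask];
		}
	}

	if (nfreed)
		*cons_index += nfreed;   /* consumer skips the removed entries */
}

Sweeping backward lets surviving entries be copied on top of the slots vacated by the removed ones in place, without any extra buffering.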
1098 struct mlx5_ib_cq *mcq = to_mcq(cq);
1104 err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
1107 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1203 i = cq->mcq.cons_index;
1231 cq->mcq.cqn);
1235 ++cq->mcq.cons_index;
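The resize-copy hits at 1203-1235 walk software-owned CQEs starting at the consumer index, copying each one from the old ring into the resize buffer until the special resize-CQ CQE is found, then bump the consumer index; the warning at 1231 fires if the walk wraps without ever finding that marker. A deliberately simplified model of that walk, with destination indexing and owner-bit fixups glossed over and all names illustrative:

#include <stdint.h>

#define RESIZE_MARKER 0xffffffffu   /* stand-in for the resize-CQ opcode */

/* Copy slots from old_ring to new_ring until the resize marker is seen.
 * Returns 0 on success, -1 if the marker is never found (wrapped the ring). */
static int copy_resize_slots(const uint32_t *old_ring, uint32_t old_mask,
			     uint32_t *new_ring, uint32_t new_mask,
			     uint32_t *cons_index)
{
	uint32_t i = *cons_index;
	uint32_t copied = 0;

	while (old_ring[i & old_mask] != RESIZE_MARKER) {
		new_ring[i & new_mask] = old_ring[i & old_mask];
		++i;
		if (++copied > old_mask + 1)
			return -1;      /* went all the way around: bail out */
	}

	++*cons_index;                  /* step the consumer index forward once */
	return 0;
}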
1319 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1321 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);