Lines Matching defs:qp
12 void erdma_qp_llp_close(struct erdma_qp *qp)
16 down_write(&qp->state_lock);
18 switch (qp->attrs.iwarp.state) {
24 erdma_modify_qp_state_iwarp(qp, &params, ERDMA_QPA_IWARP_STATE);
27 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
33 if (qp->cep) {
34 erdma_cep_put(qp->cep);
35 qp->cep = NULL;
38 up_write(&qp->state_lock);
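
The close path above serializes its state change behind qp->state_lock taken for write. A minimal userspace sketch of the same pattern, using a pthread rwlock in place of the kernel rwsem (struct my_qp and the QPS_* values are stand-ins, not the driver's types):

#include <pthread.h>

enum qp_state { QPS_IDLE, QPS_RTS, QPS_ERROR };

struct my_qp {
	pthread_rwlock_t state_lock;
	enum qp_state state;
};

/* Like erdma_qp_llp_close(): the transport died, so fail an active QP. */
void qp_llp_close(struct my_qp *qp)
{
	pthread_rwlock_wrlock(&qp->state_lock);	/* down_write() */
	if (qp->state == QPS_RTS)
		qp->state = QPS_ERROR;
	pthread_rwlock_unlock(&qp->state_lock);	/* up_write() */
}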
43 struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
45 if (qp)
46 return &qp->ibqp;
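
find_qp_by_qpn() hands back &qp->ibqp rather than the erdma_qp itself; to_eqp() (used further down at lines 663 and 735) recovers the wrapper via container_of(). A self-contained sketch of that embed-and-recover pattern (my_qp/my_ibqp are illustrative stand-ins for erdma_qp/ib_qp):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_ibqp { int qp_num; };

struct my_qp {
	int private_state;
	struct my_ibqp ibqp;	/* embedded core object */
};

struct my_qp *to_eqp(struct my_ibqp *ibqp)
{
	return container_of(ibqp, struct my_qp, ibqp);
}

int main(void)
{
	struct my_qp qp = { .private_state = 7, .ibqp = { .qp_num = 42 } };
	struct my_ibqp *ib = &qp.ibqp;		/* what the lookup returns */

	printf("%d\n", to_eqp(ib)->private_state);	/* back to the wrapper: 7 */
	return 0;
}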
52 erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
57 struct erdma_dev *dev = qp->dev;
60 struct erdma_cep *cep = qp->cep;
70 params->cc = qp->attrs.cc;
80 tp = tcp_sk(qp->cep->sock->sk);
87 FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
107 qp->attrs.irq_size = params->irq_size;
110 qp->attrs.orq_size = params->orq_size;
113 qp->attrs.cc = params->cc;
115 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_RTS;
121 erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
125 struct erdma_dev *dev = qp->dev;
133 FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
140 qp->attrs.iwarp.state = params->state;
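
Both the RTS and stop transitions build their command header with FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp)) (lines 87 and 133). A userspace approximation of FIELD_PREP(), with a made-up mask value, shows what that packing does:

#include <stdint.h>
#include <stdio.h>

#define QPN_MASK	0x000fffffu	/* hypothetical 20-bit QPN field */

/* Shift the value to the mask's lowest set bit, like the kernel's
 * FIELD_PREP(); __builtin_ctz() needs GCC or Clang. */
uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t cfg = field_prep(QPN_MASK, 0x123);

	printf("cfg=0x%08x\n", cfg);	/* 0x00000123 for this mask */
	return 0;
}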
145 int erdma_modify_qp_state_iwarp(struct erdma_qp *qp,
158 switch (qp->attrs.iwarp.state) {
162 ret = erdma_modify_qp_state_to_rts(qp, params, mask);
164 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
166 if (qp->cep) {
167 erdma_cep_put(qp->cep);
168 qp->cep = NULL;
170 ret = erdma_modify_qp_state_to_stop(qp, params, mask);
179 ret = erdma_modify_qp_state_to_stop(qp, params, mask);
185 erdma_qp_cm_drop(qp);
190 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
194 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
196 ret = erdma_modify_qp_state_to_stop(qp, params, mask);
197 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_ERROR;
206 if (need_reflush && !ret && rdma_is_kernel_res(&qp->ibqp.res)) {
207 qp->flags |= ERDMA_QP_IN_FLUSHING;
208 mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
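
The iWARP modify path is a switch on the QP's *current* state (line 158): from RTS it must stop the QP before recording a CLOSING/ERROR target, and a successful stop on a kernel QP arms the delayed reflush work (lines 206-208). A compressed, illustrative sketch of that dispatch shape; the transition rules here are simplified, not the driver's exact table:

enum iw_state { IW_IDLE, IW_RTR, IW_RTS, IW_CLOSING, IW_ERROR };

struct iw_qp { enum iw_state state; };

/* Simplified: what happens depends on where the QP currently is. */
int iw_modify_state(struct iw_qp *qp, enum iw_state target)
{
	switch (qp->state) {
	case IW_RTR:
		if (target == IW_RTS) {
			qp->state = IW_RTS;	/* connect completed */
			return 0;
		}
		qp->state = IW_ERROR;		/* anything else fails it */
		return 0;
	case IW_RTS:
		qp->state = target;	/* stop the QP, then record target */
		return 0;
	default:
		return -1;		/* no transition out of ERROR here */
	}
}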
215 static int modify_qp_cmd_rocev2(struct erdma_qp *qp,
226 req.cfg0 = FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
250 return erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL,
254 static void erdma_reset_qp(struct erdma_qp *qp)
256 qp->kern_qp.sq_pi = 0;
257 qp->kern_qp.sq_ci = 0;
258 qp->kern_qp.rq_pi = 0;
259 qp->kern_qp.rq_ci = 0;
260 memset(qp->kern_qp.swr_tbl, 0, qp->attrs.sq_size * sizeof(u64));
261 memset(qp->kern_qp.rwr_tbl, 0, qp->attrs.rq_size * sizeof(u64));
262 memset(qp->kern_qp.sq_buf, 0, qp->attrs.sq_size << SQEBB_SHIFT);
263 memset(qp->kern_qp.rq_buf, 0, qp->attrs.rq_size << RQE_SHIFT);
264 erdma_remove_cqes_of_qp(&qp->scq->ibcq, QP_ID(qp));
265 if (qp->rcq != qp->scq)
266 erdma_remove_cqes_of_qp(&qp->rcq->ibcq, QP_ID(qp));
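
erdma_reset_qp() rewinds both rings to empty: indices back to zero, the work-request tables and queue memory wiped, and stale CQEs for this QPN purged from the attached CQs. The buffer byte sizes are entry counts shifted by a per-entry size, e.g. sq_size << SQEBB_SHIFT. A sketch of the ring half of that, with an illustrative shift value:

#include <stdint.h>
#include <string.h>

#define SQEBB_SHIFT 5	/* hypothetical: 32-byte SQ building blocks */

struct ring {
	uint16_t pi, ci;	/* producer/consumer indices */
	uint32_t size;		/* entry count, power of two */
	void *buf;
	uint64_t *wr_tbl;	/* wr_id per slot */
};

void ring_reset(struct ring *r)
{
	r->pi = 0;
	r->ci = 0;
	memset(r->wr_tbl, 0, r->size * sizeof(uint64_t));
	memset(r->buf, 0, (size_t)r->size << SQEBB_SHIFT);
}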
269 int erdma_modify_qp_state_rocev2(struct erdma_qp *qp,
273 struct erdma_dev *dev = to_edev(qp->ibqp.device);
276 ret = modify_qp_cmd_rocev2(qp, params, attr_mask);
281 qp->attrs.rocev2.state = params->state;
284 qp->attrs.rocev2.qkey = params->qkey;
287 qp->attrs.rocev2.dst_qpn = params->dst_qpn;
290 memcpy(&qp->attrs.rocev2.av, &params->av,
293 if (rdma_is_kernel_res(&qp->ibqp.res) &&
295 erdma_reset_qp(qp);
297 if (rdma_is_kernel_res(&qp->ibqp.res) &&
299 qp->flags |= ERDMA_QP_IN_FLUSHING;
300 mod_delayed_work(dev->reflush_wq, &qp->reflush_dwork,
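
After modify_qp_cmd_rocev2() succeeds, only the attributes selected by the caller's mask are copied back into the cached qp->attrs.rocev2 (lines 281-290). A sketch of that masked write-back; the flag values are hypothetical, not the driver's ERDMA_QPA_ROCEV2_* bits:

#include <stdint.h>

#define QPA_STATE	(1u << 0)
#define QPA_QKEY	(1u << 1)
#define QPA_DST_QPN	(1u << 2)

struct rocev2_attrs { int state; uint32_t qkey, dst_qpn; };

/* Copy only the fields the mask names into the cached attributes. */
void apply_params(struct rocev2_attrs *cached,
		  const struct rocev2_attrs *params, uint32_t mask)
{
	if (mask & QPA_STATE)
		cached->state = params->state;
	if (mask & QPA_QKEY)
		cached->qkey = params->qkey;
	if (mask & QPA_DST_QPN)
		cached->dst_qpn = params->dst_qpn;
}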
309 struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
311 complete(&qp->safe_free);
314 void erdma_qp_put(struct erdma_qp *qp)
316 WARN_ON(kref_read(&qp->ref) < 1);
317 kref_put(&qp->ref, erdma_qp_safe_free);
320 void erdma_qp_get(struct erdma_qp *qp)
322 kref_get(&qp->ref);
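
QP lifetime is reference counted: erdma_qp_put() warns if the count already hit zero, and the final put fires complete(&qp->safe_free) so whoever is tearing the QP down can wait for the last user. The same shape in userspace, with a semaphore standing in for struct completion:

#include <assert.h>
#include <stdatomic.h>
#include <semaphore.h>

struct ref_qp {
	atomic_int ref;
	sem_t safe_free;	/* stands in for struct completion */
};

void qp_get(struct ref_qp *qp)
{
	atomic_fetch_add(&qp->ref, 1);
}

void qp_put(struct ref_qp *qp)
{
	int old = atomic_fetch_sub(&qp->ref, 1);

	assert(old >= 1);	/* like WARN_ON(kref_read() < 1) */
	if (old == 1)
		sem_post(&qp->safe_free);	/* wake the destroyer */
}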
325 static int fill_inline_data(struct erdma_qp *qp,
335 data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
359 data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
360 qp->attrs.sq_size, SQEBB_SHIFT);
372 static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
379 if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
388 sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
389 qp->attrs.sq_size, SQEBB_SHIFT);
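
Every queue access in this file goes through get_queue_entry(buf, idx, depth, shift). Its behavior is easy to reconstruct from the call sites: a power-of-two ring where masking replaces the modulo and each entry is (1 << shift) bytes. A sketch under that assumption:

#include <stddef.h>
#include <stdint.h>

void *get_queue_entry(void *base, uint32_t idx, uint32_t depth,
		      uint32_t shift)
{
	/* depth is a power of two, so idx & (depth - 1) wraps the index */
	return (char *)base + ((size_t)(idx & (depth - 1)) << shift);
}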
403 static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
419 static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
437 sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
442 static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
446 u32 idx = *pi & (qp->attrs.sq_size - 1);
462 if (qp->ibqp.qp_type != IB_QPT_RC && send_wr->opcode != IB_WR_SEND &&
466 entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
472 qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
476 ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
483 wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
527 sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
528 qp->attrs.sq_size, SQEBB_SHIFT);
539 if (qp->ibqp.qp_type == IB_QPT_RC) {
541 init_send_sqe_rc(qp, rc_send_sqe, send_wr, &hw_op);
546 init_send_sqe_ud(qp, ud_send_sqe, send_wr, &hw_op);
572 memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
573 qp->attrs.sq_size, SQEBB_SHIFT),
608 sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
609 qp->attrs.sq_size, SQEBB_SHIFT);
625 ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
632 ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
651 static void kick_sq_db(struct erdma_qp *qp, u16 pi)
653 u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
656 *(u64 *)qp->kern_qp.sq_dbrec = db_data;
657 writeq(db_data, qp->kern_qp.hw_sq_db);
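
kick_sq_db() writes the same 64-bit doorbell value twice: once to a host-memory shadow record (sq_dbrec) and once to the mapped device register via writeq(). A sketch of that pattern; the doorbell layout below is made up, and the volatile store merely stands in for an MMIO write:

#include <stdint.h>

struct sq_db {
	uint64_t *dbrec;		/* shadow copy the device can read back */
	volatile uint64_t *hw_db;	/* mapped device doorbell register */
};

void kick_sq_db(struct sq_db *db, uint32_t qpn, uint16_t pi)
{
	uint64_t val = ((uint64_t)qpn << 32) | pi;	/* made-up layout */

	*db->dbrec = val;	/* record first, in host memory */
	*db->hw_db = val;	/* then ring the doorbell */
}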
663 struct erdma_qp *qp = to_eqp(ibqp);
672 spin_lock_irqsave(&qp->lock, flags);
673 sq_pi = qp->kern_qp.sq_pi;
676 if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
682 ret = erdma_push_one_sqe(qp, &sq_pi, wr);
687 qp->kern_qp.sq_pi = sq_pi;
688 kick_sq_db(qp, sq_pi);
692 spin_unlock_irqrestore(&qp->lock, flags);
694 if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
695 mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
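
The full-ring test at line 676, (u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size, relies on free-running 16-bit indices: the unsigned subtraction yields the number of in-flight entries even after pi wraps past 0xffff. A standalone demo:

#include <assert.h>
#include <stdint.h>

int sq_full(uint16_t pi, uint16_t ci, uint16_t depth)
{
	return (uint16_t)(pi - ci) >= depth;
}

int main(void)
{
	/* wrapped case: pi has overflowed past 0xffff, ci has not */
	assert(sq_full(0x0001, 0xff01, 256));	/* 0x100 in flight: full */
	assert(!sq_full(0x0001, 0xff02, 256));	/* 0x0ff in flight: room */
	return 0;
}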
701 static int erdma_post_recv_one(struct erdma_qp *qp,
705 get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
706 qp->attrs.rq_size, RQE_SHIFT);
708 rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
709 rqe->qpn = cpu_to_le32(QP_ID(qp));
721 *(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
722 writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
724 qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
726 qp->kern_qp.rq_pi++;
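
On the receive side, each posted RQE records the caller's wr_id in rwr_tbl at slot rq_pi & (rq_size - 1) before the free-running producer index advances (lines 724-726), so the completion path can hand the wr_id back later. That bookkeeping in miniature (names are stand-ins):

#include <stdint.h>

struct rq {
	uint16_t pi;		/* free-running producer index */
	uint32_t depth;		/* power-of-two entry count */
	uint64_t *rwr_tbl;	/* wr_id per slot */
};

void rq_post_one(struct rq *rq, uint64_t wr_id)
{
	rq->rwr_tbl[rq->pi & (rq->depth - 1)] = wr_id;
	rq->pi++;	/* wraps naturally at 16 bits */
}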
735 struct erdma_qp *qp = to_eqp(ibqp);
739 spin_lock_irqsave(&qp->lock, flags);
742 ret = erdma_post_recv_one(qp, wr);
750 spin_unlock_irqrestore(&qp->lock, flags);
752 if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
753 mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,