Lines Matching +full:comp +full:- +full:int

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
35 [COMPST_COMP_WQE] = "COMP WQE",
36 [COMPST_COMP_ACK] = "COMP ACK",
42 [COMPST_UPDATE_COMP] = "UPDATE COMP",
122 spin_lock_irqsave(&qp->state_lock, flags); in retransmit_timer()
123 if (qp->valid) { in retransmit_timer()
124 qp->comp.timeout = 1; in retransmit_timer()
125 rxe_sched_task(&qp->send_task); in retransmit_timer()
127 spin_unlock_irqrestore(&qp->state_lock, flags); in retransmit_timer()
132 rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED); in rxe_comp_queue_pkt()
133 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
134 rxe_sched_task(&qp->send_task); in rxe_comp_queue_pkt()
146 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in get_wqe()
150 if (!wqe || wqe->state == wqe_state_posted) in get_wqe()
154 if (wqe->state == wqe_state_done) in get_wqe()
158 if (wqe->state == wqe_state_error) in get_wqe()
167 qp->comp.retry_cnt = qp->attr.retry_cnt; in reset_retry_counters()
168 qp->comp.rnr_retry = qp->attr.rnr_retry; in reset_retry_counters()
169 qp->comp.started_retry = 0; in reset_retry_counters()
181 diff = psn_compare(pkt->psn, wqe->last_psn); in check_psn()
183 if (wqe->state == wqe_state_pending) { in check_psn()
184 if (wqe->mask & WR_ATOMIC_OR_READ_MASK) in check_psn()
195 diff = psn_compare(pkt->psn, qp->comp.psn); in check_psn()
200 if (pkt->psn == wqe->last_psn) in check_psn()
202 else if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE && in check_psn()
203 (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST || in check_psn()
204 qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE)) in check_psn()
208 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) { in check_psn()
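The check_psn() fragments above order packet sequence numbers with psn_compare() and advance qp->comp.psn modulo BTH_PSN_MASK. As a standalone illustration (not the driver's own helper, which lives elsewhere in the rxe headers), the sketch below shows how a 24-bit PSN can be compared across the wrap point: the difference is shifted into the high bits of a signed 32-bit value, so its sign gives the circular ordering. PSN_MASK and psn_cmp() here are illustrative names.

	#include <stdint.h>
	#include <stdio.h>

	#define PSN_MASK 0xffffff	/* 24-bit PSN space, mirroring BTH_PSN_MASK */

	/* negative: a is behind b; zero: equal; positive: a is ahead of b */
	static int32_t psn_cmp(uint32_t a, uint32_t b)
	{
		return (int32_t)((a - b) << 8);
	}

	int main(void)
	{
		uint32_t psn  = PSN_MASK;		/* last PSN before the wrap */
		uint32_t next = (psn + 1) & PSN_MASK;	/* wraps around to 0 */

		/* despite 0 < 0xffffff numerically, next is "ahead of" psn */
		printf("next = %u, cmp = %d\n", next, psn_cmp(next, psn));
		return 0;
	}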
219 unsigned int mask = pkt->mask; in check_ack()
221 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_ack()
224 switch (qp->comp.opcode) { in check_ack()
225 case -1: in check_ack()
235 if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE) in check_ack()
238 if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE && in check_ack()
239 pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) { in check_ack()
243 if ((pkt->psn == wqe->first_psn && in check_ack()
244 pkt->opcode == in check_ack()
246 (wqe->first_psn == wqe->last_psn && in check_ack()
247 pkt->opcode == in check_ack()
259 switch (pkt->opcode) { in check_ack()
268 if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE) in check_ack()
275 if (wqe->wr.opcode != IB_WR_RDMA_READ && in check_ack()
276 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV && in check_ack()
277 wqe->wr.opcode != IB_WR_FLUSH) { in check_ack()
278 wqe->status = IB_WC_FATAL_ERR; in check_ack()
290 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP && in check_ack()
291 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD) in check_ack()
313 if (psn_compare(pkt->psn, qp->comp.psn) > 0) { in check_ack()
316 qp->comp.psn = pkt->psn; in check_ack()
317 if (qp->req.wait_psn) { in check_ack()
318 qp->req.wait_psn = 0; in check_ack()
319 qp->req.again = 1; in check_ack()
325 wqe->status = IB_WC_REM_INV_REQ_ERR; in check_ack()
329 wqe->status = IB_WC_REM_ACCESS_ERR; in check_ack()
333 wqe->status = IB_WC_REM_OP_ERR; in check_ack()
338 wqe->status = IB_WC_REM_OP_ERR; in check_ack()
358 int ret; in do_read()
360 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_read()
361 &wqe->dma, payload_addr(pkt), in do_read()
364 wqe->status = IB_WC_LOC_PROT_ERR; in do_read()
368 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK)) in do_read()
378 int ret; in do_atomic()
382 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_atomic()
383 &wqe->dma, &atomic_orig, in do_atomic()
386 wqe->status = IB_WC_LOC_PROT_ERR; in do_atomic()
396 struct ib_wc *wc = &cqe->ibwc; in make_send_cqe()
397 struct ib_uverbs_wc *uwc = &cqe->uibwc; in make_send_cqe()
401 if (!qp->is_user) { in make_send_cqe()
402 wc->wr_id = wqe->wr.wr_id; in make_send_cqe()
403 wc->status = wqe->status; in make_send_cqe()
404 wc->qp = &qp->ibqp; in make_send_cqe()
406 uwc->wr_id = wqe->wr.wr_id; in make_send_cqe()
407 uwc->status = wqe->status; in make_send_cqe()
408 uwc->qp_num = qp->ibqp.qp_num; in make_send_cqe()
411 if (wqe->status == IB_WC_SUCCESS) { in make_send_cqe()
412 if (!qp->is_user) { in make_send_cqe()
413 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode); in make_send_cqe()
414 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || in make_send_cqe()
415 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) in make_send_cqe()
416 wc->wc_flags = IB_WC_WITH_IMM; in make_send_cqe()
417 wc->byte_len = wqe->dma.length; in make_send_cqe()
419 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode); in make_send_cqe()
420 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || in make_send_cqe()
421 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) in make_send_cqe()
422 uwc->wc_flags = IB_WC_WITH_IMM; in make_send_cqe()
423 uwc->byte_len = wqe->dma.length; in make_send_cqe()
426 if (wqe->status != IB_WC_WR_FLUSH_ERR) in make_send_cqe()
427 rxe_err_qp(qp, "non-flush error status = %d\n", in make_send_cqe()
428 wqe->status); in make_send_cqe()
434 * ---------8<---------8<-------------
438 * ---------8<---------8<-------------
442 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
447 post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) || in do_complete()
448 (wqe->wr.send_flags & IB_SEND_SIGNALED) || in do_complete()
449 wqe->status != IB_WC_SUCCESS); in do_complete()
454 queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
457 rxe_cq_post(qp->scq, &cqe, 0); in do_complete()
459 if (wqe->wr.opcode == IB_WR_SEND || in do_complete()
460 wqe->wr.opcode == IB_WR_SEND_WITH_IMM || in do_complete()
461 wqe->wr.opcode == IB_WR_SEND_WITH_INV) in do_complete()
468 if (qp->req.wait_fence) { in do_complete()
469 qp->req.wait_fence = 0; in do_complete()
470 qp->req.again = 1; in do_complete()
478 spin_lock_irqsave(&qp->state_lock, flags); in comp_check_sq_drain_done()
480 if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { in comp_check_sq_drain_done()
481 qp->attr.sq_draining = 0; in comp_check_sq_drain_done()
482 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
484 if (qp->ibqp.event_handler) { in comp_check_sq_drain_done()
487 ev.device = qp->ibqp.device; in comp_check_sq_drain_done()
488 ev.element.qp = &qp->ibqp; in comp_check_sq_drain_done()
490 qp->ibqp.event_handler(&ev, in comp_check_sq_drain_done()
491 qp->ibqp.qp_context); in comp_check_sq_drain_done()
496 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
503 if (wqe->has_rd_atomic) { in complete_ack()
504 wqe->has_rd_atomic = 0; in complete_ack()
505 atomic_inc(&qp->req.rd_atomic); in complete_ack()
506 if (qp->req.need_rd_atomic) { in complete_ack()
507 qp->comp.timeout_retry = 0; in complete_ack()
508 qp->req.need_rd_atomic = 0; in complete_ack()
509 qp->req.again = 1; in complete_ack()
517 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in complete_ack()
527 if (pkt && wqe->state == wqe_state_pending) { in complete_wqe()
528 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { in complete_wqe()
529 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; in complete_wqe()
530 qp->comp.opcode = -1; in complete_wqe()
533 if (qp->req.wait_psn) { in complete_wqe()
534 qp->req.wait_psn = 0; in complete_wqe()
535 qp->req.again = 1; in complete_wqe()
549 while ((skb = skb_dequeue(&qp->resp_pkts))) { in drain_resp_pkts()
552 ib_device_put(qp->ibqp.device); in drain_resp_pkts()
557 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in flush_send_wqe()
562 int err; in flush_send_wqe()
564 if (qp->is_user) { in flush_send_wqe()
565 uwc->wr_id = wqe->wr.wr_id; in flush_send_wqe()
566 uwc->status = IB_WC_WR_FLUSH_ERR; in flush_send_wqe()
567 uwc->qp_num = qp->ibqp.qp_num; in flush_send_wqe()
569 wc->wr_id = wqe->wr.wr_id; in flush_send_wqe()
570 wc->status = IB_WC_WR_FLUSH_ERR; in flush_send_wqe()
571 wc->qp = &qp->ibqp; in flush_send_wqe()
574 err = rxe_cq_post(qp->scq, &cqe, 0); in flush_send_wqe()
576 rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err); in flush_send_wqe()
588 struct rxe_queue *q = qp->sq.queue; in flush_send_queue()
589 int err; in flush_send_queue()
592 if (!qp->sq.queue) in flush_send_queue()
595 while ((wqe = queue_head(q, q->type))) { in flush_send_queue()
601 queue_advance_consumer(q, q->type); in flush_send_queue()
608 struct rxe_qp *qp = pkt->qp; in free_pkt()
609 struct ib_device *dev = qp->ibqp.device; in free_pkt()
617 * - QP is type RC
618 * - there is a packet sent by the requester that
621 * - the timeout parameter is set
622 * - the QP is alive
628 if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { in reset_retry_timer()
629 spin_lock_irqsave(&qp->state_lock, flags); in reset_retry_timer()
631 psn_compare(qp->req.psn, qp->comp.psn) > 0) in reset_retry_timer()
632 mod_timer(&qp->retrans_timer, in reset_retry_timer()
633 jiffies + qp->qp_timeout_jiffies); in reset_retry_timer()
634 spin_unlock_irqrestore(&qp->state_lock, flags); in reset_retry_timer()
638 int rxe_completer(struct rxe_qp *qp) in rxe_completer()
640 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_completer()
645 int ret; in rxe_completer()
648 qp->req.again = 0; in rxe_completer()
650 spin_lock_irqsave(&qp->state_lock, flags); in rxe_completer()
651 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_completer()
653 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_completer()
657 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
660 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
662 if (qp->comp.timeout) { in rxe_completer()
663 qp->comp.timeout_retry = 1; in rxe_completer()
664 qp->comp.timeout = 0; in rxe_completer()
666 qp->comp.timeout_retry = 0; in rxe_completer()
669 if (qp->req.need_retry) in rxe_completer()
678 skb = skb_dequeue(&qp->resp_pkts); in rxe_completer()
681 qp->comp.timeout_retry = 0; in rxe_completer()
707 if (wqe->state == wqe_state_pending && in rxe_completer()
708 wqe->last_psn == pkt->psn) in rxe_completer()
723 if (pkt->mask & RXE_END_MASK) in rxe_completer()
724 qp->comp.opcode = -1; in rxe_completer()
726 qp->comp.opcode = pkt->opcode; in rxe_completer()
728 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in rxe_completer()
729 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in rxe_completer()
731 if (qp->req.wait_psn) { in rxe_completer()
732 qp->req.wait_psn = 0; in rxe_completer()
733 qp->req.again = 1; in rxe_completer()
743 if (qp->comp.timeout_retry && wqe) { in rxe_completer()
761 if (!wqe || (wqe->state == wqe_state_posted)) in rxe_completer()
767 if (qp->comp.started_retry && in rxe_completer()
768 !qp->comp.timeout_retry) in rxe_completer()
771 if (qp->comp.retry_cnt > 0) { in rxe_completer()
772 if (qp->comp.retry_cnt != 7) in rxe_completer()
773 qp->comp.retry_cnt--; in rxe_completer()
779 if (psn_compare(qp->req.psn, in rxe_completer()
780 qp->comp.psn) > 0) { in rxe_completer()
786 qp->req.need_retry = 1; in rxe_completer()
787 qp->comp.started_retry = 1; in rxe_completer()
788 qp->req.again = 1; in rxe_completer()
794 wqe->status = IB_WC_RETRY_EXC_ERR; in rxe_completer()
801 if (qp->comp.rnr_retry > 0) { in rxe_completer()
802 if (qp->comp.rnr_retry != 7) in rxe_completer()
803 qp->comp.rnr_retry--; in rxe_completer()
808 qp->req.wait_for_rnr_timer = 1; in rxe_completer()
811 mod_timer(&qp->rnr_nak_timer, in rxe_completer()
818 wqe->status = IB_WC_RNR_RETRY_EXC_ERR; in rxe_completer()
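Both retry paths above (retry_cnt and rnr_retry) use the same counting idiom: a counter holding 7 is never decremented, so it effectively means "retry without limit"; for rnr_retry this matches the usual verbs convention in which a value of 7 requests unlimited RNR retries. A minimal standalone sketch of that idiom, with a hypothetical helper name:

	#include <stdbool.h>
	#include <stdio.h>

	/* hypothetical helper, not from the driver: account for one retry
	 * attempt.  Returns true if a retry may still be performed.  A count
	 * of 7 is left untouched, so it never reaches zero and retries
	 * continue indefinitely.
	 */
	static bool consume_retry(unsigned int *cnt)
	{
		if (*cnt == 0)
			return false;		/* retries exhausted */
		if (*cnt != 7)
			(*cnt)--;
		return true;
	}

	int main(void)
	{
		unsigned int limited = 3, unlimited = 7;
		int i, tries = 0;

		while (consume_retry(&limited))
			tries++;		/* succeeds three times, then stops */

		for (i = 0; i < 100; i++)
			consume_retry(&unlimited);	/* stays at 7 forever */

		printf("limited tries = %d, unlimited counter = %u\n",
		       tries, unlimited);
		return 0;
	}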
824 WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS); in rxe_completer()
831 /* A non-zero return value will cause rxe_do_task to in rxe_completer()
839 ret = (qp->req.again) ? 0 : -EAGAIN; in rxe_completer()
841 qp->req.again = 0; in rxe_completer()
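The completer's exit lines above end with "ret = (qp->req.again) ? 0 : -EAGAIN;": returning 0 asks the task machinery to invoke the handler again, while -EAGAIN signals that the response queue is drained for now. A toy sketch (hypothetical names, not the rxe task code) of a work loop consuming that convention:

	#include <errno.h>
	#include <stdio.h>

	/* toy stand-ins; the driver uses struct rxe_qp and rxe_completer()
	 * driven by its own task machinery.
	 */
	struct toy_qp { int pending; };

	static int toy_completer(struct toy_qp *qp)
	{
		if (qp->pending == 0)
			return -EAGAIN;		/* nothing left: stop for now */
		qp->pending--;
		return 0;			/* made progress: call me again */
	}

	int main(void)
	{
		struct toy_qp qp = { .pending = 3 };
		int calls = 0;

		/* keep invoking the handler while it reports more work */
		while (toy_completer(&qp) == 0)
			calls++;

		printf("handler made progress %d times before -EAGAIN\n", calls);
		return 0;
	}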