/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 * Copyright(c) 2016 - 2020 Intel Corporation.
#include <rdma/rvt-abi.h>
#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
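/*
 * Illustrative sketch (not part of this header): one way a driver's
 * send engine might consult these flags under qp->s_lock before doing
 * work. example_can_send() is a hypothetical name; RVT_S_ANY_WAIT is
 * the header's catch-all mask of the wait bits listed above.
 */
static inline bool example_can_send(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);

        /* Another context already owns the send engine. */
        if (qp->s_flags & RVT_S_BUSY)
                return false;

        /* Blocked on ACKs, credits, buffers, DMA, etc. */
        if (qp->s_flags & RVT_S_ANY_WAIT)
                return false;

        qp->s_flags |= RVT_S_BUSY;
        return true;
}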
 * rvt_ud_wr - IB UD work request plus AH cache
 * @wr: valid IB work request
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr. I.e., the attr
 * MUST come after wr. The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
        struct ib_ud_wr wr;
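/*
 * Minimal sketch (a hypothetical helper, not rvt_post_one_wr itself)
 * of why the ordering note matters: with @wr first, a copy sized by
 * sizeof(struct ib_ud_wr) fills the embedded wr and leaves the
 * trailing attr pointer untouched.
 */
static inline void example_copy_ud_wr(struct rvt_ud_wr *dst,
                                      const struct ib_ud_wr *src)
{
        BUILD_BUG_ON(offsetof(struct rvt_ud_wr, wr) != 0);
        memcpy(&dst->wr, src, sizeof(dst->wr)); /* dst->attr preserved */
}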
 * in qp->s_max_sge.
        struct ib_send_wr wr; /* don't use wr.sg_list */
 * struct rvt_krwq - kernel receive work request queue
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
        return ibah_to_rvtah(swqe->ud_wr.wr.ah);
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
        return swqe->ud_wr.attr;
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
        return swqe->ud_wr.wr.remote_qpn;
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
        return swqe->ud_wr.wr.remote_qkey;
 * rvt_get_swqe_pkey_index - Access the pkey index
        return swqe->ud_wr.wr.pkey_index;
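/*
 * Hypothetical usage sketch: gathering everything a driver needs to
 * build a UD packet header from one SWQE via the accessors above.
 * example_ud_fields() and its struct are made-up names.
 */
struct example_ud_fields {
        struct rvt_ah *ah;
        struct rdma_ah_attr *attr;
        u32 dqpn;
        u32 qkey;
        u16 pkey_index;
};

static inline void example_ud_fields(struct rvt_swqe *wqe,
                                     struct example_ud_fields *f)
{
        f->ah = rvt_get_swqe_ah(wqe);
        f->attr = rvt_get_swqe_ah_attr(wqe);
        f->dqpn = rvt_get_swqe_remote_qpn(wqe);
        f->qkey = rvt_get_swqe_remote_qkey(wqe);
        f->pkey_index = rvt_get_swqe_pkey_index(wqe);
}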
 * rvt_get_rq_count - count the number of receive work queue entries
 *
 * Return - total number of entries in the Receive Queue
        u32 count = head - tail;

        if ((s32)count < 0)
                count += rq->size;
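/*
 * Worked example (hypothetical values) of the wrap-around arithmetic
 * above: with head behind tail numerically, the unsigned difference
 * "goes negative", and adding the ring size corrects it.
 */
static inline u32 example_ring_count(u32 head, u32 tail, u32 size)
{
        u32 count = head - tail;        /* may wrap below zero */

        if ((s32)count < 0)
                count += size;          /* undo the wrap */
        return count;                   /* e.g. (3, 250, 256) -> 9 */
}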
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
        u8 s_max_sge; /* size of s_wq->sg_list */
        u32 s_acked; /* last un-ACK'ed entry */
        u8 s_nak_state; /* non-zero if NAK is pending */
        u8 r_nak_state; /* non-zero if NAK is pending */
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
 * QPN-map pages start out as NULL; they get allocated upon
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct rvt_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct rvt_sge)) * n);
        return (struct rvt_rwqe *)
                ((char *)rq->kwq->curr_wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
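/*
 * Generalized sketch (hypothetical helper) of the indexing scheme
 * above: each ring entry is a fixed struct plus max_sge trailing SGEs,
 * so entry n starts at base + n * (entry_size + max_sge * sge_size).
 */
static inline void *example_ring_entry(void *base, size_t entry_size,
                                       size_t sge_size, u32 max_sge, u32 n)
{
        size_t stride = entry_size + max_sge * sge_size;

        return (char *)base + stride * n;
}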
 * rvt_is_user_qp - return whether this is a user mode QP
 * @qp - the target QP
        return !!qp->pid;
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
        atomic_inc(&qp->refcount);
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
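/*
 * Hypothetical usage sketch: hold a reference across deferred work so
 * the QP cannot be freed underneath it; the final rvt_put_qp() wakes
 * the destroy path sleeping on qp->wait.
 */
static inline void example_hold_for_work(struct rvt_qp *qp)
{
        rvt_get_qp(qp);
        /* ... hand qp to a workqueue / send engine ... */
        rvt_put_qp(qp);         /* drop when the work is done */
}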
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                rvt_put_mr(sge->mr);
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
        atomic_inc(&qp->s_reserved_used);
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to s_last change */
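/*
 * Hypothetical flow for a driver-internal (reserved) operation:
 * account for the slot when the wqe is posted, release it at
 * completion time. rvt_qp_wqe_unreserve() only decrements when
 * RVT_SEND_RESERVE_USED is set in @flags.
 */
static inline void example_reserved_post(struct rvt_qp *qp,
                                         struct rvt_swqe *wqe)
{
        rvt_qp_wqe_reserve(qp, wqe);     /* bumps s_reserved_used */
        /* ... submit wqe to the send engine ... */
}

static inline void example_reserved_complete(struct rvt_qp *qp, int flags)
{
        rvt_qp_wqe_unreserve(qp, flags); /* drops s_reserved_used */
}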
        return (((int)a) - ((int)b)) << 8;
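/*
 * Worked example of the shifted comparison above (rvt_cmp_msn(), used
 * by rvt_rc_credit_avail() further down): MSNs are 24-bit values, so
 * the << 8 pushes bit 23 of the difference into the sign bit and the
 * comparison wraps correctly at the 2^24 boundary. Values here are
 * hypothetical; this relies on two's complement wrap as the kernel does.
 */
static inline int example_msn_after(u32 a, u32 b)
{
        /* e.g. a = 0x000002, b = 0xFFFFFE: the raw difference is
         * -16777212, but << 8 wraps it to +1024, so a compares after b. */
        return ((((int)a) - ((int)b)) << 8) > 0;
}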
 * rvt_div_round_up_mtu - round up divide
 * @qp - the queue pair
 * @len - the length
        return (len + qp->pmtu - 1) >> qp->log_pmtu;
 * rvt_div_mtu - divide by the mtu
 * @qp - the queue pair
 * @len - the length
        return len >> qp->log_pmtu;
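/*
 * Worked example (hypothetical values) of the two divisions above:
 * with pmtu = 4096 (log_pmtu = 12), a 10000-byte message spans 3
 * packets when rounding up and holds 2 full-MTU payloads when
 * rounding down.
 */
static inline void example_mtu_math(void)
{
        u32 pmtu = 4096, log_pmtu = 12, len = 10000;
        u32 npkts = (len + pmtu - 1) >> log_pmtu;       /* 3 */
        u32 nfull = len >> log_pmtu;                    /* 2 */

        (void)npkts;
        (void)nfull;
}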
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
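/*
 * Worked example (hypothetical helper) of the IBTA encoding this
 * converts: a timeout field t in 0-31 means 4.096 usec * 2^t, so the
 * common t = 14 gives roughly 67 msec before the retry timer fires.
 */
static inline u64 example_timeout_usecs(u8 timeout)
{
        if (timeout > 31)
                timeout = 31;
        /* 4.096 usec = 4096 nsec; scale, then convert to usec */
        return (4096ULL << timeout) / 1000;     /* t = 14 -> ~67108 */
}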
 * rvt_lookup_qpn - return the QP with the given QPN
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
                qp = rcu_dereference(rvp->qp[qpn]);
                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
                     qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
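/*
 * Hypothetical caller sketch: rvt_lookup_qpn() must run under
 * rcu_read_lock(), and the QP must be referenced before the read
 * section ends if it will be used afterwards.
 */
static inline struct rvt_qp *example_find_qp(struct rvt_dev_info *rdi,
                                             struct rvt_ibport *rvp,
                                             u32 qpn)
{
        struct rvt_qp *qp;

        rcu_read_lock();
        qp = rvt_lookup_qpn(rdi, rvp, qpn);
        if (qp)
                rvt_get_qp(qp);         /* keep qp valid past RCU */
        rcu_read_unlock();
        return qp;                      /* caller does rvt_put_qp() */
}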
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
                  (qp->timeout_jiffies << shift));
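/*
 * Hypothetical usage: stretch the retry timeout when a burst of
 * packets is outstanding by passing a non-zero shift, so the timer
 * waits 2^shift times the single-packet interval.
 */
static inline void example_arm_retry_for_burst(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        rvt_mod_retry_timer_ext(qp, 2); /* ~4x the base timeout */
}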
 * rvt_put_qp_swqe - drop refs held by swqe
        if (qp->allowed_ops == IB_OPCODE_UD)
                rdma_destroy_ah_attr(wqe->ud_wr.attr);
 * rvt_qp_swqe_incr - increment ring index
        if (++val >= qp->s_size)
 * rvt_recv_cq - add a new entry to completion queue
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
 * rvt_send_cq - add a new entry to completion queue
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
        int flags = wqe->wr.send_flags;
            (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
        wr_id = wqe->wr.wr_id;
        byte_len = wqe->length;
        last = rvt_qp_swqe_incr(qp, qp->s_last);
        smp_store_release(&qp->s_last, last);
                .qp = &qp->ibqp,
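/*
 * Hypothetical caller sketch: a driver retiring the oldest send wqe in
 * order. rvt_qp_complete_swqe() advances s_last with release semantics
 * and only inserts a CQE when the wqe requires a completion (signaled,
 * errored, or the QP has RVT_S_SIGNAL_REQ_WR clear).
 */
static inline void example_retire_swqe(struct rvt_qp *qp,
                                       struct rvt_swqe *wqe)
{
        rvt_qp_complete_swqe(qp, wqe, IB_WC_SEND, IB_WC_SUCCESS);
}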
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
        return ibcq_to_rvtcq(send_cq)->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
               ibcq_to_rvtcq(send_cq)->kqueue->tail;
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
        return ibcq_to_rvtcq(send_cq)->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
               ibcq_to_rvtcq(send_cq)->kqueue->head;
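/*
 * Hypothetical sketch built on the two accessors above: a rough count
 * of unpolled CQEs in the send CQ ring. example_cq_depth() and its
 * ring_size parameter are made-up; wrap handling is illustrative.
 */
static inline u32 example_cq_depth(struct ib_cq *send_cq, u32 ring_size)
{
        u32 head = ib_cq_head(send_cq);
        u32 tail = ib_cq_tail(send_cq);

        return head >= tail ? head - tail : ring_size - (tail - head);
}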
 * rvt_free_rq - free memory allocated for rvt_rq struct
        kvfree(rq->kwq);
        rq->kwq = NULL;
        vfree(rq->wq);
        rq->wq = NULL;
 * rvt_to_iport - Get the ibport pointer
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        return rdi->ports[qp->port_num - 1];
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
        lockdep_assert_held(&qp->s_lock);
        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                rvp->n_rc_crwaits++;