Searched full:qp (Results 1 – 25 of 503) sorted by relevance


/linux/drivers/infiniband/sw/rxe/
rxe_qp.c
92 rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num); in rxe_qp_chk_init()
103 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) in alloc_rd_atomic_resources() argument
105 qp->resp.res_head = 0; in alloc_rd_atomic_resources()
106 qp->resp.res_tail = 0; in alloc_rd_atomic_resources()
107 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); in alloc_rd_atomic_resources()
109 if (!qp->resp.resources) in alloc_rd_atomic_resources()
115 static void free_rd_atomic_resources(struct rxe_qp *qp) in free_rd_atomic_resources() argument
117 if (qp->resp.resources) { in free_rd_atomic_resources()
120 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in free_rd_atomic_resources()
121 struct resp_res *res = &qp->resp.resources[i]; in free_rd_atomic_resources()
[all …]
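
The rxe_qp.c excerpt shows the responder's read/atomic resource pool being set up at QP init: a flat kcalloc'd array treated as a ring through res_head/res_tail. A minimal user-space sketch of that allocate/release shape follows; struct resp_info, the -1 return, and calloc are stand-ins for the kernel's types, -ENOMEM, and kcalloc, so this is an illustration rather than the driver code.

    #include <stdlib.h>

    struct resp_res {
            int type; /* the kernel struct carries per-request replay state */
    };

    struct resp_info {
            unsigned int res_head;
            unsigned int res_tail;
            struct resp_res *resources;
    };

    /* Sketch: size the responder resource ring for n outstanding requests. */
    static int alloc_rd_atomic_resources(struct resp_info *resp, unsigned int n)
    {
            resp->res_head = 0;
            resp->res_tail = 0;
            resp->resources = calloc(n, sizeof(struct resp_res));
            return resp->resources ? 0 : -1;
    }

    static void free_rd_atomic_resources(struct resp_info *resp)
    {
            /* the kernel first walks the slots to drop MR references */
            free(resp->resources);
            resp->resources = NULL;
    }
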
rxe_resp.c
50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_resp_queue_pkt() argument
52 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
53 rxe_sched_task(&qp->recv_task); in rxe_resp_queue_pkt()
56 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
61 skb = skb_peek(&qp->req_pkts); in get_req()
67 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
70 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
73 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
74 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_psn()
76 switch (qp_type(qp)) { in check_psn()
[all …]
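
check_psn() above orders the incoming PSN against the expected one with psn_compare(), which is defined elsewhere in rxe rather than in these hits. As an illustration only, here is the usual 24-bit serial-number comparison such code relies on: PSNs wrap at 2^24, so the difference is shifted into the top 24 bits of a signed value and its sign gives the ordering even across wraparound.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch, not the kernel function: compare two 24-bit PSNs. */
    static int psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
            int32_t diff = (int32_t)((psn_a - psn_b) << 8);

            return diff; /* < 0: a precedes b, 0: equal, > 0: a follows b */
    }

    int main(void)
    {
            printf("%d\n", psn_compare(5, 3) > 0);        /* 1 */
            printf("%d\n", psn_compare(1, 0xFFFFFE) > 0); /* 1: ordered across the wrap */
            return 0;
    }
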
rxe_req.c
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
17 static inline void retry_first_write_send(struct rxe_qp *qp, in retry_first_write_send() argument
23 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
24 qp->mtu : wqe->dma.resid; in retry_first_write_send()
26 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
38 static void req_retry(struct rxe_qp *qp) in req_retry() argument
45 struct rxe_queue *q = qp->sq.queue; in req_retry()
52 qp->req.wqe_index = cons; in req_retry()
53 qp->req.psn = qp->comp.psn; in req_retry()
54 qp->req.opcode = -1; in req_retry()
[all …]
rxe_comp.c
117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
120 rxe_dbg_qp(qp, "retransmit timer fired\n"); in retransmit_timer()
122 spin_lock_irqsave(&qp->state_lock, flags); in retransmit_timer()
123 if (qp->valid) { in retransmit_timer()
124 qp->comp.timeout = 1; in retransmit_timer()
125 rxe_sched_task(&qp->send_task); in retransmit_timer()
127 spin_unlock_irqrestore(&qp->state_lock, flags); in retransmit_timer()
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
133 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
134 rxe_sched_task(&qp->send_task); in rxe_comp_queue_pkt()
[all …]
/linux/drivers/infiniband/hw/qib/
qib_rc.c
53 * @dev: the device for this QP
54 * @qp: a pointer to the QP
59 * Note that we are in the responder's side of the QP context.
60 * Note the QP s_lock must be held.
62 static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, in qib_make_rc_ack() argument
72 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in qib_make_rc_ack()
78 switch (qp->s_ack_state) { in qib_make_rc_ack()
81 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
93 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) in qib_make_rc_ack()
94 qp->s_tail_ack_queue = 0; in qib_make_rc_ack()
[all …]
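
The qib_make_rc_ack() excerpt advances s_tail_ack_queue with an inclusive bound: the ack queue holds QIB_MAX_RDMA_ATOMIC + 1 slots, so the index wraps once it passes the last one. A sketch of that wrap, with the constant's value chosen for illustration rather than taken from qib's headers:

    #define QIB_MAX_RDMA_ATOMIC 16 /* illustrative; see qib's headers for the real value */

    /* Sketch: advance a ring index over QIB_MAX_RDMA_ATOMIC + 1 slots. */
    static unsigned int next_tail_ack_queue(unsigned int tail)
    {
            if (++tail > QIB_MAX_RDMA_ATOMIC)
                    tail = 0;
            return tail;
    }
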
qib_uc.c
42 * @qp: a pointer to the QP
49 int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) in qib_make_uc_req() argument
51 struct qib_qp_priv *priv = qp->priv; in qib_make_uc_req()
57 u32 pmtu = qp->pmtu; in qib_make_uc_req()
60 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in qib_make_uc_req()
61 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in qib_make_uc_req()
64 if (qp->s_last == READ_ONCE(qp->s_head)) in qib_make_uc_req()
68 qp->s_flags |= RVT_S_WAIT_DMA; in qib_make_uc_req()
71 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
72 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
[all …]
qib_ruc.c
42 * The QP s_lock should be held and interrupts disabled.
44 void qib_migrate_qp(struct rvt_qp *qp) in qib_migrate_qp() argument
48 qp->s_mig_state = IB_MIG_MIGRATED; in qib_migrate_qp()
49 qp->remote_ah_attr = qp->alt_ah_attr; in qib_migrate_qp()
50 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); in qib_migrate_qp()
51 qp->s_pkey_index = qp->s_alt_pkey_index; in qib_migrate_qp()
53 ev.device = qp->ibqp.device; in qib_migrate_qp()
54 ev.element.qp = &qp->ibqp; in qib_migrate_qp()
56 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in qib_migrate_qp()
78 * This should be called with the QP r_lock held.
[all …]
qib_ud.c
43 * @sqp: the sending QP
57 struct rvt_qp *qp; in qib_ud_loopback() local
67 qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe)); in qib_ud_loopback()
68 if (!qp) { in qib_ud_loopback()
75 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? in qib_ud_loopback()
76 IB_QPT_UD : qp->ibqp.qp_type; in qib_ud_loopback()
79 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in qib_ud_loopback()
87 if (qp->ibqp.qp_num > 1) { in qib_ud_loopback()
93 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); in qib_ud_loopback()
99 sqp->ibqp.qp_num, qp->ibqp.qp_num, in qib_ud_loopback()
[all …]
/linux/drivers/infiniband/hw/hfi1/
qp.c
16 #include "qp.h"
22 MODULE_PARM_DESC(qp_table_size, "QP table size");
24 static void flush_tx_list(struct rvt_qp *qp);
33 static void qp_pio_drain(struct rvt_qp *qp);
122 static void flush_tx_list(struct rvt_qp *qp) in flush_tx_list() argument
124 struct hfi1_qp_priv *priv = qp->priv; in flush_tx_list()
130 static void flush_iowait(struct rvt_qp *qp) in flush_iowait() argument
132 struct hfi1_qp_priv *priv = qp->priv; in flush_iowait()
142 rvt_put_qp(qp); in flush_iowait()
160 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_check_modify_qp() argument
[all …]
rc.c
11 #include "qp.h"
16 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, in find_prev_entry() argument
18 __must_hold(&qp->s_lock) in find_prev_entry()
24 for (i = qp->r_head_ack_queue; ; i = p) { in find_prev_entry()
25 if (i == qp->s_tail_ack_queue) in find_prev_entry()
30 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); in find_prev_entry()
31 if (p == qp->r_head_ack_queue) { in find_prev_entry()
35 e = &qp->s_ack_queue[p]; in find_prev_entry()
41 if (p == qp->s_tail_ack_queue && in find_prev_entry()
58 * @dev: the device for this QP
[all …]
uc.c
8 #include "qp.h"
15 * @qp: a pointer to the QP
22 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) in hfi1_make_uc_req() argument
24 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_uc_req()
30 u32 pmtu = qp->pmtu; in hfi1_make_uc_req()
33 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_uc_req()
37 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in hfi1_make_uc_req()
38 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in hfi1_make_uc_req()
41 if (qp->s_last == READ_ONCE(qp->s_head)) in hfi1_make_uc_req()
45 qp->s_flags |= RVT_S_WAIT_DMA; in hfi1_make_uc_req()
[all …]
ruc.c
10 #include "qp.h"
23 * This should be called with the QP r_lock held.
31 struct rvt_qp *qp = packet->qp; in hfi1_ruc_check_hdr() local
32 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; in hfi1_ruc_check_hdr()
39 if (qp->s_mig_state == IB_MIG_ARMED && migrated) { in hfi1_ruc_check_hdr()
41 if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) & in hfi1_ruc_check_hdr()
48 if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) & in hfi1_ruc_check_hdr()
51 grh = rdma_ah_read_grh(&qp->alt_ah_attr); in hfi1_ruc_check_hdr()
64 hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num, in hfi1_ruc_check_hdr()
69 if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) || in hfi1_ruc_check_hdr()
[all …]
tid_rdma.c
8 #include "qp.h"
58 /* Maximum number of segments in flight per QP request. */
114 static void hfi1_init_trdma_req(struct rvt_qp *qp,
116 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
118 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
119 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
120 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
121 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
123 static int make_tid_rdma_ack(struct rvt_qp *qp,
126 static void hfi1_do_tid_send(struct rvt_qp *qp);
[all …]
qp.h
23 * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
24 * HFI1_S_WAIT_TID_SPACE - a QP is waiting for TID resource
49 static inline int hfi1_send_ok(struct rvt_qp *qp) in hfi1_send_ok() argument
51 struct hfi1_qp_priv *priv = qp->priv; in hfi1_send_ok()
53 return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) && in hfi1_send_ok()
55 (qp->s_flags & RVT_S_RESP_PENDING) || in hfi1_send_ok()
56 !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); in hfi1_send_ok()
60 * free_ahg - clear ahg from QP
62 static inline void clear_ahg(struct rvt_qp *qp) in clear_ahg() argument
64 struct hfi1_qp_priv *priv = qp->priv; in clear_ahg()
[all …]
/linux/drivers/infiniband/sw/rdmavt/
qp.c
15 #include "qp.h"
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
283 * init_qpn_table - initialize the QP number table for a device
340 * free_qpn_table - free the QP number table for a device
352 * rvt_driver_qp_init - Init driver qp resources
366 * If driver is not doing any QP allocation then make sure it is in rvt_driver_qp_init()
367 * providing the necessary QP functions. in rvt_driver_qp_init()
416 * rvt_free_qp_cb - callback function to reset a qp
417 * @qp: the qp to reset
420 * This function resets the qp and removes it from the
[all …]
/linux/drivers/ntb/
ntb_transport.c
120 struct ntb_transport_qp *qp; member
142 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
468 struct ntb_transport_qp *qp; in debugfs_read() local
472 qp = filp->private_data; in debugfs_read()
474 if (!qp || !qp->link_is_up) in debugfs_read()
485 "\nNTB QP stats:\n\n"); in debugfs_read()
[all …]
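
The QP_TO_MW() macro in the hit above is the whole placement policy: transport queue pairs are spread across the NTB device's memory windows by modulo, so QPs share windows round-robin whenever there are more QPs than windows. Restated as a plain function for clarity (a sketch, not the driver's code):

    /* Map a transport queue pair to one of mw_count memory windows. */
    static unsigned int qp_to_mw(unsigned int qp_num, unsigned int mw_count)
    {
            return qp_num % mw_count;
    }

With two windows, QPs 0, 2, 4, ... land in window 0 and QPs 1, 3, 5, ... in window 1.
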
/linux/drivers/net/ethernet/qlogic/qed/
qed_roce.c
74 /* when destroying a_RoCE QP the control is returned to the user after in qed_roce_stop()
76 * We delay for a short while if an async destroy QP is still expected. in qed_roce_stop()
96 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, in qed_rdma_copy_gids() argument
101 if (qp->roce_mode == ROCE_V2_IPV4) { in qed_rdma_copy_gids()
107 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); in qed_rdma_copy_gids()
108 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); in qed_rdma_copy_gids()
111 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { in qed_rdma_copy_gids()
112 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); in qed_rdma_copy_gids()
113 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); in qed_rdma_copy_gids()
164 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); in qed_roce_alloc_cid()
[all …]
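
qed_rdma_copy_gids() above special-cases RoCEv2 over IPv4: only the last GID dword carries the address, while every other mode copies all four dwords with a byte-swap to little endian. A user-space sketch of that branch, with htole32() standing in for cpu_to_le32() and the struct fields flattened into plain parameters:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    #define GID_DWORDS 4

    /* Sketch: copy a source GID into a little-endian destination. */
    static void copy_gid(uint32_t *dst, const uint32_t *sgid,
                         int is_roce_v2_ipv4, uint32_t ipv4_addr)
    {
            if (is_roce_v2_ipv4) {
                    /* IPv4-mapped GID: the address lives in the last dword */
                    memset(dst, 0, GID_DWORDS * sizeof(*dst));
                    dst[3] = htole32(ipv4_addr);
            } else {
                    for (int i = 0; i < GID_DWORDS; i++)
                            dst[i] = htole32(sgid[i]);
            }
    }
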
/linux/drivers/infiniband/sw/siw/
siw_qp.c
96 struct siw_qp *qp; in siw_qp_llp_data_ready() local
105 qp = sk_to_qp(sk); in siw_qp_llp_data_ready()
107 if (likely(!qp->rx_stream.rx_suspend && in siw_qp_llp_data_ready()
108 down_read_trylock(&qp->state_lock))) { in siw_qp_llp_data_ready()
109 read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 }; in siw_qp_llp_data_ready()
111 if (likely(qp->attrs.state == SIW_QP_STATE_RTS)) in siw_qp_llp_data_ready()
120 up_read(&qp->state_lock); in siw_qp_llp_data_ready()
122 siw_dbg_qp(qp, "unable to process RX, suspend: %d\n", in siw_qp_llp_data_ready()
123 qp->rx_stream.rx_suspend); in siw_qp_llp_data_ready()
129 void siw_qp_llp_close(struct siw_qp *qp) in siw_qp_llp_close() argument
[all …]
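
siw_qp_llp_data_ready() runs in socket-callback context where blocking is not allowed, so it takes the QP state lock with down_read_trylock() and simply skips processing when the lock is contended or receive is suspended; the queued data is handled later. A pthreads sketch of the same non-blocking pattern (the names and states here are stand-ins, not siw's types):

    #include <pthread.h>
    #include <stdbool.h>

    enum qp_state { QP_STATE_IDLE, QP_STATE_RTS, QP_STATE_ERROR };

    struct qp {
            pthread_rwlock_t state_lock;
            enum qp_state state;
            bool rx_suspend;
    };

    /* Called when new data arrives; must never block. */
    static void data_ready(struct qp *qp)
    {
            if (!qp->rx_suspend &&
                pthread_rwlock_tryrdlock(&qp->state_lock) == 0) {
                    if (qp->state == QP_STATE_RTS)
                            ; /* read and process the inbound stream here */
                    pthread_rwlock_unlock(&qp->state_lock);
            }
            /* otherwise leave the data queued; a later pass picks it up */
    }
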
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) in is_sqp() argument
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) in is_qp0() argument
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
208 static void *get_recv_wqe(struct mthca_qp *qp, int n) in get_recv_wqe() argument
210 if (qp->is_direct) in get_recv_wqe()
211 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
213 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
[all …]
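
get_recv_wqe() above computes a WQE address rather than walking a list: WQEs are a power-of-two size, so entry n sits at byte offset n << wqe_shift; a directly mapped queue adds that to one buffer, while an indirect queue first selects the page containing the offset. A sketch of the address math, assuming a 4 KiB page for illustration:

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12 /* 4 KiB pages, for illustration */
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    /* Sketch: locate receive WQE n in a direct or paged queue buffer. */
    static void *get_recv_wqe(void *direct_buf, void **page_list,
                              int is_direct, unsigned int n,
                              unsigned int wqe_shift)
    {
            size_t off = (size_t)n << wqe_shift;

            if (is_direct)
                    return (char *)direct_buf + off;
            return (char *)page_list[off >> PAGE_SHIFT] + (off & (PAGE_SIZE - 1));
    }
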
/linux/drivers/infiniband/hw/irdma/
uk.c
56 * @qp: hw qp ptr
58 static int irdma_nop_1(struct irdma_qp_uk *qp) in irdma_nop_1() argument
65 if (!qp->sq_ring.head) in irdma_nop_1()
68 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); in irdma_nop_1()
69 wqe = qp->sq_base[wqe_idx].elem; in irdma_nop_1()
71 qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA; in irdma_nop_1()
79 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); in irdma_nop_1()
91 * @qp: hw qp ptr
94 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) in irdma_clr_wqes() argument
100 wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; in irdma_clr_wqes()
[all …]
/linux/drivers/infiniband/hw/erdma/
erdma_qp.c
12 void erdma_qp_llp_close(struct erdma_qp *qp) in erdma_qp_llp_close() argument
16 down_write(&qp->state_lock); in erdma_qp_llp_close()
18 switch (qp->attrs.state) { in erdma_qp_llp_close()
24 erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE); in erdma_qp_llp_close()
27 qp->attrs.state = ERDMA_QP_STATE_IDLE; in erdma_qp_llp_close()
33 if (qp->cep) { in erdma_qp_llp_close()
34 erdma_cep_put(qp->cep); in erdma_qp_llp_close()
35 qp->cep = NULL; in erdma_qp_llp_close()
38 up_write(&qp->state_lock); in erdma_qp_llp_close()
43 struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id); in erdma_get_ibqp() local
[all …]
/linux/drivers/infiniband/hw/mlx4/
qp.c
47 #include <linux/mlx4/qp.h>
107 struct mlx4_qp *qp; member
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
138 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
56 struct pvrdma_qp *qp);
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
62 *recv_cq = to_vcq(qp->ibqp.recv_cq); in get_cqs()
101 static void pvrdma_reset_qp(struct pvrdma_qp *qp) in pvrdma_reset_qp() argument
107 get_cqs(qp, &scq, &rcq); in pvrdma_reset_qp()
110 _pvrdma_flush_cqe(qp, scq); in pvrdma_reset_qp()
112 _pvrdma_flush_cqe(qp, rcq); in pvrdma_reset_qp()
120 if (qp->rq.ring) { in pvrdma_reset_qp()
121 atomic_set(&qp->rq.ring->cons_head, 0); in pvrdma_reset_qp()
[all …]
/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
62 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
64 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp) in bnxt_qplib_cancel_phantom_processing() argument
66 qp->sq.condition = false; in bnxt_qplib_cancel_phantom_processing()
67 qp->sq.send_phantom = false; in bnxt_qplib_cancel_phantom_processing()
68 qp->sq.single = false; in bnxt_qplib_cancel_phantom_processing()
72 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) in __bnxt_qplib_add_flush_qp() argument
76 scq = qp->scq; in __bnxt_qplib_add_flush_qp()
77 rcq = qp->rcq; in __bnxt_qplib_add_flush_qp()
79 if (!qp->sq.flushed) { in __bnxt_qplib_add_flush_qp()
81 "FP: Adding to SQ Flush list = %p\n", qp); in __bnxt_qplib_add_flush_qp()
[all …]
/linux/include/rdma/
rdmavt_qp.h
31 * If a packet's QP[23:16] bits match this value, then it is
60 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
61 * RVT_S_BUSY - send tasklet is processing the QP
124 /* Number of bits to pay attention to in the opcode for checking qp type */
127 /* Flags for checking QP state (see ib_rvt_state_ops[]) */
164 * The size of the sg_list is determined when the QP is created and stored
165 * in qp->s_max_sge.
314 * @qpt_support - a bit mask indicating QP type support
331 * which only happens in modify_qp() or changing the QP 'state'.
348 u32 qkey; /* QKEY for this QP (fo
510 struct rvt_qp *qp; global() member
531 rvt_get_swqe_ptr(struct rvt_qp * qp,unsigned n) rvt_get_swqe_ptr() argument
556 rvt_is_user_qp(struct rvt_qp * qp) rvt_is_user_qp() argument
565 rvt_get_qp(struct rvt_qp * qp) rvt_get_qp() argument
574 rvt_put_qp(struct rvt_qp * qp) rvt_put_qp() argument
606 rvt_qp_wqe_reserve(struct rvt_qp * qp,struct rvt_swqe * wqe) rvt_qp_wqe_reserve() argument
627 rvt_qp_wqe_unreserve(struct rvt_qp * qp,int flags) rvt_qp_wqe_unreserve() argument
660 rvt_div_round_up_mtu(struct rvt_qp * qp,u32 len) rvt_div_round_up_mtu() argument
671 rvt_div_mtu(struct rvt_qp * qp,u32 len) rvt_div_mtu() argument
702 struct rvt_qp *qp = NULL; rvt_lookup_qpn() local
723 rvt_mod_retry_timer_ext(struct rvt_qp * qp,u8 shift) rvt_mod_retry_timer_ext() argument
735 rvt_mod_retry_timer(struct rvt_qp * qp) rvt_mod_retry_timer() argument
747 rvt_put_qp_swqe(struct rvt_qp * qp,struct rvt_swqe * wqe) rvt_put_qp_swqe() argument
762 rvt_qp_swqe_incr(struct rvt_qp * qp,u32 val) rvt_qp_swqe_incr() argument
782 rvt_recv_cq(struct rvt_qp * qp,struct ib_wc * wc,bool solicited) rvt_recv_cq() argument
802 rvt_send_cq(struct rvt_qp * qp,struct ib_wc * wc,bool solicited) rvt_send_cq() argument
828 rvt_qp_complete_swqe(struct rvt_qp * qp,struct rvt_swqe * wqe,enum ib_wc_opcode opcode,enum ib_wc_status status) rvt_qp_complete_swqe() argument
879 rvt_add_retry_timer(struct rvt_qp * qp) rvt_add_retry_timer() argument
900 struct rvt_qp *qp; global() member
966 rvt_to_iport(struct rvt_qp * qp) rvt_to_iport() argument
981 rvt_rc_credit_avail(struct rvt_qp * qp,struct rvt_swqe * wqe) rvt_rc_credit_avail() argument
[all …]
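
Two of the inline helpers named in these hits, rvt_div_round_up_mtu() and rvt_div_mtu(), exploit the fact that IB path MTUs are powers of two: division becomes a right shift by the stored log2 of the MTU. A standalone sketch of that arithmetic (the parameters mirror the qp->pmtu and qp->log_pmtu fields):

    #include <stdint.h>

    /* Sketch: segments needed for len bytes, rounding up (pmtu == 1 << log_pmtu). */
    static uint32_t div_round_up_mtu(uint32_t len, uint32_t pmtu, uint8_t log_pmtu)
    {
            return (len + pmtu - 1) >> log_pmtu;
    }

    /* Sketch: whole MTUs contained in len bytes. */
    static uint32_t div_mtu(uint32_t len, uint8_t log_pmtu)
    {
            return len >> log_pmtu;
    }
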
