/linux/drivers/infiniband/sw/rxe/

rxe_qp.c
    92: rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);   [in rxe_qp_chk_init()]
   103: static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
   105: qp->resp.res_head = 0;
   106: qp->resp.res_tail = 0;
   107: qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
   109: if (!qp->resp.resources)
   115: static void free_rd_atomic_resources(struct rxe_qp *qp)
   117: if (qp->resp.resources) {
   120: for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
   121: struct resp_res *res = &qp->resp.resources[i];
   [all …]
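
Note on the pattern: alloc_rd_atomic_resources()/free_rd_atomic_resources() above implement a fixed-size responder resource pool: a zeroed array sized by max_dest_rd_atomic, walked by head/tail indices. A minimal user-space sketch of the same alloc/free shape (struct and function names here are illustrative stand-ins, not the kernel's):

```c
#include <stdlib.h>

/* Illustrative stand-in for the kernel's struct resp_res. */
struct res_slot {
	int in_use;
	/* per-request replay state would live here */
};

struct res_pool {
	struct res_slot *slots;
	unsigned int n;			/* pool size (max outstanding read/atomic) */
	unsigned int head, tail;	/* ring indices into slots[] */
};

/* Mirrors alloc_rd_atomic_resources(): reset indices, allocate zeroed array. */
static int pool_alloc(struct res_pool *p, unsigned int n)
{
	p->head = p->tail = 0;
	p->n = n;
	p->slots = calloc(n, sizeof(*p->slots));	/* kcalloc() analogue */
	return p->slots ? 0 : -1;
}

/* Mirrors free_rd_atomic_resources(): per-slot cleanup, then the array. */
static void pool_free(struct res_pool *p)
{
	if (p->slots) {
		/* the kernel walks all max_dest_rd_atomic slots here */
		free(p->slots);
		p->slots = NULL;
	}
}
```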
|
rxe_resp.c
    50: void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
    52: skb_queue_tail(&qp->req_pkts, skb);
    53: rxe_sched_task(&qp->recv_task);
    56: static inline enum resp_states get_req(struct rxe_qp *qp,
    61: skb = skb_peek(&qp->req_pkts);
    67: return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
    70: static enum resp_states check_psn(struct rxe_qp *qp,
    73: int diff = psn_compare(pkt->psn, qp->resp.psn);
    74: struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
    76: switch (qp_type(qp)) {
    [all …]
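
check_psn() at line 70 orders packets by their 24-bit PSN, so the comparison must survive wraparound at 2^24. rxe's psn_compare() (called at line 73) does this by shifting the 24-bit difference into the top byte of a signed 32-bit value. A self-contained sketch of that technique, assuming the usual serial-number semantics:

```c
#include <assert.h>
#include <stdint.h>

/* 24-bit PSN comparison in the style of rxe's psn_compare(): returns
 * <0, 0, >0 as psn_a is before, equal to, or after psn_b.  Shifting the
 * 24-bit difference left by 8 puts its sign bit into bit 31, so the
 * result stays correct across the 2^24 wraparound whenever the two
 * values are within half the sequence space of each other. */
static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	assert(psn_compare(5, 3) > 0);
	assert(psn_compare(3, 5) < 0);
	/* wraparound: PSN 0x000001 is "after" PSN 0xfffffe */
	assert(psn_compare(0x000001, 0xfffffe) > 0);
	return 0;
}
```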
|
rxe_req.c
    13: static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
    16: static inline void retry_first_write_send(struct rxe_qp *qp,
    22: int to_send = (wqe->dma.resid > qp->mtu) ?
    23:     qp->mtu : wqe->dma.resid;
    25: qp->req.opcode = next_opcode(qp, wqe,
    37: static void req_retry(struct rxe_qp *qp)
    44: struct rxe_queue *q = qp->sq.queue;
    51: qp->req.wqe_index = cons;
    52: qp->req.psn = qp->comp.psn;
    53: qp->req.opcode = -1;
    [all …]
|
rxe_comp.c
   117: struct rxe_qp *qp = timer_container_of(qp, t, retrans_timer);   [in retransmit_timer()]
   120: rxe_dbg_qp(qp, "retransmit timer fired\n");
   122: spin_lock_irqsave(&qp->state_lock, flags);
   123: if (qp->valid) {
   124: qp->comp.timeout = 1;
   125: rxe_sched_task(&qp->send_task);
   127: spin_unlock_irqrestore(&qp->state_lock, flags);
   130: void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
   133: skb_queue_tail(&qp->resp_pkts, skb);
   134: rxe_sched_task(&qp->send_task);
   [all …]
|
rxe_recv.c
    12: /* check that QP matches packet opcode type and is in a valid state */
    14: struct rxe_qp *qp)   [in check_type_state()]
    19: if (unlikely(!qp->valid))
    24: switch (qp_type(qp)) {
    42: spin_lock_irqsave(&qp->state_lock, flags);
    44: if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
    45: spin_unlock_irqrestore(&qp->state_lock, flags);
    49: if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
    50: spin_unlock_irqrestore(&qp->state_lock, flags);
    54: spin_unlock_irqrestore(&qp->state_lock, flags);
    [all …]
|
/linux/drivers/infiniband/hw/hfi1/

qp.c
    16: #include "qp.h"
    22: MODULE_PARM_DESC(qp_table_size, "QP table size");
    24: static void flush_tx_list(struct rvt_qp *qp);
    33: static void qp_pio_drain(struct rvt_qp *qp);
   122: static void flush_tx_list(struct rvt_qp *qp)
   124: struct hfi1_qp_priv *priv = qp->priv;
   130: static void flush_iowait(struct rvt_qp *qp)
   132: struct hfi1_qp_priv *priv = qp->priv;
   142: rvt_put_qp(qp);
   160: int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
   [all …]
|
rc.c
    11: #include "qp.h"
    16: struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
    18: __must_hold(&qp->s_lock)
    24: for (i = qp->r_head_ack_queue; ; i = p) {
    25: if (i == qp->s_tail_ack_queue)
    30: p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
    31: if (p == qp->r_head_ack_queue) {
    35: e = &qp->s_ack_queue[p];
    41: if (p == qp->s_tail_ack_queue &&
    58: * @dev: the device for this QP
    [all …]
|
uc.c
     8: #include "qp.h"
    15: * @qp: a pointer to the QP
    22: int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
    24: struct hfi1_qp_priv *priv = qp->priv;
    30: u32 pmtu = qp->pmtu;
    33: ps->s_txreq = get_txreq(ps->dev, qp);
    37: if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
    38: if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
    41: if (qp->s_last == READ_ONCE(qp->s_head))
    45: qp->s_flags |= RVT_S_WAIT_DMA;
    [all …]
|
ruc.c
    10: #include "qp.h"
    23: * This should be called with the QP r_lock held.
    31: struct rvt_qp *qp = packet->qp;   [in hfi1_ruc_check_hdr()]
    32: u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
    39: if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
    41: if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
    48: if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
    51: grh = rdma_ah_read_grh(&qp->alt_ah_attr);
    64: hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
    69: if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
    [all …]
|
tid_rdma.c
     8: #include "qp.h"
    58: /* Maximum number of segments in flight per QP request. */
   114: static void hfi1_init_trdma_req(struct rvt_qp *qp,
   116: static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
   118: static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
   119: static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
   120: static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
   121: static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
   123: static int make_tid_rdma_ack(struct rvt_qp *qp,
   126: static void hfi1_do_tid_send(struct rvt_qp *qp);
   [all …]
|
qp.h
    23: * HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
    24: * HFI1_S_WAIT_TID_SPACE - a QP is waiting for TID resource
    49: static inline int hfi1_send_ok(struct rvt_qp *qp)
    51: struct hfi1_qp_priv *priv = qp->priv;
    53: return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
    55:     (qp->s_flags & RVT_S_RESP_PENDING) ||
    56:     !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
    60: * free_ahg - clear ahg from QP
    62: static inline void clear_ahg(struct rvt_qp *qp)
    64: struct hfi1_qp_priv *priv = qp->priv;
    [all …]
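
hfi1_send_ok() at lines 49-56 gates sending on the QP's flag word: not busy and not waiting on I/O, and either a response is pending or no wait-on-send flag is set. A compilable sketch of the same boolean structure; the flag values below are illustrative, not the kernel's RVT_S_*/HFI1_S_* definitions, and the excerpt elides one clause of the real condition:

```c
#include <stdbool.h>

/* Illustrative send-state flags (stand-ins for RVT_S_*/HFI1_S_*). */
enum {
	S_BUSY          = 1 << 0,
	S_ANY_WAIT_IO   = 1 << 1,
	S_RESP_PENDING  = 1 << 2,
	S_ANY_WAIT_SEND = 1 << 3,
};

/* Send-gate test with the shape visible in hfi1_send_ok(). */
static bool send_ok(unsigned int s_flags)
{
	return !(s_flags & (S_BUSY | S_ANY_WAIT_IO)) &&
	       ((s_flags & S_RESP_PENDING) ||
		!(s_flags & S_ANY_WAIT_SEND));
}
```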
|
opfn.c
     8: #include "qp.h"
    17: bool (*request)(struct rvt_qp *qp, u64 *data);
    18: bool (*response)(struct rvt_qp *qp, u64 *data);
    19: bool (*reply)(struct rvt_qp *qp, u64 data);
    20: void (*error)(struct rvt_qp *qp);
    34: static void opfn_schedule_conn_request(struct rvt_qp *qp);
    41: static void opfn_conn_request(struct rvt_qp *qp)
    43: struct hfi1_qp_priv *priv = qp->priv;
    51: trace_hfi1_opfn_state_conn_request(qp);
    70: if (!extd || !extd->request || !extd->request(qp, &data)) {
    [all …]
|
ud.c
    13: #include "qp.h"
    23: * @sqp: the sending QP
    36: struct rvt_qp *qp;   [in ud_loopback()]
    47: qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
    49: if (!qp) {
    57: dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
    58:     IB_QPT_UD : qp->ibqp.qp_type;
    61: !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
    69: if (qp->ibqp.qp_num > 1) {
    78: qp->s_pkey_index,
    [all …]
|
/linux/drivers/ntb/

ntb_transport.c
   121: struct ntb_transport_qp *qp;   [member]
   143: u8 qp_num;	/* Only 64 QP's are allowed. 0-63 */
   149: void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
   161: void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
   276: #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
   283: static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
   472: struct ntb_transport_qp *qp;   [in debugfs_read()]
   476: qp = filp->private_data;
   478: if (!qp || !qp->link_is_up)
   489: "\nNTB QP stats:\n\n");
   [all …]
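
QP_TO_MW() at line 276 stripes transport queue pairs across NTB memory windows by simple modulo. A trivial sketch plus a usage loop (qp_to_mw() is an illustrative name for the macro's logic):

```c
#include <stdio.h>

/* Round-robin striping of queue pairs onto memory windows, as in
 * ntb_transport's QP_TO_MW(): a QP's window is its number modulo the
 * window count. */
static unsigned int qp_to_mw(unsigned int qp_num, unsigned int mw_count)
{
	return qp_num % mw_count;
}

int main(void)
{
	/* With 2 windows, QPs 0,2,... share MW 0 and QPs 1,3,... share MW 1. */
	for (unsigned int qp = 0; qp < 4; qp++)
		printf("QP %u -> MW %u\n", qp, qp_to_mw(qp, 2));
	return 0;
}
```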
|
/linux/drivers/net/ethernet/qlogic/qed/

qed_roce.c
    74: /* when destroying a_RoCE QP the control is returned to the user after   [in qed_roce_stop()]
    76: * We delay for a short while if an async destroy QP is still expected.
    96: static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
   101: if (qp->roce_mode == ROCE_V2_IPV4) {
   107: src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
   108: dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
   111: for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
   112: src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
   113: dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
   164: DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");   [in qed_roce_alloc_cid()]
   [all …]
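
The ROCE_V2_IPV4 branch above writes the IPv4 address into only the last dword of the 16-byte GID, consistent with the IPv4-mapped form ::ffff:a.b.c.d used for RoCE v2 over IPv4. A user-space sketch of building such a GID; the excerpt shows only the dword-3 assignment, so the ::ffff marker bytes below are an assumption based on the standard IPv4-mapped-IPv6 layout:

```c
#include <stdint.h>
#include <string.h>

/* Build an IPv4-mapped GID (::ffff:a.b.c.d) for RoCE v2 over IPv4.
 * ipv4_be is the address in network byte order. */
static void ipv4_to_gid(uint32_t ipv4_be, uint8_t gid[16])
{
	memset(gid, 0, 16);
	gid[10] = 0xff;			/* ::ffff: marker (assumed layout) */
	gid[11] = 0xff;
	memcpy(&gid[12], &ipv4_be, 4);	/* IPv4 address in the last dword */
}
```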
|
/linux/drivers/infiniband/hw/mthca/

mthca_qp.c
   196: static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
   198: return qp->qpn >= dev->qp_table.sqp_start &&
   199:     qp->qpn <= dev->qp_table.sqp_start + 3;
   202: static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
   204: return qp->qpn >= dev->qp_table.sqp_start &&
   205:     qp->qpn <= dev->qp_table.sqp_start + 1;
   208: static void *get_recv_wqe(struct mthca_qp *qp, int n)
   210: if (qp->is_direct)
   211:     return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
   213: return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
   [all …]
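
get_recv_wqe() shows the classic two-level WQE lookup: a direct buffer is indexed by n << wqe_shift, while a paged queue splits that byte offset into a page index (high bits) and an in-page offset (low bits). A user-space sketch of the paged case; the page shift is fixed at 12 here for illustration, and 'pages' stands in for the kernel's queue.page_list:

```c
#include <stdint.h>

#define EX_PAGE_SHIFT 12			/* illustrative; kernel uses PAGE_SHIFT */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

/* Locate WQE n in a paged queue, in the style of mthca's get_recv_wqe():
 * each WQE is (1 << wqe_shift) bytes, so WQE n starts at byte offset
 * n << wqe_shift; the high bits of the offset select the page and the
 * low bits the offset within that page. */
static void *get_wqe_paged(void **pages, unsigned int n, unsigned int wqe_shift)
{
	unsigned long off = (unsigned long)n << wqe_shift;

	return (char *)pages[off >> EX_PAGE_SHIFT] + (off & (EX_PAGE_SIZE - 1));
}
```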
|
/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_qp.c
    56: struct pvrdma_qp *qp);
    58: static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
    61: *send_cq = to_vcq(qp->ibqp.send_cq);
    62: *recv_cq = to_vcq(qp->ibqp.recv_cq);
   101: static void pvrdma_reset_qp(struct pvrdma_qp *qp)
   107: get_cqs(qp, &scq, &rcq);
   110: _pvrdma_flush_cqe(qp, scq);
   112: _pvrdma_flush_cqe(qp, rcq);
   120: if (qp->rq.ring) {
   121: atomic_set(&qp->rq.ring->cons_head, 0);
   [all …]
|
/linux/include/rdma/

rdmavt_qp.h
    31: * If a packet's QP[23:16] bits match this value, then it is
    60: * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
    61: * RVT_S_BUSY - send tasklet is processing the QP
   124: /* Number of bits to pay attention to in the opcode for checking qp type */
   127: /* Flags for checking QP state (see ib_rvt_state_ops[]) */
   164: * The size of the sg_list is determined when the QP is created and stored
   165: * in qp->s_max_sge.
   314: * @qpt_support - a bit mask indicating QP type support
   331: * which only happens in modify_qp() or changing the QP 'state'.
   348: u32 qkey; /* QKEY for this QP (fo
   510: struct rvt_qp *qp;   [member]
   531: rvt_get_swqe_ptr(struct rvt_qp *qp, unsigned n)
   556: rvt_is_user_qp(struct rvt_qp *qp)
   565: rvt_get_qp(struct rvt_qp *qp)
   574: rvt_put_qp(struct rvt_qp *qp)
   606: rvt_qp_wqe_reserve(struct rvt_qp *qp, struct rvt_swqe *wqe)
   627: rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
   660: rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
   671: rvt_div_mtu(struct rvt_qp *qp, u32 len)
   702: struct rvt_qp *qp = NULL;   [in rvt_lookup_qpn()]
   723: rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
   735: rvt_mod_retry_timer(struct rvt_qp *qp)
   747: rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
   762: rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
   782: rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc, bool solicited)
   802: rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc, bool solicited)
   828: rvt_qp_complete_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_opcode opcode, enum ib_wc_status status)
   879: rvt_add_retry_timer(struct rvt_qp *qp)
   900: struct rvt_qp *qp;   [member]
   966: rvt_to_iport(struct rvt_qp *qp)
   981: rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
   [all …]
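
rvt_get_qp()/rvt_put_qp() (lines 565/574) are the QP's reference-count hooks: users take a reference before touching the QP and drop it afterwards, and the final put signals the destroy path. A C11 sketch of that get/put shape; in rdmavt the final put wakes a waitqueue, for which the atomic flag below is a user-space stand-in:

```c
#include <stdatomic.h>
#include <stdbool.h>

struct refobj {
	atomic_int refcount;
	atomic_bool last_ref_dropped;	/* stand-in for the kernel's wakeup */
};

/* Take a reference, as rvt_get_qp() does. */
static void obj_get(struct refobj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

/* Drop a reference; the thread that drops the last one signals the
 * destroy path, mirroring rvt_put_qp()'s dec-and-test. */
static void obj_put(struct refobj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		atomic_store(&o->last_ref_dropped, true);
}
```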
/linux/drivers/infiniband/hw/mlx5/

qp.c
    44: #include "qp.h"
    80: struct mlx5_core_qp *qp;   [member]
   146: static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
   155: wqe_index = wqe_index & qp->sq.fbc.sz_m1;
   158: p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
   174: wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
   175: p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
   181: static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
   184: struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
   186: struct mlx5_ib_wq *wq = &qp->sq;
   [all …]
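
mlx5_ib_read_kernel_wqe_sq() normalizes and advances wqe_index with "& qp->sq.fbc.sz_m1" (lines 155 and 174), the standard power-of-two ring trick. A sketch of just that indexing:

```c
/* Power-of-two ring indexing as in the mlx5 snippet above: with the
 * queue size a power of two and sz_m1 = size - 1, "& sz_m1" replaces
 * the modulo, both for normalizing an index and for advancing it with
 * wraparound. */
static unsigned int ring_wrap(unsigned int idx, unsigned int sz_m1)
{
	return idx & sz_m1;
}

static unsigned int ring_next(unsigned int idx, unsigned int sz_m1)
{
	return (idx + 1) & sz_m1;
}
```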
|
/linux/drivers/interconnect/qcom/

icc-rpm.c
    56: struct qcom_icc_provider *qp = to_qcom_provider(provider);   [in qcom_icc_set_qnoc_qos()]
    61: rc = regmap_update_bits(qp->regmap,
    62:     qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
    68: return regmap_update_bits(qp->regmap,
    69:     qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
    74: static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
    93: return regmap_update_bits(qp->regmap,
    94:     qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
   100: struct qcom_icc_provider *qp;   [in qcom_icc_set_bimc_qos()]
   109: qp = to_qcom_provider(provider);
   [all …]
|
icc-rpmh.c
    21: #define QOSGEN_MAINCTL_LO(p, qp) (0x8 + (p->port_offsets[qp]))
    28: * @qp: qcom icc provider to which @node belongs
    31: static void qcom_icc_set_qos(struct qcom_icc_provider *qp,
    38: regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
    42: regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
    46: regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
    60: struct qcom_icc_provider *qp;   [in qcom_icc_pre_aggregate()]
    63: qp = to_qcom_provider(node->provider);
    71: qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
   123: struct qcom_icc_provider *qp;   [in qcom_icc_set()]
   [all …]
|
/linux/net/ipv4/

ip_fragment.c
    78: static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
    85: struct ipq *qp = container_of(q, struct ipq, q);   [in ip4_frag_init()]
    91: qp->ecn = 0;
    99: qp->peer = p;
   104: struct ipq *qp;   [in ip4_frag_free()]
   106: qp = container_of(q, struct ipq, q);
   107: if (qp->peer)
   108:     inet_putpeer(qp->peer);
   130: struct ipq *qp;   [in ip_expire()]
   133: qp   [in ip_expire()]
   218: ip_frag_too_far(struct ipq *qp)
   241: ip_frag_reinit(struct ipq *qp)
   267: ip_frag_queue(struct ipq *qp, struct sk_buff *skb, int *refs)
   398: ip_frag_coalesce_ok(const struct ipq *qp)
   404: ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev, int *refs)
   481: struct ipq *qp;   [in ip_defrag()]
   [all …]
/linux/drivers/infiniband/hw/qedr/

qedr_roce_cm.c
    58: void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
    64: dev->gsi_qp = qp;
    76: struct qedr_qp *qp = dev->gsi_qp;   [in qedr_ll2_complete_tx_packet()]
    81: dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
    88: spin_lock_irqsave(&qp->q_lock, flags);
    89: qedr_inc_sw_gsi_cons(&qp->sq);
    90: spin_unlock_irqrestore(&qp->q_lock, flags);
   101: struct qedr_qp *qp = dev->gsi_qp;   [in qedr_ll2_complete_rx_packet()]
   104: spin_lock_irqsave(&qp->q_lock, flags);
   106: qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
   [all …]
|
/linux/drivers/infiniband/sw/siw/

siw_qp_rx.c
    41: pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",   [in siw_rx_umem()]
    65: pr_warn("siw: [QP %u]: %s, len %d, page %p, rv %d\n",
   112: pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",   [in siw_rx_kva()]
   191: pr_warn("siw: [QP %u]: rresp stag: %08x != %08x\n",   [in siw_rresp_check_ntoh()]
   197: pr_warn("siw: [QP %u]: rresp off: %016llx != %016llx\n",
   205: pr_warn("siw: [QP %u]: rresp len: %d != %d\n",
   244: pr_warn("siw: [QP %u]: write stag: %08x != %08x\n",   [in siw_write_check_ntoh()]
   251: pr_warn("siw: [QP %u]: write off: %016llx != %016llx\n",
   289: pr_warn("siw: [QP %u]: invalid ddp qn %d for send\n",   [in siw_send_check_ntoh()]
   295: pr_warn("siw: [QP %u]: send msn: %u != %u\n",
   [all …]
|
/linux/drivers/infiniband/sw/rdmavt/

rc.c
    48: * @qp: the queue pair to compute the AETH for
    52: __be32 rvt_compute_aeth(struct rvt_qp *qp)
    54: u32 aeth = qp->r_msn & IB_MSN_MASK;
    56: if (qp->ibqp.srq) {
    68: credits = READ_ONCE(qp->r_rq.kwq->count);
    71: if (qp->ip) {
    72: head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
    73: tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
    75: head = READ_ONCE(qp->r_rq.kwq->head);
    76: tail = READ_ONCE(qp->r_rq.kwq->tail);
    [all …]
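
rvt_compute_aeth() folds the MSN and a receive-credit count into the AETH; the credit count is the head-to-tail distance of the receive ring read at lines 72-76, fixed up for wraparound. A sketch of that computation (rq_credits() is an illustrative name):

```c
/* Credit computation in the spirit of rvt_compute_aeth(): the number of
 * posted-but-unconsumed receive WQEs is the head-to-tail distance in the
 * ring, corrected for wraparound at the ring size. */
static unsigned int rq_credits(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	int credits = (int)(head - tail);

	if (credits < 0)
		credits += (int)size;
	return (unsigned int)credits;
}
```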
|