Searched refs:qp (Results 1 – 25 of 259) sorted by relevance

/linux/drivers/infiniband/sw/rxe/
rxe_qp.c
  151  static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
  153  qp->resp.res_head = 0;
  154  qp->resp.res_tail = 0;
  155  qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
  157  if (!qp->resp.resources)
  163  static void free_rd_atomic_resources(struct rxe_qp *qp)
  165  if (qp->resp.resources) {
  168  for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
  169  struct resp_res *res = &qp->resp.resources[i];
  173  kfree(qp->resp.resources);
  [all …]
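
These rxe hits are the soft-RoCE responder's per-QP resource array for inbound RDMA read/atomic work: kcalloc() a zeroed array, reset the head/tail indices, and free it guardedly on teardown. A minimal user-space sketch of the same allocate/reset/free shape (the struct fields and error value here are illustrative, not the kernel's):

    #include <stdlib.h>

    struct resp_res { int in_use; };            /* stand-in for the real entry */

    struct resp_info {
        unsigned int res_head, res_tail;
        struct resp_res *resources;
    };

    static int alloc_rd_atomic_resources(struct resp_info *resp, unsigned int n)
    {
        resp->res_head = 0;
        resp->res_tail = 0;
        resp->resources = calloc(n, sizeof(*resp->resources)); /* zeroed, like kcalloc() */
        return resp->resources ? 0 : -1;        /* the kernel returns -ENOMEM */
    }

    static void free_rd_atomic_resources(struct resp_info *resp)
    {
        free(resp->resources);                  /* free(NULL) is a no-op */
        resp->resources = NULL;
    }

The kernel's free path also walks the array (the max_dest_rd_atomic loop above) to release per-entry state before kfree(); that walk is elided here.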
rxe_comp.c
  117  struct rxe_qp *qp = timer_container_of(qp, t, retrans_timer);   [in retransmit_timer()]
  120  rxe_dbg_qp(qp, "retransmit timer fired\n");
  122  spin_lock_irqsave(&qp->state_lock, flags);
  123  if (qp->valid) {
  124  qp->comp.timeout = 1;
  125  rxe_sched_task(&qp->send_task);
  127  spin_unlock_irqrestore(&qp->state_lock, flags);
  130  void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
  133  skb_queue_tail(&qp->resp_pkts, skb);
  134  rxe_sched_task(&qp->send_task);
  [all …]
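
retransmit_timer() recovers its rxe_qp from the timer pointer via timer_container_of(), i.e. the container_of idiom: subtract the member's offset within the enclosing struct from the member's address. A self-contained sketch of that idiom (types and names invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int expires; };
    struct qp    { int qpn; struct timer retrans_timer; };

    static void timer_cb(struct timer *t)
    {
        /* recover the qp that embeds this timer */
        struct qp *qp = container_of(t, struct qp, retrans_timer);
        printf("timer fired for qpn %d\n", qp->qpn);
    }

    int main(void)
    {
        struct qp qp = { .qpn = 7 };
        timer_cb(&qp.retrans_timer);
        return 0;
    }

This is what lets a generic timer callback receive only a timer pointer yet reach all of the QP's state.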
rxe_resp.c
   50  void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
   52  skb_queue_tail(&qp->req_pkts, skb);
   53  rxe_sched_task(&qp->recv_task);
   56  static inline enum resp_states get_req(struct rxe_qp *qp,
   61  skb = skb_peek(&qp->req_pkts);
   67  return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
   70  static enum resp_states check_psn(struct rxe_qp *qp,
   73  int diff = psn_compare(pkt->psn, qp->resp.psn);
   74  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   76  switch (qp_type(qp)) {
  [all …]
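
check_psn() orders PSNs with psn_compare(). PSNs are 24-bit sequence numbers that wrap, so ordering must be computed modulo 2^24; a common wrap-safe comparison (a sketch of the technique, not necessarily the kernel helper's exact form) shifts the difference into the top of a signed 32-bit value so the sign gives the order:

    #include <stdint.h>

    /* < 0: a behind b; 0: equal; > 0: a ahead of b, all modulo 2^24 */
    static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
        return (int32_t)((psn_a - psn_b) << 8);
    }

For example psn_compare(0x000001, 0xffffff) is positive, correctly treating 1 as "ahead of" 0xffffff across the wrap.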
rxe_req.c
   13  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
   16  static inline void retry_first_write_send(struct rxe_qp *qp,
   22  int to_send = (wqe->dma.resid > qp->mtu) ?
   23  qp->mtu : wqe->dma.resid;
   25  qp->req.opcode = next_opcode(qp, wqe,
   37  static void req_retry(struct rxe_qp *qp)
   44  struct rxe_queue *q = qp->sq.queue;
   51  qp->req.wqe_index = cons;
   52  qp->req.psn = qp->comp.psn;
   53  qp->req.opcode = -1;
  [all …]
rxe_recv.c
   14  struct rxe_qp *qp)   [in check_type_state()]
   19  if (unlikely(!qp->valid))
   24  switch (qp_type(qp)) {
   42  spin_lock_irqsave(&qp->state_lock, flags);
   44  if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
   45  spin_unlock_irqrestore(&qp->state_lock, flags);
   49  if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
   50  spin_unlock_irqrestore(&qp->state_lock, flags);
   54  spin_unlock_irqrestore(&qp->state_lock, flags);
   76  u32 qpn, struct rxe_qp *qp)   [in check_keys()]
  [all …]
/linux/drivers/infiniband/hw/hfi1/
rc.c
   16  struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
   18  __must_hold(&qp->s_lock)
   24  for (i = qp->r_head_ack_queue; ; i = p) {
   25  if (i == qp->s_tail_ack_queue)
   30  p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
   31  if (p == qp->r_head_ack_queue) {
   35  e = &qp->s_ack_queue[p];
   41  if (p == qp->s_tail_ack_queue &&
   67  static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
   75  u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
  [all …]
qp.c
   24  static void flush_tx_list(struct rvt_qp *qp);
   33  static void qp_pio_drain(struct rvt_qp *qp);
  122  static void flush_tx_list(struct rvt_qp *qp)
  124  struct hfi1_qp_priv *priv = qp->priv;
  130  static void flush_iowait(struct rvt_qp *qp)
  132  struct hfi1_qp_priv *priv = qp->priv;
  142  rvt_put_qp(qp);
  160  int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
  163  struct ib_qp *ibqp = &qp->ibqp;
  173  if (!qp_to_sdma_engine(qp, sc) &&
  [all …]
uc.c
   22  int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
   24  struct hfi1_qp_priv *priv = qp->priv;
   30  u32 pmtu = qp->pmtu;
   33  ps->s_txreq = get_txreq(ps->dev, qp);
   37  if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
   38  if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
   41  if (qp->s_last == READ_ONCE(qp->s_head))
   45  qp->s_flags |= RVT_S_WAIT_DMA;
   48  clear_ahg(qp);
   49  wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  [all …]
tid_rdma.c
  114  static void hfi1_init_trdma_req(struct rvt_qp *qp,
  116  static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
  118  static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
  119  static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
  120  static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
  121  static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
  123  static int make_tid_rdma_ack(struct rvt_qp *qp,
  126  static void hfi1_do_tid_send(struct rvt_qp *qp);
  130  struct rvt_qp *qp, u32 psn, int diff, bool fecn);
  143  static void tid_rdma_schedule_ack(struct rvt_qp *qp)
  [all …]
ruc.c
   31  struct rvt_qp *qp = packet->qp;   [in hfi1_ruc_check_hdr()]
   32  u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
   39  if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
   41  if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
   48  if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
   51  grh = rdma_ah_read_grh(&qp->alt_ah_attr);
   64  hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
   69  if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
   71  rdma_ah_get_port_num(&qp->alt_ah_attr))
   73  spin_lock_irqsave(&qp->s_lock, flags);
  [all …]
qp.h
   49  static inline int hfi1_send_ok(struct rvt_qp *qp)
   51  struct hfi1_qp_priv *priv = qp->priv;
   53  return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
   55  (qp->s_flags & RVT_S_RESP_PENDING) ||
   56  !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
   62  static inline void clear_ahg(struct rvt_qp *qp)
   64  struct hfi1_qp_priv *priv = qp->priv;
   67  qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
   68  if (priv->s_sde && qp->s_ahgidx >= 0)
   69  sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
  [all …]
ud.c
   36  struct rvt_qp *qp;   [in ud_loopback()]
   47  qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
   49  if (!qp) {
   57  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
   58  IB_QPT_UD : qp->ibqp.qp_type;
   61  !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
   69  if (qp->ibqp.qp_num > 1) {
   78  qp->s_pkey_index,
   82  sqp->ibqp.qp_num, qp->ibqp.qp_num,
   93  if (qp->ibqp.qp_num) {
  [all …]
opfn.c
   17  bool (*request)(struct rvt_qp *qp, u64 *data);
   18  bool (*response)(struct rvt_qp *qp, u64 *data);
   19  bool (*reply)(struct rvt_qp *qp, u64 data);
   20  void (*error)(struct rvt_qp *qp);
   34  static void opfn_schedule_conn_request(struct rvt_qp *qp);
   41  static void opfn_conn_request(struct rvt_qp *qp)
   43  struct hfi1_qp_priv *priv = qp->priv;
   51  trace_hfi1_opfn_state_conn_request(qp);
   70  if (!extd || !extd->request || !extd->request(qp, &data)) {
   80  trace_hfi1_opfn_data_conn_request(qp, capcode, data);
  [all …]
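
The opfn.c lines show a per-capability table of callbacks (request/response/reply/error) that opfn_conn_request() dispatches through, guarding against absent or declining handlers (!extd || !extd->request || ...). A minimal sketch of that dispatch-table pattern; the capability, handler, and table here are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct qp;                                  /* opaque; never dereferenced */

    struct capability_ops {
        bool (*request)(struct qp *qp, uint64_t *data);
        void (*error)(struct qp *qp);
    };

    static bool demo_request(struct qp *qp, uint64_t *data)
    {
        (void)qp;
        *data = 0x1;                            /* capability payload to send */
        return true;
    }

    static const struct capability_ops ops_table[] = {
        { .request = demo_request, .error = NULL },
    };

    static void conn_request(struct qp *qp, unsigned int cap)
    {
        const struct capability_ops *extd;
        uint64_t data;

        if (cap >= sizeof(ops_table) / sizeof(ops_table[0]))
            return;
        extd = &ops_table[cap];

        /* mirror the kernel's guard: skip absent or declining handlers */
        if (!extd->request || !extd->request(qp, &data)) {
            if (extd->error)
                extd->error(qp);
            return;
        }
        printf("send request, data=0x%llx\n", (unsigned long long)data);
    }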
/linux/drivers/ntb/
ntb_transport.c
  121  struct ntb_transport_qp *qp;
  149  void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
  161  void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
  276  #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
  283  static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
  472  struct ntb_transport_qp *qp;   [in debugfs_read()]
  476  qp = filp->private_data;
  478  if (!qp || !qp->link_is_up)
  491  "rx_bytes - \t%llu\n", qp->rx_bytes);
  493  "rx_pkts - \t%llu\n", qp->rx_pkts);
  [all …]
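
QP_TO_MW() assigns transport queue pairs to NTB memory windows round-robin with a plain modulo. The same mapping as a function:

    /* with mw_count == 2: QPs 0,2,4,... use MW 0; QPs 1,3,5,... use MW 1 */
    static unsigned int qp_to_mw(unsigned int qp_num, unsigned int mw_count)
    {
        return qp_num % mw_count;
    }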
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
  196  static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
  198  return qp->qpn >= dev->qp_table.sqp_start &&
  199  qp->qpn <= dev->qp_table.sqp_start + 3;
  202  static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
  204  return qp->qpn >= dev->qp_table.sqp_start &&
  205  qp->qpn <= dev->qp_table.sqp_start + 1;
  208  static void *get_recv_wqe(struct mthca_qp *qp, int n)
  210  if (qp->is_direct)
  211  return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
  213  return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
  [all …]
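
get_recv_wqe() turns a WQE index into an address with a shift, since WQE strides are powers of two, and handles two layouts: one physically contiguous ("direct") buffer, or a list of pages where the byte offset is split into a page index and an intra-page offset. A hedged sketch of both computations, assuming 4 KiB pages:

    #include <stddef.h>

    #define PG_SHIFT 12u                        /* assume 4 KiB pages */
    #define PG_SIZE  (1u << PG_SHIFT)

    static void *wqe_direct(void *base, unsigned int n, unsigned int wqe_shift)
    {
        return (char *)base + ((size_t)n << wqe_shift);
    }

    static void *wqe_paged(void *pages[], unsigned int n, unsigned int wqe_shift)
    {
        size_t off = (size_t)n << wqe_shift;

        return (char *)pages[off >> PG_SHIFT] + (off & (PG_SIZE - 1));
    }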
/linux/drivers/net/ethernet/qlogic/qed/
qed_roce.c
   96  static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
  101  if (qp->roce_mode == ROCE_V2_IPV4) {
  107  src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
  108  dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
  111  for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
  112  src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
  113  dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
  206  static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
  210  if (qp->vlan_id) {
  211  pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
  [all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
   56  struct pvrdma_qp *qp);
   58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
   61  *send_cq = to_vcq(qp->ibqp.send_cq);
   62  *recv_cq = to_vcq(qp->ibqp.recv_cq);
  101  static void pvrdma_reset_qp(struct pvrdma_qp *qp)
  107  get_cqs(qp, &scq, &rcq);
  110  _pvrdma_flush_cqe(qp, scq);
  112  _pvrdma_flush_cqe(qp, rcq);
  120  if (qp->rq.ring) {
  121  atomic_set(&qp->rq.ring->cons_head, 0);
  [all …]
/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
   62  static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
   64  static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
   66  qp->sq.condition = false;
   67  qp->sq.send_phantom = false;
   68  qp->sq.single = false;
   72  static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
   76  scq = qp->scq;
   77  rcq = qp->rcq;
   79  if (!qp->sq.flushed) {
   81  "FP: Adding to SQ Flush list = %p\n", qp);
  [all …]
/linux/drivers/interconnect/qcom/
icc-rpm.c
   56  struct qcom_icc_provider *qp = to_qcom_provider(provider);   [in qcom_icc_set_qnoc_qos()]
   61  rc = regmap_update_bits(qp->regmap,
   62  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
   68  return regmap_update_bits(qp->regmap,
   69  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
   74  static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
   93  return regmap_update_bits(qp->regmap,
   94  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
  100  struct qcom_icc_provider *qp;   [in qcom_icc_set_bimc_qos()]
  109  qp = to_qcom_provider(provider);
  [all …]
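
Both interconnect drivers funnel QoS programming through regmap_update_bits(), a read-modify-write that changes only the masked bits and preserves the rest of the register. A toy sketch of that semantic over an in-memory "register file" (the real regmap does this over MMIO or a bus, with locking and caching):

    #include <stdint.h>

    static uint32_t regs[64];                   /* toy register file */

    /* change only the bits covered by mask; leave the rest untouched */
    static void update_bits(unsigned int reg, uint32_t mask, uint32_t val)
    {
        regs[reg] = (regs[reg] & ~mask) | (val & mask);
    }

Writing one QoS field this way never disturbs neighbouring fields in the same register, which is why every setter above goes through it.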
icc-rpmh.c
   21  #define QOSGEN_MAINCTL_LO(p, qp) (0x8 + (p->port_offsets[qp]))
   31  static void qcom_icc_set_qos(struct qcom_icc_provider *qp,
   38  regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
   42  regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
   46  regmap_update_bits(qp->regmap, QOSGEN_MAINCTL_LO(qos, port),
   60  struct qcom_icc_provider *qp;   [in qcom_icc_pre_aggregate()]
   63  qp = to_qcom_provider(node->provider);
   71  qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
  123  struct qcom_icc_provider *qp;   [in qcom_icc_set()]
  131  qp = to_qcom_provider(node->provider);
  [all …]
/linux/include/rdma/
rdmavt_qp.h
  511  struct rvt_qp *qp;
  532  static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
  535  return (struct rvt_swqe *)((char *)qp->s_wq +
  537  qp->s_max_sge *
  557  static inline bool rvt_is_user_qp(struct rvt_qp *qp)
  559  return !!qp->pid;
  566  static inline void rvt_get_qp(struct rvt_qp *qp)
  568  atomic_inc(&qp->refcount);
  575  static inline void rvt_put_qp(struct rvt_qp *qp)
  577  if (qp && atomic_dec_and_test(&qp->refcount))
  [all …]
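
rvt_get_qp()/rvt_put_qp() are a standard atomic reference-count pair: get increments; put decrements, and only the caller that drops the count to zero performs the release. A self-contained C11 sketch of the pattern (the release action here is just free(); the kernel version does its own teardown/wakeup):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;                    /* starts at 1 for the creator */
    };

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add_explicit(&o->refcount, 1, memory_order_relaxed);
    }

    static void obj_put(struct obj *o)
    {
        /* fetch_sub returns the prior value: 1 means this was the last ref */
        if (o && atomic_fetch_sub_explicit(&o->refcount, 1,
                                           memory_order_acq_rel) == 1)
            free(o);                            /* assumes a malloc()'d object */
    }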
/linux/drivers/infiniband/hw/irdma/
uk.c
   58  static int irdma_nop_1(struct irdma_qp_uk *qp)
   65  if (!qp->sq_ring.head)
   68  wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
   69  wqe = qp->sq_base[wqe_idx].elem;
   71  qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
   79  FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
   94  void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
  100  wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
  101  sq = qp->sq_base + wqe_idx;
  103  memset(sq, qp->swqe_polarity ? 0 : 0xFF,
  [all …]
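
irdma_nop_1() stamps the WQE's valid bit from qp->swqe_polarity, and irdma_clr_wqes() pre-fills slots ahead of the producer with the opposite pattern (0 or 0xFF). That is the phase/polarity-bit technique for rings: rather than zeroing consumed entries, the expected valid-bit value flips on every wraparound, so the consumer can tell fresh descriptors from stale ones. A hedged sketch:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint32_t head;
        uint32_t size;          /* assumed to be a power of two */
        bool polarity;          /* valid-bit value expected this lap */
    };

    static uint32_t ring_advance(struct ring *r)
    {
        uint32_t idx = r->head;

        r->head = (r->head + 1) & (r->size - 1);
        if (r->head == 0)
            r->polarity = !r->polarity;  /* wrapped: flip the expected phase */
        return idx;
    }

    /* a descriptor is fresh iff its valid bit matches the current polarity */
    static bool entry_is_fresh(bool valid_bit, const struct ring *r)
    {
        return valid_bit == r->polarity;
    }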
/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
   58  void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
   64  dev->gsi_qp = qp;
   76  struct qedr_qp *qp = dev->gsi_qp;   [in qedr_ll2_complete_tx_packet()]
   81  dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
   88  spin_lock_irqsave(&qp->q_lock, flags);
   89  qedr_inc_sw_gsi_cons(&qp->sq);
   90  spin_unlock_irqrestore(&qp->q_lock, flags);
  101  struct qedr_qp *qp = dev->gsi_qp;   [in qedr_ll2_complete_rx_packet()]
  104  spin_lock_irqsave(&qp->q_lock, flags);
  106  qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
  [all …]
/linux/drivers/infiniband/sw/rdmavt/
rc.c
   52  __be32 rvt_compute_aeth(struct rvt_qp *qp)
   54  u32 aeth = qp->r_msn & IB_MSN_MASK;
   56  if (qp->ibqp.srq) {
   68  credits = READ_ONCE(qp->r_rq.kwq->count);
   71  if (qp->ip) {
   72  head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
   73  tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
   75  head = READ_ONCE(qp->r_rq.kwq->head);
   76  tail = READ_ONCE(qp->r_rq.kwq->tail);
   78  if (head >= qp->r_rq.size)
  [all …]
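
rvt_compute_aeth() derives the AETH credit field from receive-queue occupancy, reading head and tail (from user-mapped or kernel-side queues) and correcting for wrap. The underlying count is the usual circular-buffer arithmetic, sketched below; the kernel additionally clamps out-of-range indices and encodes the result into the AETH credit format:

    /* occupancy of a circular queue given producer/consumer indices */
    static unsigned int rq_count(unsigned int head, unsigned int tail,
                                 unsigned int size)
    {
        int count = (int)head - (int)tail;

        if (count < 0)
            count += size;                  /* head has wrapped past the end */
        return (unsigned int)count;
    }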
/linux/drivers/scsi/bnx2i/
bnx2i_hwi.c
  153  cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;   [in bnx2i_arm_cq_event_coalescing()]
  170  cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
  171  if (cq_index > ep->qp.cqe_size * 2)
  172  cq_index -= ep->qp.cqe_size * 2;
  195  if (!bnx2i_conn->ep->qp.rqe_left)   [in bnx2i_get_rq_buf()]
  198  bnx2i_conn->ep->qp.rqe_left--;
  199  memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
  200  if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
  201  bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
  202  bnx2i_conn->ep->qp.rq_cons_idx = 0;
  [all …]
