/* xref: /freebsd/contrib/ofed/libmlx4/cq.c (revision 87181516ef48be852d5e5fee53c6e0dbfc62f21e) */
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <string.h>

#include <infiniband/opcode.h>

#include "mlx4.h"
#include "doorbell.h"

enum {
	MLX4_CQ_DOORBELL			= 0x20
};

enum {
	CQ_OK					=  0,
	CQ_EMPTY				= -1,
	CQ_POLL_ERR				= -2
};

#define MLX4_CQ_DB_REQ_NOT_SOL			(1 << 24)
#define MLX4_CQ_DB_REQ_NOT			(2 << 24)

enum {
	MLX4_CQE_VLAN_PRESENT_MASK		= 1 << 29,
	MLX4_CQE_QPN_MASK			= 0xffffff,
};

enum {
	MLX4_CQE_OWNER_MASK			= 0x80,
	MLX4_CQE_IS_SEND_MASK			= 0x40,
	MLX4_CQE_OPCODE_MASK			= 0x1f
};

enum {
	MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX4_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX4_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX4_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX4_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX4_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

struct mlx4_err_cqe {
	uint32_t	vlan_my_qpn;
	uint32_t	reserved1[5];
	uint16_t	wqe_index;
	uint8_t		vendor_err;
	uint8_t		syndrome;
	uint8_t		reserved2[3];
	uint8_t		owner_sr_opcode;
};

static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)
{
	return cq->buf.buf + entry * cq->cqe_size;
}

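/*
 * A CQE belongs to software once its ownership bit matches the wrap
 * parity of the consumer index (bit cqe+1 of n).  With 64-byte CQEs
 * the valid 32 bytes, including the ownership byte, sit in the second
 * half of each entry, hence the tcqe adjustment below.
 */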
static void *get_sw_cqe(struct mlx4_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);
	struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
{
	return get_sw_cqe(cq, cq->cons_index);
}

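/*
 * Map the hardware completion syndrome of an error CQE onto the
 * corresponding ibv_wc_status code, logging local QP operation
 * errors as they usually indicate a driver or application bug.
 */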
static enum ibv_wc_status mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)
		printf(PFX "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32toh(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK,
		       be16toh(cqe->wqe_index), cqe->vendor_err,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		return IBV_WC_LOC_LEN_ERR;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		return IBV_WC_LOC_QP_OP_ERR;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		return IBV_WC_LOC_PROT_ERR;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		return IBV_WC_WR_FLUSH_ERR;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		return IBV_WC_MW_BIND_ERR;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		return IBV_WC_BAD_RESP_ERR;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		return IBV_WC_LOC_ACCESS_ERR;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		return IBV_WC_REM_INV_REQ_ERR;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		return IBV_WC_REM_ACCESS_ERR;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		return IBV_WC_REM_OP_ERR;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		return IBV_WC_RETRY_EXC_ERR;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		return IBV_WC_RNR_RETRY_EXC_ERR;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		return IBV_WC_REM_ABORT_ERR;
	default:
		return IBV_WC_GENERAL_ERR;
	}
}

static inline void handle_good_req(struct ibv_wc *wc, struct mlx4_cqe *cqe)
{
	wc->wc_flags = 0;
	switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
	case MLX4_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IBV_WC_WITH_IMM;
		SWITCH_FALLTHROUGH;
	case MLX4_OPCODE_RDMA_WRITE:
		wc->opcode    = IBV_WC_RDMA_WRITE;
		break;
	case MLX4_OPCODE_SEND_IMM:
		wc->wc_flags |= IBV_WC_WITH_IMM;
		SWITCH_FALLTHROUGH;
	case MLX4_OPCODE_SEND:
	case MLX4_OPCODE_SEND_INVAL:
		wc->opcode    = IBV_WC_SEND;
		break;
	case MLX4_OPCODE_RDMA_READ:
		wc->opcode    = IBV_WC_RDMA_READ;
		wc->byte_len  = be32toh(cqe->byte_cnt);
		break;
	case MLX4_OPCODE_ATOMIC_CS:
		wc->opcode    = IBV_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX4_OPCODE_ATOMIC_FA:
		wc->opcode    = IBV_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX4_OPCODE_LOCAL_INVAL:
		wc->opcode    = IBV_WC_LOCAL_INV;
		break;
	case MLX4_OPCODE_BIND_MW:
		wc->opcode    = IBV_WC_BIND_MW;
		break;
	default:
		/* assume it's a send completion */
		wc->opcode    = IBV_WC_SEND;
		break;
	}
}

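/*
 * Peek the next software-owned CQE and consume it: advance the
 * consumer index, step into the valid half of a 64-byte entry, and
 * place a read barrier so CQE contents are not read before the
 * ownership check has passed.
 */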
static inline int mlx4_get_next_cqe(struct mlx4_cq *cq,
				    struct mlx4_cqe **pcqe)
				    ALWAYS_INLINE;
static inline int mlx4_get_next_cqe(struct mlx4_cq *cq,
				    struct mlx4_cqe **pcqe)
{
	struct mlx4_cqe *cqe;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return CQ_EMPTY;

	if (cq->cqe_size == 64)
		++cqe;

	++cq->cons_index;

	VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe);

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	udma_from_device_barrier();

	*pcqe = cqe;

	return CQ_OK;
}

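/*
 * Decode one CQE.  In eager mode (lazy == 0) the result is written
 * into *wc for ibv_poll_cq(); in lazy mode the CQE is cached on the
 * CQ itself and the read_wc_* accessors below extract fields on
 * demand.  The owning QP (or XRC SRQ) is looked up from the QPN and
 * the matching work request ID is popped from the SQ, RQ or SRQ.
 */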
static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
					struct mlx4_cqe *cqe,
					struct mlx4_qp **cur_qp,
					struct ibv_wc *wc, int lazy)
					ALWAYS_INLINE;
static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
					struct mlx4_cqe *cqe,
					struct mlx4_qp **cur_qp,
					struct ibv_wc *wc, int lazy)
{
	struct mlx4_wq *wq;
	struct mlx4_srq *srq;
	uint32_t qpn;
	uint32_t g_mlpath_rqpn;
	uint64_t *pwr_id;
	uint16_t wqe_index;
	struct mlx4_err_cqe *ecqe;
	struct mlx4_context *mctx;
	int is_error;
	int is_send;
	enum ibv_wc_status *pstatus;

	mctx = to_mctx(cq->ibv_cq.context);
	qpn = be32toh(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
	if (lazy) {
		cq->cqe = cqe;
		cq->flags &= (~MLX4_CQ_FLAGS_RX_CSUM_VALID);
	} else
		wc->qp_num = qpn;

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if ((qpn & MLX4_XRC_QPN_BIT) && !is_send) {
		/*
		 * We do not have to take the XSRQ table lock here,
		 * because CQs will be locked while SRQs are removed
		 * from the table.
		 */
		srq = mlx4_find_xsrq(&mctx->xsrq_table,
				     be32toh(cqe->g_mlpath_rqpn) & MLX4_CQE_QPN_MASK);
		if (!srq)
			return CQ_POLL_ERR;
	} else {
		if (!*cur_qp || (qpn != (*cur_qp)->verbs_qp.qp.qp_num)) {
			/*
			 * We do not have to take the QP table lock here,
			 * because CQs will be locked while QPs are removed
			 * from the table.
			 */
			*cur_qp = mlx4_find_qp(mctx, qpn);
			if (!*cur_qp)
				return CQ_POLL_ERR;
		}
		srq = ((*cur_qp)->verbs_qp.qp.srq) ? to_msrq((*cur_qp)->verbs_qp.qp.srq) : NULL;
	}

	pwr_id = lazy ? &cq->ibv_cq.wr_id : &wc->wr_id;
	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = be16toh(cqe->wqe_index);
		wq->tail += (uint16_t) (wqe_index - (uint16_t) wq->tail);
		*pwr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (srq) {
		wqe_index = be16toh(cqe->wqe_index);
		*pwr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else {
		wq = &(*cur_qp)->rq;
		*pwr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	pstatus = lazy ? &cq->ibv_cq.status : &wc->status;
	if (is_error) {
		ecqe = (struct mlx4_err_cqe *)cqe;
		*pstatus = mlx4_handle_error_cqe(ecqe);
		if (!lazy)
			wc->vendor_err = ecqe->vendor_err;
		return CQ_OK;
	}

	*pstatus = IBV_WC_SUCCESS;
	if (lazy) {
		if (!is_send)
			if ((*cur_qp) && ((*cur_qp)->qp_cap_cache & MLX4_RX_CSUM_VALID))
				cq->flags |= MLX4_CQ_FLAGS_RX_CSUM_VALID;
	} else if (is_send) {
		handle_good_req(wc, cqe);
	} else {
		wc->byte_len = be32toh(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags |= IBV_WC_WITH_INV;
			wc->imm_data = be32toh(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = be16toh(cqe->rlid);
		g_mlpath_rqpn	   = be32toh(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IBV_WC_GRH : 0;
		wc->pkey_index     = be32toh(cqe->immed_rss_invalid) & 0x7f;
		/*
		 * When working with XRC SRQs there is no QP to read the
		 * link layer from, so the IB SL decode is used; RoCE
		 * should be considered. (TBD)
		 */
		if ((*cur_qp) && (*cur_qp)->link_layer == IBV_LINK_LAYER_ETHERNET)
			wc->sl	   = be16toh(cqe->sl_vid) >> 13;
		else
			wc->sl	   = be16toh(cqe->sl_vid) >> 12;

		if ((*cur_qp) && ((*cur_qp)->qp_cap_cache & MLX4_RX_CSUM_VALID)) {
			wc->wc_flags |= ((cqe->status & htobe32(MLX4_CQE_STATUS_IPV4_CSUM_OK)) ==
				 htobe32(MLX4_CQE_STATUS_IPV4_CSUM_OK)) <<
				IBV_WC_IP_CSUM_OK_SHIFT;
		}
	}

	return CQ_OK;
}

static inline int mlx4_parse_lazy_cqe(struct mlx4_cq *cq,
				      struct mlx4_cqe *cqe)
				      ALWAYS_INLINE;
static inline int mlx4_parse_lazy_cqe(struct mlx4_cq *cq,
				      struct mlx4_cqe *cqe)
{
	return mlx4_parse_cqe(cq, cqe, &cq->cur_qp, NULL, 1);
}

static inline int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
			 ALWAYS_INLINE;
static inline int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
{
	struct mlx4_cqe *cqe;
	int err;

	err = mlx4_get_next_cqe(cq, &cqe);
	if (err == CQ_EMPTY)
		return err;

	return mlx4_parse_cqe(cq, cqe, cur_qp, wc, 0);
}

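/*
 * Standard polling entry point, reached through ibv_poll_cq().  It
 * drains up to @ne completions under the CQ lock and publishes the
 * new consumer index to hardware once, at the end.  A caller-side
 * sketch (hypothetical application code, not part of this library):
 *
 *	struct ibv_wc wc[16];
 *	int n = ibv_poll_cq(ibcq, 16, wc);
 *
 *	for (int i = 0; i < n; ++i)
 *		if (wc[i].status != IBV_WC_SUCCESS)
 *			handle_failed_wr(&wc[i]);	// app-defined
 */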
int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled || err == CQ_POLL_ERR)
		mlx4_update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}

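/*
 * Lazy polling, reached through the extended ibv_cq_ex interface:
 * start_poll pins the first CQE, next_poll steps to the next one,
 * and end_poll publishes the consumer index (the _lock variants
 * also take and release the CQ spinlock).  Field values are then
 * fetched on demand through the read_wc_* accessors further down.
 */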
static inline void _mlx4_end_poll(struct ibv_cq_ex *ibcq, int lock)
				  ALWAYS_INLINE;
static inline void _mlx4_end_poll(struct ibv_cq_ex *ibcq, int lock)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	mlx4_update_cons_index(cq);

	if (lock)
		pthread_spin_unlock(&cq->lock);
}

static inline int _mlx4_start_poll(struct ibv_cq_ex *ibcq,
				   struct ibv_poll_cq_attr *attr,
				   int lock)
				   ALWAYS_INLINE;
static inline int _mlx4_start_poll(struct ibv_cq_ex *ibcq,
				   struct ibv_poll_cq_attr *attr,
				   int lock)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
	struct mlx4_cqe *cqe;
	int err;

	if (unlikely(attr->comp_mask))
		return EINVAL;

	if (lock)
		pthread_spin_lock(&cq->lock);

	cq->cur_qp = NULL;

	err = mlx4_get_next_cqe(cq, &cqe);
	if (err == CQ_EMPTY) {
		if (lock)
			pthread_spin_unlock(&cq->lock);
		return ENOENT;
	}

	err = mlx4_parse_lazy_cqe(cq, cqe);
	if (lock && err)
		pthread_spin_unlock(&cq->lock);

	return err;
}

static int mlx4_next_poll(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
	struct mlx4_cqe *cqe;
	int err;

	err = mlx4_get_next_cqe(cq, &cqe);
	if (err == CQ_EMPTY)
		return ENOENT;

	return mlx4_parse_lazy_cqe(cq, cqe);
}

static void mlx4_end_poll(struct ibv_cq_ex *ibcq)
{
	_mlx4_end_poll(ibcq, 0);
}

static void mlx4_end_poll_lock(struct ibv_cq_ex *ibcq)
{
	_mlx4_end_poll(ibcq, 1);
}

static int mlx4_start_poll(struct ibv_cq_ex *ibcq,
		    struct ibv_poll_cq_attr *attr)
{
	return _mlx4_start_poll(ibcq, attr, 0);
}

static int mlx4_start_poll_lock(struct ibv_cq_ex *ibcq,
			 struct ibv_poll_cq_attr *attr)
{
	return _mlx4_start_poll(ibcq, attr, 1);
}

static enum ibv_wc_opcode mlx4_cq_read_wc_opcode(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	if (cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK) {
		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
		case MLX4_OPCODE_RDMA_WRITE:
			return IBV_WC_RDMA_WRITE;
		case MLX4_OPCODE_SEND_INVAL:
		case MLX4_OPCODE_SEND_IMM:
		case MLX4_OPCODE_SEND:
			return IBV_WC_SEND;
		case MLX4_OPCODE_RDMA_READ:
			return IBV_WC_RDMA_READ;
		case MLX4_OPCODE_ATOMIC_CS:
			return IBV_WC_COMP_SWAP;
		case MLX4_OPCODE_ATOMIC_FA:
			return IBV_WC_FETCH_ADD;
		case MLX4_OPCODE_LOCAL_INVAL:
			return IBV_WC_LOCAL_INV;
		case MLX4_OPCODE_BIND_MW:
			return IBV_WC_BIND_MW;
		}
	} else {
		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			return IBV_WC_RECV_RDMA_WITH_IMM;
		case MLX4_RECV_OPCODE_SEND_INVAL:
		case MLX4_RECV_OPCODE_SEND_IMM:
		case MLX4_RECV_OPCODE_SEND:
			return IBV_WC_RECV;
		}
	}

	return 0;
}

static uint32_t mlx4_cq_read_wc_qp_num(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return be32toh(cq->cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
}

static int mlx4_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
	int is_send  = cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	int wc_flags = 0;

	if (is_send) {
		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
		case MLX4_OPCODE_SEND_IMM:
			wc_flags |= IBV_WC_WITH_IMM;
			break;
		}
	} else {
		if (cq->flags & MLX4_CQ_FLAGS_RX_CSUM_VALID)
			wc_flags |= ((cq->cqe->status &
				htobe32(MLX4_CQE_STATUS_IPV4_CSUM_OK)) ==
				htobe32(MLX4_CQE_STATUS_IPV4_CSUM_OK)) <<
				IBV_WC_IP_CSUM_OK_SHIFT;

		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc_flags |= IBV_WC_WITH_IMM;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc_flags |= IBV_WC_WITH_INV;
			break;
		}
		wc_flags |= (be32toh(cq->cqe->g_mlpath_rqpn) & 0x80000000) ? IBV_WC_GRH : 0;
	}

	return wc_flags;
}

static uint32_t mlx4_cq_read_wc_byte_len(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return be32toh(cq->cqe->byte_cnt);
}

static uint32_t mlx4_cq_read_wc_vendor_err(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
	struct mlx4_err_cqe *ecqe = (struct mlx4_err_cqe *)cq->cqe;

	return ecqe->vendor_err;
}

static uint32_t mlx4_cq_read_wc_imm_data(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
	case MLX4_RECV_OPCODE_SEND_INVAL:
		return be32toh(cq->cqe->immed_rss_invalid);
	default:
		return cq->cqe->immed_rss_invalid;
	}
}

static uint32_t mlx4_cq_read_wc_slid(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return (uint32_t)be16toh(cq->cqe->rlid);
}

static uint8_t mlx4_cq_read_wc_sl(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	if ((cq->cur_qp) && (cq->cur_qp->link_layer == IBV_LINK_LAYER_ETHERNET))
		return be16toh(cq->cqe->sl_vid) >> 13;
	else
		return be16toh(cq->cqe->sl_vid) >> 12;
}

static uint32_t mlx4_cq_read_wc_src_qp(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return be32toh(cq->cqe->g_mlpath_rqpn) & 0xffffff;
}

static uint8_t mlx4_cq_read_wc_dlid_path_bits(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return (be32toh(cq->cqe->g_mlpath_rqpn) >> 24) & 0x7f;
}

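/*
 * The 48-bit completion timestamp is split across three CQE fields
 * (a big-endian 32-bit word for bits 47:16 plus two single bytes);
 * reassemble it into one 64-bit value.
 */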
static uint64_t mlx4_cq_read_wc_completion_ts(struct ibv_cq_ex *ibcq)
{
	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));

	return ((uint64_t)be32toh(cq->cqe->ts_47_16) << 16) |
			       (cq->cqe->ts_15_8   <<  8) |
			       (cq->cqe->ts_7_0);
}

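/*
 * Wire up the extended-CQ entry points: lock-free start/end variants
 * when the CQ was created single-threaded, and only those read_wc_*
 * accessors the caller requested through cq_attr->wc_flags.
 */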
void mlx4_cq_fill_pfns(struct mlx4_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr)
{
	if (cq->flags & MLX4_CQ_FLAGS_SINGLE_THREADED) {
		cq->ibv_cq.start_poll = mlx4_start_poll;
		cq->ibv_cq.end_poll = mlx4_end_poll;
	} else {
		cq->ibv_cq.start_poll = mlx4_start_poll_lock;
		cq->ibv_cq.end_poll = mlx4_end_poll_lock;
	}
	cq->ibv_cq.next_poll = mlx4_next_poll;

	cq->ibv_cq.read_opcode = mlx4_cq_read_wc_opcode;
	cq->ibv_cq.read_vendor_err = mlx4_cq_read_wc_vendor_err;
	cq->ibv_cq.read_wc_flags = mlx4_cq_read_wc_flags;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_BYTE_LEN)
		cq->ibv_cq.read_byte_len = mlx4_cq_read_wc_byte_len;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_IMM)
		cq->ibv_cq.read_imm_data = mlx4_cq_read_wc_imm_data;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_QP_NUM)
		cq->ibv_cq.read_qp_num = mlx4_cq_read_wc_qp_num;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_SRC_QP)
		cq->ibv_cq.read_src_qp = mlx4_cq_read_wc_src_qp;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_SLID)
		cq->ibv_cq.read_slid = mlx4_cq_read_wc_slid;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_SL)
		cq->ibv_cq.read_sl = mlx4_cq_read_wc_sl;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_DLID_PATH_BITS)
		cq->ibv_cq.read_dlid_path_bits = mlx4_cq_read_wc_dlid_path_bits;
	if (cq_attr->wc_flags & IBV_WC_EX_WITH_COMPLETION_TIMESTAMP)
		cq->ibv_cq.read_completion_ts = mlx4_cq_read_wc_completion_ts;
}

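/*
 * Arm the CQ for a completion event (ibv_req_notify_cq): update the
 * doorbell record in host memory with the current arm sequence
 * number and consumer index, then ring the CQ doorbell over MMIO.
 * "solicited" restricts the event to solicited completions only.
 */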
int mlx4_arm_cq(struct ibv_cq *ibvcq, int solicited)
{
	struct mlx4_cq *cq = to_mcq(ibvcq);
	uint32_t doorbell[2];
	uint32_t sn;
	uint32_t ci;
	uint32_t cmd;

	sn  = cq->arm_sn & 3;
	ci  = cq->cons_index & 0xffffff;
	cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = htobe32(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	udma_to_device_barrier();

	doorbell[0] = htobe32(sn << 28 | cmd | cq->cqn);
	doorbell[1] = htobe32(ci);

	mlx4_write64(doorbell, to_mctx(ibvcq->context), MLX4_CQ_DOORBELL);

	return 0;
}

void mlx4_cq_event(struct ibv_cq *cq)
{
	to_mcq(cq)->arm_sn++;
}

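/*
 * Remove all CQEs belonging to the given QPN, called with the CQ
 * lock held while the QP is being destroyed or reset.  Remaining
 * entries are compacted over the removed ones and any SRQ WQEs
 * they reference are released.
 */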
void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	uint32_t prod_index;
	uint8_t owner_bit;
	int nfreed = 0;
	int cqe_inc = cq->cqe_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
		cqe += cqe_inc;
		if (srq && srq->ext_srq &&
		    (be32toh(cqe->g_mlpath_rqpn) & MLX4_CQE_QPN_MASK) == srq->verbs_srq.srq_num &&
		    !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) {
			mlx4_free_srq_wqe(srq, be16toh(cqe->wqe_index));
			++nfreed;
		} else if ((be32toh(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_free_srq_wqe(srq, be16toh(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
			dest += cqe_inc;
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		udma_to_device_barrier();
		mlx4_update_cons_index(cq);
	}
}

void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	pthread_spin_lock(&cq->lock);
	__mlx4_cq_clean(cq, qpn, srq);
	pthread_spin_unlock(&cq->lock);
}

int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
{
	uint32_t i;

	for (i = cq->cons_index; get_sw_cqe(cq, i); ++i)
		;

	return i - cq->cons_index;
}

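/*
 * After a CQ resize, copy the CQEs that have not been consumed yet
 * from the old buffer into the new one, stopping at the special
 * RESIZE opcode CQE written by hardware, and recompute each entry's
 * ownership bit for the new ring size.
 */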
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
{
	struct mlx4_cqe *cqe;
	int i;
	int cqe_inc = cq->cqe_size == 64 ? 1 : 0;

	i = cq->cons_index;
	cqe = get_cqe(cq, (i & old_cqe));
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * cq->cqe_size,
		       cqe - cqe_inc, cq->cqe_size);
		++i;
		cqe = get_cqe(cq, (i & old_cqe));
		cqe += cqe_inc;
	}

	++cq->cons_index;
}

int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent,
		      int entry_size)
{
	if (mlx4_alloc_buf(buf, align(nent * entry_size, dev->page_size),
			   dev->page_size))
		return -1;
	memset(buf->buf, 0, nent * entry_size);

	return 0;
}