/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
		   u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
			 struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
	}
}

/**
 * irdma_nop_hdr - format header section of noop WQE
 * @qp: hw qp ptr
 */
static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp) {
	return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	/* make sure WQE is written before valid bit is set */
	irdma_wmb();

	set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}
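
/*
 * Worked example (illustrative only, not part of the driver): with a
 * 512-quanta SQ and qp_wqe_idx = 256, the chunk ahead starts at
 * (256 + 128) % 512 = 384, so the 128 WQEs (0x1000 bytes) at index 384
 * are pre-filled with the inverse of the current polarity. With
 * qp_wqe_idx = 384 the clear wraps to index 0, where the polarity will
 * have flipped by the time those slots are consumed, so the memset
 * value is inverted as well. Either way, a stale valid bit in an
 * unwritten WQE can never match the polarity the hardware expects.
 */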

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	irdma_mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			db_wr32(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
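
/*
 * Worked example (illustrative only): suppose initial_ring.head = 10
 * and new WRs advanced sw_sq_head to 14. If the shadow area reports
 * hw_sq_tail = 12, the hardware stalled inside the newly posted span
 * [10, 14) and the doorbell is rung; if hw_sq_tail = 9 the hardware is
 * still working through older WRs and will reach the new ones on its
 * own, and if hw_sq_tail = 14 it has already caught up, so no doorbell
 * is needed in either case. The second branch applies the same window
 * test when sw_sq_head has wrapped past the end of the ring.
 */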

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		  u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: ptr to size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
			   u16 *quanta, u32 total_size,
			   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta, wqe_quanta = *quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
	     qp->uk_attrs->max_hw_sq_chunk);

	if (*quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (*quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

	irdma_clr_wqes(qp, *wqe_idx);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}
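
/*
 * Worked example (illustrative only): with max_hw_sq_chunk = 8 and the
 * SQ head at index 6, a 4-quanta WR would straddle a chunk boundary, so
 * avail_quanta = 8 - (6 % 8) = 2 and two 1-quanta NOPs are posted
 * first; the WR then lands on index 8, aligned to the start of the
 * next chunk. A 2-quanta WR at the same head fits the current chunk
 * and needs no padding.
 */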

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		    bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		   bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	bool ord_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
		ord_fence = true;
		qp->ord_cnt = 0;
	}

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	addl_frag_cnt = op_info->num_lo_sges > 1 ?
	    (op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE,
		       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE,
		       info->read_fence || ord_fence ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
	      bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void
irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
			    struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
			     u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	u32 i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}
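
/*
 * Layout sketch (illustrative only): GEN_1 keeps the first 16 inline
 * bytes in quanta 0 (bytes 0..15), skips the 16-byte header region
 * (bytes 16..31), and continues at byte 32 with a full 32-byte quanta.
 * A 40-byte payload therefore occupies bytes 0..15 and 32..55. A
 * 2-quanta WQE holds at most 16 + 32 = 48 inline bytes, which matches
 * the GEN_1 thresholds in irdma_inline_data_size_to_quanta_gen_1() and
 * irdma_get_wqe_shift() below.
 */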

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta based on inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void
irdma_set_mw_bind_wqe(__le64 *wqe,
		      struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
		       u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	u32 i;
	bool first_quanta = true;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}
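
/*
 * Layout sketch (illustrative only): GEN_2 starts inline data at byte 8
 * of quanta 0 (8 bytes), skips the 16-byte header region, and then
 * packs 31 data bytes plus one trailing valid byte into each additional
 * 32-byte quanta. A WQE of q quanta therefore carries at most
 * 8 + 31 * (q - 1) inline bytes, which is exactly the set of thresholds
 * used by irdma_inline_data_size_to_quanta() below.
 */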

/**
 * irdma_inline_data_size_to_quanta - calculate quanta based on inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size) {
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
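
/*
 * The thresholds above follow directly from the GEN_2 inline layout: a
 * WQE of q quanta holds 8 + 31 * (q - 1) inline bytes, giving the
 * cut-offs 8, 39, 70, 101, 132, 163 and 194. For example, a 100-byte
 * inline payload needs 4 quanta (101-byte capacity), while 102 bytes
 * would spill into 5.
 */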

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			   struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	read_fence |= info->read_fence;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
		     struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info,
			       bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return -ENOSPC;

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
		      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOSPC;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				 enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

	irdma_wmb();		/* make sure shadow area is updated before doorbell is rung */

	db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
	u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

	if (*array_idx != exp_idx) {
		*array_idx = exp_idx;
		qp->last_rx_cmpl_idx = exp_idx;

		return -1;
	}

	qp->last_rx_cmpl_idx = *array_idx;

	return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 * @ring: sq/rq ring
 * @flush_seen: true if a flush was already seen for this ring
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
				enum irdma_cmpl_status comp_status,
				u32 *wqe_idx)
{
	if (flush_seen) {
		if (IRDMA_RING_MORE_WORK(ring))
			*wqe_idx = ring.tail;
		else
			return -ENOENT;
	}

	return 0;
}

/**
 * irdma_detect_unsignaled_cmpls - check if unsignaled cmpl is to be reported
 * @cq: hw cq
 * @qp: hw qp
 * @info: cq poll information collected
 * @wqe_idx: index of the WR in SQ ring
 */
static int
irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq,
			      struct irdma_qp_uk *qp,
			      struct irdma_cq_poll_info *info,
			      u32 wqe_idx)
{
	u64 qword0, qword1, qword2, qword3;
	__le64 *cqe, *wqe;
	int i;
	u32 widx;

	if (qp->sq_wrtrk_array[wqe_idx].signaled == 0) {
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
		irdma_pr_err("%p %d %d\n", cqe, cq->cq_ring.head, wqe_idx);
		for (i = -10; i <= 10; i++) {
			IRDMA_GET_CQ_ELEM_AT_OFFSET(cq, i + cq->cq_ring.size, cqe);
			get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
			get_64bit_val(cqe, IRDMA_BYTE_8, &qword1);
			get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
			get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
			widx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
			irdma_pr_err("%d %04x %p %016lx %016lx %016lx %016lx ",
				     i, widx, cqe, qword0, qword1, qword2, qword3);
			if ((u8)FIELD_GET(IRDMA_CQ_SQ, qword3)) {
				irdma_pr_err("%lx %x %x %x ",
					     qp->sq_wrtrk_array[widx].wrid, qp->sq_wrtrk_array[widx].wr_len,
					     qp->sq_wrtrk_array[widx].quanta, qp->sq_wrtrk_array[widx].signaled);
				wqe = qp->sq_base[widx].elem;
				get_64bit_val(wqe, IRDMA_BYTE_0, &qword0);
				get_64bit_val(wqe, IRDMA_BYTE_8, &qword1);
				get_64bit_val(wqe, IRDMA_BYTE_16, &qword2);
				get_64bit_val(wqe, IRDMA_BYTE_24, &qword3);

				irdma_pr_err("%016lx %016lx %016lx %016lx \n",
					     qword0, qword1, qword2, qword3);
			} else {
				irdma_pr_err("\n");
			}
		}
		return -ENOENT;
	}

	return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
		      struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the minor error to the standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
							   qp->rq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			info->signaled = 1;
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
			info->signaled = 1;
			if (irdma_check_rq_cqe(qp, &array_idx)) {
				info->wr_id = qp->rq_wrid_array[array_idx];
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
				IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
				return 0;
			}
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else {		/* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
							   qp->sq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			ret_code = irdma_detect_unsignaled_cmpls(cq, qp, info, wqe_idx);
			if (ret_code != 0)
				goto exit;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			unsigned long flags;

			spin_lock_irqsave(qp->lock, flags);
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				spin_unlock_irqrestore(qp->lock, flags);
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, IRDMA_BYTE_24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->signaled = qp->sq_wrtrk_array[tail].signaled;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);

			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
			spin_unlock_irqrestore(qp->lock, flags);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
	}

	return ret_code;
}

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
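
/*
 * Worked example (illustrative only): this is the classic
 * round-up-to-power-of-two bit smear. For wqdepth = 100, the value is
 * first decremented to 99 (0b1100011), then OR-ed with its own right
 * shifts by 1, 2, 4, 8 and 16 until every bit below the top bit is set
 * (0b1111111 = 127), and finally incremented to 128. Values that are
 * already a power of two come back unchanged (128 -> 127 -> 128).
 */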

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift is used to left-shift the base WQE size based on the number of
 * SGEs and the inline data size. On GEN_2 and later hardware: for 1 SGE
 * or inline data <= 8, shift = 0 (32-byte WQE); for 2-3 SGEs or inline
 * data <= 39, shift = 1 (64-byte WQE); for 4-7 SGEs or inline data
 * <= 101, shift = 2 (128-byte WQE); otherwise shift = 3 (256-byte WQE).
 * GEN_1 uses shift 0, 1 or 2 with inline data thresholds of 16 and 48
 * bytes.
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
		    u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}
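
/*
 * Example (illustrative only): on GEN_2 hardware, a QP created with
 * sge = 3 and no inline data gets shift 1 (64-byte WQEs), while
 * sge = 2 with inline_data = 64 gets shift 2 (128-byte WQEs) because
 * the larger of the two requirements decides the shift.
 */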

/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*sqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}
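
/*
 * Example (illustrative only; IRDMA_SQ_RSVD is assumed here to account
 * for reserved SQ quanta): a request for sq_size = 100 at shift 1
 * becomes *sqdepth = irdma_round_up_wq(200 + IRDMA_SQ_RSVD), which is
 * then raised to min_hw_wq_size << shift if it falls below that floor,
 * or rejected with -EINVAL if it exceeds max_hw_wq_quanta.
 */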

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*rqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
			    struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
		       u8 *rq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *sq_depth, u8 *sq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);
	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
				   *sq_shift, sq_depth);

	return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}
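
/*
 * Typical usage sketch (illustrative only; error handling omitted, and
 * any buffer-allocation step is the caller's responsibility): callers
 * derive the depth/shift pair for each queue first, size their buffers
 * from the results, and only then initialize the shared QP:
 *
 *	u32 sq_depth, rq_depth;
 *	u8 sq_shift, rq_shift;
 *
 *	irdma_uk_calc_depth_shift_sq(&ukinfo, &sq_depth, &sq_shift);
 *	irdma_uk_calc_depth_shift_rq(&ukinfo, &rq_depth, &rq_shift);
 *	ukinfo.sq_shift = sq_shift;
 *	ukinfo.rq_shift = rq_shift;
 *	(allocate sq_depth/rq_depth quanta plus the shadow area and set
 *	 ukinfo.sq, ukinfo.rq, ukinfo.shadow_area, ...)
 *	irdma_uk_qp_init(&qp, &ukinfo);
 */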

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode. The size of
 * the WQE depends on the maximum number of fragments allowed, and the
 * WQE size times the number of WQEs should equal the amount of memory
 * allocated for the sq and rq.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->last_rx_cmpl_idx = 0xffffffff;
	qp->rd_fence_rate = info->rd_fence_rate;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
		if ((void *)(irdma_uintptr) comp_ctx == q)
			set_64bit_val(cqe, IRDMA_BYTE_8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
	return 0;
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int
irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info.push_wqe = qp->push_db ? true : false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info.push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int
irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15:		/* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
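
/*
 * The table above encodes quanta = 1 + DIV_ROUND_UP(frag_cnt - 1, 2)
 * for frag_cnt >= 1: quanta 0 holds one fragment beside the header and
 * remote-address words, and every additional 32-byte quanta holds two
 * 16-byte fragments. frag_cnt = 15 (e.g. 14 SGEs plus one slot consumed
 * by immediate data) is the largest count that fits the 8-quanta
 * maximum.
 */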

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
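
/*
 * For the RQ the quanta count is rounded up to a power of two before
 * being turned into a byte size, because RQ WQEs are addressed by shift
 * (rq_wqe_size_multiplier) rather than tracked per-WR: 4 SGEs need only
 * 3 quanta of space but are placed in a 128-byte (4-quanta) slot, and
 * 8-14 SGEs all occupy a 256-byte slot.
 */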
1901