/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
		   u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
			 struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
	}
}

/**
 * irdma_nop_hdr - Format header section of noop WQE
 * @qp: hw qp ptr
 */
static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp) {
	return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!qp->sq_ring.head)
		return EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	/* make sure WQE is written before valid bit is set */
	udma_to_device_barrier();

	set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

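	/*
	 * On every 128-WQE boundary, fill the next 4KB chunk of the SQ
	 * with the inactive polarity pattern so that stale entries are
	 * never mistaken for valid WQEs once the ring wraps.
	 */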
	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	atomic_thread_fence(memory_order_seq_cst);

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
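	/*
	 * Ring the doorbell only if new WQEs were posted since the last
	 * doorbell and the HW tail has not already advanced past them,
	 * accounting for SQ ring wrap-around.
	 */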
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			db_wr32(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

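/**
 * irdma_qp_push_wqe - write wqe to the push page or post it normally
 * @qp: hw qp ptr
 * @wqe: wqe to push
 * @quanta: size of wqe in quanta
 * @wqe_idx: wqe index in the sq
 * @post_sq: ring the doorbell when the wqe is not pushed
 */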
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		  u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *) ((uintptr_t)qp->push_wqe +
				   (wqe_idx & 0x7) * 0x20);
		irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: (in/out) ptr to size of WR in quanta. Modified in case pad is needed
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
			   u16 *quanta, u32 total_size,
			   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta, wqe_quanta = *quanta;
	u16 i;

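	/*
	 * A WQE must not cross a max_hw_sq_chunk boundary: compute the
	 * quanta left in the current chunk and pad with NOPs when the WR
	 * does not fit.
	 */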
	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
	     qp->uk_attrs->max_hw_sq_chunk);

	if (*quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (*quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

	irdma_clr_wqes(qp, *wqe_idx);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID, !qp->swqe_polarity));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		    bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if frag_cnt is even, set the valid bit in the trailing unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		   bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	bool ord_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
		ord_fence = true;
		qp->ord_cnt = 0;
	}

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	addl_frag_cnt = op_info->num_lo_sges > 1 ?
	    (op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if num_lo_sges is even, set the valid bit in the trailing unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE,
		       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE,
		       info->read_fence || ord_fence ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
	      bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if frag_cnt is even, set the valid bit in the trailing unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void
irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
			    struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter (unused for GEN_1)
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
			     u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	u32 i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after the hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void
irdma_set_mw_bind_wqe(__le64 *wqe,
		      struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, IRDMA_BYTE_8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
		       u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	u32 i;
	bool first_quanta = true;

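	/*
	 * Quantum 0 carries 8 inline bytes at offsets 8-15; the qwords at
	 * offsets 16-31 (address and header) are skipped. Each later
	 * quantum holds 31 data bytes, its final byte reserved for the
	 * inline valid bit.
	 */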
	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
		u32 sge_len = sge_list[i].len;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after the hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size) {
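	/*
	 * 8 bytes fit in the first quantum; every additional quantum adds
	 * 31 bytes (32 minus the trailing valid byte), which yields the
	 * 39/70/101/132/163/194 breakpoints below.
	 */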
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			   struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	read_fence |= info->read_fence;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
		     struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (unlikely(total_size > qp->max_inline_data))
		return EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info,
			       bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return ENOSPC;

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		 bool post_sq)
{
	__le64 *wqe;
	struct irdma_bind_window *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence;
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.bind_window;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return ENOSPC;

	qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
	    FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
		       ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
	    FIELD_PREP(IRDMAQPSQ_VABASEDTO,
		       (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
		       (op_info->mem_window_type_1 ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
		      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return ENOSPC;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if num_sges is even, set the valid bit in the trailing unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				 enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

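	/*
	 * Update the arm fields in the CQ doorbell shadow area: bump the
	 * arm sequence number, always arm for solicited events, and arm
	 * for the next completion event when requested.
	 */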
	cq->armed = true;
	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

	udma_to_device_barrier();	/* make sure shadow area is updated before the doorbell is rung */

	db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

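/**
 * irdma_copy_quanta - copy a 32-byte quantum between wqes
 * @dst: destination wqe
 * @src: source wqe
 * @offset: byte offset of the quantum
 * @flip: flip the valid bits during the copy
 * @barrier: order writes before the qword carrying the valid bit
 */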
static void
irdma_copy_quanta(__le64 *dst, __le64 *src, u32 offset, bool flip,
		  bool barrier)
{
	__le64 val;

	get_64bit_val(src, offset, &val);
	set_64bit_val(dst, offset, val);

	get_64bit_val(src, offset + 8, &val);
	if (flip)
		val ^= IRDMAQPSQ_VALID;
	set_64bit_val(dst, offset + 8, val);

	get_64bit_val(src, offset + 24, &val);
	if (flip)
		val ^= IRDMAQPSQ_VALID;
	if (barrier)
		udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */
	set_64bit_val(dst, offset + 24, val);
}

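/**
 * irdma_copy_wqe - copy a multi-quanta wqe, trailing quanta first
 * @dst: destination wqe
 * @src: source wqe
 * @wqe_quanta: number of quanta in the wqe
 * @flip_polarity: flip the valid bits during the copy
 *
 * The first quantum is copied last, behind a barrier, so the wqe only
 * becomes valid once it is fully written.
 */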
static void
irdma_copy_wqe(__le64 *dst, __le64 *src, u8 wqe_quanta,
	       bool flip_polarity)
{
	u32 offset;

	offset = 32;
	while (--wqe_quanta) {
		irdma_copy_quanta(dst, src, offset, flip_polarity, false);
		offset += 32;
	}

	irdma_copy_quanta(dst, src, 0, flip_polarity, true);
}

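/**
 * irdma_repost_rq_wqes - repost rq wqes completed out of order
 * @qp: hw qp ptr
 * @start_idx: first wqe index to repost
 * @end_idx: index one past the last wqe to repost
 */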
static void
irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
		     u32 end_idx)
{
	__le64 *dst_wqe, *src_wqe;
	u32 wqe_idx;
	u8 wqe_quanta = qp->rq_wqe_size_multiplier;
	bool flip_polarity;
	u64 val;

	libirdma_debug("reposting_wqes: from start_idx=%d to end_idx=%d\n", start_idx, end_idx);
	if (pthread_spin_lock(qp->lock))
		return;
	while (start_idx != end_idx) {
		IRDMA_RING_SET_TAIL(qp->rq_ring, start_idx + 1);
		src_wqe = qp->rq_base[start_idx * qp->rq_wqe_size_multiplier].elem;
		dst_wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);

		/* Check to see if polarity has changed */
		get_64bit_val(src_wqe, 24, &val);
		if (FIELD_GET(IRDMAQPSQ_VALID, val) != qp->rwqe_polarity)
			flip_polarity = true;
		else
			flip_polarity = false;

		qp->rq_wrid_array[wqe_idx] = qp->rq_wrid_array[start_idx];
		irdma_copy_wqe(dst_wqe, src_wqe, wqe_quanta, flip_polarity);

		start_idx = (start_idx + 1) % qp->rq_size;
	}

	pthread_spin_unlock(qp->lock);
}

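/**
 * irdma_check_rq_cqe - detect rq completions arriving out of order
 * @qp: hw qp ptr
 * @array_idx: completed wrid array index
 *
 * Returns 0 when the completion is in order (or was repaired by
 * reposting the skipped wqes), -1 when the index had to be forced
 * back to the expected value.
 */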
static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
	u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

	if (*array_idx != exp_idx) {
		if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RELAX_RQ_ORDER) {
			irdma_repost_rq_wqes(qp, exp_idx, *array_idx);
			qp->last_rx_cmpl_idx = *array_idx;

			return 0;
		}

		*array_idx = exp_idx;
		qp->last_rx_cmpl_idx = exp_idx;

		return -1;
	}

	qp->last_rx_cmpl_idx = *array_idx;

	return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 *
 * @ring: sq/rq ring
 * @flush_seen: information if flush for specific ring was already seen
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
				enum irdma_cmpl_status comp_status,
				u32 *wqe_idx)
{
	if (flush_seen) {
		if (IRDMA_RING_MORE_WORK(ring))
			*wqe_idx = ring.tail;
		else
			return ENOENT;
	}

	return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
		      struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	udma_from_device_barrier();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *) ((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		udma_from_device_barrier();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
							   qp->rq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			info->signaled = 1;
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
			info->signaled = 1;
			if (irdma_check_rq_cqe(qp, &array_idx)) {
				info->wr_id = qp->rq_wrid_array[array_idx];
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
				IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
				return 0;
			}
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else {		/* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
							   qp->sq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (pthread_spin_lock(qp->lock)) {
				ret_code = ENOENT;
				goto exit;
			}
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				pthread_spin_unlock(qp->lock);
				ret_code = ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, IRDMA_BYTE_24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->signaled = qp->sq_wrtrk_array[tail].signaled;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);

			if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
			pthread_spin_unlock(qp->lock);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
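	/*
	 * For flushed completions with work still pending on the ring, keep
	 * the CQ head in place so the CQE is revisited on the next poll;
	 * otherwise advance past the CQE (two slots for an extended CQE)
	 * and publish the new head to the shadow area.
	 */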
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
	}

	return ret_code;
}

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

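	/*
	 * Smear the highest set bit into every lower position, then add
	 * one: this rounds wqdepth up to the next power of two.
	 */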
	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift is used to left-shift the wqe size based on the number of SGEs and
 * inline data size. For GEN_2: 1 SGE and inline data <= 8 gives shift = 0
 * (wqe size of 32 bytes); up to 3 SGEs and inline data <= 39 gives shift = 1
 * (wqe size of 64 bytes); up to 7 SGEs and inline data <= 101 gives
 * shift = 2 (wqe size of 128 bytes); otherwise shift = 3 (wqe size of
 * 256 bytes).
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
		    u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*sqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return EINVAL;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*rqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
			    struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
		       u8 *rq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *sq_depth, u8 *sq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);
	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
				   *sq_shift, sq_depth);

	return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode. The size of the
 * WQE depends on the number of max fragments allowed. The size of the WQE
 * times the number of WQEs should equal the amount of memory allocated for
 * the SQ and RQ.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->last_rx_cmpl_idx = 0xffffffff;
	qp->rd_fence_rate = info->rd_fence_rate;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
		if ((void *)(irdma_uintptr) comp_ctx == q)
			set_64bit_val(cqe, IRDMA_BYTE_8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
	return 0;
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int
irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info.push_wqe = qp->push_db ? true : false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, &info);
	if (!wqe)
		return ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	udma_to_device_barrier();	/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info.push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int
irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
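	/*
	 * The first quantum holds the header plus one fragment; each
	 * additional quantum holds two more fragments.
	 */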
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15:		/* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return EINVAL;
	}

	return 0;
}
1977